xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/dp_main.c (revision 503663c6daafffe652fa360bde17243568cd6d2a)
1 /*
2  * Copyright (c) 2016-2019 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include <qdf_types.h>
20 #include <qdf_lock.h>
21 #include <qdf_net_types.h>
22 #include <qdf_lro.h>
23 #include <qdf_module.h>
24 #include <hal_hw_headers.h>
25 #include <hal_api.h>
26 #include <hif.h>
27 #include <htt.h>
28 #include <wdi_event.h>
29 #include <queue.h>
30 #include "dp_types.h"
31 #include "dp_internal.h"
32 #include "dp_tx.h"
33 #include "dp_tx_desc.h"
34 #include "dp_rx.h"
35 #include "dp_rx_mon.h"
36 #ifdef DP_RATETABLE_SUPPORT
37 #include "dp_ratetable.h"
38 #endif
39 #include <cdp_txrx_handle.h>
40 #include <wlan_cfg.h>
41 #include "cdp_txrx_cmn_struct.h"
42 #include "cdp_txrx_stats_struct.h"
43 #include "cdp_txrx_cmn_reg.h"
44 #include <qdf_util.h>
45 #include "dp_peer.h"
46 #include "dp_rx_mon.h"
47 #include "htt_stats.h"
48 #include "dp_htt.h"
49 #include "htt_ppdu_stats.h"
50 #include "qdf_mem.h"   /* qdf_mem_malloc,free */
51 #include "cfg_ucfg_api.h"
52 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
53 #include "cdp_txrx_flow_ctrl_v2.h"
54 #else
55 static inline void
56 cdp_dump_flow_pool_info(struct cdp_soc_t *soc)
57 {
58 	return;
59 }
60 #endif
61 #include "dp_ipa.h"
62 #include "dp_cal_client_api.h"
63 #ifdef FEATURE_WDS
64 #include "dp_txrx_wds.h"
65 #endif
66 #ifdef ATH_SUPPORT_IQUE
67 #include "dp_txrx_me.h"
68 #endif
69 #if defined(DP_CON_MON)
70 #ifndef REMOVE_PKT_LOG
71 #include <pktlog_ac_api.h>
72 #include <pktlog_ac.h>
73 #endif
74 #endif
75 
76 #ifdef WLAN_FEATURE_DP_EVENT_HISTORY
77 /*
78  * If WLAN_CFG_INT_NUM_CONTEXTS is changed, HIF_NUM_INT_CONTEXTS
79  * should also be updated accordingly
80  */
81 QDF_COMPILE_TIME_ASSERT(num_intr_grps,
82 			HIF_NUM_INT_CONTEXTS == WLAN_CFG_INT_NUM_CONTEXTS);
83 
84 /*
85  * HIF_EVENT_HIST_MAX should always be a power of 2
86  */
87 QDF_COMPILE_TIME_ASSERT(hif_event_history_size,
88 			(HIF_EVENT_HIST_MAX & (HIF_EVENT_HIST_MAX - 1)) == 0);
89 #endif /* WLAN_FEATURE_DP_EVENT_HISTORY */
90 
91 /*
92  * If WLAN_CFG_INT_NUM_CONTEXTS is changed,
93  * WLAN_CFG_INT_NUM_CONTEXTS_MAX should also be updated
94  */
95 QDF_COMPILE_TIME_ASSERT(wlan_cfg_num_int_ctxs,
96 			WLAN_CFG_INT_NUM_CONTEXTS_MAX >=
97 			WLAN_CFG_INT_NUM_CONTEXTS);
98 
99 #ifdef WLAN_RX_PKT_CAPTURE_ENH
100 #include "dp_rx_mon_feature.h"
101 #else
102 /*
103  * dp_config_enh_rx_capture() - API to enable/disable enhanced rx capture
104  * @pdev_handle: DP_PDEV handle
105  * @val: user provided value
106  *
107  * Return: QDF_STATUS
108  */
109 static QDF_STATUS
110 dp_config_enh_rx_capture(struct cdp_pdev *pdev_handle, uint8_t val)
111 {
112 	return QDF_STATUS_E_INVAL;
113 }
114 #endif /* WLAN_RX_PKT_CAPTURE_ENH */
115 
116 #ifdef WLAN_TX_PKT_CAPTURE_ENH
117 #include "dp_tx_capture.h"
118 #else
119 /*
120  * dp_config_enh_tx_capture() - API to enable/disable enhanced tx capture
121  * @pdev_handle: DP_PDEV handle
122  * @val: user provided value
123  *
124  * Return: QDF_STATUS
125  */
126 static QDF_STATUS
127 dp_config_enh_tx_capture(struct cdp_pdev *pdev_handle, uint8_t val)
128 {
129 	return QDF_STATUS_E_INVAL;
130 }
131 #endif
132 
133 void *dp_soc_init(struct dp_soc *soc, HTC_HANDLE htc_handle,
134 		  struct hif_opaque_softc *hif_handle);
135 static void dp_pdev_detach(struct cdp_pdev *txrx_pdev, int force);
136 static struct dp_soc *
137 dp_soc_attach(struct cdp_ctrl_objmgr_psoc *ctrl_psoc, HTC_HANDLE htc_handle,
138 	      qdf_device_t qdf_osdev,
139 	      struct ol_if_ops *ol_ops, uint16_t device_id);
140 static void dp_pktlogmod_exit(struct dp_pdev *handle);
141 static inline void *dp_peer_create_wifi3(struct cdp_soc_t *soc_hdl,
142 					 uint8_t vdev_id,
143 					 uint8_t *peer_mac_addr);
144 static QDF_STATUS dp_peer_delete_wifi3(struct cdp_soc_t *soc, uint8_t vdev_id,
145 				       uint8_t *peer_mac, uint32_t bitmap);
146 static void dp_ppdu_ring_reset(struct dp_pdev *pdev);
147 static void dp_ppdu_ring_cfg(struct dp_pdev *pdev);
148 static void dp_vdev_flush_peers(struct cdp_vdev *vdev_handle,
149 				bool unmap_only);
150 #ifdef ENABLE_VERBOSE_DEBUG
151 bool is_dp_verbose_debug_enabled;
152 #endif
153 
154 static uint8_t dp_soc_ring_if_nss_offloaded(struct dp_soc *soc,
155 					    enum hal_ring_type ring_type,
156 					    int ring_num);
157 #define DP_INTR_POLL_TIMER_MS	10
158 /* Generic AST entry aging timer value */
159 #define DP_AST_AGING_TIMER_DEFAULT_MS	1000
160 #define DP_MCS_LENGTH (6*MAX_MCS)
161 
162 #define DP_CURR_FW_STATS_AVAIL 19
163 #define DP_HTT_DBG_EXT_STATS_MAX 256
164 #define DP_MAX_SLEEP_TIME 100
165 #ifndef QCA_WIFI_3_0_EMU
166 #define SUSPEND_DRAIN_WAIT 500
167 #else
168 #define SUSPEND_DRAIN_WAIT 3000
169 #endif
170 
171 #ifdef IPA_OFFLOAD
172 /* Exclude IPA rings from the interrupt context */
173 #define TX_RING_MASK_VAL	0xb
174 #define RX_RING_MASK_VAL	0x7
175 #else
176 #define TX_RING_MASK_VAL	0xF
177 #define RX_RING_MASK_VAL	0xF
178 #endif
179 
180 #define STR_MAXLEN	64
181 
182 #define RNG_ERR		"SRNG setup failed for"
183 
184 /* Threshold for peer's cached buf queue beyond which frames are dropped */
185 #define DP_RX_CACHED_BUFQ_THRESH 64
186 
187 /**
188  * default_dscp_tid_map - Default DSCP-TID mapping
189  *
190  * DSCP        TID
191  * 000000      0
192  * 001000      1
193  * 010000      2
194  * 011000      3
195  * 100000      4
196  * 101000      5
197  * 110000      6
198  * 111000      7
199  */
200 static uint8_t default_dscp_tid_map[DSCP_TID_MAP_MAX] = {
201 	0, 0, 0, 0, 0, 0, 0, 0,
202 	1, 1, 1, 1, 1, 1, 1, 1,
203 	2, 2, 2, 2, 2, 2, 2, 2,
204 	3, 3, 3, 3, 3, 3, 3, 3,
205 	4, 4, 4, 4, 4, 4, 4, 4,
206 	5, 5, 5, 5, 5, 5, 5, 5,
207 	6, 6, 6, 6, 6, 6, 6, 6,
208 	7, 7, 7, 7, 7, 7, 7, 7,
209 };
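
/*
 * Illustrative lookup: a packet marked DSCP 46 (Expedited Forwarding,
 * binary 101110) indexes entry 46 of the table above, which lies in the
 * 40-47 row and therefore maps to TID 5.
 */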
210 
211 /**
212  * default_pcp_tid_map - Default PCP-TID mapping
213  *
214  * PCP     TID
215  * 000      0
216  * 001      1
217  * 010      2
218  * 011      3
219  * 100      4
220  * 101      5
221  * 110      6
222  * 111      7
223  */
224 static uint8_t default_pcp_tid_map[PCP_TID_MAP_MAX] = {
225 	0, 1, 2, 3, 4, 5, 6, 7,
226 };
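
/*
 * Example: PCP is the 3-bit 802.1Q VLAN priority field, so under this
 * identity mapping a frame tagged with PCP 6 is queued on TID 6.
 */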
227 
228 /**
229  * @brief Cpu to tx ring map
230  */
231 uint8_t
232 dp_cpu_ring_map[DP_NSS_CPU_RING_MAP_MAX][WLAN_CFG_INT_NUM_CONTEXTS_MAX] = {
233 	{0x0, 0x1, 0x2, 0x0, 0x0, 0x1, 0x2, 0x0, 0x0, 0x1, 0x2},
234 	{0x1, 0x2, 0x1, 0x2, 0x1, 0x2, 0x1, 0x2, 0x1, 0x2, 0x1},
235 	{0x0, 0x2, 0x0, 0x2, 0x0, 0x2, 0x0, 0x2, 0x0, 0x2, 0x0},
236 	{0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2},
237 	{0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3},
238 #ifdef WLAN_TX_PKT_CAPTURE_ENH
239 	{0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1}
240 #endif
241 };
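
/*
 * Note: each row above appears to correspond to one NSS offload
 * configuration (the array's first dimension, DP_NSS_CPU_RING_MAP_MAX,
 * plus the extra row compiled in for WLAN_TX_PKT_CAPTURE_ENH), and each
 * column gives the Tx ring a given CPU/interrupt context should use.
 */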
242 
243 /**
244  * @brief Select the type of statistics
245  */
246 enum dp_stats_type {
247 	STATS_FW = 0,
248 	STATS_HOST = 1,
249 	STATS_TYPE_MAX = 2,
250 };
251 
252 /**
253  * @brief General Firmware statistics options
254  *
255  */
256 enum dp_fw_stats {
257 	TXRX_FW_STATS_INVALID	= -1,
258 };
259 
260 /**
261  * dp_stats_mapping_table - Firmware and Host statistics
262  * currently supported
263  */
264 const int dp_stats_mapping_table[][STATS_TYPE_MAX] = {
265 	{HTT_DBG_EXT_STATS_RESET, TXRX_HOST_STATS_INVALID},
266 	{HTT_DBG_EXT_STATS_PDEV_TX, TXRX_HOST_STATS_INVALID},
267 	{HTT_DBG_EXT_STATS_PDEV_RX, TXRX_HOST_STATS_INVALID},
268 	{HTT_DBG_EXT_STATS_PDEV_TX_HWQ, TXRX_HOST_STATS_INVALID},
269 	{HTT_DBG_EXT_STATS_PDEV_TX_SCHED, TXRX_HOST_STATS_INVALID},
270 	{HTT_DBG_EXT_STATS_PDEV_ERROR, TXRX_HOST_STATS_INVALID},
271 	{HTT_DBG_EXT_STATS_PDEV_TQM, TXRX_HOST_STATS_INVALID},
272 	{HTT_DBG_EXT_STATS_TQM_CMDQ, TXRX_HOST_STATS_INVALID},
273 	{HTT_DBG_EXT_STATS_TX_DE_INFO, TXRX_HOST_STATS_INVALID},
274 	{HTT_DBG_EXT_STATS_PDEV_TX_RATE, TXRX_HOST_STATS_INVALID},
275 	{HTT_DBG_EXT_STATS_PDEV_RX_RATE, TXRX_HOST_STATS_INVALID},
276 	{TXRX_FW_STATS_INVALID, TXRX_HOST_STATS_INVALID},
277 	{HTT_DBG_EXT_STATS_TX_SELFGEN_INFO, TXRX_HOST_STATS_INVALID},
278 	{HTT_DBG_EXT_STATS_TX_MU_HWQ, TXRX_HOST_STATS_INVALID},
279 	{HTT_DBG_EXT_STATS_RING_IF_INFO, TXRX_HOST_STATS_INVALID},
280 	{HTT_DBG_EXT_STATS_SRNG_INFO, TXRX_HOST_STATS_INVALID},
281 	{HTT_DBG_EXT_STATS_SFM_INFO, TXRX_HOST_STATS_INVALID},
282 	{HTT_DBG_EXT_STATS_PDEV_TX_MU, TXRX_HOST_STATS_INVALID},
283 	{HTT_DBG_EXT_STATS_ACTIVE_PEERS_LIST, TXRX_HOST_STATS_INVALID},
284 	/* Last ENUM for HTT FW STATS */
285 	{DP_HTT_DBG_EXT_STATS_MAX, TXRX_HOST_STATS_INVALID},
286 	{TXRX_FW_STATS_INVALID, TXRX_CLEAR_STATS},
287 	{TXRX_FW_STATS_INVALID, TXRX_RX_RATE_STATS},
288 	{TXRX_FW_STATS_INVALID, TXRX_TX_RATE_STATS},
289 	{TXRX_FW_STATS_INVALID, TXRX_TX_HOST_STATS},
290 	{TXRX_FW_STATS_INVALID, TXRX_RX_HOST_STATS},
291 	{TXRX_FW_STATS_INVALID, TXRX_AST_STATS},
292 	{TXRX_FW_STATS_INVALID, TXRX_SRNG_PTR_STATS},
293 	{TXRX_FW_STATS_INVALID, TXRX_RX_MON_STATS},
294 	{TXRX_FW_STATS_INVALID, TXRX_REO_QUEUE_STATS},
295 	{TXRX_FW_STATS_INVALID, TXRX_SOC_CFG_PARAMS},
296 	{TXRX_FW_STATS_INVALID, TXRX_PDEV_CFG_PARAMS},
297 	{TXRX_FW_STATS_INVALID, TXRX_SOC_INTERRUPT_STATS},
298 };
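
/*
 * Each row above pairs a firmware (HTT) stats id with the corresponding
 * host stats id for one stats request; TXRX_FW_STATS_INVALID or
 * TXRX_HOST_STATS_INVALID marks the side that has no counterpart.
 */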
299 
300 /* MCL specific functions */
301 #if defined(DP_CON_MON)
302 /**
303  * dp_soc_get_mon_mask_for_interrupt_mode() - get mon mode mask for intr mode
304  * @soc: pointer to dp_soc handle
305  * @intr_ctx_num: interrupt context number for which mon mask is needed
306  *
307  * For MCL, monitor mode rings are being processed in timer contexts (polled).
308  * This function returns 0, since in interrupt mode (softirq based RX),
309  * we do not want to process monitor mode rings in a softirq.
310  *
311  * So, in case packet log is enabled for SAP/STA/P2P modes, regular
312  * interrupt processing will not process monitor mode rings; they are
313  * processed in a separate timer context instead.
314  *
315  * Return: 0
316  */
317 static inline
318 uint32_t dp_soc_get_mon_mask_for_interrupt_mode(struct dp_soc *soc, int intr_ctx_num)
319 {
320 	return 0;
321 }
322 
323 /*
324  * dp_service_mon_rings() - timer handler to reap monitor rings,
325  * required as we are not getting ppdu end interrupts
326  * @arg: SoC Handle
327  *
328  * Return: none
330  */
331 static void dp_service_mon_rings(void *arg)
332 {
333 	struct dp_soc *soc = (struct dp_soc *)arg;
334 	int ring = 0, work_done, mac_id;
335 	struct dp_pdev *pdev = NULL;
336 
337 	for (ring = 0; ring < MAX_PDEV_CNT; ring++) {
338 		pdev = soc->pdev_list[ring];
339 		if (!pdev)
340 			continue;
341 		for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
342 			int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id,
343 								pdev->pdev_id);
344 			work_done = dp_mon_process(soc, mac_for_pdev,
345 						   QCA_NAPI_BUDGET);
346 
347 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
348 				  FL("Reaped %d descs from Monitor rings"),
349 				  work_done);
350 		}
351 	}
352 
353 	qdf_timer_mod(&soc->mon_reap_timer, DP_INTR_POLL_TIMER_MS);
354 }
355 
356 #ifndef REMOVE_PKT_LOG
357 /**
358  * dp_pkt_log_init() - API to initialize packet log
359  * @soc_hdl: Datapath soc handle
360  * @pdev_id: id of data path pdev handle
361  * @scn: HIF context
362  *
363  * Return: none
364  */
365 void dp_pkt_log_init(struct cdp_soc_t *soc_hdl, uint8_t pdev_id, void *scn)
366 {
367 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
368 	struct dp_pdev *handle =
369 		dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
370 
371 	if (!handle) {
372 		dp_err("pdev handle is NULL");
373 		return;
374 	}
375 
376 	if (handle->pkt_log_init) {
377 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
378 			  "%s: Packet log already initialized", __func__);
379 		return;
380 	}
381 
382 	pktlog_sethandle(&handle->pl_dev, scn);
383 	pktlog_set_callback_regtype(PKTLOG_DEFAULT_CALLBACK_REGISTRATION);
384 
385 	if (pktlogmod_init(scn)) {
386 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
387 			  "%s: pktlogmod_init failed", __func__);
388 		handle->pkt_log_init = false;
389 	} else {
390 		handle->pkt_log_init = true;
391 	}
392 }
393 
394 /**
395  * dp_pkt_log_con_service() - connect packet log service
396  * @soc_hdl: Datapath soc handle
397  * @pdev_id: id of data path pdev handle
398  * @scn: device context
399  *
400  * Return: none
401  */
402 static void dp_pkt_log_con_service(struct cdp_soc_t *soc_hdl,
403 				   uint8_t pdev_id, void *scn)
404 {
405 	dp_pkt_log_init(soc_hdl, pdev_id, scn);
406 	pktlog_htc_attach();
407 }
408 
409 /**
410  * dp_get_num_rx_contexts() - get number of RX contexts
411  * @soc_hdl: cdp opaque soc handle
412  *
413  * Return: number of RX contexts
414  */
415 static int dp_get_num_rx_contexts(struct cdp_soc_t *soc_hdl)
416 {
417 	int i;
418 	int num_rx_contexts = 0;
419 
420 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
421 
422 	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++)
423 		if (wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, i))
424 			num_rx_contexts++;
425 
426 	return num_rx_contexts;
427 }
428 
429 /**
430  * dp_pktlogmod_exit() - API to cleanup pktlog info
431  * @pdev: Pdev handle
432  *
433  * Return: none
434  */
435 static void dp_pktlogmod_exit(struct dp_pdev *pdev)
436 {
437 	struct dp_soc *soc = pdev->soc;
438 	struct hif_opaque_softc *scn = soc->hif_handle;
439 
440 	if (!scn) {
441 		dp_err("Invalid hif(scn) handle");
442 		return;
443 	}
444 
445 	/* stop mon_reap_timer if it has been started */
446 	if (pdev->rx_pktlog_mode != DP_RX_PKTLOG_DISABLED &&
447 	    soc->reap_timer_init)
448 		qdf_timer_sync_cancel(&soc->mon_reap_timer);
449 
450 	pktlogmod_exit(scn);
451 	pdev->pkt_log_init = false;
452 }
453 #endif
454 #else
455 static void dp_pktlogmod_exit(struct dp_pdev *handle) { }
456 
457 /**
458  * dp_soc_get_mon_mask_for_interrupt_mode() - get mon mode mask for intr mode
459  * @soc: pointer to dp_soc handle
460  * @intr_ctx_num: interrupt context number for which mon mask is needed
461  *
462  * Return: mon mask value
463  */
464 static inline
465 uint32_t dp_soc_get_mon_mask_for_interrupt_mode(struct dp_soc *soc, int intr_ctx_num)
466 {
467 	return wlan_cfg_get_rx_mon_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
468 }
469 #endif
470 
471 static int dp_peer_add_ast_wifi3(struct cdp_soc_t *soc_hdl,
472 				 uint8_t vdev_id,
473 				 uint8_t *peer_mac,
474 				 uint8_t *mac_addr,
475 				 enum cdp_txrx_ast_entry_type type,
476 				 uint32_t flags)
477 {
478 	int ret = -1;
479 	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)soc_hdl,
480 						       peer_mac, 0, vdev_id);
481 
482 	if (!peer || peer->delete_in_progress) {
483 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
484 			  "%s: Peer is NULL or delete in progress", __func__);
485 		goto fail;
486 	}
487 
488 	ret = dp_peer_add_ast((struct dp_soc *)soc_hdl,
489 			      peer,
490 			      mac_addr,
491 			      type,
492 			      flags);
493 fail:
494 	if (peer)
495 		dp_peer_unref_delete(peer);
496 
497 	return ret;
498 }
499 
500 static int dp_peer_update_ast_wifi3(struct cdp_soc_t *soc_hdl,
501 						uint8_t vdev_id,
502 						uint8_t *peer_mac,
503 						uint8_t *wds_macaddr,
504 						uint32_t flags)
505 {
506 	int status = -1;
507 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
508 	struct dp_ast_entry  *ast_entry = NULL;
509 	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)soc_hdl,
510 						       peer_mac, 0, vdev_id);
511 
512 	if (!peer || peer->delete_in_progress) {
513 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
514 			  "%s: Peer is NULL or delete in progress", __func__);
515 		goto fail;
516 	}
517 
518 	qdf_spin_lock_bh(&soc->ast_lock);
519 	ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, wds_macaddr,
520 						    peer->vdev->pdev->pdev_id);
521 
522 	if (ast_entry) {
523 		status = dp_peer_update_ast(soc,
524 					    peer,
525 					    ast_entry, flags);
526 	}
527 	qdf_spin_unlock_bh(&soc->ast_lock);
528 
529 fail:
530 	if (peer)
531 		dp_peer_unref_delete(peer);
532 
533 	return status;
534 }
535 
536 /*
537  * dp_wds_reset_ast_wifi3() - Reset the is_active param for ast entry
538  * @soc_hdl:		Datapath SOC handle
539  * @wds_macaddr:	WDS entry MAC Address
540  * @peer_mac_addr:	peer MAC Address
541  * @vdev_id:		id of vdev handle
542  * Return: QDF_STATUS
543  */
544 static QDF_STATUS dp_wds_reset_ast_wifi3(struct cdp_soc_t *soc_hdl,
545 					 uint8_t *wds_macaddr,
546 					 uint8_t *peer_mac_addr,
547 					 uint8_t vdev_id)
548 {
549 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
550 	struct dp_ast_entry *ast_entry = NULL;
551 	struct dp_ast_entry *tmp_ast_entry;
552 	struct dp_peer *peer;
553 	struct dp_pdev *pdev;
554 	struct dp_vdev *vdev = dp_get_vdev_from_soc_vdev_id_wifi3(soc, vdev_id);
555 
556 	if (!vdev)
557 		return QDF_STATUS_E_FAILURE;
558 
559 	pdev = vdev->pdev;
560 
561 	if (peer_mac_addr) {
562 		peer = dp_peer_find_hash_find(soc, peer_mac_addr,
563 					      0, vdev->vdev_id);
564 		if (!peer) {
565 			return QDF_STATUS_E_FAILURE;
566 		}
567 
568 		if (peer->delete_in_progress) {
569 			dp_peer_unref_delete(peer);
570 			return QDF_STATUS_E_FAILURE;
571 		}
572 
573 		qdf_spin_lock_bh(&soc->ast_lock);
574 		DP_PEER_ITERATE_ASE_LIST(peer, ast_entry, tmp_ast_entry) {
575 			if ((ast_entry->type == CDP_TXRX_AST_TYPE_WDS_HM) ||
576 			    (ast_entry->type == CDP_TXRX_AST_TYPE_WDS_HM_SEC))
577 				dp_peer_del_ast(soc, ast_entry);
578 		}
579 		qdf_spin_unlock_bh(&soc->ast_lock);
580 		dp_peer_unref_delete(peer);
581 
582 		return QDF_STATUS_SUCCESS;
583 	} else if (wds_macaddr) {
584 		qdf_spin_lock_bh(&soc->ast_lock);
585 		ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, wds_macaddr,
586 							    pdev->pdev_id);
587 
588 		if (ast_entry) {
589 			if ((ast_entry->type == CDP_TXRX_AST_TYPE_WDS_HM) ||
590 			    (ast_entry->type == CDP_TXRX_AST_TYPE_WDS_HM_SEC))
591 				dp_peer_del_ast(soc, ast_entry);
592 		}
593 		qdf_spin_unlock_bh(&soc->ast_lock);
594 	}
595 
596 	return QDF_STATUS_SUCCESS;
597 }
598 
599 /*
600  * dp_wds_reset_ast_table_wifi3() - Reset the is_active param for all ast entries
601  * @soc_hdl:		Datapath SOC handle
602  * @vdev_id:		id of vdev handle
603  * Return: QDF_STATUS
604  */
605 static QDF_STATUS
606 dp_wds_reset_ast_table_wifi3(struct cdp_soc_t  *soc_hdl,
607 			     uint8_t vdev_id)
608 {
609 	struct dp_soc *soc = (struct dp_soc *) soc_hdl;
610 	struct dp_pdev *pdev;
611 	struct dp_vdev *vdev;
612 	struct dp_peer *peer;
613 	struct dp_ast_entry *ase, *temp_ase;
614 	int i;
615 
616 	qdf_spin_lock_bh(&soc->ast_lock);
617 
618 	for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) {
619 		pdev = soc->pdev_list[i];
620 		qdf_spin_lock_bh(&pdev->vdev_list_lock);
621 		DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
622 			DP_VDEV_ITERATE_PEER_LIST(vdev, peer) {
623 				DP_PEER_ITERATE_ASE_LIST(peer, ase, temp_ase) {
624 					if ((ase->type ==
625 						CDP_TXRX_AST_TYPE_WDS_HM) ||
626 					    (ase->type ==
627 						CDP_TXRX_AST_TYPE_WDS_HM_SEC))
628 						dp_peer_del_ast(soc, ase);
629 				}
630 			}
631 		}
632 		qdf_spin_unlock_bh(&pdev->vdev_list_lock);
633 	}
634 
635 	qdf_spin_unlock_bh(&soc->ast_lock);
636 
637 	return QDF_STATUS_SUCCESS;
638 }
639 
640 /*
641  * dp_wds_flush_ast_table_wifi3() - Delete all wds and hmwds ast entries
642  * @soc_hdl:		Datapath SOC handle
643  *
644  * Return: None
645  */
646 static void dp_wds_flush_ast_table_wifi3(struct cdp_soc_t  *soc_hdl)
647 {
648 	struct dp_soc *soc = (struct dp_soc *) soc_hdl;
649 	struct dp_pdev *pdev;
650 	struct dp_vdev *vdev;
651 	struct dp_peer *peer;
652 	struct dp_ast_entry *ase, *temp_ase;
653 	int i;
654 
655 	qdf_spin_lock_bh(&soc->ast_lock);
656 
657 	for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) {
658 		pdev = soc->pdev_list[i];
659 		qdf_spin_lock_bh(&pdev->vdev_list_lock);
660 		DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
661 			DP_VDEV_ITERATE_PEER_LIST(vdev, peer) {
662 				DP_PEER_ITERATE_ASE_LIST(peer, ase, temp_ase) {
663 					if ((ase->type ==
664 						CDP_TXRX_AST_TYPE_STATIC) ||
665 						(ase->type ==
666 						 CDP_TXRX_AST_TYPE_SELF) ||
667 						(ase->type ==
668 						 CDP_TXRX_AST_TYPE_STA_BSS))
669 						continue;
670 					dp_peer_del_ast(soc, ase);
671 				}
672 			}
673 		}
674 		qdf_spin_unlock_bh(&pdev->vdev_list_lock);
675 	}
676 
677 	qdf_spin_unlock_bh(&soc->ast_lock);
678 }
679 
680 /**
681  * dp_peer_get_ast_info_by_soc_wifi3() - search the soc AST hash table
682  *                                       and return ast entry information
683  *                                       of first ast entry found in the
684  *                                       table with given mac address
685  *
686  * @soc : data path soc handle
687  * @ast_mac_addr : AST entry mac address
688  * @ast_entry_info : ast entry information
689  *
690  * return : true if ast entry found with ast_mac_addr
691  *          false if ast entry not found
692  */
693 static bool dp_peer_get_ast_info_by_soc_wifi3
694 	(struct cdp_soc_t *soc_hdl,
695 	 uint8_t *ast_mac_addr,
696 	 struct cdp_ast_entry_info *ast_entry_info)
697 {
698 	struct dp_ast_entry *ast_entry = NULL;
699 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
700 
701 	qdf_spin_lock_bh(&soc->ast_lock);
702 
703 	ast_entry = dp_peer_ast_hash_find_soc(soc, ast_mac_addr);
704 	if (!ast_entry || !ast_entry->peer) {
705 		qdf_spin_unlock_bh(&soc->ast_lock);
706 		return false;
707 	}
708 	if (ast_entry->delete_in_progress && !ast_entry->callback) {
709 		qdf_spin_unlock_bh(&soc->ast_lock);
710 		return false;
711 	}
712 	ast_entry_info->type = ast_entry->type;
713 	ast_entry_info->pdev_id = ast_entry->pdev_id;
714 	ast_entry_info->vdev_id = ast_entry->peer->vdev->vdev_id;
715 	ast_entry_info->peer_id = ast_entry->peer->peer_ids[0];
716 	qdf_mem_copy(&ast_entry_info->peer_mac_addr[0],
717 		     &ast_entry->peer->mac_addr.raw[0],
718 		     QDF_MAC_ADDR_SIZE);
719 	qdf_spin_unlock_bh(&soc->ast_lock);
720 	return true;
721 }
722 
723 /**
724  * dp_peer_get_ast_info_by_pdevid_wifi3() - search the soc AST hash table
725  *                                          and return ast entry information
726  *                                          if mac address and pdev_id matches
727  *
728  * @soc : data path soc handle
729  * @ast_mac_addr : AST entry mac address
730  * @pdev_id : pdev_id
731  * @ast_entry_info : ast entry information
732  *
733  * return : true if ast entry found with ast_mac_addr
734  *          false if ast entry not found
735  */
736 static bool dp_peer_get_ast_info_by_pdevid_wifi3
737 		(struct cdp_soc_t *soc_hdl,
738 		 uint8_t *ast_mac_addr,
739 		 uint8_t pdev_id,
740 		 struct cdp_ast_entry_info *ast_entry_info)
741 {
742 	struct dp_ast_entry *ast_entry;
743 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
744 
745 	qdf_spin_lock_bh(&soc->ast_lock);
746 
747 	ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, ast_mac_addr, pdev_id);
748 
749 	if (!ast_entry || !ast_entry->peer) {
750 		qdf_spin_unlock_bh(&soc->ast_lock);
751 		return false;
752 	}
753 	if (ast_entry->delete_in_progress && !ast_entry->callback) {
754 		qdf_spin_unlock_bh(&soc->ast_lock);
755 		return false;
756 	}
757 	ast_entry_info->type = ast_entry->type;
758 	ast_entry_info->pdev_id = ast_entry->pdev_id;
759 	ast_entry_info->vdev_id = ast_entry->peer->vdev->vdev_id;
760 	ast_entry_info->peer_id = ast_entry->peer->peer_ids[0];
761 	qdf_mem_copy(&ast_entry_info->peer_mac_addr[0],
762 		     &ast_entry->peer->mac_addr.raw[0],
763 		     QDF_MAC_ADDR_SIZE);
764 	qdf_spin_unlock_bh(&soc->ast_lock);
765 	return true;
766 }
767 
768 /**
769  * dp_peer_ast_entry_del_by_soc() - delete the ast entry from soc AST hash table
770  *                            with given mac address
771  *
772  * @soc : data path soc handle
773  * @ast_mac_addr : AST entry mac address
774  * @callback : callback function to be called on ast delete response from FW
775  * @cookie : argument to be passed to callback
776  *
777  * return : QDF_STATUS_SUCCESS if ast entry found with ast_mac_addr and delete
778  *          is sent
779  *          QDF_STATUS_E_INVAL if ast entry not found
780  */
781 static QDF_STATUS dp_peer_ast_entry_del_by_soc(struct cdp_soc_t *soc_handle,
782 					       uint8_t *mac_addr,
783 					       txrx_ast_free_cb callback,
784 					       void *cookie)
785 
786 {
787 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
788 	struct dp_ast_entry *ast_entry = NULL;
789 	txrx_ast_free_cb cb = NULL;
790 	void *arg = NULL;
791 
792 	qdf_spin_lock_bh(&soc->ast_lock);
793 	ast_entry = dp_peer_ast_hash_find_soc(soc, mac_addr);
794 	if (!ast_entry) {
795 		qdf_spin_unlock_bh(&soc->ast_lock);
796 		return -QDF_STATUS_E_INVAL;
797 	}
798 
799 	if (ast_entry->callback) {
800 		cb = ast_entry->callback;
801 		arg = ast_entry->cookie;
802 	}
803 
804 	ast_entry->callback = callback;
805 	ast_entry->cookie = cookie;
806 
807 	/*
808 	 * If delete_in_progress is set, an AST delete was already sent to
809 	 * the target and the host is waiting for the response; do not send
810 	 * the delete again.
811 	 */
812 	if (!ast_entry->delete_in_progress)
813 		dp_peer_del_ast(soc, ast_entry);
814 
815 	qdf_spin_unlock_bh(&soc->ast_lock);
816 	if (cb) {
817 		cb(soc->ctrl_psoc,
818 		   dp_soc_to_cdp_soc(soc),
819 		   arg,
820 		   CDP_TXRX_AST_DELETE_IN_PROGRESS);
821 	}
822 	return QDF_STATUS_SUCCESS;
823 }
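
/*
 * Note on the callback handover above: if an earlier delete request had
 * already registered a callback, it is replaced by the new one, and the
 * old callback is invoked immediately (outside ast_lock) with
 * CDP_TXRX_AST_DELETE_IN_PROGRESS.
 */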
824 
825 /**
826  * dp_peer_ast_entry_del_by_pdev() - delete the ast entry from soc AST hash
827  *                                   table if mac address and pdev_id matches
828  *
829  * @soc : data path soc handle
830  * @ast_mac_addr : AST entry mac address
831  * @pdev_id : pdev id
832  * @callback : callback function to called on ast delete response from FW
833  * @cookie : argument to be passed to callback
834  *
835  * return : QDF_STATUS_SUCCESS if ast entry found with ast_mac_addr and delete
836  *          is sent
837  *          QDF_STATUS_E_INVAL if ast entry not found
838  */
839 
840 static QDF_STATUS dp_peer_ast_entry_del_by_pdev(struct cdp_soc_t *soc_handle,
841 						uint8_t *mac_addr,
842 						uint8_t pdev_id,
843 						txrx_ast_free_cb callback,
844 						void *cookie)
845 
846 {
847 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
848 	struct dp_ast_entry *ast_entry;
849 	txrx_ast_free_cb cb = NULL;
850 	void *arg = NULL;
851 
852 	qdf_spin_lock_bh(&soc->ast_lock);
853 	ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, mac_addr, pdev_id);
854 
855 	if (!ast_entry) {
856 		qdf_spin_unlock_bh(&soc->ast_lock);
857 		return -QDF_STATUS_E_INVAL;
858 	}
859 
860 	if (ast_entry->callback) {
861 		cb = ast_entry->callback;
862 		arg = ast_entry->cookie;
863 	}
864 
865 	ast_entry->callback = callback;
866 	ast_entry->cookie = cookie;
867 
868 	/*
869 	 * If delete_in_progress is set, an AST delete was already sent to
870 	 * the target and the host is waiting for the response; do not send
871 	 * the delete again.
872 	 */
873 	if (!ast_entry->delete_in_progress)
874 		dp_peer_del_ast(soc, ast_entry);
875 
876 	qdf_spin_unlock_bh(&soc->ast_lock);
877 
878 	if (cb) {
879 		cb(soc->ctrl_psoc,
880 		   dp_soc_to_cdp_soc(soc),
881 		   arg,
882 		   CDP_TXRX_AST_DELETE_IN_PROGRESS);
883 	}
884 	return QDF_STATUS_SUCCESS;
885 }
886 
887 /**
888  * dp_srng_find_ring_in_mask() - find which ext_group a ring belongs to
889  * @ring_num: ring num of the ring being queried
890  * @grp_mask: the grp_mask array for the ring type in question.
891  *
892  * The grp_mask array is indexed by group number and the bit fields correspond
893  * to ring numbers.  We are finding which interrupt group a ring belongs to.
894  *
895  * Return: the index in the grp_mask array with the ring number.
896  * -QDF_STATUS_E_NOENT if no entry is found
897  */
898 static int dp_srng_find_ring_in_mask(int ring_num, int *grp_mask)
899 {
900 	int ext_group_num;
901 	int mask = 1 << ring_num;
902 
903 	for (ext_group_num = 0; ext_group_num < WLAN_CFG_INT_NUM_CONTEXTS;
904 	     ext_group_num++) {
905 		if (mask & grp_mask[ext_group_num])
906 			return ext_group_num;
907 	}
908 
909 	return -QDF_STATUS_E_NOENT;
910 }
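
/*
 * Worked example: with grp_mask = {0x1, 0x6} and ring_num = 2, mask is
 * 1 << 2 = 0x4; group 0 (0x1) does not contain bit 0x4 but group 1
 * (0x6) does, so the function returns ext_group_num 1.
 */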
911 
912 static int dp_srng_calculate_msi_group(struct dp_soc *soc,
913 				       enum hal_ring_type ring_type,
914 				       int ring_num)
915 {
916 	int *grp_mask;
917 
918 	switch (ring_type) {
919 	case WBM2SW_RELEASE:
920 		/* dp_tx_comp_handler - soc->tx_comp_ring */
921 		if (ring_num < 3)
922 			grp_mask = &soc->wlan_cfg_ctx->int_tx_ring_mask[0];
923 
924 		/* dp_rx_wbm_err_process - soc->rx_rel_ring */
925 		else if (ring_num == 3) {
926 			/* sw treats this as a separate ring type */
927 			grp_mask = &soc->wlan_cfg_ctx->
928 				int_rx_wbm_rel_ring_mask[0];
929 			ring_num = 0;
930 		} else {
931 			qdf_assert(0);
932 			return -QDF_STATUS_E_NOENT;
933 		}
934 	break;
935 
936 	case REO_EXCEPTION:
937 		/* dp_rx_err_process - &soc->reo_exception_ring */
938 		grp_mask = &soc->wlan_cfg_ctx->int_rx_err_ring_mask[0];
939 	break;
940 
941 	case REO_DST:
942 		/* dp_rx_process - soc->reo_dest_ring */
943 		grp_mask = &soc->wlan_cfg_ctx->int_rx_ring_mask[0];
944 	break;
945 
946 	case REO_STATUS:
947 		/* dp_reo_status_ring_handler - soc->reo_status_ring */
948 		grp_mask = &soc->wlan_cfg_ctx->int_reo_status_ring_mask[0];
949 	break;
950 
951 	/* dp_rx_mon_status_srng_process - pdev->rxdma_mon_status_ring*/
952 	case RXDMA_MONITOR_STATUS:
953 	/* dp_rx_mon_dest_process - pdev->rxdma_mon_dst_ring */
954 	case RXDMA_MONITOR_DST:
955 		/* dp_mon_process */
956 		grp_mask = &soc->wlan_cfg_ctx->int_rx_mon_ring_mask[0];
957 	break;
958 	case RXDMA_DST:
959 		/* dp_rxdma_err_process */
960 		grp_mask = &soc->wlan_cfg_ctx->int_rxdma2host_ring_mask[0];
961 	break;
962 
963 	case RXDMA_BUF:
964 		grp_mask = &soc->wlan_cfg_ctx->int_host2rxdma_ring_mask[0];
965 	break;
966 
967 	case RXDMA_MONITOR_BUF:
968 		/* TODO: support low_thresh interrupt */
969 		return -QDF_STATUS_E_NOENT;
970 	break;
971 
972 	case TCL_DATA:
973 	case TCL_CMD:
974 	case REO_CMD:
975 	case SW2WBM_RELEASE:
976 	case WBM_IDLE_LINK:
977 		/* normally empty SW_TO_HW rings */
978 		return -QDF_STATUS_E_NOENT;
979 	break;
980 
981 	case TCL_STATUS:
982 	case REO_REINJECT:
983 		/* misc unused rings */
984 		return -QDF_STATUS_E_NOENT;
985 	break;
986 
987 	case CE_SRC:
988 	case CE_DST:
989 	case CE_DST_STATUS:
990 		/* CE_rings - currently handled by hif */
991 	default:
992 		return -QDF_STATUS_E_NOENT;
993 	break;
994 	}
995 
996 	return dp_srng_find_ring_in_mask(ring_num, grp_mask);
997 }
998 
999 static void dp_srng_msi_setup(struct dp_soc *soc, struct hal_srng_params
1000 			      *ring_params, int ring_type, int ring_num)
1001 {
1002 	int msi_group_number;
1003 	int msi_data_count;
1004 	int ret;
1005 	uint32_t msi_data_start, msi_irq_start, addr_low, addr_high;
1006 
1007 	ret = pld_get_user_msi_assignment(soc->osdev->dev, "DP",
1008 					    &msi_data_count, &msi_data_start,
1009 					    &msi_irq_start);
1010 
1011 	if (ret)
1012 		return;
1013 
1014 	msi_group_number = dp_srng_calculate_msi_group(soc, ring_type,
1015 						       ring_num);
1016 	if (msi_group_number < 0) {
1017 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW,
1018 			FL("ring not part of an ext_group; ring_type: %d, ring_num %d"),
1019 			ring_type, ring_num);
1020 		ring_params->msi_addr = 0;
1021 		ring_params->msi_data = 0;
1022 		return;
1023 	}
1024 
1025 	if (msi_group_number > msi_data_count) {
1026 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_WARN,
1027 			FL("2 msi_groups will share an msi; msi_group_num %d"),
1028 			msi_group_number);
1029 
1030 		QDF_ASSERT(0);
1031 	}
1032 
1033 	pld_get_msi_address(soc->osdev->dev, &addr_low, &addr_high);
1034 
1035 	ring_params->msi_addr = addr_low;
1036 	ring_params->msi_addr |= (qdf_dma_addr_t)(((uint64_t)addr_high) << 32);
1037 	ring_params->msi_data = (msi_group_number % msi_data_count)
1038 		+ msi_data_start;
1039 	ring_params->flags |= HAL_SRNG_MSI_INTR;
1040 }
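
/*
 * Worked example (values assumed for illustration): if
 * pld_get_user_msi_assignment() reports msi_data_count = 3 and
 * msi_data_start = 40, a ring in msi_group_number 4 is programmed with
 * msi_data = (4 % 3) + 40 = 41, the same vector as group 1; this is
 * exactly the sharing case the warning above flags.
 */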
1041 
1042 /**
1043  * dp_print_ast_stats() - Dump AST table contents
1044  * @soc: Datapath soc handle
1045  *
1046  * return void
1047  */
1048 #ifdef FEATURE_AST
1049 void dp_print_ast_stats(struct dp_soc *soc)
1050 {
1051 	uint8_t i;
1052 	uint8_t num_entries = 0;
1053 	struct dp_vdev *vdev;
1054 	struct dp_pdev *pdev;
1055 	struct dp_peer *peer;
1056 	struct dp_ast_entry *ase, *tmp_ase;
1057 	char type[CDP_TXRX_AST_TYPE_MAX][10] = {
1058 			"NONE", "STATIC", "SELF", "WDS", "MEC", "HMWDS", "BSS",
1059 			"DA", "HMWDS_SEC"};
1060 
1061 	DP_PRINT_STATS("AST Stats:");
1062 	DP_PRINT_STATS("	Entries Added   = %d", soc->stats.ast.added);
1063 	DP_PRINT_STATS("	Entries Deleted = %d", soc->stats.ast.deleted);
1064 	DP_PRINT_STATS("	Entries Agedout = %d", soc->stats.ast.aged_out);
1065 	DP_PRINT_STATS("	Entries MAP ERR  = %d", soc->stats.ast.map_err);
1066 
1067 	DP_PRINT_STATS("AST Table:");
1068 
1069 	qdf_spin_lock_bh(&soc->ast_lock);
1070 	for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) {
1071 		pdev = soc->pdev_list[i];
1072 		qdf_spin_lock_bh(&pdev->vdev_list_lock);
1073 		DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
1074 			DP_VDEV_ITERATE_PEER_LIST(vdev, peer) {
1075 				DP_PEER_ITERATE_ASE_LIST(peer, ase, tmp_ase) {
1076 				    DP_PRINT_STATS("%6d mac_addr = %pM"
1077 					    " peer_mac_addr = %pM"
1078 					    " peer_id = %u"
1079 					    " type = %s"
1080 					    " next_hop = %d"
1081 					    " is_active = %d"
1082 					    " ast_idx = %d"
1083 					    " ast_hash = %d"
1084 					    " delete_in_progress = %d"
1085 					    " pdev_id = %d"
1086 					    " vdev_id = %d",
1087 					    ++num_entries,
1088 					    ase->mac_addr.raw,
1089 					    ase->peer->mac_addr.raw,
1090 					    ase->peer->peer_ids[0],
1091 					    type[ase->type],
1092 					    ase->next_hop,
1093 					    ase->is_active,
1094 					    ase->ast_idx,
1095 					    ase->ast_hash_value,
1096 					    ase->delete_in_progress,
1097 					    ase->pdev_id,
1098 					    vdev->vdev_id);
1099 				}
1100 			}
1101 		}
1102 		qdf_spin_unlock_bh(&pdev->vdev_list_lock);
1103 	}
1104 	qdf_spin_unlock_bh(&soc->ast_lock);
1105 }
1106 #else
1107 void dp_print_ast_stats(struct dp_soc *soc)
1108 {
1109 	DP_PRINT_STATS("AST Stats not available. Enable FEATURE_AST");
1110 	return;
1111 }
1112 #endif
1113 
1114 /**
1115  * dp_print_peer_table() - Dump all Peer stats
1116  * @vdev: Datapath Vdev handle
1117  *
1118  * return void
1119  */
1120 static void dp_print_peer_table(struct dp_vdev *vdev)
1121 {
1122 	struct dp_peer *peer = NULL;
1123 
1124 	DP_PRINT_STATS("Dumping Peer Table  Stats:");
1125 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
1126 		if (!peer) {
1127 			DP_PRINT_STATS("Invalid Peer");
1128 			return;
1129 		}
1130 		DP_PRINT_STATS("    peer_mac_addr = %pM"
1131 			       " nawds_enabled = %d"
1132 			       " bss_peer = %d"
1133 			       " wds_enabled = %d"
1134 			       " tx_cap_enabled = %d"
1135 			       " rx_cap_enabled = %d"
1136 			       " delete in progress = %d"
1137 			       " peer id = %d",
1138 			       peer->mac_addr.raw,
1139 			       peer->nawds_enabled,
1140 			       peer->bss_peer,
1141 			       peer->wds_enabled,
1142 			       peer->tx_cap_enabled,
1143 			       peer->rx_cap_enabled,
1144 			       peer->delete_in_progress,
1145 			       peer->peer_ids[0]);
1146 	}
1147 }
1148 
1149 #ifdef WLAN_DP_PER_RING_TYPE_CONFIG
1150 /**
1151  * dp_srng_configure_interrupt_thresholds() - Retrieve interrupt
1152  * threshold values from the wlan_srng_cfg table for each ring type
1153  * @soc: device handle
1154  * @ring_params: per ring specific parameters
1155  * @ring_type: Ring type
1156  * @ring_num: Ring number for a given ring type
1157  *
1158  * Fill the ring params with the interrupt threshold
1159  * configuration parameters available in the per ring type wlan_srng_cfg
1160  * table.
1161  *
1162  * Return: None
1163  */
1164 static void
1165 dp_srng_configure_interrupt_thresholds(struct dp_soc *soc,
1166 				       struct hal_srng_params *ring_params,
1167 				       int ring_type, int ring_num,
1168 				       int num_entries)
1169 {
1170 	if (ring_type == WBM2SW_RELEASE && (ring_num == 3)) {
1171 		ring_params->intr_timer_thres_us =
1172 				wlan_cfg_get_int_timer_threshold_other(soc->wlan_cfg_ctx);
1173 		ring_params->intr_batch_cntr_thres_entries =
1174 				wlan_cfg_get_int_batch_threshold_other(soc->wlan_cfg_ctx);
1175 	} else {
1176 		ring_params->intr_timer_thres_us =
1177 				soc->wlan_srng_cfg[ring_type].timer_threshold;
1178 		ring_params->intr_batch_cntr_thres_entries =
1179 				soc->wlan_srng_cfg[ring_type].batch_count_threshold;
1180 	}
1181 	ring_params->low_threshold =
1182 			soc->wlan_srng_cfg[ring_type].low_threshold;
1183 
1184 	if (ring_params->low_threshold)
1185 		ring_params->flags |= HAL_SRNG_LOW_THRES_INTR_ENABLE;
1186 }
1187 #else
1188 static void
1189 dp_srng_configure_interrupt_thresholds(struct dp_soc *soc,
1190 				       struct hal_srng_params *ring_params,
1191 				       int ring_type, int ring_num,
1192 				       int num_entries)
1193 {
1194 	if (ring_type == REO_DST) {
1195 		ring_params->intr_timer_thres_us =
1196 			wlan_cfg_get_int_timer_threshold_rx(soc->wlan_cfg_ctx);
1197 		ring_params->intr_batch_cntr_thres_entries =
1198 			wlan_cfg_get_int_batch_threshold_rx(soc->wlan_cfg_ctx);
1199 	} else if (ring_type == WBM2SW_RELEASE && (ring_num < 3)) {
1200 		ring_params->intr_timer_thres_us =
1201 			wlan_cfg_get_int_timer_threshold_tx(soc->wlan_cfg_ctx);
1202 		ring_params->intr_batch_cntr_thres_entries =
1203 			wlan_cfg_get_int_batch_threshold_tx(soc->wlan_cfg_ctx);
1204 	} else {
1205 		ring_params->intr_timer_thres_us =
1206 			wlan_cfg_get_int_timer_threshold_other(soc->wlan_cfg_ctx);
1207 		ring_params->intr_batch_cntr_thres_entries =
1208 			wlan_cfg_get_int_batch_threshold_other(soc->wlan_cfg_ctx);
1209 	}
1210 
1211 	/* Enable low threshold interrupts for rx buffer rings (regular and
1212 	 * monitor buffer rings).
1213 	 * TODO: See if this is required for any other ring
1214 	 */
1215 	if ((ring_type == RXDMA_BUF) || (ring_type == RXDMA_MONITOR_BUF) ||
1216 	    (ring_type == RXDMA_MONITOR_STATUS)) {
1217 		/* TODO: Setting low threshold to 1/8th of ring size
1218 		 * see if this needs to be configurable
1219 		 */
1220 		ring_params->low_threshold = num_entries >> 3;
1221 		ring_params->intr_timer_thres_us =
1222 			wlan_cfg_get_int_timer_threshold_rx(soc->wlan_cfg_ctx);
1223 		ring_params->flags |= HAL_SRNG_LOW_THRES_INTR_ENABLE;
1224 		ring_params->intr_batch_cntr_thres_entries = 0;
1225 	}
1226 }
1227 #endif
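
/*
 * Worked example for the low threshold above: an RXDMA_BUF ring with
 * 1024 entries gets low_threshold = 1024 >> 3 = 128, so the SRNG can
 * raise a low-threshold interrupt once roughly 1/8th of the ring
 * remains, triggering a refill (see the host2rxdma handling in
 * dp_service_srngs()).
 */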
1228 
1229 /**
1230  * dp_srng_setup() - Internal function to setup SRNG rings used by data path
1231  * @soc: datapath soc handle
1232  * @srng: srng handle
1233  * @ring_type: ring that needs to be configured
1234  * @ring_num: ring number of the given ring type
1235  * @mac_id: mac number
1236  * @num_entries: Total number of entries for a given ring
1237  * @cached: set if the ring memory is to be allocated as cached
1238  *
1239  * Return: non-zero - failure/zero - success
1240  */
1239 static int dp_srng_setup(struct dp_soc *soc, struct dp_srng *srng,
1240 			 int ring_type, int ring_num, int mac_id,
1241 			 uint32_t num_entries, bool cached)
1242 {
1243 	hal_soc_handle_t hal_soc = soc->hal_soc;
1244 	uint32_t entry_size = hal_srng_get_entrysize(hal_soc, ring_type);
1245 	/* TODO: See if we should get align size from hal */
1246 	uint32_t ring_base_align = 8;
1247 	struct hal_srng_params ring_params;
1248 	uint32_t max_entries = hal_srng_max_entries(hal_soc, ring_type);
1249 
1250 	/* TODO: Currently hal layer takes care of endianness related settings.
1251 	 * See if these settings need to passed from DP layer
1252 	 */
1253 	ring_params.flags = 0;
1254 
1255 	num_entries = (num_entries > max_entries) ? max_entries : num_entries;
1256 	srng->hal_srng = NULL;
1257 	srng->alloc_size = num_entries * entry_size;
1258 	srng->num_entries = num_entries;
1259 
1260 	if (!dp_is_soc_reinit(soc)) {
1261 		if (!cached) {
1262 			ring_params.ring_base_vaddr =
1263 			    qdf_aligned_mem_alloc_consistent(
1264 						soc->osdev, &srng->alloc_size,
1265 						&srng->base_vaddr_unaligned,
1266 						&srng->base_paddr_unaligned,
1267 						&ring_params.ring_base_paddr,
1268 						ring_base_align);
1269 		} else {
1270 			ring_params.ring_base_vaddr = qdf_aligned_malloc(
1271 					&srng->alloc_size,
1272 					&srng->base_vaddr_unaligned,
1273 					&srng->base_paddr_unaligned,
1274 					&ring_params.ring_base_paddr,
1275 					ring_base_align);
1276 		}
1277 
1278 		if (!ring_params.ring_base_vaddr) {
1279 			dp_err("alloc failed - ring_type: %d, ring_num %d",
1280 					ring_type, ring_num);
1281 			return QDF_STATUS_E_NOMEM;
1282 		}
1283 	}
1284 
1285 	ring_params.ring_base_paddr = (qdf_dma_addr_t)qdf_align(
1286 			(unsigned long)(srng->base_paddr_unaligned),
1287 			ring_base_align);
1288 
1289 	ring_params.ring_base_vaddr = (void *)(
1290 			(unsigned long)(srng->base_vaddr_unaligned) +
1291 			((unsigned long)(ring_params.ring_base_paddr) -
1292 			 (unsigned long)(srng->base_paddr_unaligned)));
1293 
1294 	qdf_assert_always(ring_params.ring_base_vaddr);
1295 
1296 	ring_params.num_entries = num_entries;
1297 
1298 	dp_verbose_debug("Ring type: %d, num:%d vaddr %pK paddr %pK entries %u",
1299 			 ring_type, ring_num,
1300 			 (void *)ring_params.ring_base_vaddr,
1301 			 (void *)ring_params.ring_base_paddr,
1302 			 ring_params.num_entries);
1303 
1304 	if (soc->intr_mode == DP_INTR_MSI) {
1305 		dp_srng_msi_setup(soc, &ring_params, ring_type, ring_num);
1306 		dp_verbose_debug("Using MSI for ring_type: %d, ring_num %d",
1307 				 ring_type, ring_num);
1308 
1309 	} else {
1310 		ring_params.msi_data = 0;
1311 		ring_params.msi_addr = 0;
1312 		dp_verbose_debug("Skipping MSI for ring_type: %d, ring_num %d",
1313 				 ring_type, ring_num);
1314 	}
1315 
1316 	dp_srng_configure_interrupt_thresholds(soc, &ring_params,
1317 					       ring_type, ring_num,
1318 					       num_entries);
1319 
1320 	if (cached) {
1321 		ring_params.flags |= HAL_SRNG_CACHED_DESC;
1322 		srng->cached = 1;
1323 	}
1324 
1325 	srng->hal_srng = hal_srng_setup(hal_soc, ring_type, ring_num,
1326 		mac_id, &ring_params);
1327 
1328 	if (!srng->hal_srng) {
1329 		if (cached) {
1330 			qdf_mem_free(srng->base_vaddr_unaligned);
1331 		} else {
1332 			qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
1333 						srng->alloc_size,
1334 						srng->base_vaddr_unaligned,
1335 						srng->base_paddr_unaligned, 0);
1336 		}
1337 		return QDF_STATUS_E_FAILURE;
1338 	}
1339 
1340 	return 0;
1340 }
1341 
1342 /*
1343  * dp_srng_deinit() - Internal function to deinit SRNG rings used by data path
1344  * @soc: DP SOC handle
1345  * @srng: source ring structure
1346  * @ring_type: type of ring
1347  * @ring_num: ring number
1348  *
1349  * Return: None
1350  */
1351 static void dp_srng_deinit(struct dp_soc *soc, struct dp_srng *srng,
1352 			   int ring_type, int ring_num)
1353 {
1354 	if (!srng->hal_srng) {
1355 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1356 			  FL("Ring type: %d, num:%d not setup"),
1357 			  ring_type, ring_num);
1358 		return;
1359 	}
1360 
1361 	hal_srng_cleanup(soc->hal_soc, srng->hal_srng);
1362 	srng->hal_srng = NULL;
1363 }
1364 
1365 /**
1366  * dp_srng_cleanup - Internal function to cleanup SRNG rings used by data path
1367  * Any buffers allocated and attached to ring entries are expected to be freed
1368  * before calling this function.
1369  */
1370 static void dp_srng_cleanup(struct dp_soc *soc, struct dp_srng *srng,
1371 	int ring_type, int ring_num)
1372 {
1373 	if (!dp_is_soc_reinit(soc)) {
1374 		if (!srng->hal_srng && (srng->alloc_size == 0)) {
1375 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1376 				  FL("Ring type: %d, num:%d not setup"),
1377 				  ring_type, ring_num);
1378 			return;
1379 		}
1380 
1381 		if (srng->hal_srng) {
1382 			hal_srng_cleanup(soc->hal_soc, srng->hal_srng);
1383 			srng->hal_srng = NULL;
1384 		}
1385 	}
1386 
1387 	if (srng->alloc_size && srng->base_vaddr_unaligned) {
1388 		if (!srng->cached) {
1389 			qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
1390 						srng->alloc_size,
1391 						srng->base_vaddr_unaligned,
1392 						srng->base_paddr_unaligned, 0);
1393 		} else {
1394 			qdf_mem_free(srng->base_vaddr_unaligned);
1395 		}
1396 		srng->alloc_size = 0;
1397 		srng->base_vaddr_unaligned = NULL;
1398 	}
1399 	srng->hal_srng = NULL;
1400 }
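
/*
 * Lifecycle note: dp_srng_deinit() only detaches the ring from HAL,
 * while dp_srng_cleanup() also releases the ring memory, using
 * qdf_mem_free_consistent() or qdf_mem_free() to match the coherent
 * vs. cached allocation done in dp_srng_setup().
 */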
1401 
1402 /* TODO: Need this interface from HIF */
1403 void *hif_get_hal_handle(struct hif_opaque_softc *hif_handle);
1404 
1405 #ifdef WLAN_FEATURE_DP_EVENT_HISTORY
1406 int dp_srng_access_start(struct dp_intr *int_ctx, struct dp_soc *dp_soc,
1407 			 hal_ring_handle_t hal_ring_hdl)
1408 {
1409 	hal_soc_handle_t hal_soc = dp_soc->hal_soc;
1410 	uint32_t hp, tp;
1411 	uint8_t ring_id;
1412 
1413 	hal_get_sw_hptp(hal_soc, hal_ring_hdl, &tp, &hp);
1414 	ring_id = hal_srng_ring_id_get(hal_ring_hdl);
1415 
1416 	hif_record_event(dp_soc->hif_handle, int_ctx->dp_intr_id,
1417 			 ring_id, hp, tp, HIF_EVENT_SRNG_ACCESS_START);
1418 
1419 	return hal_srng_access_start(hal_soc, hal_ring_hdl);
1420 }
1421 
1422 void dp_srng_access_end(struct dp_intr *int_ctx, struct dp_soc *dp_soc,
1423 			hal_ring_handle_t hal_ring_hdl)
1424 {
1425 	hal_soc_handle_t hal_soc = dp_soc->hal_soc;
1426 	uint32_t hp, tp;
1427 	uint8_t ring_id;
1428 
1429 	hal_get_sw_hptp(hal_soc, hal_ring_hdl, &tp, &hp);
1430 	ring_id = hal_srng_ring_id_get(hal_ring_hdl);
1431 
1432 	hif_record_event(dp_soc->hif_handle, int_ctx->dp_intr_id,
1433 			 ring_id, hp, tp, HIF_EVENT_SRNG_ACCESS_END);
1434 
1435 	return hal_srng_access_end(hal_soc, hal_ring_hdl);
1436 }
1437 #endif /* WLAN_FEATURE_DP_EVENT_HISTORY */
1438 
1439 /*
1440  * dp_service_srngs() - Top level interrupt handler for DP Ring interrupts
1441  * @dp_ctx: DP interrupt context handle
1442  * @budget: Number of frames/descriptors that can be processed in one shot
1443  *
1444  * Return: budget consumed, i.e. the number of frames/descriptors processed
1445  */
1446 static uint32_t dp_service_srngs(void *dp_ctx, uint32_t dp_budget)
1447 {
1448 	struct dp_intr *int_ctx = (struct dp_intr *)dp_ctx;
1449 	struct dp_intr_stats *intr_stats = &int_ctx->intr_stats;
1450 	struct dp_soc *soc = int_ctx->soc;
1451 	int ring = 0;
1452 	uint32_t work_done  = 0;
1453 	int budget = dp_budget;
1454 	uint8_t tx_mask = int_ctx->tx_ring_mask;
1455 	uint8_t rx_mask = int_ctx->rx_ring_mask;
1456 	uint8_t rx_err_mask = int_ctx->rx_err_ring_mask;
1457 	uint8_t rx_wbm_rel_mask = int_ctx->rx_wbm_rel_ring_mask;
1458 	uint8_t reo_status_mask = int_ctx->reo_status_ring_mask;
1459 	uint32_t remaining_quota = dp_budget;
1460 	struct dp_pdev *pdev = NULL;
1461 	int mac_id;
1462 
1463 	dp_verbose_debug("tx %x rx %x rx_err %x rx_wbm_rel %x reo_status %x rx_mon_ring %x host2rxdma %x rxdma2host %x\n",
1464 			 tx_mask, rx_mask, rx_err_mask, rx_wbm_rel_mask,
1465 			 reo_status_mask,
1466 			 int_ctx->rx_mon_ring_mask,
1467 			 int_ctx->host2rxdma_ring_mask,
1468 			 int_ctx->rxdma2host_ring_mask);
1469 
1470 	/* Process Tx completion interrupts first to return back buffers */
1471 	while (tx_mask) {
1472 		if (tx_mask & 0x1) {
1473 			work_done = dp_tx_comp_handler(int_ctx,
1474 						       soc,
1475 						       soc->tx_comp_ring[ring].hal_srng,
1476 						       ring, remaining_quota);
1477 
1478 			if (work_done) {
1479 				intr_stats->num_tx_ring_masks[ring]++;
1480 				dp_verbose_debug("tx mask 0x%x ring %d, budget %d, work_done %d",
1481 						 tx_mask, ring, budget,
1482 						 work_done);
1483 			}
1484 
1485 			budget -= work_done;
1486 			if (budget <= 0)
1487 				goto budget_done;
1488 
1489 			remaining_quota = budget;
1490 		}
1491 		tx_mask = tx_mask >> 1;
1492 		ring++;
1493 	}
1494 
1495 	/* Process REO Exception ring interrupt */
1496 	if (rx_err_mask) {
1497 		work_done = dp_rx_err_process(int_ctx, soc,
1498 					      soc->reo_exception_ring.hal_srng,
1499 					      remaining_quota);
1500 
1501 		if (work_done) {
1502 			intr_stats->num_rx_err_ring_masks++;
1503 			dp_verbose_debug("REO Exception Ring: work_done %d budget %d",
1504 					 work_done, budget);
1505 		}
1506 
1507 		budget -=  work_done;
1508 		if (budget <= 0) {
1509 			goto budget_done;
1510 		}
1511 		remaining_quota = budget;
1512 	}
1513 
1514 	/* Process Rx WBM release ring interrupt */
1515 	if (rx_wbm_rel_mask) {
1516 		work_done = dp_rx_wbm_err_process(int_ctx, soc,
1517 						  soc->rx_rel_ring.hal_srng,
1518 						  remaining_quota);
1519 
1520 		if (work_done) {
1521 			intr_stats->num_rx_wbm_rel_ring_masks++;
1522 			dp_verbose_debug("WBM Release Ring: work_done %d budget %d",
1523 					 work_done, budget);
1524 		}
1525 
1526 		budget -=  work_done;
1527 		if (budget <= 0) {
1528 			goto budget_done;
1529 		}
1530 		remaining_quota = budget;
1531 	}
1532 
1533 	/* Process Rx interrupts */
1534 	if (rx_mask) {
1535 		for (ring = 0; ring < soc->num_reo_dest_rings; ring++) {
1536 			if (!(rx_mask & (1 << ring)))
1537 				continue;
1538 			work_done = dp_rx_process(int_ctx,
1539 						  soc->reo_dest_ring[ring].hal_srng,
1540 						  ring,
1541 						  remaining_quota);
1542 			if (work_done) {
1543 				intr_stats->num_rx_ring_masks[ring]++;
1544 				dp_verbose_debug("rx mask 0x%x ring %d, work_done %d budget %d",
1545 						 rx_mask, ring,
1546 						 work_done, budget);
1547 				budget -=  work_done;
1548 				if (budget <= 0)
1549 					goto budget_done;
1550 				remaining_quota = budget;
1551 			}
1552 		}
1553 	}
1554 
1555 	if (reo_status_mask) {
1556 		if (dp_reo_status_ring_handler(int_ctx, soc))
1557 			int_ctx->intr_stats.num_reo_status_ring_masks++;
1558 	}
1559 
1560 	/* Process LMAC interrupts */
1561 	for (ring = 0; ring < MAX_PDEV_CNT; ring++) {
1562 		pdev = soc->pdev_list[ring];
1563 		if (!pdev)
1564 			continue;
1565 		for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
1566 			int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id,
1567 								pdev->pdev_id);
1568 			if (int_ctx->rx_mon_ring_mask & (1 << mac_for_pdev)) {
1569 				work_done = dp_mon_process(soc, mac_for_pdev,
1570 							   remaining_quota);
1571 				if (work_done)
1572 					intr_stats->num_rx_mon_ring_masks++;
1573 				budget -= work_done;
1574 				if (budget <= 0)
1575 					goto budget_done;
1576 				remaining_quota = budget;
1577 			}
1578 
1579 			if (int_ctx->rxdma2host_ring_mask &
1580 					(1 << mac_for_pdev)) {
1581 				work_done = dp_rxdma_err_process(int_ctx, soc,
1582 								 mac_for_pdev,
1583 								 remaining_quota);
1584 				if (work_done)
1585 					intr_stats->num_rxdma2host_ring_masks++;
1586 				budget -=  work_done;
1587 				if (budget <= 0)
1588 					goto budget_done;
1589 				remaining_quota = budget;
1590 			}
1591 
1592 			if (int_ctx->host2rxdma_ring_mask &
1593 						(1 << mac_for_pdev)) {
1594 				union dp_rx_desc_list_elem_t *desc_list = NULL;
1595 				union dp_rx_desc_list_elem_t *tail = NULL;
1596 				struct dp_srng *rx_refill_buf_ring =
1597 					&pdev->rx_refill_buf_ring;
1598 
1599 				intr_stats->num_host2rxdma_ring_masks++;
1600 				DP_STATS_INC(pdev, replenish.low_thresh_intrs,
1601 						1);
1602 				dp_rx_buffers_replenish(soc, mac_for_pdev,
1603 							rx_refill_buf_ring,
1604 							&soc->rx_desc_buf[mac_for_pdev],
1605 							0, &desc_list, &tail);
1606 			}
1607 		}
1608 	}
1609 
1610 	qdf_lro_flush(int_ctx->lro_ctx);
1611 	intr_stats->num_masks++;
1612 
1613 budget_done:
1614 	return dp_budget - budget;
1615 }
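
/*
 * Example of the mask walk above: with tx_ring_mask = 0x5, tx
 * completion rings 0 and 2 are serviced, the shared budget is reduced
 * after each ring, and processing bails out via budget_done once the
 * budget is exhausted.
 */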
1616 
1617 /* dp_interrupt_timer() - timer to poll for interrupts
1618  *
1619  * @arg: SoC Handle
1620  *
1621  * Return: none
1622  *
1623  */
1624 static void dp_interrupt_timer(void *arg)
1625 {
1626 	struct dp_soc *soc = (struct dp_soc *) arg;
1627 	int i;
1628 
1629 	if (qdf_atomic_read(&soc->cmn_init_done)) {
1630 		for (i = 0;
1631 			i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++)
1632 			dp_service_srngs(&soc->intr_ctx[i], 0xffff);
1633 
1634 		qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
1635 	}
1636 }
1637 
1638 /*
1639  * dp_soc_attach_poll() - Register handlers for DP interrupts
1640  * @txrx_soc: DP SOC handle
1641  *
1642  * The host driver will register for "DP_NUM_INTERRUPT_CONTEXTS" NAPI
1643  * contexts. Each NAPI context will have a tx_ring_mask, rx_ring_mask, and
1644  * rx_mon_ring_mask to indicate the rings that are processed by the handler.
1645  *
1646  * Return: 0 for success, nonzero for failure.
1647  */
1648 static QDF_STATUS dp_soc_attach_poll(struct cdp_soc_t *txrx_soc)
1649 {
1650 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
1651 	int i;
1652 
1653 	soc->intr_mode = DP_INTR_POLL;
1654 
1655 	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
1656 		soc->intr_ctx[i].dp_intr_id = i;
1657 		soc->intr_ctx[i].tx_ring_mask =
1658 			wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, i);
1659 		soc->intr_ctx[i].rx_ring_mask =
1660 			wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, i);
1661 		soc->intr_ctx[i].rx_mon_ring_mask =
1662 			wlan_cfg_get_rx_mon_ring_mask(soc->wlan_cfg_ctx, i);
1663 		soc->intr_ctx[i].rx_err_ring_mask =
1664 			wlan_cfg_get_rx_err_ring_mask(soc->wlan_cfg_ctx, i);
1665 		soc->intr_ctx[i].rx_wbm_rel_ring_mask =
1666 			wlan_cfg_get_rx_wbm_rel_ring_mask(soc->wlan_cfg_ctx, i);
1667 		soc->intr_ctx[i].reo_status_ring_mask =
1668 			wlan_cfg_get_reo_status_ring_mask(soc->wlan_cfg_ctx, i);
1669 		soc->intr_ctx[i].rxdma2host_ring_mask =
1670 			wlan_cfg_get_rxdma2host_ring_mask(soc->wlan_cfg_ctx, i);
1671 		soc->intr_ctx[i].soc = soc;
1672 		soc->intr_ctx[i].lro_ctx = qdf_lro_init();
1673 	}
1674 
1675 	qdf_timer_init(soc->osdev, &soc->int_timer,
1676 			dp_interrupt_timer, (void *)soc,
1677 			QDF_TIMER_TYPE_WAKE_APPS);
1678 
1679 	return QDF_STATUS_SUCCESS;
1680 }
1681 
1682 /**
1683  * dp_soc_set_interrupt_mode() - Set the interrupt mode in soc
1684  * @soc: DP soc handle
1685  *
1686  * Set the appropriate interrupt mode flag in the soc
1687  */
1688 static void dp_soc_set_interrupt_mode(struct dp_soc *soc)
1689 {
1690 	uint32_t msi_base_data, msi_vector_start;
1691 	int msi_vector_count, ret;
1692 
1693 	soc->intr_mode = DP_INTR_LEGACY;
1694 
1695 	if (!(soc->wlan_cfg_ctx->napi_enabled) ||
1696 	    (soc->cdp_soc.ol_ops->get_con_mode &&
1697 	     soc->cdp_soc.ol_ops->get_con_mode() == QDF_GLOBAL_MONITOR_MODE)) {
1698 		soc->intr_mode = DP_INTR_POLL;
1699 	} else {
1700 		ret = pld_get_user_msi_assignment(soc->osdev->dev, "DP",
1701 						  &msi_vector_count,
1702 						  &msi_base_data,
1703 						  &msi_vector_start);
1704 		if (ret)
1705 			return;
1706 
1707 		soc->intr_mode = DP_INTR_MSI;
1708 	}
1709 }
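
/*
 * Net effect of the selection above: poll mode when NAPI is disabled or
 * the driver is in QDF_GLOBAL_MONITOR_MODE, MSI mode when a user MSI
 * assignment for "DP" is available, and legacy interrupts otherwise.
 */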
1710 
1711 static QDF_STATUS dp_soc_interrupt_attach(struct cdp_soc_t *txrx_soc);
1712 #if defined(DP_INTR_POLL_BOTH)
1713 /*
1714  * dp_soc_interrupt_attach_wrapper() - Register handlers for DP interrupts
1715  * @txrx_soc: DP SOC handle
1716  *
1717  * Call the appropriate attach function based on the mode of operation.
1718  * This is a WAR for enabling monitor mode.
1719  *
1720  * Return: 0 for success. nonzero for failure.
1721  */
1722 static QDF_STATUS dp_soc_interrupt_attach_wrapper(struct cdp_soc_t *txrx_soc)
1723 {
1724 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
1725 
1726 	if (!(soc->wlan_cfg_ctx->napi_enabled) ||
1727 	    (soc->cdp_soc.ol_ops->get_con_mode &&
1728 	     soc->cdp_soc.ol_ops->get_con_mode() ==
1729 	     QDF_GLOBAL_MONITOR_MODE)) {
1730 		dp_info("Poll mode");
1731 		return dp_soc_attach_poll(txrx_soc);
1732 	} else {
1733 		dp_info("Interrupt mode");
1734 		return dp_soc_interrupt_attach(txrx_soc);
1735 	}
1736 }
1737 #else
1738 #if defined(DP_INTR_POLL_BASED) && DP_INTR_POLL_BASED
1739 static QDF_STATUS dp_soc_interrupt_attach_wrapper(struct cdp_soc_t *txrx_soc)
1740 {
1741 	return dp_soc_attach_poll(txrx_soc);
1742 }
1743 #else
1744 static QDF_STATUS dp_soc_interrupt_attach_wrapper(struct cdp_soc_t *txrx_soc)
1745 {
1746 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
1747 
1748 	if (hif_is_polled_mode_enabled(soc->hif_handle))
1749 		return dp_soc_attach_poll(txrx_soc);
1750 	else
1751 		return dp_soc_interrupt_attach(txrx_soc);
1752 }
1753 #endif
1754 #endif
1755 
1756 static void dp_soc_interrupt_map_calculate_integrated(struct dp_soc *soc,
1757 		int intr_ctx_num, int *irq_id_map, int *num_irq_r)
1758 {
1759 	int j;
1760 	int num_irq = 0;
1761 
1762 	int tx_mask =
1763 		wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
1764 	int rx_mask =
1765 		wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
1766 	int rx_mon_mask =
1767 		wlan_cfg_get_rx_mon_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
1768 	int rx_err_ring_mask = wlan_cfg_get_rx_err_ring_mask(
1769 					soc->wlan_cfg_ctx, intr_ctx_num);
1770 	int rx_wbm_rel_ring_mask = wlan_cfg_get_rx_wbm_rel_ring_mask(
1771 					soc->wlan_cfg_ctx, intr_ctx_num);
1772 	int reo_status_ring_mask = wlan_cfg_get_reo_status_ring_mask(
1773 					soc->wlan_cfg_ctx, intr_ctx_num);
1774 	int rxdma2host_ring_mask = wlan_cfg_get_rxdma2host_ring_mask(
1775 					soc->wlan_cfg_ctx, intr_ctx_num);
1776 	int host2rxdma_ring_mask = wlan_cfg_get_host2rxdma_ring_mask(
1777 					soc->wlan_cfg_ctx, intr_ctx_num);
1778 	int host2rxdma_mon_ring_mask = wlan_cfg_get_host2rxdma_mon_ring_mask(
1779 					soc->wlan_cfg_ctx, intr_ctx_num);
1780 
1781 	soc->intr_mode = DP_INTR_LEGACY;
1782 
1783 	for (j = 0; j < HIF_MAX_GRP_IRQ; j++) {
1784 
1785 		if (tx_mask & (1 << j)) {
1786 			irq_id_map[num_irq++] =
1787 				(wbm2host_tx_completions_ring1 - j);
1788 		}
1789 
1790 		if (rx_mask & (1 << j)) {
1791 			irq_id_map[num_irq++] =
1792 				(reo2host_destination_ring1 - j);
1793 		}
1794 
1795 		if (rxdma2host_ring_mask & (1 << j)) {
1796 			irq_id_map[num_irq++] =
1797 				rxdma2host_destination_ring_mac1 -
1798 				wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, j);
1799 		}
1800 
1801 		if (host2rxdma_ring_mask & (1 << j)) {
1802 			irq_id_map[num_irq++] =
1803 				host2rxdma_host_buf_ring_mac1 -
1804 				wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, j);
1805 		}
1806 
1807 		if (host2rxdma_mon_ring_mask & (1 << j)) {
1808 			irq_id_map[num_irq++] =
1809 				host2rxdma_monitor_ring1 -
1810 				wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, j);
1811 		}
1812 
1813 		if (rx_mon_mask & (1 << j)) {
1814 			irq_id_map[num_irq++] =
1815 				ppdu_end_interrupts_mac1 -
1816 				wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, j);
1817 			irq_id_map[num_irq++] =
1818 				rxdma2host_monitor_status_ring_mac1 -
1819 				wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, j);
1820 		}
1821 
1822 		if (rx_wbm_rel_ring_mask & (1 << j))
1823 			irq_id_map[num_irq++] = wbm2host_rx_release;
1824 
1825 		if (rx_err_ring_mask & (1 << j))
1826 			irq_id_map[num_irq++] = reo2host_exception;
1827 
1828 		if (reo_status_ring_mask & (1 << j))
1829 			irq_id_map[num_irq++] = reo2host_status;
1830 
1831 	}
1832 	*num_irq_r = num_irq;
1833 }
1834 
1835 static void dp_soc_interrupt_map_calculate_msi(struct dp_soc *soc,
1836 		int intr_ctx_num, int *irq_id_map, int *num_irq_r,
1837 		int msi_vector_count, int msi_vector_start)
1838 {
1839 	int tx_mask = wlan_cfg_get_tx_ring_mask(
1840 					soc->wlan_cfg_ctx, intr_ctx_num);
1841 	int rx_mask = wlan_cfg_get_rx_ring_mask(
1842 					soc->wlan_cfg_ctx, intr_ctx_num);
1843 	int rx_mon_mask = wlan_cfg_get_rx_mon_ring_mask(
1844 					soc->wlan_cfg_ctx, intr_ctx_num);
1845 	int rx_err_ring_mask = wlan_cfg_get_rx_err_ring_mask(
1846 					soc->wlan_cfg_ctx, intr_ctx_num);
1847 	int rx_wbm_rel_ring_mask = wlan_cfg_get_rx_wbm_rel_ring_mask(
1848 					soc->wlan_cfg_ctx, intr_ctx_num);
1849 	int reo_status_ring_mask = wlan_cfg_get_reo_status_ring_mask(
1850 					soc->wlan_cfg_ctx, intr_ctx_num);
1851 	int rxdma2host_ring_mask = wlan_cfg_get_rxdma2host_ring_mask(
1852 					soc->wlan_cfg_ctx, intr_ctx_num);
1853 
1854 	unsigned int vector =
1855 		(intr_ctx_num % msi_vector_count) + msi_vector_start;
1856 	int num_irq = 0;
1857 
1858 	soc->intr_mode = DP_INTR_MSI;
1859 
1860 	if (tx_mask | rx_mask | rx_mon_mask | rx_err_ring_mask |
1861 	    rx_wbm_rel_ring_mask | reo_status_ring_mask | rxdma2host_ring_mask)
1862 		irq_id_map[num_irq++] =
1863 			pld_get_msi_irq(soc->osdev->dev, vector);
1864 
1865 	*num_irq_r = num_irq;
1866 }
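
/*
 * Illustration of the MSI vector assignment above, with hypothetical
 * values: if msi_vector_count = 3 and msi_vector_start = 1, interrupt
 * contexts 0..6 map to vectors 1, 2, 3, 1, 2, 3, 1, i.e.
 * vector = (intr_ctx_num % 3) + 1, so all grouped rings of a given
 * context share a single MSI vector.
 */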
1867 
1868 static void dp_soc_interrupt_map_calculate(struct dp_soc *soc, int intr_ctx_num,
1869 				    int *irq_id_map, int *num_irq)
1870 {
1871 	int msi_vector_count, ret;
1872 	uint32_t msi_base_data, msi_vector_start;
1873 
1874 	ret = pld_get_user_msi_assignment(soc->osdev->dev, "DP",
1875 					    &msi_vector_count,
1876 					    &msi_base_data,
1877 					    &msi_vector_start);
	if (ret)
		dp_soc_interrupt_map_calculate_integrated(soc,
				intr_ctx_num, irq_id_map, num_irq);
	else
		dp_soc_interrupt_map_calculate_msi(soc,
				intr_ctx_num, irq_id_map, num_irq,
				msi_vector_count, msi_vector_start);
1886 }
1887 
1888 /*
1889  * dp_soc_interrupt_attach() - Register handlers for DP interrupts
1890  * @txrx_soc: DP SOC handle
1891  *
1892  * Host driver will register for “DP_NUM_INTERRUPT_CONTEXTS” number of NAPI
 * Host driver will register for "DP_NUM_INTERRUPT_CONTEXTS" number of NAPI
 * contexts. Each NAPI context will have a tx_ring_mask, rx_ring_mask, and
 * rx_monitor_ring mask to indicate the rings that are processed by the handler.
1896  * Return: 0 for success. nonzero for failure.
1897  */
1898 static QDF_STATUS dp_soc_interrupt_attach(struct cdp_soc_t *txrx_soc)
1899 {
1900 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
1901 
1902 	int i = 0;
1903 	int num_irq = 0;
1904 
1905 	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
1906 		int ret = 0;
1907 
1908 		/* Map of IRQ ids registered with one interrupt context */
1909 		int irq_id_map[HIF_MAX_GRP_IRQ];
1910 
1911 		int tx_mask =
1912 			wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, i);
1913 		int rx_mask =
1914 			wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, i);
1915 		int rx_mon_mask =
1916 			dp_soc_get_mon_mask_for_interrupt_mode(soc, i);
1917 		int rx_err_ring_mask =
1918 			wlan_cfg_get_rx_err_ring_mask(soc->wlan_cfg_ctx, i);
1919 		int rx_wbm_rel_ring_mask =
1920 			wlan_cfg_get_rx_wbm_rel_ring_mask(soc->wlan_cfg_ctx, i);
1921 		int reo_status_ring_mask =
1922 			wlan_cfg_get_reo_status_ring_mask(soc->wlan_cfg_ctx, i);
1923 		int rxdma2host_ring_mask =
1924 			wlan_cfg_get_rxdma2host_ring_mask(soc->wlan_cfg_ctx, i);
1925 		int host2rxdma_ring_mask =
1926 			wlan_cfg_get_host2rxdma_ring_mask(soc->wlan_cfg_ctx, i);
1927 		int host2rxdma_mon_ring_mask =
1928 			wlan_cfg_get_host2rxdma_mon_ring_mask(
1929 				soc->wlan_cfg_ctx, i);
1930 
1931 		soc->intr_ctx[i].dp_intr_id = i;
1932 		soc->intr_ctx[i].tx_ring_mask = tx_mask;
1933 		soc->intr_ctx[i].rx_ring_mask = rx_mask;
1934 		soc->intr_ctx[i].rx_mon_ring_mask = rx_mon_mask;
1935 		soc->intr_ctx[i].rx_err_ring_mask = rx_err_ring_mask;
1936 		soc->intr_ctx[i].rxdma2host_ring_mask = rxdma2host_ring_mask;
1937 		soc->intr_ctx[i].host2rxdma_ring_mask = host2rxdma_ring_mask;
1938 		soc->intr_ctx[i].rx_wbm_rel_ring_mask = rx_wbm_rel_ring_mask;
1939 		soc->intr_ctx[i].reo_status_ring_mask = reo_status_ring_mask;
1940 		soc->intr_ctx[i].host2rxdma_mon_ring_mask =
1941 			 host2rxdma_mon_ring_mask;
1942 
1943 		soc->intr_ctx[i].soc = soc;
1944 
1945 		num_irq = 0;
1946 
1947 		dp_soc_interrupt_map_calculate(soc, i, &irq_id_map[0],
1948 					       &num_irq);
1949 
1950 		ret = hif_register_ext_group(soc->hif_handle,
1951 				num_irq, irq_id_map, dp_service_srngs,
1952 				&soc->intr_ctx[i], "dp_intr",
1953 				HIF_EXEC_NAPI_TYPE, QCA_NAPI_DEF_SCALE_BIN_SHIFT);
1954 
1955 		if (ret) {
1956 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1957 			FL("failed, ret = %d"), ret);
1958 
1959 			return QDF_STATUS_E_FAILURE;
1960 		}
1961 		soc->intr_ctx[i].lro_ctx = qdf_lro_init();
1962 	}
1963 
1964 	hif_configure_ext_group_interrupts(soc->hif_handle);
1965 
1966 	return QDF_STATUS_SUCCESS;
1967 }
1968 
1969 /*
1970  * dp_soc_interrupt_detach() - Deregister any allocations done for interrupts
1971  * @txrx_soc: DP SOC handle
1972  *
1973  * Return: none
1974  */
1975 static void dp_soc_interrupt_detach(struct cdp_soc_t *txrx_soc)
1976 {
1977 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
1978 	int i;
1979 
1980 	if (soc->intr_mode == DP_INTR_POLL) {
1981 		qdf_timer_stop(&soc->int_timer);
1982 		qdf_timer_free(&soc->int_timer);
1983 	} else {
1984 		hif_deregister_exec_group(soc->hif_handle, "dp_intr");
1985 	}
1986 
1987 	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
1988 		soc->intr_ctx[i].tx_ring_mask = 0;
1989 		soc->intr_ctx[i].rx_ring_mask = 0;
1990 		soc->intr_ctx[i].rx_mon_ring_mask = 0;
1991 		soc->intr_ctx[i].rx_err_ring_mask = 0;
1992 		soc->intr_ctx[i].rx_wbm_rel_ring_mask = 0;
1993 		soc->intr_ctx[i].reo_status_ring_mask = 0;
1994 		soc->intr_ctx[i].rxdma2host_ring_mask = 0;
1995 		soc->intr_ctx[i].host2rxdma_ring_mask = 0;
1996 		soc->intr_ctx[i].host2rxdma_mon_ring_mask = 0;
1997 
1998 		qdf_lro_deinit(soc->intr_ctx[i].lro_ctx);
1999 	}
2000 }
2001 
2002 #define AVG_MAX_MPDUS_PER_TID 128
2003 #define AVG_TIDS_PER_CLIENT 2
2004 #define AVG_FLOWS_PER_TID 2
2005 #define AVG_MSDUS_PER_FLOW 128
2006 #define AVG_MSDUS_PER_MPDU 4
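
/*
 * Worked sizing example for dp_hw_link_desc_pool_setup() below, using
 * hypothetical target values (max_clients = 64,
 * num_mpdus_per_link_desc = 6):
 * num_mpdu_link_descs = (64 * AVG_TIDS_PER_CLIENT *
 * AVG_MAX_MPDUS_PER_TID) / 6 = (64 * 2 * 128) / 6 = 2730.
 * The sum of all such descriptor counts is rounded up to the next
 * power of 2 before the pool is carved into banks of max_alloc_size.
 */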
2007 
2008 /*
 * Allocate and set up the link descriptor pool that will be used by HW
 * for various link and queue descriptors, managed by WBM
2011  */
2012 static int dp_hw_link_desc_pool_setup(struct dp_soc *soc)
2013 {
2014 	int link_desc_size = hal_get_link_desc_size(soc->hal_soc);
2015 	int link_desc_align = hal_get_link_desc_align(soc->hal_soc);
2016 	uint32_t max_clients = wlan_cfg_get_max_clients(soc->wlan_cfg_ctx);
2017 	uint32_t num_mpdus_per_link_desc =
2018 		hal_num_mpdus_per_link_desc(soc->hal_soc);
2019 	uint32_t num_msdus_per_link_desc =
2020 		hal_num_msdus_per_link_desc(soc->hal_soc);
2021 	uint32_t num_mpdu_links_per_queue_desc =
2022 		hal_num_mpdu_links_per_queue_desc(soc->hal_soc);
2023 	uint32_t max_alloc_size = wlan_cfg_max_alloc_size(soc->wlan_cfg_ctx);
2024 	uint32_t total_link_descs, total_mem_size;
2025 	uint32_t num_mpdu_link_descs, num_mpdu_queue_descs;
2026 	uint32_t num_tx_msdu_link_descs, num_rx_msdu_link_descs;
2027 	uint32_t num_link_desc_banks;
2028 	uint32_t last_bank_size = 0;
2029 	uint32_t entry_size, num_entries;
2030 	int i;
2031 	uint32_t desc_id = 0;
2032 	qdf_dma_addr_t *baseaddr = NULL;
2033 
	/* Only Tx queue descriptors are allocated from the common link
	 * descriptor pool. Rx queue descriptors (REO queue extension
	 * descriptors) are not included here because they are expected to
	 * be allocated contiguously with the REO queue descriptors
2038 	 */
2039 	num_mpdu_link_descs = (max_clients * AVG_TIDS_PER_CLIENT *
2040 		AVG_MAX_MPDUS_PER_TID) / num_mpdus_per_link_desc;
2041 
2042 	num_mpdu_queue_descs = num_mpdu_link_descs /
2043 		num_mpdu_links_per_queue_desc;
2044 
2045 	num_tx_msdu_link_descs = (max_clients * AVG_TIDS_PER_CLIENT *
2046 		AVG_FLOWS_PER_TID * AVG_MSDUS_PER_FLOW) /
2047 		num_msdus_per_link_desc;
2048 
2049 	num_rx_msdu_link_descs = (max_clients * AVG_TIDS_PER_CLIENT *
2050 		AVG_MAX_MPDUS_PER_TID * AVG_MSDUS_PER_MPDU) / 6;
2051 
2052 	num_entries = num_mpdu_link_descs + num_mpdu_queue_descs +
2053 		num_tx_msdu_link_descs + num_rx_msdu_link_descs;
2054 
2055 	/* Round up to power of 2 */
2056 	total_link_descs = 1;
2057 	while (total_link_descs < num_entries)
2058 		total_link_descs <<= 1;
2059 
2060 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
2061 		FL("total_link_descs: %u, link_desc_size: %d"),
2062 		total_link_descs, link_desc_size);
	total_mem_size = total_link_descs * link_desc_size;
2064 
2065 	total_mem_size += link_desc_align;
2066 
2067 	if (total_mem_size <= max_alloc_size) {
2068 		num_link_desc_banks = 0;
2069 		last_bank_size = total_mem_size;
2070 	} else {
2071 		num_link_desc_banks = (total_mem_size) /
2072 			(max_alloc_size - link_desc_align);
2073 		last_bank_size = total_mem_size %
2074 			(max_alloc_size - link_desc_align);
2075 	}
2076 
2077 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
2078 		FL("total_mem_size: %d, num_link_desc_banks: %u"),
2079 		total_mem_size, num_link_desc_banks);
2080 
2081 	for (i = 0; i < num_link_desc_banks; i++) {
2082 		if (!dp_is_soc_reinit(soc)) {
2083 			baseaddr = &soc->link_desc_banks[i].
2084 					base_paddr_unaligned;
2085 			soc->link_desc_banks[i].base_vaddr_unaligned =
2086 				qdf_mem_alloc_consistent(soc->osdev,
2087 							 soc->osdev->dev,
2088 							 max_alloc_size,
2089 							 baseaddr);
2090 		}
2091 		soc->link_desc_banks[i].size = max_alloc_size;
2092 
2093 		soc->link_desc_banks[i].base_vaddr = (void *)((unsigned long)(
2094 			soc->link_desc_banks[i].base_vaddr_unaligned) +
2095 			((unsigned long)(
2096 			soc->link_desc_banks[i].base_vaddr_unaligned) %
2097 			link_desc_align));
2098 
2099 		soc->link_desc_banks[i].base_paddr = (unsigned long)(
2100 			soc->link_desc_banks[i].base_paddr_unaligned) +
2101 			((unsigned long)(soc->link_desc_banks[i].base_vaddr) -
2102 			(unsigned long)(
2103 			soc->link_desc_banks[i].base_vaddr_unaligned));
2104 
2105 		if (!soc->link_desc_banks[i].base_vaddr_unaligned) {
2106 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2107 				FL("Link descriptor memory alloc failed"));
2108 			goto fail;
2109 		}
		if (!dp_is_soc_reinit(soc)) {
			qdf_minidump_log(soc->link_desc_banks[i].base_vaddr,
					 soc->link_desc_banks[i].size,
					 "link_desc_bank");
		}
2118 	}
2119 
2120 	if (last_bank_size) {
		/* Allocate the last bank in case the total memory required is
		 * not an exact multiple of max_alloc_size
2123 		 */
2124 		if (!dp_is_soc_reinit(soc)) {
2125 			baseaddr = &soc->link_desc_banks[i].
2126 					base_paddr_unaligned;
2127 			soc->link_desc_banks[i].base_vaddr_unaligned =
2128 				qdf_mem_alloc_consistent(soc->osdev,
2129 							 soc->osdev->dev,
2130 							 last_bank_size,
2131 							 baseaddr);
2132 		}
2133 		soc->link_desc_banks[i].size = last_bank_size;
2134 
2135 		soc->link_desc_banks[i].base_vaddr = (void *)((unsigned long)
2136 			(soc->link_desc_banks[i].base_vaddr_unaligned) +
2137 			((unsigned long)(
2138 			soc->link_desc_banks[i].base_vaddr_unaligned) %
2139 			link_desc_align));
2140 
2141 		soc->link_desc_banks[i].base_paddr =
2142 			(unsigned long)(
2143 			soc->link_desc_banks[i].base_paddr_unaligned) +
2144 			((unsigned long)(soc->link_desc_banks[i].base_vaddr) -
2145 			(unsigned long)(
2146 			soc->link_desc_banks[i].base_vaddr_unaligned));
2147 
		if (!dp_is_soc_reinit(soc)) {
			qdf_minidump_log(soc->link_desc_banks[i].base_vaddr,
					 soc->link_desc_banks[i].size,
					 "link_desc_bank");
		}
2156 	}
2157 
2158 
2159 	/* Allocate and setup link descriptor idle list for HW internal use */
2160 	entry_size = hal_srng_get_entrysize(soc->hal_soc, WBM_IDLE_LINK);
2161 	total_mem_size = entry_size * total_link_descs;
2162 
2163 	if (total_mem_size <= max_alloc_size) {
2164 		void *desc;
2165 
2166 		if (dp_srng_setup(soc, &soc->wbm_idle_link_ring,
2167 				  WBM_IDLE_LINK, 0, 0, total_link_descs, 0)) {
2168 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2169 				FL("Link desc idle ring setup failed"));
2170 			goto fail;
2171 		}
2172 
2173 		qdf_minidump_log(soc->wbm_idle_link_ring.base_vaddr_unaligned,
2174 				 soc->wbm_idle_link_ring.alloc_size,
2175 				 "wbm_idle_link_ring");
2176 
2177 		hal_srng_access_start_unlocked(soc->hal_soc,
2178 			soc->wbm_idle_link_ring.hal_srng);
2179 
2180 		for (i = 0; i < MAX_LINK_DESC_BANKS &&
2181 			soc->link_desc_banks[i].base_paddr; i++) {
2182 			uint32_t num_entries = (soc->link_desc_banks[i].size -
2183 				((unsigned long)(
2184 				soc->link_desc_banks[i].base_vaddr) -
2185 				(unsigned long)(
2186 				soc->link_desc_banks[i].base_vaddr_unaligned)))
2187 				/ link_desc_size;
2188 			unsigned long paddr = (unsigned long)(
2189 				soc->link_desc_banks[i].base_paddr);
2190 
2191 			while (num_entries && (desc = hal_srng_src_get_next(
2192 				soc->hal_soc,
2193 				soc->wbm_idle_link_ring.hal_srng))) {
2194 				hal_set_link_desc_addr(desc,
2195 					LINK_DESC_COOKIE(desc_id, i), paddr);
2196 				num_entries--;
2197 				desc_id++;
2198 				paddr += link_desc_size;
2199 			}
2200 		}
2201 		hal_srng_access_end_unlocked(soc->hal_soc,
2202 			soc->wbm_idle_link_ring.hal_srng);
2203 	} else {
2204 		uint32_t num_scatter_bufs;
2205 		uint32_t num_entries_per_buf;
2206 		uint32_t rem_entries;
2207 		uint8_t *scatter_buf_ptr;
2208 		uint16_t scatter_buf_num;
2209 		uint32_t buf_size = 0;
2210 
2211 		soc->wbm_idle_scatter_buf_size =
2212 			hal_idle_list_scatter_buf_size(soc->hal_soc);
2213 		num_entries_per_buf = hal_idle_scatter_buf_num_entries(
2214 			soc->hal_soc, soc->wbm_idle_scatter_buf_size);
2215 		num_scatter_bufs = hal_idle_list_num_scatter_bufs(
2216 					soc->hal_soc, total_mem_size,
2217 					soc->wbm_idle_scatter_buf_size);
2218 
2219 		if (num_scatter_bufs > MAX_IDLE_SCATTER_BUFS) {
2220 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
					FL("num_scatter_bufs exceeds MAX_IDLE_SCATTER_BUFS"));
2222 			goto fail;
2223 		}
2224 
2225 		for (i = 0; i < num_scatter_bufs; i++) {
2226 			baseaddr = &soc->wbm_idle_scatter_buf_base_paddr[i];
2227 			if (!dp_is_soc_reinit(soc)) {
2228 				buf_size = soc->wbm_idle_scatter_buf_size;
2229 				soc->wbm_idle_scatter_buf_base_vaddr[i] =
2230 					qdf_mem_alloc_consistent(soc->osdev,
2231 								 soc->osdev->
2232 								 dev,
2233 								 buf_size,
2234 								 baseaddr);
2235 			}
2236 			if (!soc->wbm_idle_scatter_buf_base_vaddr[i]) {
2237 				QDF_TRACE(QDF_MODULE_ID_DP,
2238 					  QDF_TRACE_LEVEL_ERROR,
2239 					  FL("Scatter lst memory alloc fail"));
2240 				goto fail;
2241 			}
2242 		}
2243 
2244 		/* Populate idle list scatter buffers with link descriptor
2245 		 * pointers
2246 		 */
2247 		scatter_buf_num = 0;
2248 		scatter_buf_ptr = (uint8_t *)(
2249 			soc->wbm_idle_scatter_buf_base_vaddr[scatter_buf_num]);
2250 		rem_entries = num_entries_per_buf;
2251 
2252 		for (i = 0; i < MAX_LINK_DESC_BANKS &&
2253 			soc->link_desc_banks[i].base_paddr; i++) {
2254 			uint32_t num_link_descs =
2255 				(soc->link_desc_banks[i].size -
2256 				((unsigned long)(
2257 				soc->link_desc_banks[i].base_vaddr) -
2258 				(unsigned long)(
2259 				soc->link_desc_banks[i].base_vaddr_unaligned)))
2260 				/ link_desc_size;
2261 			unsigned long paddr = (unsigned long)(
2262 				soc->link_desc_banks[i].base_paddr);
2263 
2264 			while (num_link_descs) {
2265 				hal_set_link_desc_addr((void *)scatter_buf_ptr,
2266 					LINK_DESC_COOKIE(desc_id, i), paddr);
2267 				num_link_descs--;
2268 				desc_id++;
2269 				paddr += link_desc_size;
2270 				rem_entries--;
2271 				if (rem_entries) {
2272 					scatter_buf_ptr += entry_size;
2273 				} else {
2274 					rem_entries = num_entries_per_buf;
2275 					scatter_buf_num++;
2276 
2277 					if (scatter_buf_num >= num_scatter_bufs)
2278 						break;
2279 
2280 					scatter_buf_ptr = (uint8_t *)(
2281 						soc->wbm_idle_scatter_buf_base_vaddr[
2282 						scatter_buf_num]);
2283 				}
2284 			}
2285 		}
2286 		/* Setup link descriptor idle list in HW */
2287 		hal_setup_link_idle_list(soc->hal_soc,
2288 			soc->wbm_idle_scatter_buf_base_paddr,
2289 			soc->wbm_idle_scatter_buf_base_vaddr,
2290 			num_scatter_bufs, soc->wbm_idle_scatter_buf_size,
2291 			(uint32_t)(scatter_buf_ptr -
2292 			(uint8_t *)(soc->wbm_idle_scatter_buf_base_vaddr[
2293 			scatter_buf_num-1])), total_link_descs);
2294 	}
2295 	return 0;
2296 
2297 fail:
2298 	if (soc->wbm_idle_link_ring.hal_srng) {
2299 		dp_srng_cleanup(soc, &soc->wbm_idle_link_ring,
2300 				WBM_IDLE_LINK, 0);
2301 	}
2302 
2303 	for (i = 0; i < MAX_IDLE_SCATTER_BUFS; i++) {
2304 		if (soc->wbm_idle_scatter_buf_base_vaddr[i]) {
2305 			qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
2306 				soc->wbm_idle_scatter_buf_size,
2307 				soc->wbm_idle_scatter_buf_base_vaddr[i],
2308 				soc->wbm_idle_scatter_buf_base_paddr[i], 0);
2309 			soc->wbm_idle_scatter_buf_base_vaddr[i] = NULL;
2310 		}
2311 	}
2312 
2313 	for (i = 0; i < MAX_LINK_DESC_BANKS; i++) {
2314 		if (soc->link_desc_banks[i].base_vaddr_unaligned) {
2315 			qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
2316 				soc->link_desc_banks[i].size,
2317 				soc->link_desc_banks[i].base_vaddr_unaligned,
2318 				soc->link_desc_banks[i].base_paddr_unaligned,
2319 				0);
2320 			soc->link_desc_banks[i].base_vaddr_unaligned = NULL;
2321 		}
2322 	}
2323 	return QDF_STATUS_E_FAILURE;
2324 }
2325 
2326 /*
 * Free the link descriptor pool that was set up for HW use
2328  */
2329 static void dp_hw_link_desc_pool_cleanup(struct dp_soc *soc)
2330 {
2331 	int i;
2332 
2333 	if (soc->wbm_idle_link_ring.hal_srng) {
2334 		qdf_minidump_remove(
2335 			soc->wbm_idle_link_ring.base_vaddr_unaligned);
2336 		dp_srng_cleanup(soc, &soc->wbm_idle_link_ring,
2337 			WBM_IDLE_LINK, 0);
2338 	}
2339 
2340 	for (i = 0; i < MAX_IDLE_SCATTER_BUFS; i++) {
2341 		if (soc->wbm_idle_scatter_buf_base_vaddr[i]) {
2342 			qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
2343 				soc->wbm_idle_scatter_buf_size,
2344 				soc->wbm_idle_scatter_buf_base_vaddr[i],
2345 				soc->wbm_idle_scatter_buf_base_paddr[i], 0);
2346 			soc->wbm_idle_scatter_buf_base_vaddr[i] = NULL;
2347 		}
2348 	}
2349 
2350 	for (i = 0; i < MAX_LINK_DESC_BANKS; i++) {
2351 		if (soc->link_desc_banks[i].base_vaddr_unaligned) {
2352 			qdf_minidump_remove(soc->link_desc_banks[i].base_vaddr);
2353 			qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
2354 				soc->link_desc_banks[i].size,
2355 				soc->link_desc_banks[i].base_vaddr_unaligned,
2356 				soc->link_desc_banks[i].base_paddr_unaligned,
2357 				0);
2358 			soc->link_desc_banks[i].base_vaddr_unaligned = NULL;
2359 		}
2360 	}
2361 }
2362 
2363 #ifdef IPA_OFFLOAD
2364 #define REO_DST_RING_SIZE_QCA6290 1023
2365 #ifndef QCA_WIFI_QCA8074_VP
2366 #define REO_DST_RING_SIZE_QCA8074 1023
2367 #define REO_DST_RING_SIZE_QCN9000 2048
2368 #else
2369 #define REO_DST_RING_SIZE_QCA8074 8
2370 #define REO_DST_RING_SIZE_QCN9000 8
2371 #endif /* QCA_WIFI_QCA8074_VP */
2372 
2373 #else
2374 
2375 #define REO_DST_RING_SIZE_QCA6290 1024
2376 #ifndef QCA_WIFI_QCA8074_VP
2377 #define REO_DST_RING_SIZE_QCA8074 2048
2378 #define REO_DST_RING_SIZE_QCN9000 2048
2379 #else
2380 #define REO_DST_RING_SIZE_QCA8074 8
2381 #define REO_DST_RING_SIZE_QCN9000 8
2382 #endif /* QCA_WIFI_QCA8074_VP */
2383 #endif /* IPA_OFFLOAD */
2384 
2385 #ifndef FEATURE_WDS
2386 static void dp_soc_wds_attach(struct dp_soc *soc)
2387 {
2388 }
2389 
2390 static void dp_soc_wds_detach(struct dp_soc *soc)
2391 {
2392 }
2393 #endif
2394 /*
 * dp_soc_reset_cpu_ring_map() - Reset cpu ring map
 * @soc: Datapath soc handle
 *
 * This API resets the default cpu ring map
 */
2401 static void dp_soc_reset_cpu_ring_map(struct dp_soc *soc)
2402 {
2403 	uint8_t i;
2404 	int nss_config = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
2405 
2406 	for (i = 0; i < WLAN_CFG_INT_NUM_CONTEXTS; i++) {
2407 		switch (nss_config) {
2408 		case dp_nss_cfg_first_radio:
2409 			/*
2410 			 * Setting Tx ring map for one nss offloaded radio
2411 			 */
			soc->tx_ring_map[i] =
				dp_cpu_ring_map[DP_NSS_FIRST_RADIO_OFFLOADED_MAP][i];
2413 			break;
2414 
2415 		case dp_nss_cfg_second_radio:
2416 			/*
			 * Setting Tx ring map when the second radio is
			 * NSS offloaded
2418 			 */
			soc->tx_ring_map[i] =
				dp_cpu_ring_map[DP_NSS_SECOND_RADIO_OFFLOADED_MAP][i];
2420 			break;
2421 
2422 		case dp_nss_cfg_dbdc:
2423 			/*
2424 			 * Setting Tx ring map for 2 nss offloaded radios
2425 			 */
2426 			soc->tx_ring_map[i] =
2427 				dp_cpu_ring_map[DP_NSS_DBDC_OFFLOADED_MAP][i];
2428 			break;
2429 
2430 		case dp_nss_cfg_dbtc:
2431 			/*
2432 			 * Setting Tx ring map for 3 nss offloaded radios
2433 			 */
2434 			soc->tx_ring_map[i] =
2435 				dp_cpu_ring_map[DP_NSS_DBTC_OFFLOADED_MAP][i];
2436 			break;
2437 
2438 		default:
2439 			dp_err("tx_ring_map failed due to invalid nss cfg");
2440 			break;
2441 		}
2442 	}
2443 }
2444 
2445 /*
2446  * dp_soc_ring_if_nss_offloaded() - find if ring is offloaded to NSS
 * @soc - DP soc handle
 * @ring_type - ring type
 * @ring_num - ring number
 *
 * Return: 1 if the ring is offloaded to NSS, 0 otherwise
2452  */
static uint8_t dp_soc_ring_if_nss_offloaded(struct dp_soc *soc,
					    enum hal_ring_type ring_type,
					    int ring_num)
2454 {
2455 	uint8_t nss_config = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
2456 	uint8_t status = 0;
2457 
2458 	switch (ring_type) {
2459 	case WBM2SW_RELEASE:
2460 	case REO_DST:
2461 	case RXDMA_BUF:
2462 		status = ((nss_config) & (1 << ring_num));
2463 		break;
2464 	default:
2465 		break;
2466 	}
2467 
2468 	return status;
2469 }
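
/*
 * Example of the bitmask check above: with nss_config = 0x3 (both
 * radios offloaded, as in the dbdc case), REO_DST rings 0 and 1
 * report as offloaded since (0x3 & (1 << 0)) and (0x3 & (1 << 1)) are
 * nonzero, while ring 2 does not.
 */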
2470 
2471 /*
2472  * dp_soc_disable_mac2_intr_mask() - reset interrupt mask for WMAC2 hw rings
 * @soc - DP Soc handle
 *
 * Return: void
2476  */
2477 static void dp_soc_disable_mac2_intr_mask(struct dp_soc *soc)
2478 {
2479 	int *grp_mask = NULL;
2480 	int group_number;
2481 
2482 	grp_mask = &soc->wlan_cfg_ctx->int_host2rxdma_ring_mask[0];
2483 	group_number = dp_srng_find_ring_in_mask(0x2, grp_mask);
2484 	wlan_cfg_set_host2rxdma_ring_mask(soc->wlan_cfg_ctx,
2485 					  group_number, 0x0);
2486 
2487 	grp_mask = &soc->wlan_cfg_ctx->int_rx_mon_ring_mask[0];
2488 	group_number = dp_srng_find_ring_in_mask(0x2, grp_mask);
2489 	wlan_cfg_set_rx_mon_ring_mask(soc->wlan_cfg_ctx,
2490 				      group_number, 0x0);
2491 
2492 	grp_mask = &soc->wlan_cfg_ctx->int_rxdma2host_ring_mask[0];
2493 	group_number = dp_srng_find_ring_in_mask(0x2, grp_mask);
2494 	wlan_cfg_set_rxdma2host_ring_mask(soc->wlan_cfg_ctx,
2495 					  group_number, 0x0);
2496 
2497 	grp_mask = &soc->wlan_cfg_ctx->int_host2rxdma_mon_ring_mask[0];
2498 	group_number = dp_srng_find_ring_in_mask(0x2, grp_mask);
2499 	wlan_cfg_set_host2rxdma_mon_ring_mask(soc->wlan_cfg_ctx,
2500 					      group_number, 0x0);
2501 }
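
/*
 * In the helper above, dp_srng_find_ring_in_mask(0x2, grp_mask)
 * locates the interrupt group whose mask currently includes ring 2
 * (the WMAC2 ring) so that the corresponding mask can be cleared for
 * each of the four ring types.
 */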
2502 
2503 /*
2504  * dp_soc_reset_intr_mask() - reset interrupt mask
 * @soc - DP Soc handle
 *
 * Return: void
2508  */
2509 static void dp_soc_reset_intr_mask(struct dp_soc *soc)
2510 {
2511 	uint8_t j;
2512 	int *grp_mask = NULL;
2513 	int group_number, mask, num_ring;
2514 
2515 	/* number of tx ring */
2516 	num_ring = wlan_cfg_num_tcl_data_rings(soc->wlan_cfg_ctx);
2517 
2518 	/*
	 * group mask for tx completion ring.
2520 	 */
2521 	grp_mask =  &soc->wlan_cfg_ctx->int_tx_ring_mask[0];
	grp_mask = &soc->wlan_cfg_ctx->int_tx_ring_mask[0];
2523 	/* loop and reset the mask for only offloaded ring */
2524 	for (j = 0; j < num_ring; j++) {
2525 		if (!dp_soc_ring_if_nss_offloaded(soc, WBM2SW_RELEASE, j)) {
2526 			continue;
2527 		}
2528 
2529 		/*
2530 		 * Group number corresponding to tx offloaded ring.
2531 		 */
2532 		group_number = dp_srng_find_ring_in_mask(j, grp_mask);
2533 		if (group_number < 0) {
2534 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
					FL("ring not part of any group; ring_type: %d, ring_num %d"),
2536 					WBM2SW_RELEASE, j);
2537 			return;
2538 		}
2539 
2540 		/* reset the tx mask for offloaded ring */
2541 		mask = wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, group_number);
2542 		mask &= (~(1 << j));
2543 
2544 		/*
2545 		 * reset the interrupt mask for offloaded ring.
2546 		 */
2547 		wlan_cfg_set_tx_ring_mask(soc->wlan_cfg_ctx, group_number, mask);
2548 	}
2549 
2550 	/* number of rx rings */
2551 	num_ring = wlan_cfg_num_reo_dest_rings(soc->wlan_cfg_ctx);
2552 
2553 	/*
2554 	 * group mask for reo destination ring.
2555 	 */
2556 	grp_mask = &soc->wlan_cfg_ctx->int_rx_ring_mask[0];
2557 
2558 	/* loop and reset the mask for only offloaded ring */
2559 	for (j = 0; j < num_ring; j++) {
2560 		if (!dp_soc_ring_if_nss_offloaded(soc, REO_DST, j)) {
2561 			continue;
2562 		}
2563 
2564 		/*
2565 		 * Group number corresponding to rx offloaded ring.
2566 		 */
2567 		group_number = dp_srng_find_ring_in_mask(j, grp_mask);
2568 		if (group_number < 0) {
2569 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
					FL("ring not part of any group; ring_type: %d, ring_num %d"),
2571 					REO_DST, j);
2572 			return;
2573 		}
2574 
		/* clear the rx interrupt mask bit for the offloaded ring */
		mask = wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, group_number);
2577 		mask &= (~(1 << j));
2578 
2579 		/*
		 * write back the updated mask with the offloaded ring bit
		 * cleared.
2581 		 */
2582 		wlan_cfg_set_rx_ring_mask(soc->wlan_cfg_ctx, group_number, mask);
2583 	}
2584 
2585 	/*
2586 	 * group mask for Rx buffer refill ring
2587 	 */
2588 	grp_mask = &soc->wlan_cfg_ctx->int_host2rxdma_ring_mask[0];
2589 
2590 	/* loop and reset the mask for only offloaded ring */
2591 	for (j = 0; j < MAX_PDEV_CNT; j++) {
2592 		if (!dp_soc_ring_if_nss_offloaded(soc, RXDMA_BUF, j)) {
2593 			continue;
2594 		}
2595 
2596 		/*
2597 		 * Group number corresponding to rx offloaded ring.
2598 		 */
2599 		group_number = dp_srng_find_ring_in_mask(j, grp_mask);
2600 		if (group_number < 0) {
2601 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
					FL("ring not part of any group; ring_type: %d, ring_num %d"),
					RXDMA_BUF, j);
2604 			return;
2605 		}
2606 
		/* clear the host2rxdma interrupt mask bit for the offloaded ring */
		mask = wlan_cfg_get_host2rxdma_ring_mask(soc->wlan_cfg_ctx,
2609 				group_number);
2610 		mask &= (~(1 << j));
2611 
2612 		/*
		 * write back the updated mask with the offloaded ring bit
		 * cleared.
2614 		 */
2615 		wlan_cfg_set_host2rxdma_ring_mask(soc->wlan_cfg_ctx,
2616 			group_number, mask);
2617 	}
2618 }
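
/*
 * Worked example of the mask updates above, with hypothetical values:
 * if interrupt group 0 has tx_ring_mask = 0x7 (rings 0..2) and ring 1
 * is NSS offloaded, the group mask is rewritten as
 * 0x7 & ~(1 << 1) = 0x5, so the host no longer services ring 1.
 */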
2619 
2620 #ifdef IPA_OFFLOAD
2621 /**
 * dp_reo_remap_config() - configure reo remap register value based
 *                         on nss configuration.
 *		Based on the offload_radio value below, the remap
 *		configuration is applied:
 *		0 - both Radios handled by host (remap rings 1, 2, 3 & 4)
 *		1 - 1st Radio handled by NSS (remap rings 2, 3 & 4)
 *		2 - 2nd Radio handled by NSS (remap rings 1, 2 & 4)
 *		3 - both Radios handled by NSS (remap not required)
 *		4 - IPA OFFLOAD enabled (remap rings 1, 2 & 3)
 *
 * @soc: DP soc handle
 * @remap1: output parameter indicates reo remap 1 register value
 * @remap2: output parameter indicates reo remap 2 register value
 * Return: bool type, true if remap is configured else false.
2635  */
2636 bool dp_reo_remap_config(struct dp_soc *soc, uint32_t *remap1, uint32_t *remap2)
2637 {
2638 	*remap1 = HAL_REO_REMAP_IX2(REO_REMAP_SW1, 16) |
2639 		  HAL_REO_REMAP_IX2(REO_REMAP_SW2, 17) |
2640 		  HAL_REO_REMAP_IX2(REO_REMAP_SW3, 18) |
2641 		  HAL_REO_REMAP_IX2(REO_REMAP_SW1, 19) |
2642 		  HAL_REO_REMAP_IX2(REO_REMAP_SW2, 20) |
2643 		  HAL_REO_REMAP_IX2(REO_REMAP_SW3, 21) |
2644 		  HAL_REO_REMAP_IX2(REO_REMAP_SW1, 22) |
2645 		  HAL_REO_REMAP_IX2(REO_REMAP_SW2, 23);
2646 
2647 	*remap2 = HAL_REO_REMAP_IX3(REO_REMAP_SW3, 24) |
2648 		  HAL_REO_REMAP_IX3(REO_REMAP_SW1, 25) |
2649 		  HAL_REO_REMAP_IX3(REO_REMAP_SW2, 26) |
2650 		  HAL_REO_REMAP_IX3(REO_REMAP_SW3, 27) |
2651 		  HAL_REO_REMAP_IX3(REO_REMAP_SW1, 28) |
2652 		  HAL_REO_REMAP_IX3(REO_REMAP_SW2, 29) |
2653 		  HAL_REO_REMAP_IX3(REO_REMAP_SW3, 30) |
2654 		  HAL_REO_REMAP_IX3(REO_REMAP_SW1, 31);
2655 
2656 	dp_debug("remap1 %x remap2 %x", *remap1, *remap2);
2657 
2658 	return true;
2659 }
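
/*
 * Note on the remap encoding above (assuming the usual 3-bit
 * destination fields of the REO remap registers): remap1 (IX2) covers
 * hash indices 16-23 and remap2 (IX3) covers indices 24-31. In the
 * IPA case the indices round-robin over SW1..SW3 only, since the
 * REO2SW4 ring is reserved for the IPA RX path.
 */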
2660 #else
2661 static bool dp_reo_remap_config(struct dp_soc *soc,
2662 				uint32_t *remap1,
2663 				uint32_t *remap2)
2664 {
2665 	uint8_t offload_radio = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
2666 	uint8_t target_type;
2667 
2668 	target_type = hal_get_target_type(soc->hal_soc);
2669 
2670 	switch (offload_radio) {
2671 	case dp_nss_cfg_default:
2672 		*remap1 = HAL_REO_REMAP_IX2(REO_REMAP_SW1, 16) |
2673 			  HAL_REO_REMAP_IX2(REO_REMAP_SW2, 17) |
2674 			  HAL_REO_REMAP_IX2(REO_REMAP_SW3, 18) |
2675 			  HAL_REO_REMAP_IX2(REO_REMAP_SW4, 19) |
2676 			  HAL_REO_REMAP_IX2(REO_REMAP_SW1, 20) |
2677 			  HAL_REO_REMAP_IX2(REO_REMAP_SW2, 21) |
2678 			  HAL_REO_REMAP_IX2(REO_REMAP_SW3, 22) |
2679 			  HAL_REO_REMAP_IX2(REO_REMAP_SW4, 23);
2680 
2681 		*remap2 = HAL_REO_REMAP_IX3(REO_REMAP_SW1, 24) |
2682 			  HAL_REO_REMAP_IX3(REO_REMAP_SW2, 25) |
2683 			  HAL_REO_REMAP_IX3(REO_REMAP_SW3, 26) |
2684 			  HAL_REO_REMAP_IX3(REO_REMAP_SW4, 27) |
2685 			  HAL_REO_REMAP_IX3(REO_REMAP_SW1, 28) |
2686 			  HAL_REO_REMAP_IX3(REO_REMAP_SW2, 29) |
2687 			  HAL_REO_REMAP_IX3(REO_REMAP_SW3, 30) |
2688 			  HAL_REO_REMAP_IX3(REO_REMAP_SW4, 31);
2689 		break;
2690 	case dp_nss_cfg_first_radio:
2691 		*remap1 = HAL_REO_REMAP_IX2(REO_REMAP_SW2, 16) |
2692 			  HAL_REO_REMAP_IX2(REO_REMAP_SW3, 17) |
2693 			  HAL_REO_REMAP_IX2(REO_REMAP_SW4, 18) |
2694 			  HAL_REO_REMAP_IX2(REO_REMAP_SW2, 19) |
2695 			  HAL_REO_REMAP_IX2(REO_REMAP_SW3, 20) |
2696 			  HAL_REO_REMAP_IX2(REO_REMAP_SW4, 21) |
2697 			  HAL_REO_REMAP_IX2(REO_REMAP_SW2, 22) |
2698 			  HAL_REO_REMAP_IX2(REO_REMAP_SW3, 23);
2699 
2700 		*remap2 = HAL_REO_REMAP_IX3(REO_REMAP_SW4, 24) |
2701 			  HAL_REO_REMAP_IX3(REO_REMAP_SW2, 25) |
2702 			  HAL_REO_REMAP_IX3(REO_REMAP_SW3, 26) |
2703 			  HAL_REO_REMAP_IX3(REO_REMAP_SW4, 27) |
2704 			  HAL_REO_REMAP_IX3(REO_REMAP_SW2, 28) |
2705 			  HAL_REO_REMAP_IX3(REO_REMAP_SW3, 29) |
2706 			  HAL_REO_REMAP_IX3(REO_REMAP_SW4, 30) |
2707 			  HAL_REO_REMAP_IX3(REO_REMAP_SW2, 31);
2708 		break;
2709 	case dp_nss_cfg_second_radio:
2710 		*remap1 = HAL_REO_REMAP_IX2(REO_REMAP_SW1, 16) |
2711 			  HAL_REO_REMAP_IX2(REO_REMAP_SW3, 17) |
2712 			  HAL_REO_REMAP_IX2(REO_REMAP_SW4, 18) |
2713 			  HAL_REO_REMAP_IX2(REO_REMAP_SW1, 19) |
2714 			  HAL_REO_REMAP_IX2(REO_REMAP_SW3, 20) |
2715 			  HAL_REO_REMAP_IX2(REO_REMAP_SW4, 21) |
2716 			  HAL_REO_REMAP_IX2(REO_REMAP_SW1, 22) |
2717 			  HAL_REO_REMAP_IX2(REO_REMAP_SW3, 23);
2718 
2719 		*remap2 = HAL_REO_REMAP_IX3(REO_REMAP_SW4, 24) |
2720 			  HAL_REO_REMAP_IX3(REO_REMAP_SW1, 25) |
2721 			  HAL_REO_REMAP_IX3(REO_REMAP_SW3, 26) |
2722 			  HAL_REO_REMAP_IX3(REO_REMAP_SW4, 27) |
2723 			  HAL_REO_REMAP_IX3(REO_REMAP_SW1, 28) |
2724 			  HAL_REO_REMAP_IX3(REO_REMAP_SW3, 29) |
2725 			  HAL_REO_REMAP_IX3(REO_REMAP_SW4, 30) |
2726 			  HAL_REO_REMAP_IX3(REO_REMAP_SW1, 31);
2727 
2728 		break;
2729 	case dp_nss_cfg_dbdc:
2730 	case dp_nss_cfg_dbtc:
2731 		/* return false if both or all are offloaded to NSS */
2732 		return false;
2733 	}
2734 
2735 	dp_debug("remap1 %x remap2 %x offload_radio %u",
2736 		 *remap1, *remap2, offload_radio);
2737 	return true;
2738 }
2739 #endif /* IPA_OFFLOAD */
2740 
2741 /*
2742  * dp_reo_frag_dst_set() - configure reo register to set the
2743  *                        fragment destination ring
 * @soc: Datapath soc
 * @frag_dst_ring: output parameter to set fragment destination ring
 *
 * Based on offload_radio below, the fragment destination ring is selected:
2748  * 0 - TCL
2749  * 1 - SW1
2750  * 2 - SW2
2751  * 3 - SW3
2752  * 4 - SW4
2753  * 5 - Release
2754  * 6 - FW
2755  * 7 - alternate select
2756  *
 * Return: void
2758  */
2759 static void dp_reo_frag_dst_set(struct dp_soc *soc, uint8_t *frag_dst_ring)
2760 {
2761 	uint8_t offload_radio = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
2762 
2763 	switch (offload_radio) {
2764 	case dp_nss_cfg_default:
2765 		*frag_dst_ring = HAL_SRNG_REO_EXCEPTION;
2766 		break;
2767 	case dp_nss_cfg_first_radio:
2768 		/*
2769 		 * This configuration is valid for single band radio which
		 * This configuration is valid for a single band radio
		 * which is also NSS offloaded.
2772 	case dp_nss_cfg_dbdc:
2773 	case dp_nss_cfg_dbtc:
2774 		*frag_dst_ring = HAL_SRNG_REO_ALTERNATE_SELECT;
2775 		break;
2776 	default:
2777 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2778 				FL("dp_reo_frag_dst_set invalid offload radio config"));
2779 		break;
2780 	}
2781 }
2782 
2783 #ifdef ENABLE_VERBOSE_DEBUG
2784 static void dp_enable_verbose_debug(struct dp_soc *soc)
2785 {
2786 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
2787 
2788 	soc_cfg_ctx = soc->wlan_cfg_ctx;
2789 
2790 	if (soc_cfg_ctx->per_pkt_trace & dp_verbose_debug_mask)
2791 		is_dp_verbose_debug_enabled = true;
2792 
2793 	if (soc_cfg_ctx->per_pkt_trace & hal_verbose_debug_mask)
2794 		hal_set_verbose_debug(true);
2795 	else
2796 		hal_set_verbose_debug(false);
2797 }
2798 #else
2799 static void dp_enable_verbose_debug(struct dp_soc *soc)
2800 {
2801 }
2802 #endif
2803 
2804 /*
 * dp_soc_cmn_setup() - Common SoC level initialization
 * @soc:		Datapath SOC handle
 *
 * This is an internal function used to set up common SOC data structures,
 * to be called from PDEV attach after receiving HW mode capabilities from FW
2810  */
2811 static int dp_soc_cmn_setup(struct dp_soc *soc)
2812 {
2813 	int i, cached;
2814 	struct hal_reo_params reo_params;
2815 	int tx_ring_size;
2816 	int tx_comp_ring_size;
2817 	int reo_dst_ring_size;
2818 	uint32_t entries;
2819 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
2820 
2821 	if (qdf_atomic_read(&soc->cmn_init_done))
2822 		return 0;
2823 
2824 	if (dp_hw_link_desc_pool_setup(soc))
2825 		goto fail1;
2826 
2827 	soc_cfg_ctx = soc->wlan_cfg_ctx;
2828 
2829 	dp_enable_verbose_debug(soc);
2830 
2831 	/* Setup SRNG rings */
2832 	/* Common rings */
2833 	entries = wlan_cfg_get_dp_soc_wbm_release_ring_size(soc_cfg_ctx);
2834 
2835 	if (dp_srng_setup(soc, &soc->wbm_desc_rel_ring, SW2WBM_RELEASE, 0, 0,
2836 			  entries, 0)) {
2837 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2838 			FL("dp_srng_setup failed for wbm_desc_rel_ring"));
2839 		goto fail1;
2840 	}
2841 
2842 	qdf_minidump_log(soc->wbm_desc_rel_ring.base_vaddr_unaligned,
2843 			 soc->wbm_desc_rel_ring.alloc_size,
2844 			 "wbm_desc_rel_ring");
2845 
2846 	soc->num_tcl_data_rings = 0;
2847 	/* Tx data rings */
2848 	if (!wlan_cfg_per_pdev_tx_ring(soc_cfg_ctx)) {
2849 		soc->num_tcl_data_rings =
2850 			wlan_cfg_num_tcl_data_rings(soc_cfg_ctx);
2851 		tx_comp_ring_size =
2852 			wlan_cfg_tx_comp_ring_size(soc_cfg_ctx);
2853 		tx_ring_size =
2854 			wlan_cfg_tx_ring_size(soc_cfg_ctx);
2855 		for (i = 0; i < soc->num_tcl_data_rings; i++) {
2856 			if (dp_srng_setup(soc, &soc->tcl_data_ring[i],
2857 					  TCL_DATA, i, 0, tx_ring_size, 0)) {
2858 				QDF_TRACE(QDF_MODULE_ID_DP,
2859 					QDF_TRACE_LEVEL_ERROR,
2860 					FL("dp_srng_setup failed for tcl_data_ring[%d]"), i);
2861 				goto fail1;
2862 			}
2863 
2864 			/* Disable cached desc if NSS offload is enabled */
2865 			cached = WLAN_CFG_DST_RING_CACHED_DESC;
2866 			if (wlan_cfg_get_dp_soc_nss_cfg(soc_cfg_ctx))
2867 				cached = 0;
2868 			/*
2869 			 * TBD: Set IPA WBM ring size with ini IPA UC tx buffer
2870 			 * count
2871 			 */
2872 			if (dp_srng_setup(soc, &soc->tx_comp_ring[i],
2873 					  WBM2SW_RELEASE, i, 0,
2874 					  tx_comp_ring_size,
2875 					  cached)) {
2876 				QDF_TRACE(QDF_MODULE_ID_DP,
2877 					QDF_TRACE_LEVEL_ERROR,
2878 					FL("dp_srng_setup failed for tx_comp_ring[%d]"), i);
2879 				goto fail1;
2880 			}
2881 		}
2882 	} else {
2883 		/* This will be incremented during per pdev ring setup */
2884 		soc->num_tcl_data_rings = 0;
2885 	}
2886 
2887 	if (dp_tx_soc_attach(soc)) {
2888 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2889 				FL("dp_tx_soc_attach failed"));
2890 		goto fail1;
2891 	}
2892 
2893 	entries = wlan_cfg_get_dp_soc_tcl_cmd_ring_size(soc_cfg_ctx);
2894 	/* TCL command and status rings */
2895 	if (dp_srng_setup(soc, &soc->tcl_cmd_ring, TCL_CMD, 0, 0,
2896 			  entries, 0)) {
2897 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2898 			FL("dp_srng_setup failed for tcl_cmd_ring"));
2899 		goto fail2;
2900 	}
2901 
2902 	entries = wlan_cfg_get_dp_soc_tcl_status_ring_size(soc_cfg_ctx);
2903 	if (dp_srng_setup(soc, &soc->tcl_status_ring, TCL_STATUS, 0, 0,
2904 			  entries, 0)) {
2905 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2906 			FL("dp_srng_setup failed for tcl_status_ring"));
2907 		goto fail2;
2908 	}
2909 
2910 	reo_dst_ring_size = wlan_cfg_get_reo_dst_ring_size(soc->wlan_cfg_ctx);
2911 
2912 	/* TBD: call dp_tx_init to setup Tx SW descriptors and MSDU extension
2913 	 * descriptors
2914 	 */
2915 
2916 	/* Rx data rings */
2917 	if (!wlan_cfg_per_pdev_rx_ring(soc_cfg_ctx)) {
2918 		soc->num_reo_dest_rings =
2919 			wlan_cfg_num_reo_dest_rings(soc_cfg_ctx);
2920 		QDF_TRACE(QDF_MODULE_ID_DP,
2921 			QDF_TRACE_LEVEL_INFO,
2922 			FL("num_reo_dest_rings %d"), soc->num_reo_dest_rings);
2923 
2924 		/* Disable cached desc if NSS offload is enabled */
2925 		cached = WLAN_CFG_DST_RING_CACHED_DESC;
2926 		if (wlan_cfg_get_dp_soc_nss_cfg(soc_cfg_ctx))
2927 			cached = 0;
2928 
2929 		for (i = 0; i < soc->num_reo_dest_rings; i++) {
2930 			if (dp_srng_setup(soc, &soc->reo_dest_ring[i], REO_DST,
2931 					  i, 0, reo_dst_ring_size, cached)) {
2932 				QDF_TRACE(QDF_MODULE_ID_DP,
2933 					  QDF_TRACE_LEVEL_ERROR,
2934 					  FL(RNG_ERR "reo_dest_ring [%d]"), i);
2935 				goto fail2;
2936 			}
2937 		}
2938 	} else {
2939 		/* This will be incremented during per pdev ring setup */
2940 		soc->num_reo_dest_rings = 0;
2941 	}
2942 
2943 	entries = wlan_cfg_get_dp_soc_rxdma_err_dst_ring_size(soc_cfg_ctx);
2944 	/* LMAC RxDMA to SW Rings configuration */
2945 	if (!wlan_cfg_per_pdev_lmac_ring(soc_cfg_ctx)) {
2946 		/* Only valid for MCL */
2947 		struct dp_pdev *pdev = soc->pdev_list[0];
2948 
2949 		for (i = 0; i < MAX_RX_MAC_RINGS; i++) {
2950 			if (dp_srng_setup(soc, &pdev->rxdma_err_dst_ring[i],
2951 					  RXDMA_DST, 0, i, entries, 0)) {
2952 				QDF_TRACE(QDF_MODULE_ID_DP,
2953 					  QDF_TRACE_LEVEL_ERROR,
2954 					  FL(RNG_ERR "rxdma_err_dst_ring"));
2955 				goto fail2;
2956 			}
2957 		}
2958 	}
2959 	/* TBD: call dp_rx_init to setup Rx SW descriptors */
2960 
2961 	/* REO reinjection ring */
2962 	entries = wlan_cfg_get_dp_soc_reo_reinject_ring_size(soc_cfg_ctx);
2963 	if (dp_srng_setup(soc, &soc->reo_reinject_ring, REO_REINJECT, 0, 0,
2964 			  entries, 0)) {
2965 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2966 			  FL("dp_srng_setup failed for reo_reinject_ring"));
2967 		goto fail2;
2968 	}
2969 
2970 
2971 	/* Rx release ring */
2972 	if (dp_srng_setup(soc, &soc->rx_rel_ring, WBM2SW_RELEASE, 3, 0,
2973 			  wlan_cfg_get_dp_soc_rx_release_ring_size(soc_cfg_ctx),
2974 			  0)) {
2975 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2976 			  FL("dp_srng_setup failed for rx_rel_ring"));
2977 		goto fail2;
2978 	}
2979 
2980 
2981 	/* Rx exception ring */
2982 	entries = wlan_cfg_get_dp_soc_reo_exception_ring_size(soc_cfg_ctx);
2983 	if (dp_srng_setup(soc, &soc->reo_exception_ring,
2984 			  REO_EXCEPTION, 0, MAX_REO_DEST_RINGS, entries, 0)) {
2985 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2986 			  FL("dp_srng_setup failed for reo_exception_ring"));
2987 		goto fail2;
2988 	}
2989 
2990 
2991 	/* REO command and status rings */
2992 	if (dp_srng_setup(soc, &soc->reo_cmd_ring, REO_CMD, 0, 0,
2993 			  wlan_cfg_get_dp_soc_reo_cmd_ring_size(soc_cfg_ctx),
2994 			  0)) {
2995 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2996 			FL("dp_srng_setup failed for reo_cmd_ring"));
2997 		goto fail2;
2998 	}
2999 
3000 	hal_reo_init_cmd_ring(soc->hal_soc, soc->reo_cmd_ring.hal_srng);
3001 	TAILQ_INIT(&soc->rx.reo_cmd_list);
3002 	qdf_spinlock_create(&soc->rx.reo_cmd_lock);
3003 
3004 	if (dp_srng_setup(soc, &soc->reo_status_ring, REO_STATUS, 0, 0,
3005 			  wlan_cfg_get_dp_soc_reo_status_ring_size(soc_cfg_ctx),
3006 			  0)) {
3007 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3008 			FL("dp_srng_setup failed for reo_status_ring"));
3009 		goto fail2;
3010 	}
3011 
3012 	/*
	 * Skip registering hw ring interrupts for WMAC2 on IPQ6018,
	 * since WMAC2 is not present on the IPQ6018 platform.
3015 	 */
3016 	if (hal_get_target_type(soc->hal_soc) == TARGET_TYPE_QCA6018) {
3017 		dp_soc_disable_mac2_intr_mask(soc);
3018 	}
3019 
3020 	/* Reset the cpu ring map if radio is NSS offloaded */
3021 	if (wlan_cfg_get_dp_soc_nss_cfg(soc_cfg_ctx)) {
3022 		dp_soc_reset_cpu_ring_map(soc);
3023 		dp_soc_reset_intr_mask(soc);
3024 	}
3025 
3026 	/* Setup HW REO */
3027 	qdf_mem_zero(&reo_params, sizeof(reo_params));
3028 
3029 	if (wlan_cfg_is_rx_hash_enabled(soc_cfg_ctx)) {
3030 
3031 		/*
3032 		 * Reo ring remap is not required if both radios
3033 		 * are offloaded to NSS
3034 		 */
3035 		if (!dp_reo_remap_config(soc,
3036 					&reo_params.remap1,
3037 					&reo_params.remap2))
3038 			goto out;
3039 
3040 		reo_params.rx_hash_enabled = true;
3041 	}
3042 
3043 	/* setup the global rx defrag waitlist */
3044 	TAILQ_INIT(&soc->rx.defrag.waitlist);
3045 	soc->rx.defrag.timeout_ms =
3046 		wlan_cfg_get_rx_defrag_min_timeout(soc_cfg_ctx);
3047 	soc->rx.defrag.next_flush_ms = 0;
3048 	soc->rx.flags.defrag_timeout_check =
3049 		wlan_cfg_get_defrag_timeout_check(soc_cfg_ctx);
3050 	qdf_spinlock_create(&soc->rx.defrag.defrag_lock);
3051 
3052 out:
3053 	/*
3054 	 * set the fragment destination ring
3055 	 */
3056 	dp_reo_frag_dst_set(soc, &reo_params.frag_dst_ring);
3057 
3058 	hal_reo_setup(soc->hal_soc, &reo_params);
3059 
3060 	qdf_atomic_set(&soc->cmn_init_done, 1);
3061 
3062 	dp_soc_wds_attach(soc);
3063 
3064 	qdf_nbuf_queue_init(&soc->htt_stats.msg);
3065 	return 0;
3066 fail2:
3067 	dp_tx_soc_detach(soc);
3068 fail1:
3069 	/*
3070 	 * Cleanup will be done as part of soc_detach, which will
3071 	 * be called on pdev attach failure
3072 	 */
3073 	return QDF_STATUS_E_FAILURE;
3074 }
3075 
3076 /*
 * dp_soc_cmn_cleanup() - Common SoC level de-initialization
 *
 * @soc: Datapath SOC handle
 *
 * This function is responsible for cleaning up the DP SoC resources
 * initialized in dp_pdev_attach_wifi3 --> dp_soc_cmn_setup, since
 * dp_soc_detach_wifi3 cannot accurately determine whether some of
 * them have been initialized.
3085  *
3086  */
3087 static void dp_soc_cmn_cleanup(struct dp_soc *soc)
3088 {
3089 	if (!dp_is_soc_reinit(soc)) {
3090 		dp_tx_soc_detach(soc);
3091 	}
3092 
3093 	qdf_spinlock_destroy(&soc->rx.defrag.defrag_lock);
3094 
3095 	dp_reo_cmdlist_destroy(soc);
3096 	qdf_spinlock_destroy(&soc->rx.reo_cmd_lock);
3097 }
3098 
3099 static QDF_STATUS
3100 dp_pdev_detach_wifi3(struct cdp_soc_t *psoc, uint8_t pdev_id,
3101 		     int force);
3102 
3103 static QDF_STATUS dp_lro_hash_setup(struct dp_soc *soc, struct dp_pdev *pdev)
3104 {
3105 	struct cdp_lro_hash_config lro_hash;
3106 	QDF_STATUS status;
3107 
3108 	if (!wlan_cfg_is_lro_enabled(soc->wlan_cfg_ctx) &&
3109 	    !wlan_cfg_is_gro_enabled(soc->wlan_cfg_ctx) &&
3110 	    !wlan_cfg_is_rx_hash_enabled(soc->wlan_cfg_ctx)) {
3111 		dp_err("LRO, GRO and RX hash disabled");
3112 		return QDF_STATUS_E_FAILURE;
3113 	}
3114 
3115 	qdf_mem_zero(&lro_hash, sizeof(lro_hash));
3116 
3117 	if (wlan_cfg_is_lro_enabled(soc->wlan_cfg_ctx) ||
3118 	    wlan_cfg_is_gro_enabled(soc->wlan_cfg_ctx)) {
3119 		lro_hash.lro_enable = 1;
3120 		lro_hash.tcp_flag = QDF_TCPHDR_ACK;
3121 		lro_hash.tcp_flag_mask = QDF_TCPHDR_FIN | QDF_TCPHDR_SYN |
3122 			 QDF_TCPHDR_RST | QDF_TCPHDR_ACK | QDF_TCPHDR_URG |
3123 			 QDF_TCPHDR_ECE | QDF_TCPHDR_CWR;
3124 	}
3125 
3126 	qdf_get_random_bytes(lro_hash.toeplitz_hash_ipv4,
3127 		 (sizeof(lro_hash.toeplitz_hash_ipv4[0]) *
3128 		 LRO_IPV4_SEED_ARR_SZ));
3129 	qdf_get_random_bytes(lro_hash.toeplitz_hash_ipv6,
3130 		 (sizeof(lro_hash.toeplitz_hash_ipv6[0]) *
3131 		 LRO_IPV6_SEED_ARR_SZ));
3132 
3133 	qdf_assert(soc->cdp_soc.ol_ops->lro_hash_config);
3134 
3135 	if (!soc->cdp_soc.ol_ops->lro_hash_config) {
3136 		QDF_BUG(0);
3137 		dp_err("lro_hash_config not configured");
3138 		return QDF_STATUS_E_FAILURE;
3139 	}
3140 
3141 	status = soc->cdp_soc.ol_ops->lro_hash_config(soc->ctrl_psoc,
3142 						      pdev->pdev_id,
3143 						      &lro_hash);
3144 	if (!QDF_IS_STATUS_SUCCESS(status)) {
3145 		dp_err("failed to send lro_hash_config to FW %u", status);
3146 		return status;
3147 	}
3148 
3149 	dp_info("LRO CMD config: lro_enable: 0x%x tcp_flag 0x%x tcp_flag_mask 0x%x",
3150 		lro_hash.lro_enable, lro_hash.tcp_flag,
3151 		lro_hash.tcp_flag_mask);
3152 
3153 	dp_info("toeplitz_hash_ipv4:");
3154 	qdf_trace_hex_dump(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
3155 			   lro_hash.toeplitz_hash_ipv4,
3156 			   (sizeof(lro_hash.toeplitz_hash_ipv4[0]) *
3157 			   LRO_IPV4_SEED_ARR_SZ));
3158 
3159 	dp_info("toeplitz_hash_ipv6:");
3160 	qdf_trace_hex_dump(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
3161 			   lro_hash.toeplitz_hash_ipv6,
3162 			   (sizeof(lro_hash.toeplitz_hash_ipv6[0]) *
3163 			   LRO_IPV6_SEED_ARR_SZ));
3164 
3165 	return status;
3166 }
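
/*
 * The random Toeplitz seeds generated above are pushed to the target
 * through the lro_hash_config ops; with rx_hash enabled, the target
 * uses them to hash RX flows so that traffic is spread across the REO
 * destination rings configured via dp_reo_remap_config().
 */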
3167 
3168 /*
 * dp_rxdma_ring_setup() - configure the RX DMA rings
 * @soc: data path SoC handle
 * @pdev: Physical device handle
 *
 * Return: 0 - success, > 0 - failure
 */
3175 #ifdef QCA_HOST2FW_RXBUF_RING
3176 static int dp_rxdma_ring_setup(struct dp_soc *soc,
3177 	 struct dp_pdev *pdev)
3178 {
3179 	struct wlan_cfg_dp_pdev_ctxt *pdev_cfg_ctx;
3180 	int max_mac_rings;
3181 	int i;
3182 	int ring_size;
3183 
3184 	pdev_cfg_ctx = pdev->wlan_cfg_ctx;
3185 	max_mac_rings = wlan_cfg_get_num_mac_rings(pdev_cfg_ctx);
3186 	ring_size =  wlan_cfg_get_rx_dma_buf_ring_size(pdev_cfg_ctx);
3187 
3188 	for (i = 0; i < max_mac_rings; i++) {
3189 		dp_verbose_debug("pdev_id %d mac_id %d", pdev->pdev_id, i);
3190 		if (dp_srng_setup(soc, &pdev->rx_mac_buf_ring[i],
3191 				  RXDMA_BUF, 1, i, ring_size, 0)) {
3192 			QDF_TRACE(QDF_MODULE_ID_DP,
3193 				 QDF_TRACE_LEVEL_ERROR,
3194 				 FL("failed rx mac ring setup"));
3195 			return QDF_STATUS_E_FAILURE;
3196 		}
3197 	}
3198 	return QDF_STATUS_SUCCESS;
3199 }
3200 #else
3201 static int dp_rxdma_ring_setup(struct dp_soc *soc,
3202 	 struct dp_pdev *pdev)
3203 {
3204 	return QDF_STATUS_SUCCESS;
3205 }
3206 #endif
3207 
3208 /**
3209  * dp_dscp_tid_map_setup(): Initialize the dscp-tid maps
3210  * @pdev - DP_PDEV handle
3211  *
3212  * Return: void
3213  */
3214 static inline void
3215 dp_dscp_tid_map_setup(struct dp_pdev *pdev)
3216 {
3217 	uint8_t map_id;
3218 	struct dp_soc *soc = pdev->soc;
3219 
3220 	if (!soc)
3221 		return;
3222 
3223 	for (map_id = 0; map_id < DP_MAX_TID_MAPS; map_id++) {
3224 		qdf_mem_copy(pdev->dscp_tid_map[map_id],
3225 			     default_dscp_tid_map,
3226 			     sizeof(default_dscp_tid_map));
3227 	}
3228 
3229 	for (map_id = 0; map_id < soc->num_hw_dscp_tid_map; map_id++) {
3230 		hal_tx_set_dscp_tid_map(soc->hal_soc,
3231 					default_dscp_tid_map,
3232 					map_id);
3233 	}
3234 }
3235 
3236 /**
3237  * dp_pcp_tid_map_setup(): Initialize the pcp-tid maps
3238  * @pdev - DP_PDEV handle
3239  *
3240  * Return: void
3241  */
3242 static inline void
3243 dp_pcp_tid_map_setup(struct dp_pdev *pdev)
3244 {
3245 	struct dp_soc *soc = pdev->soc;
3246 
3247 	if (!soc)
3248 		return;
3249 
3250 	qdf_mem_copy(soc->pcp_tid_map, default_pcp_tid_map,
3251 		     sizeof(default_pcp_tid_map));
3252 	hal_tx_set_pcp_tid_map_default(soc->hal_soc, default_pcp_tid_map);
3253 }
3254 
3255 #ifdef IPA_OFFLOAD
3256 /**
3257  * dp_setup_ipa_rx_refill_buf_ring - Setup second Rx refill buffer ring
3258  * @soc: data path instance
3259  * @pdev: core txrx pdev context
3260  *
3261  * Return: QDF_STATUS_SUCCESS: success
3262  *         QDF_STATUS_E_RESOURCES: Error return
3263  */
3264 static int dp_setup_ipa_rx_refill_buf_ring(struct dp_soc *soc,
3265 					   struct dp_pdev *pdev)
3266 {
3267 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
3268 	int entries;
3269 
3270 	soc_cfg_ctx = soc->wlan_cfg_ctx;
3271 	entries = wlan_cfg_get_dp_soc_rxdma_refill_ring_size(soc_cfg_ctx);
3272 
3273 	/* Setup second Rx refill buffer ring */
3274 	if (dp_srng_setup(soc, &pdev->rx_refill_buf_ring2, RXDMA_BUF,
3275 			  IPA_RX_REFILL_BUF_RING_IDX, pdev->pdev_id, entries, 0)
3276 	   ) {
3277 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3278 			FL("dp_srng_setup failed second rx refill ring"));
3279 		return QDF_STATUS_E_FAILURE;
3280 	}
3281 	return QDF_STATUS_SUCCESS;
3282 }
3283 
3284 /**
3285  * dp_cleanup_ipa_rx_refill_buf_ring - Cleanup second Rx refill buffer ring
3286  * @soc: data path instance
3287  * @pdev: core txrx pdev context
3288  *
3289  * Return: void
3290  */
3291 static void dp_cleanup_ipa_rx_refill_buf_ring(struct dp_soc *soc,
3292 					      struct dp_pdev *pdev)
3293 {
3294 	dp_srng_cleanup(soc, &pdev->rx_refill_buf_ring2, RXDMA_BUF,
3295 			IPA_RX_REFILL_BUF_RING_IDX);
3296 }
3297 
3298 #else
3299 static int dp_setup_ipa_rx_refill_buf_ring(struct dp_soc *soc,
3300 					   struct dp_pdev *pdev)
3301 {
3302 	return QDF_STATUS_SUCCESS;
3303 }
3304 
3305 static void dp_cleanup_ipa_rx_refill_buf_ring(struct dp_soc *soc,
3306 					      struct dp_pdev *pdev)
3307 {
3308 }
3309 #endif
3310 
3311 #if !defined(DISABLE_MON_CONFIG)
3312 /**
3313  * dp_mon_rings_setup() - Initialize Monitor rings based on target
3314  * @soc: soc handle
3315  * @pdev: physical device handle
3316  *
3317  * Return: nonzero on failure and zero on success
3318  */
3319 static
3320 QDF_STATUS dp_mon_rings_setup(struct dp_soc *soc, struct dp_pdev *pdev)
3321 {
3322 	int mac_id = 0;
3323 	int pdev_id = pdev->pdev_id;
3324 	int entries;
3325 	struct wlan_cfg_dp_pdev_ctxt *pdev_cfg_ctx;
3326 
3327 	pdev_cfg_ctx = pdev->wlan_cfg_ctx;
3328 
3329 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
3330 		int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, pdev_id);
3331 
3332 		if (soc->wlan_cfg_ctx->rxdma1_enable) {
3333 			entries =
3334 			   wlan_cfg_get_dma_mon_buf_ring_size(pdev_cfg_ctx);
3335 			if (dp_srng_setup(soc,
3336 					  &pdev->rxdma_mon_buf_ring[mac_id],
3337 					  RXDMA_MONITOR_BUF, 0, mac_for_pdev,
3338 					  entries, 0)) {
3339 				QDF_TRACE(QDF_MODULE_ID_DP,
3340 					  QDF_TRACE_LEVEL_ERROR,
					  FL(RNG_ERR "rxdma_mon_buf_ring"));
3342 				return QDF_STATUS_E_NOMEM;
3343 			}
3344 
3345 			entries =
3346 			   wlan_cfg_get_dma_mon_dest_ring_size(pdev_cfg_ctx);
3347 			if (dp_srng_setup(soc,
3348 					  &pdev->rxdma_mon_dst_ring[mac_id],
3349 					  RXDMA_MONITOR_DST, 0, mac_for_pdev,
3350 					  entries, 0)) {
3351 				QDF_TRACE(QDF_MODULE_ID_DP,
3352 					  QDF_TRACE_LEVEL_ERROR,
3353 					  FL(RNG_ERR "rxdma_mon_dst_ring"));
3354 				return QDF_STATUS_E_NOMEM;
3355 			}
3356 
3357 			entries =
3358 			    wlan_cfg_get_dma_mon_stat_ring_size(pdev_cfg_ctx);
3359 			if (dp_srng_setup(soc,
3360 					  &pdev->rxdma_mon_status_ring[mac_id],
3361 					  RXDMA_MONITOR_STATUS, 0, mac_for_pdev,
3362 					  entries, 0)) {
3363 				QDF_TRACE(QDF_MODULE_ID_DP,
3364 					  QDF_TRACE_LEVEL_ERROR,
3365 					  FL(RNG_ERR "rxdma_mon_status_ring"));
3366 				return QDF_STATUS_E_NOMEM;
3367 			}
3368 
3369 			entries =
3370 			   wlan_cfg_get_dma_mon_desc_ring_size(pdev_cfg_ctx);
3371 			if (dp_srng_setup(soc,
3372 					  &pdev->rxdma_mon_desc_ring[mac_id],
3373 					  RXDMA_MONITOR_DESC, 0, mac_for_pdev,
3374 					  entries, 0)) {
3375 				QDF_TRACE(QDF_MODULE_ID_DP,
3376 					  QDF_TRACE_LEVEL_ERROR,
3377 					  FL(RNG_ERR "rxdma_mon_desc_ring"));
3378 				return QDF_STATUS_E_NOMEM;
3379 			}
3380 		} else {
3381 			entries =
3382 			   wlan_cfg_get_dma_mon_stat_ring_size(pdev_cfg_ctx);
3383 			if (dp_srng_setup(soc,
3384 					  &pdev->rxdma_mon_status_ring[mac_id],
3385 					  RXDMA_MONITOR_STATUS, 0, mac_for_pdev,
3386 					  entries, 0)) {
3387 				QDF_TRACE(QDF_MODULE_ID_DP,
3388 					  QDF_TRACE_LEVEL_ERROR,
3389 					  FL(RNG_ERR "rxdma_mon_status_ring"));
3390 				return QDF_STATUS_E_NOMEM;
3391 			}
3392 		}
3393 	}
3394 
3395 	return QDF_STATUS_SUCCESS;
3396 }
3397 #else
3398 static
3399 QDF_STATUS dp_mon_rings_setup(struct dp_soc *soc, struct dp_pdev *pdev)
3400 {
3401 	return QDF_STATUS_SUCCESS;
3402 }
3403 #endif
3404 
/*
 * dp_iterate_update_peer_list() - update peer stats on cal client timer
3406  * @pdev_hdl: pdev handle
3407  */
3408 #ifdef ATH_SUPPORT_EXT_STAT
3409 void  dp_iterate_update_peer_list(struct cdp_pdev *pdev_hdl)
3410 {
3411 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_hdl;
3412 	struct dp_soc *soc = pdev->soc;
3413 	struct dp_vdev *vdev = NULL;
3414 	struct dp_peer *peer = NULL;
3415 
3416 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
3417 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
3418 	DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
3419 		DP_VDEV_ITERATE_PEER_LIST(vdev, peer) {
3420 			dp_cal_client_update_peer_stats(&peer->stats);
3421 		}
3422 	}
3423 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
3424 	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
3425 }
3426 #else
3427 void  dp_iterate_update_peer_list(struct cdp_pdev *pdev_hdl)
3428 {
3429 }
3430 #endif
3431 
3432 /*
3433  * dp_htt_ppdu_stats_attach() - attach resources for HTT PPDU stats processing
3434  * @pdev: Datapath PDEV handle
3435  *
3436  * Return: QDF_STATUS_SUCCESS: Success
3437  *         QDF_STATUS_E_NOMEM: Error
3438  */
3439 static QDF_STATUS dp_htt_ppdu_stats_attach(struct dp_pdev *pdev)
3440 {
3441 	pdev->ppdu_tlv_buf = qdf_mem_malloc(HTT_T2H_MAX_MSG_SIZE);
3442 
3443 	if (!pdev->ppdu_tlv_buf) {
3444 		QDF_TRACE_ERROR(QDF_MODULE_ID_DP, "ppdu_tlv_buf alloc fail");
3445 		return QDF_STATUS_E_NOMEM;
3446 	}
3447 
3448 	return QDF_STATUS_SUCCESS;
3449 }
3450 
3451 /*
 * dp_pdev_attach_wifi3() - attach txrx pdev
 * @txrx_soc: Datapath SOC handle
 * @htc_handle: HTC handle for host-target interface
 * @qdf_osdev: QDF OS device
 * @pdev_id: PDEV ID
 *
 * Return: DP PDEV handle on success, NULL on failure
 */
3460 static struct cdp_pdev *dp_pdev_attach_wifi3(struct cdp_soc_t *txrx_soc,
3461 	HTC_HANDLE htc_handle, qdf_device_t qdf_osdev, uint8_t pdev_id)
3462 {
3463 	int ring_size;
3464 	int entries;
3465 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
3466 	int nss_cfg;
3467 	void *sojourn_buf;
3468 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
3469 	struct dp_pdev *pdev = NULL;
3470 
3471 	if (dp_is_soc_reinit(soc)) {
3472 		pdev = soc->pdev_list[pdev_id];
3473 	} else {
3474 		pdev = qdf_mem_malloc(sizeof(*pdev));
3475 		qdf_minidump_log(pdev, sizeof(*pdev), "dp_pdev");
3476 	}
3477 
3478 	if (!pdev) {
3479 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3480 			FL("DP PDEV memory allocation failed"));
3481 		goto fail0;
3482 	}
3483 
3484 	/*
3485 	 * Variable to prevent double pdev deinitialization during
3486 	 * radio detach execution, i.e. in the absence of any vdev.
3487 	 */
3488 	pdev->pdev_deinit = 0;
3489 	pdev->invalid_peer = qdf_mem_malloc(sizeof(struct dp_peer));
3490 
3491 	if (!pdev->invalid_peer) {
3492 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3493 			  FL("Invalid peer memory allocation failed"));
3494 		qdf_mem_free(pdev);
3495 		goto fail0;
3496 	}
3497 
3498 	soc_cfg_ctx = soc->wlan_cfg_ctx;
3499 	pdev->wlan_cfg_ctx = wlan_cfg_pdev_attach(soc->ctrl_psoc);
3500 
3501 	if (!pdev->wlan_cfg_ctx) {
3502 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3503 			FL("pdev cfg_attach failed"));
3504 
3505 		qdf_mem_free(pdev->invalid_peer);
3506 		qdf_mem_free(pdev);
3507 		goto fail0;
3508 	}
3509 
3510 	/*
3511 	 * set nss pdev config based on soc config
3512 	 */
3513 	nss_cfg = wlan_cfg_get_dp_soc_nss_cfg(soc_cfg_ctx);
3514 	wlan_cfg_set_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx,
3515 			(nss_cfg & (1 << pdev_id)));
3516 
3517 	pdev->soc = soc;
3518 	pdev->pdev_id = pdev_id;
3519 	soc->pdev_list[pdev_id] = pdev;
3520 
3521 	pdev->lmac_id = wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, pdev_id);
3522 	soc->pdev_count++;
3523 
3524 	TAILQ_INIT(&pdev->vdev_list);
3525 	qdf_spinlock_create(&pdev->vdev_list_lock);
3526 	pdev->vdev_count = 0;
3527 
3528 	qdf_spinlock_create(&pdev->tx_mutex);
3529 	qdf_spinlock_create(&pdev->neighbour_peer_mutex);
3530 	TAILQ_INIT(&pdev->neighbour_peers_list);
3531 	pdev->neighbour_peers_added = false;
3532 	pdev->monitor_configured = false;
3533 
3534 	if (dp_soc_cmn_setup(soc)) {
3535 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3536 			FL("dp_soc_cmn_setup failed"));
3537 		goto fail1;
3538 	}
3539 
3540 	/* Setup per PDEV TCL rings if configured */
3541 	if (wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
3542 		ring_size =
3543 			wlan_cfg_tx_ring_size(soc_cfg_ctx);
3544 
3545 		if (dp_srng_setup(soc, &soc->tcl_data_ring[pdev_id], TCL_DATA,
3546 				  pdev_id, pdev_id, ring_size, 0)) {
3547 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3548 				FL("dp_srng_setup failed for tcl_data_ring"));
3549 			goto fail1;
3550 		}
3551 
3552 		ring_size =
3553 			wlan_cfg_tx_comp_ring_size(soc_cfg_ctx);
3554 
3555 		if (dp_srng_setup(soc, &soc->tx_comp_ring[pdev_id],
3556 				  WBM2SW_RELEASE, pdev_id, pdev_id,
3557 				  ring_size, 0)) {
3558 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3559 				FL("dp_srng_setup failed for tx_comp_ring"));
3560 			goto fail1;
3561 		}
3562 		soc->num_tcl_data_rings++;
3563 	}
3564 
3565 	/* Tx specific init */
3566 	if (dp_tx_pdev_attach(pdev)) {
3567 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3568 			FL("dp_tx_pdev_attach failed"));
3569 		goto fail1;
3570 	}
3571 
3572 	ring_size = wlan_cfg_get_reo_dst_ring_size(soc->wlan_cfg_ctx);
3573 	/* Setup per PDEV REO rings if configured */
3574 	if (wlan_cfg_per_pdev_rx_ring(soc_cfg_ctx)) {
3575 		if (dp_srng_setup(soc, &soc->reo_dest_ring[pdev_id], REO_DST,
3576 				  pdev_id, pdev_id, ring_size, 0)) {
3577 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3578 				FL("dp_srng_setup failed for reo_dest_ring"));
3579 			goto fail1;
3580 		}
3581 		soc->num_reo_dest_rings++;
3582 	}
3583 
3584 	ring_size =
3585 		wlan_cfg_get_dp_soc_rxdma_refill_ring_size(soc->wlan_cfg_ctx);
3586 
3587 	if (dp_srng_setup(soc, &pdev->rx_refill_buf_ring, RXDMA_BUF, 0, pdev_id,
3588 			  ring_size, 0)) {
3589 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3590 			 FL("dp_srng_setup failed for rx refill ring"));
3591 		goto fail1;
3592 	}
3593 
3594 	if (dp_rxdma_ring_setup(soc, pdev)) {
3595 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3596 			 FL("RXDMA ring config failed"));
3597 		goto fail1;
3598 	}
3599 
3600 	if (dp_mon_rings_setup(soc, pdev)) {
3601 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3602 			  FL("MONITOR rings setup failed"));
3603 		goto fail1;
3604 	}
3605 
3606 	entries = wlan_cfg_get_dp_soc_rxdma_err_dst_ring_size(soc_cfg_ctx);
3607 	if (wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx)) {
3608 		if (dp_srng_setup(soc, &pdev->rxdma_err_dst_ring[0], RXDMA_DST,
3609 				  0, pdev_id, entries, 0)) {
3610 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3611 				  FL(RNG_ERR "rxdma_err_dst_ring"));
3612 			goto fail1;
3613 		}
3614 	}
3615 
3616 	if (dp_setup_ipa_rx_refill_buf_ring(soc, pdev))
3617 		goto fail1;
3618 
3619 	if (dp_ipa_ring_resource_setup(soc, pdev))
3620 		goto fail1;
3621 
3622 	if (dp_ipa_uc_attach(soc, pdev) != QDF_STATUS_SUCCESS) {
3623 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3624 			FL("dp_ipa_uc_attach failed"));
3625 		goto fail1;
3626 	}
3627 
3628 	/* Rx specific init */
3629 	if (dp_rx_pdev_attach(pdev)) {
3630 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3631 			  FL("dp_rx_pdev_attach failed"));
3632 		goto fail2;
3633 	}
3634 
3635 	DP_STATS_INIT(pdev);
3636 
3637 	/* Monitor filter init */
3638 	pdev->mon_filter_mode = MON_FILTER_ALL;
3639 	pdev->fp_mgmt_filter = FILTER_MGMT_ALL;
3640 	pdev->fp_ctrl_filter = FILTER_CTRL_ALL;
3641 	pdev->fp_data_filter = FILTER_DATA_ALL;
3642 	pdev->mo_mgmt_filter = FILTER_MGMT_ALL;
3643 	pdev->mo_ctrl_filter = FILTER_CTRL_ALL;
3644 	pdev->mo_data_filter = FILTER_DATA_ALL;
3645 
3646 	dp_local_peer_id_pool_init(pdev);
3647 
3648 	dp_dscp_tid_map_setup(pdev);
3649 	dp_pcp_tid_map_setup(pdev);
3650 
3651 	/* Rx monitor mode specific init */
3652 	if (dp_rx_pdev_mon_attach(pdev)) {
3653 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
3654 				"dp_rx_pdev_mon_attach failed");
3655 		goto fail2;
3656 	}
3657 
3658 	if (dp_wdi_event_attach(pdev)) {
3659 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
3660 				"dp_wdi_event_attach failed");
3661 		goto wdi_attach_fail;
3662 	}
3663 
3664 	/* set the reo destination during initialization */
3665 	pdev->reo_dest = pdev->pdev_id + 1;
3666 
3667 	/*
3668 	 * initialize ppdu tlv list
3669 	 */
3670 	TAILQ_INIT(&pdev->ppdu_info_list);
3671 	pdev->tlv_count = 0;
3672 	pdev->list_depth = 0;
3673 
3674 	qdf_mem_zero(&pdev->sojourn_stats, sizeof(struct cdp_tx_sojourn_stats));
3675 
3676 	pdev->sojourn_buf = qdf_nbuf_alloc(pdev->soc->osdev,
3677 			      sizeof(struct cdp_tx_sojourn_stats), 0, 4,
3678 			      TRUE);
3679 
3680 	if (pdev->sojourn_buf) {
3681 		sojourn_buf = qdf_nbuf_data(pdev->sojourn_buf);
3682 		qdf_mem_zero(sojourn_buf, sizeof(struct cdp_tx_sojourn_stats));
3683 	}
3684 	/* initialize cal client timer */
3685 	dp_cal_client_attach(&pdev->cal_client_ctx,
3686 			     dp_pdev_to_cdp_pdev(pdev),
3687 			     pdev->soc->osdev,
3688 			     &dp_iterate_update_peer_list);
3689 	qdf_event_create(&pdev->fw_peer_stats_event);
3690 
3691 	pdev->num_tx_allowed = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
3692 
3693 	dp_init_tso_stats(pdev);
3694 
3695 	if (dp_htt_ppdu_stats_attach(pdev) != QDF_STATUS_SUCCESS)
3696 		goto fail1;
3697 
3698 	dp_tx_ppdu_stats_attach(pdev);
3699 
3700 	return (struct cdp_pdev *)pdev;
3701 
3702 wdi_attach_fail:
3703 	/*
3704 	 * dp_mon_link_desc_pool_cleanup is done in dp_pdev_detach
3705 	 * and hence need not be done here.
3706 	 */
3707 	dp_rx_pdev_mon_detach(pdev);
3708 
3709 fail2:
3710 	dp_rx_pdev_detach(pdev);
3711 
3712 fail1:
3713 	if (pdev->invalid_peer)
3714 		qdf_mem_free(pdev->invalid_peer);
3715 	dp_pdev_detach((struct cdp_pdev *)pdev, 0);
3716 
3717 fail0:
3718 	return NULL;
3719 }
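
/*
 * The attach sequence above builds pdev state bottom-up (cfg, SRNGs,
 * Tx/Rx, monitor, WDI, PPDU stats) and the goto labels unwind it in
 * reverse on failure, each label falling through to the next.  A
 * condensed sketch of the pattern (names abbreviated for illustration,
 * not a compilable excerpt):
 *
 *	if (setup_a())
 *		goto fail0;	// nothing to undo yet
 *	if (setup_b())
 *		goto fail1;	// undo a
 *	if (setup_c())
 *		goto fail2;	// undo b, then a
 *	return pdev;
 * fail2:
 *	teardown_b();
 * fail1:
 *	teardown_a();
 * fail0:
 *	return NULL;
 */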
3720 
3721 /*
3722  * dp_rxdma_ring_cleanup() - clean up the RX DMA rings
3723  * @soc: data path SoC handle
3724  * @pdev: Physical device handle
3725  *
3726  * Return: void
3727  */
3728 #ifdef QCA_HOST2FW_RXBUF_RING
3729 static void dp_rxdma_ring_cleanup(struct dp_soc *soc,
3730 	 struct dp_pdev *pdev)
3731 {
3732 	int i;
3733 
3734 	for (i = 0; i < MAX_RX_MAC_RINGS; i++)
3735 		dp_srng_cleanup(soc, &pdev->rx_mac_buf_ring[i],
3736 			 RXDMA_BUF, 1);
3737 
3738 	if (soc->reap_timer_init) {
3739 		qdf_timer_free(&soc->mon_reap_timer);
3740 		soc->reap_timer_init = 0;
3741 	}
3742 }
3743 #else
3744 static void dp_rxdma_ring_cleanup(struct dp_soc *soc,
3745 	 struct dp_pdev *pdev)
3746 {
3747 }
3748 #endif
3749 
3750 /*
3751  * dp_neighbour_peers_detach() - Detach neighbour peers (NAC clients)
3752  * @pdev: device object
3753  *
3754  * Return: void
3755  */
3756 static void dp_neighbour_peers_detach(struct dp_pdev *pdev)
3757 {
3758 	struct dp_neighbour_peer *peer = NULL;
3759 	struct dp_neighbour_peer *temp_peer = NULL;
3760 
3761 	TAILQ_FOREACH_SAFE(peer, &pdev->neighbour_peers_list,
3762 			neighbour_peer_list_elem, temp_peer) {
3763 		/* delete this peer from the list */
3764 		TAILQ_REMOVE(&pdev->neighbour_peers_list,
3765 				peer, neighbour_peer_list_elem);
3766 		qdf_mem_free(peer);
3767 	}
3768 
3769 	qdf_spinlock_destroy(&pdev->neighbour_peer_mutex);
3770 }
3771 
3772 /**
3773  * dp_htt_ppdu_stats_detach() - detach stats resources
3774  * @pdev: Datapath PDEV handle
3775  *
3776  * Return: void
3777  */
3778 static void dp_htt_ppdu_stats_detach(struct dp_pdev *pdev)
3779 {
3780 	struct ppdu_info *ppdu_info, *ppdu_info_next;
3781 
3782 	TAILQ_FOREACH_SAFE(ppdu_info, &pdev->ppdu_info_list,
3783 			ppdu_info_list_elem, ppdu_info_next) {
3784 		if (!ppdu_info)
3785 			break;
3786 		qdf_assert_always(ppdu_info->nbuf);
3787 		qdf_nbuf_free(ppdu_info->nbuf);
3788 		qdf_mem_free(ppdu_info);
3789 	}
3790 
3791 	if (pdev->ppdu_tlv_buf)
3792 		qdf_mem_free(pdev->ppdu_tlv_buf);
3793 
3794 }
3795 
3796 #if !defined(DISABLE_MON_CONFIG)
3797 
3798 static
3799 void dp_mon_ring_cleanup(struct dp_soc *soc, struct dp_pdev *pdev,
3800 			 int mac_id)
3801 {
3802 	if (soc->wlan_cfg_ctx->rxdma1_enable) {
3803 		dp_srng_cleanup(soc,
3804 				&pdev->rxdma_mon_buf_ring[mac_id],
3805 				RXDMA_MONITOR_BUF, 0);
3806 
3807 		dp_srng_cleanup(soc,
3808 				&pdev->rxdma_mon_dst_ring[mac_id],
3809 				RXDMA_MONITOR_DST, 0);
3810 
3811 		dp_srng_cleanup(soc,
3812 				&pdev->rxdma_mon_status_ring[mac_id],
3813 				RXDMA_MONITOR_STATUS, 0);
3814 
3815 		dp_srng_cleanup(soc,
3816 				&pdev->rxdma_mon_desc_ring[mac_id],
3817 				RXDMA_MONITOR_DESC, 0);
3818 
3819 		dp_srng_cleanup(soc,
3820 				&pdev->rxdma_err_dst_ring[mac_id],
3821 				RXDMA_DST, 0);
3822 	} else {
3823 		dp_srng_cleanup(soc,
3824 				&pdev->rxdma_mon_status_ring[mac_id],
3825 				RXDMA_MONITOR_STATUS, 0);
3826 
3827 		dp_srng_cleanup(soc,
3828 				&pdev->rxdma_err_dst_ring[mac_id],
3829 				RXDMA_DST, 0);
3830 	}
3831 
3832 }
3833 #else
3834 static void dp_mon_ring_cleanup(struct dp_soc *soc, struct dp_pdev *pdev,
3835 				int mac_id)
3836 {
3837 }
3838 #endif
3839 
3840 /**
3841  * dp_mon_ring_deinit() - Placeholder to deinitialize Monitor rings
3842  *
3843  * @soc: soc handle
3844  * @pdev: datapath physical dev handle
3845  * @mac_id: mac number
3846  *
3847  * Return: None
3848  */
3849 static void dp_mon_ring_deinit(struct dp_soc *soc, struct dp_pdev *pdev,
3850 			       int mac_id)
3851 {
3852 }
3853 
3854 /**
3855  * dp_pdev_mem_reset() - Reset txrx pdev memory
3856  * @pdev: dp pdev handle
3857  *
3858  * Return: None
3859  */
3860 static void dp_pdev_mem_reset(struct dp_pdev *pdev)
3861 {
3862 	uint16_t len = 0;
3863 	uint8_t *dp_pdev_offset = (uint8_t *)pdev;
3864 
3865 	len = sizeof(struct dp_pdev) -
3866 		offsetof(struct dp_pdev, pdev_deinit) -
3867 		sizeof(pdev->pdev_deinit);
3868 	dp_pdev_offset = dp_pdev_offset +
3869 			 offsetof(struct dp_pdev, pdev_deinit) +
3870 			 sizeof(pdev->pdev_deinit);
3871 
3872 	qdf_mem_zero(dp_pdev_offset, len);
3873 }
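
/*
 * dp_pdev_mem_reset() depends on the field order inside struct dp_pdev:
 * everything up to and including pdev_deinit survives a deinit/re-init
 * cycle, while everything laid out after it is scrubbed.  Illustrative
 * layout only; the authoritative definition lives in dp_types.h:
 *
 *	struct dp_pdev {
 *		...			attach-time state, preserved
 *		pdev_deinit;		deinit guard flag, preserved
 *		...			runtime state, zeroed here
 *	};
 */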
3874 
3875 #ifdef WLAN_DP_PENDING_MEM_FLUSH
3876 /**
3877  * dp_pdev_flush_pending_vdevs() - Flush all delete pending vdevs in pdev
3878  * @pdev: Datapath PDEV handle
3879  *
3880  * This is the last chance to flush all pending dp vdevs/peers,
3881  * This is the last chance to flush all pending dp vdevs/peers;
3882  * peer/vdev leak cases like Non-SSR with missing peer unmap
3883  * are covered here.
3884  * Return: None
3885  */
3886 static void dp_pdev_flush_pending_vdevs(struct dp_pdev *pdev)
3887 {
3888 	struct dp_vdev *vdev = NULL;
3889 
3890 	while (true) {
3891 		qdf_spin_lock_bh(&pdev->vdev_list_lock);
3892 		TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
3893 			if (vdev->delete.pending)
3894 				break;
3895 		}
3896 		qdf_spin_unlock_bh(&pdev->vdev_list_lock);
3897 
3898 		/*
3899 		 * vdev will be freed once all of its peers are cleaned up;
3900 		 * dp_delete_pending_vdev will remove vdev from vdev_list
3901 		 * in pdev.
3902 		 */
3903 		if (vdev)
3904 			dp_vdev_flush_peers((struct cdp_vdev *)vdev, 0);
3905 		else
3906 			break;
3907 	}
3908 }
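
/*
 * Note on locking: vdev_list_lock is deliberately dropped before
 * dp_vdev_flush_peers() runs, because flushing the last peer may free
 * the vdev and unlink it from pdev->vdev_list.  That is also why the
 * loop re-scans the list from the head on every pass rather than using
 * a _SAFE iterator.
 */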
3909 #else
3910 static void dp_pdev_flush_pending_vdevs(struct dp_pdev *pdev)
3911 {
3912 }
3913 #endif
3914 
3915 /**
3916  * dp_pdev_deinit() - Deinit txrx pdev
3917  * @txrx_pdev: Datapath PDEV handle
3918  * @force: Force deinit
3919  *
3920  * Return: None
3921  */
3922 static void dp_pdev_deinit(struct cdp_pdev *txrx_pdev, int force)
3923 {
3924 	struct dp_pdev *pdev = (struct dp_pdev *)txrx_pdev;
3925 	struct dp_soc *soc = pdev->soc;
3926 	qdf_nbuf_t curr_nbuf, next_nbuf;
3927 	int mac_id;
3928 
3929 	/*
3930 	 * Prevent double pdev deinitialization during radio detach
3931 	 * execution, i.e. in the absence of any vdev
3932 	 */
3933 	if (pdev->pdev_deinit)
3934 		return;
3935 
3936 	pdev->pdev_deinit = 1;
3937 
3938 	dp_wdi_event_detach(pdev);
3939 
3940 	dp_pdev_flush_pending_vdevs(pdev);
3941 
3942 	dp_tx_pdev_detach(pdev);
3943 
3944 	if (wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
3945 		dp_srng_deinit(soc, &soc->tcl_data_ring[pdev->pdev_id],
3946 			       TCL_DATA, pdev->pdev_id);
3947 		dp_srng_deinit(soc, &soc->tx_comp_ring[pdev->pdev_id],
3948 			       WBM2SW_RELEASE, pdev->pdev_id);
3949 	}
3950 
3951 	dp_pktlogmod_exit(pdev);
3952 
3953 	dp_rx_fst_detach(soc, pdev);
3954 	dp_rx_pdev_detach(pdev);
3955 	dp_rx_pdev_mon_detach(pdev);
3956 	dp_neighbour_peers_detach(pdev);
3957 	qdf_spinlock_destroy(&pdev->tx_mutex);
3958 	qdf_spinlock_destroy(&pdev->vdev_list_lock);
3959 
3960 	dp_ipa_uc_detach(soc, pdev);
3961 
3962 	dp_cleanup_ipa_rx_refill_buf_ring(soc, pdev);
3963 
3964 	/* Cleanup per PDEV REO rings if configured */
3965 	if (wlan_cfg_per_pdev_rx_ring(soc->wlan_cfg_ctx)) {
3966 		dp_srng_deinit(soc, &soc->reo_dest_ring[pdev->pdev_id],
3967 			       REO_DST, pdev->pdev_id);
3968 	}
3969 
3970 	dp_srng_deinit(soc, &pdev->rx_refill_buf_ring, RXDMA_BUF, 0);
3971 
3972 	dp_rxdma_ring_cleanup(soc, pdev);
3973 
3974 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
3975 		dp_mon_ring_deinit(soc, pdev, mac_id);
3976 		dp_srng_deinit(soc, &pdev->rxdma_err_dst_ring[mac_id],
3977 			       RXDMA_DST, 0);
3978 	}
3979 
3980 	curr_nbuf = pdev->invalid_peer_head_msdu;
3981 	while (curr_nbuf) {
3982 		next_nbuf = qdf_nbuf_next(curr_nbuf);
3983 		qdf_nbuf_free(curr_nbuf);
3984 		curr_nbuf = next_nbuf;
3985 	}
3986 	pdev->invalid_peer_head_msdu = NULL;
3987 	pdev->invalid_peer_tail_msdu = NULL;
3988 
3989 	dp_htt_ppdu_stats_detach(pdev);
3990 
3991 	dp_tx_ppdu_stats_detach(pdev);
3992 
3993 	qdf_nbuf_free(pdev->sojourn_buf);
3994 	qdf_nbuf_queue_free(&pdev->rx_ppdu_buf_q);
3995 
3996 	dp_cal_client_detach(&pdev->cal_client_ctx);
3997 
3998 	soc->pdev_count--;
3999 
4000 	/* only do soc common cleanup when the last pdev detaches */
4001 	if (!(soc->pdev_count))
4002 		dp_soc_cmn_cleanup(soc);
4003 
4004 	wlan_cfg_pdev_detach(pdev->wlan_cfg_ctx);
4005 	if (pdev->invalid_peer)
4006 		qdf_mem_free(pdev->invalid_peer);
4007 	qdf_mem_free(pdev->dp_txrx_handle);
4008 	dp_pdev_mem_reset(pdev);
4009 }
4010 
4011 /**
4012  * dp_pdev_deinit_wifi3() - Deinit txrx pdev
4013  * @psoc: Datapath psoc handle
4014  * @pdev_id: Id of datapath PDEV handle
4015  * @force: Force deinit
4016  *
4017  * Return: QDF_STATUS
4018  */
4019 static QDF_STATUS
4020 dp_pdev_deinit_wifi3(struct cdp_soc_t *psoc, uint8_t pdev_id,
4021 		     int force)
4022 {
4023 	struct dp_soc *soc = (struct dp_soc *)psoc;
4024 	struct dp_pdev *txrx_pdev =
4025 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)psoc,
4026 						   pdev_id);
4027 
4028 	if (!txrx_pdev)
4029 		return QDF_STATUS_E_FAILURE;
4030 
4031 	soc->dp_soc_reinit = TRUE;
4032 
4033 	dp_pdev_deinit((struct cdp_pdev *)txrx_pdev, force);
4034 
4035 	return QDF_STATUS_SUCCESS;
4036 }
4037 
4038 /*
4039  * dp_pdev_detach() - Complete rest of pdev detach
4040  * @txrx_pdev: Datapath PDEV handle
4041  * @force: Force detach
4042  *
4043  * Return: None
4044  */
4045 static void dp_pdev_detach(struct cdp_pdev *txrx_pdev, int force)
4046 {
4047 	struct dp_pdev *pdev = (struct dp_pdev *)txrx_pdev;
4048 	struct dp_soc *soc = pdev->soc;
4049 	struct rx_desc_pool *rx_desc_pool;
4050 	int mac_id, mac_for_pdev;
4051 
4052 	if (wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
4053 		dp_srng_cleanup(soc, &soc->tcl_data_ring[pdev->pdev_id],
4054 				TCL_DATA, pdev->pdev_id);
4055 		dp_srng_cleanup(soc, &soc->tx_comp_ring[pdev->pdev_id],
4056 				WBM2SW_RELEASE, pdev->pdev_id);
4057 	}
4058 
4059 	dp_mon_link_free(pdev);
4060 
4061 	/* Cleanup per PDEV REO rings if configured */
4062 	if (wlan_cfg_per_pdev_rx_ring(soc->wlan_cfg_ctx)) {
4063 		dp_srng_cleanup(soc, &soc->reo_dest_ring[pdev->pdev_id],
4064 				REO_DST, pdev->pdev_id);
4065 	}
4066 	dp_rxdma_ring_cleanup(soc, pdev);
4067 	wlan_cfg_pdev_detach(pdev->wlan_cfg_ctx);
4068 
4069 	dp_srng_cleanup(soc, &pdev->rx_refill_buf_ring, RXDMA_BUF, 0);
4070 	dp_cleanup_ipa_rx_refill_buf_ring(soc, pdev);
4071 
4072 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
4073 		dp_mon_ring_cleanup(soc, pdev, mac_id);
4074 		dp_srng_cleanup(soc, &pdev->rxdma_err_dst_ring[mac_id],
4075 				RXDMA_DST, 0);
4076 		if (dp_is_soc_reinit(soc)) {
4077 			mac_for_pdev = dp_get_mac_id_for_pdev(mac_id,
4078 							      pdev->pdev_id);
4079 			rx_desc_pool = &soc->rx_desc_status[mac_for_pdev];
4080 			dp_rx_desc_pool_free(soc, rx_desc_pool);
4081 			rx_desc_pool = &soc->rx_desc_mon[mac_for_pdev];
4082 			dp_rx_desc_pool_free(soc, rx_desc_pool);
4083 		}
4084 	}
4085 
4086 	if (dp_is_soc_reinit(soc)) {
4087 		rx_desc_pool = &soc->rx_desc_buf[pdev->pdev_id];
4088 		dp_rx_desc_pool_free(soc, rx_desc_pool);
4089 	}
4090 
4091 	soc->pdev_list[pdev->pdev_id] = NULL;
4092 	qdf_minidump_remove(pdev);
4093 	qdf_mem_free(pdev);
4094 }
4095 
4096 /*
4097  * dp_pdev_detach_wifi3() - detach txrx pdev
4098  * @psoc: Datapath soc handle
4099  * @pdev_id: pdev id of pdev
4100  * @force: Force detach
4101  *
4102  * Return: QDF_STATUS
4103  */
4104 static QDF_STATUS dp_pdev_detach_wifi3(struct cdp_soc_t *psoc, uint8_t pdev_id,
4105 				       int force)
4106 {
4107 	struct dp_soc *soc = (struct dp_soc *)psoc;
4108 	struct dp_pdev *txrx_pdev =
4109 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)psoc,
4110 						   pdev_id);
4111 
4112 	if (!txrx_pdev) {
4113 		dp_err("Couldn't find dp pdev");
4114 		return QDF_STATUS_E_FAILURE;
4115 	}
4116 
4117 	if (dp_is_soc_reinit(soc)) {
4118 		dp_pdev_detach((struct cdp_pdev *)txrx_pdev, force);
4119 	} else {
4120 		dp_pdev_deinit((struct cdp_pdev *)txrx_pdev, force);
4121 		dp_pdev_detach((struct cdp_pdev *)txrx_pdev, force);
4122 	}
4123 
4124 	return QDF_STATUS_SUCCESS;
4125 }
4126 
4127 /*
4128  * dp_reo_desc_freelist_destroy() - Flush REO descriptors from deferred freelist
4129  * @soc: DP SOC handle
4130  */
4131 static inline void dp_reo_desc_freelist_destroy(struct dp_soc *soc)
4132 {
4133 	struct reo_desc_list_node *desc;
4134 	struct dp_rx_tid *rx_tid;
4135 
4136 	qdf_spin_lock_bh(&soc->reo_desc_freelist_lock);
4137 	while (qdf_list_remove_front(&soc->reo_desc_freelist,
4138 		(qdf_list_node_t **)&desc) == QDF_STATUS_SUCCESS) {
4139 		rx_tid = &desc->rx_tid;
4140 		qdf_mem_unmap_nbytes_single(soc->osdev,
4141 			rx_tid->hw_qdesc_paddr,
4142 			QDF_DMA_BIDIRECTIONAL,
4143 			rx_tid->hw_qdesc_alloc_size);
4144 		qdf_mem_free(rx_tid->hw_qdesc_vaddr_unaligned);
4145 		qdf_mem_free(desc);
4146 	}
4147 	qdf_spin_unlock_bh(&soc->reo_desc_freelist_lock);
4148 	qdf_list_destroy(&soc->reo_desc_freelist);
4149 	qdf_spinlock_destroy(&soc->reo_desc_freelist_lock);
4150 }
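
/*
 * The drain order above matters: each queue descriptor is DMA-unmapped
 * before its backing memory is freed, and the freelist is emptied under
 * reo_desc_freelist_lock before the list and the lock themselves are
 * destroyed.  The same drain-then-destroy shape in miniature:
 *
 *	lock(l);
 *	while (remove_front(list, &node) == success)
 *		release(node);
 *	unlock(l);
 *	list_destroy(list);
 *	lock_destroy(l);
 */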
4151 
4152 /**
4153  * dp_soc_mem_reset() - Reset Dp Soc memory
4154  * @soc: DP handle
4155  *
4156  * Return: None
4157  */
4158 static void dp_soc_mem_reset(struct dp_soc *soc)
4159 {
4160 	uint16_t len = 0;
4161 	uint8_t *dp_soc_offset = (uint8_t *)soc;
4162 
4163 	len = sizeof(struct dp_soc) -
4164 		offsetof(struct dp_soc, dp_soc_reinit) -
4165 		sizeof(soc->dp_soc_reinit);
4166 	dp_soc_offset = dp_soc_offset +
4167 			offsetof(struct dp_soc, dp_soc_reinit) +
4168 			sizeof(soc->dp_soc_reinit);
4169 
4170 	qdf_mem_zero(dp_soc_offset, len);
4171 }
4172 
4173 /**
4174  * dp_soc_deinit() - Deinitialize txrx SOC
4175  * @txrx_soc: Opaque DP SOC handle
4176  *
4177  * Return: None
4178  */
4179 static void dp_soc_deinit(void *txrx_soc)
4180 {
4181 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
4182 	int i;
4183 
4184 	qdf_atomic_set(&soc->cmn_init_done, 0);
4185 
4186 	for (i = 0; i < MAX_PDEV_CNT; i++) {
4187 		if (soc->pdev_list[i])
4188 			dp_pdev_deinit((struct cdp_pdev *)
4189 					soc->pdev_list[i], 1);
4190 	}
4191 
4192 	qdf_flush_work(&soc->htt_stats.work);
4193 	qdf_disable_work(&soc->htt_stats.work);
4194 
4195 	/* Free pending htt stats messages */
4196 	qdf_nbuf_queue_free(&soc->htt_stats.msg);
4197 
4198 	dp_peer_find_detach(soc);
4199 
4200 	/* Free the ring memories */
4201 	/* Common rings */
4202 	dp_srng_deinit(soc, &soc->wbm_desc_rel_ring, SW2WBM_RELEASE, 0);
4203 
4204 	/* Tx data rings */
4205 	if (!wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
4206 		for (i = 0; i < soc->num_tcl_data_rings; i++) {
4207 			dp_srng_deinit(soc, &soc->tcl_data_ring[i],
4208 				       TCL_DATA, i);
4209 			dp_srng_deinit(soc, &soc->tx_comp_ring[i],
4210 				       WBM2SW_RELEASE, i);
4211 		}
4212 	}
4213 
4214 	/* TCL command and status rings */
4215 	dp_srng_deinit(soc, &soc->tcl_cmd_ring, TCL_CMD, 0);
4216 	dp_srng_deinit(soc, &soc->tcl_status_ring, TCL_STATUS, 0);
4217 
4218 	/* Rx data rings */
4219 	if (!wlan_cfg_per_pdev_rx_ring(soc->wlan_cfg_ctx)) {
4220 		soc->num_reo_dest_rings =
4221 			wlan_cfg_num_reo_dest_rings(soc->wlan_cfg_ctx);
4222 		for (i = 0; i < soc->num_reo_dest_rings; i++) {
4223 			/* TODO: Get number of rings and ring sizes
4224 			 * from wlan_cfg
4225 			 */
4226 			dp_srng_deinit(soc, &soc->reo_dest_ring[i],
4227 				       REO_DST, i);
4228 		}
4229 	}
4230 	/* REO reinjection ring */
4231 	dp_srng_deinit(soc, &soc->reo_reinject_ring, REO_REINJECT, 0);
4232 
4233 	/* Rx release ring */
4234 	dp_srng_deinit(soc, &soc->rx_rel_ring, WBM2SW_RELEASE, 0);
4235 
4236 	/* Rx exception ring */
4237 	/* TODO: Better to store ring_type and ring_num in
4238 	 * dp_srng during setup
4239 	 */
4240 	dp_srng_deinit(soc, &soc->reo_exception_ring, REO_EXCEPTION, 0);
4241 
4242 	/* REO command and status rings */
4243 	dp_srng_deinit(soc, &soc->reo_cmd_ring, REO_CMD, 0);
4244 	dp_srng_deinit(soc, &soc->reo_status_ring, REO_STATUS, 0);
4245 
4246 	dp_soc_wds_detach(soc);
4247 
4248 	qdf_spinlock_destroy(&soc->peer_ref_mutex);
4249 	qdf_spinlock_destroy(&soc->htt_stats.lock);
4250 
4251 	htt_soc_htc_dealloc(soc->htt_handle);
4252 
4253 	dp_reo_desc_freelist_destroy(soc);
4254 
4255 	qdf_spinlock_destroy(&soc->ast_lock);
4256 
4257 	dp_soc_mem_reset(soc);
4258 }
4259 
4260 /**
4261  * dp_soc_deinit_wifi3() - Deinitialize txrx SOC
4262  * @txrx_soc: Opaque DP SOC handle
4263  *
4264  * Return: None
4265  */
4266 static void dp_soc_deinit_wifi3(struct cdp_soc_t *txrx_soc)
4267 {
4268 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
4269 
4270 	soc->dp_soc_reinit = 1;
4271 	dp_soc_deinit(txrx_soc);
4272 }
4273 
4274 /*
4275  * dp_soc_detach() - Detach rest of txrx SOC
4276  * @txrx_soc: DP SOC handle, struct cdp_soc_t is first element of struct dp_soc.
4277  *
4278  * Return: None
4279  */
4280 static void dp_soc_detach(struct cdp_soc_t *txrx_soc)
4281 {
4282 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
4283 	int i;
4284 
4285 	qdf_atomic_set(&soc->cmn_init_done, 0);
4286 
4287 	/* TBD: Call Tx and Rx cleanup functions to free buffers and
4288 	 * SW descriptors
4289 	 */
4290 
4291 	for (i = 0; i < MAX_PDEV_CNT; i++) {
4292 		if (soc->pdev_list[i])
4293 			dp_pdev_detach((struct cdp_pdev *)
4294 					     soc->pdev_list[i], 1);
4295 	}
4296 
4297 	/* Free the ring memories */
4298 	/* Common rings */
4299 	qdf_minidump_remove(soc->wbm_desc_rel_ring.base_vaddr_unaligned);
4300 	dp_srng_cleanup(soc, &soc->wbm_desc_rel_ring, SW2WBM_RELEASE, 0);
4301 
4302 	if (dp_is_soc_reinit(soc)) {
4303 		dp_tx_soc_detach(soc);
4304 	}
4305 
4306 	/* Tx data rings */
4307 	if (!wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
4308 		for (i = 0; i < soc->num_tcl_data_rings; i++) {
4309 			dp_srng_cleanup(soc, &soc->tcl_data_ring[i],
4310 				TCL_DATA, i);
4311 			dp_srng_cleanup(soc, &soc->tx_comp_ring[i],
4312 				WBM2SW_RELEASE, i);
4313 		}
4314 	}
4315 
4316 	/* TCL command and status rings */
4317 	dp_srng_cleanup(soc, &soc->tcl_cmd_ring, TCL_CMD, 0);
4318 	dp_srng_cleanup(soc, &soc->tcl_status_ring, TCL_STATUS, 0);
4319 
4320 	/* Rx data rings */
4321 	if (!wlan_cfg_per_pdev_rx_ring(soc->wlan_cfg_ctx)) {
4322 		soc->num_reo_dest_rings =
4323 			wlan_cfg_num_reo_dest_rings(soc->wlan_cfg_ctx);
4324 		for (i = 0; i < soc->num_reo_dest_rings; i++) {
4325 			/* TODO: Get number of rings and ring sizes
4326 			 * from wlan_cfg
4327 			 */
4328 			dp_srng_cleanup(soc, &soc->reo_dest_ring[i],
4329 				REO_DST, i);
4330 		}
4331 	}
4332 	/* REO reinjection ring */
4333 	dp_srng_cleanup(soc, &soc->reo_reinject_ring, REO_REINJECT, 0);
4334 
4335 	/* Rx release ring */
4336 	dp_srng_cleanup(soc, &soc->rx_rel_ring, WBM2SW_RELEASE, 0);
4337 	dp_srng_cleanup(soc, &soc->rx_rel_ring, WBM2SW_RELEASE, 3);
4338 
4339 	/* Rx exception ring */
4340 	/* TODO: Better to store ring_type and ring_num in
4341 	 * dp_srng during setup
4342 	 */
4343 	dp_srng_cleanup(soc, &soc->reo_exception_ring, REO_EXCEPTION, 0);
4344 
4345 	/* REO command and status rings */
4346 	dp_srng_cleanup(soc, &soc->reo_cmd_ring, REO_CMD, 0);
4347 	dp_srng_cleanup(soc, &soc->reo_status_ring, REO_STATUS, 0);
4348 	dp_hw_link_desc_pool_cleanup(soc);
4349 
4350 	htt_soc_detach(soc->htt_handle);
4351 	soc->dp_soc_reinit = 0;
4352 
4353 	wlan_cfg_soc_detach(soc->wlan_cfg_ctx);
4354 
4355 	qdf_minidump_remove(soc);
4356 	qdf_mem_free(soc);
4357 }
4358 
4359 /*
4360  * dp_soc_detach_wifi3() - Detach txrx SOC
4361  * @txrx_soc: DP SOC handle, struct cdp_soc_t is first element of struct dp_soc.
4362  *
4363  * Return: None
4364  */
4365 static void dp_soc_detach_wifi3(struct cdp_soc_t *txrx_soc)
4366 {
4367 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
4368 
4369 	if (dp_is_soc_reinit(soc)) {
4370 		dp_soc_detach(txrx_soc);
4371 	} else {
4372 		dp_soc_deinit(txrx_soc);
4373 		dp_soc_detach(txrx_soc);
4374 	}
4375 }
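
/*
 * The deinit/detach split above gives the SOC a two-stage shutdown,
 * tracked via dp_soc_reinit:
 *
 *	dp_soc_deinit_wifi3()	sets dp_soc_reinit and releases runtime
 *				state while keeping allocations for re-init
 *	dp_soc_detach_wifi3()	frees everything, running the deinit stage
 *				first if it has not already happened
 *
 * dp_pdev_deinit_wifi3()/dp_pdev_detach_wifi3() follow the same
 * convention at pdev scope.
 */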
4376 
4377 #if !defined(DISABLE_MON_CONFIG)
4378 /**
4379  * dp_mon_htt_srng_setup() - Prepare HTT messages for Monitor rings
4380  * @soc: soc handle
4381  * @pdev: physical device handle
4382  * @mac_id: mac ring number
4383  * @mac_for_pdev: mac id mapped from (mac_id, pdev_id)
4384  *
4385  * Return: non-zero for failure, zero for success
4386  */
4387 static QDF_STATUS dp_mon_htt_srng_setup(struct dp_soc *soc,
4388 					struct dp_pdev *pdev,
4389 					int mac_id,
4390 					int mac_for_pdev)
4391 {
4392 	QDF_STATUS status = QDF_STATUS_SUCCESS;
4393 
4394 	if (soc->wlan_cfg_ctx->rxdma1_enable) {
4395 		status = htt_srng_setup(soc->htt_handle, mac_for_pdev,
4396 					pdev->rxdma_mon_buf_ring[mac_id]
4397 					.hal_srng,
4398 					RXDMA_MONITOR_BUF);
4399 
4400 		if (status != QDF_STATUS_SUCCESS) {
4401 			dp_err("Failed to send htt srng setup message for Rxdma mon buf ring");
4402 			return status;
4403 		}
4404 
4405 		status = htt_srng_setup(soc->htt_handle, mac_for_pdev,
4406 					pdev->rxdma_mon_dst_ring[mac_id]
4407 					.hal_srng,
4408 					RXDMA_MONITOR_DST);
4409 
4410 		if (status != QDF_STATUS_SUCCESS) {
4411 			dp_err("Failed to send htt srng setup message for Rxdma mon dst ring");
4412 			return status;
4413 		}
4414 
4415 		status = htt_srng_setup(soc->htt_handle, mac_for_pdev,
4416 					pdev->rxdma_mon_status_ring[mac_id]
4417 					.hal_srng,
4418 					RXDMA_MONITOR_STATUS);
4419 
4420 		if (status != QDF_STATUS_SUCCESS) {
4421 			dp_err("Failed to send htt srng setup message for Rxdma mon status ring");
4422 			return status;
4423 		}
4424 
4425 		status = htt_srng_setup(soc->htt_handle, mac_for_pdev,
4426 					pdev->rxdma_mon_desc_ring[mac_id]
4427 					.hal_srng,
4428 					RXDMA_MONITOR_DESC);
4429 
4430 		if (status != QDF_STATUS_SUCCESS) {
4431 			dp_err("Failed to send htt srng setup message for Rxdma mon desc ring");
4432 			return status;
4433 		}
4434 	} else {
4435 		status = htt_srng_setup(soc->htt_handle, mac_for_pdev,
4436 					pdev->rxdma_mon_status_ring[mac_id]
4437 					.hal_srng,
4438 					RXDMA_MONITOR_STATUS);
4439 
4440 		if (status != QDF_STATUS_SUCCESS) {
4441 			dp_err("Failed to send htt srng setup message for Rxdma mon status ring");
4442 			return status;
4443 		}
4444 	}
4445 
4446 	return status;
4447 
4448 }
4449 #else
4450 static QDF_STATUS dp_mon_htt_srng_setup(struct dp_soc *soc,
4451 					struct dp_pdev *pdev,
4452 					int mac_id,
4453 					int mac_for_pdev)
4454 {
4455 	return QDF_STATUS_SUCCESS;
4456 }
4457 #endif
4458 
4459 /*
4460  * dp_rxdma_ring_config() - configure the RX DMA rings
4461  *
4462  * This function is used to configure the MAC rings.
4463  * On MCL, the host provides buffers in the Host2FW ring;
4464  * FW refills (copies) buffers to the ring and updates
4465  * ring_idx in a register.
4466  *
4467  * @soc: data path SoC handle
4468  *
4469  * Return: zero on success, non-zero on failure
4470  */
4471 #ifdef QCA_HOST2FW_RXBUF_RING
4472 static QDF_STATUS dp_rxdma_ring_config(struct dp_soc *soc)
4473 {
4474 	int i;
4475 	QDF_STATUS status = QDF_STATUS_SUCCESS;
4476 	for (i = 0; i < MAX_PDEV_CNT; i++) {
4477 		struct dp_pdev *pdev = soc->pdev_list[i];
4478 
4479 		if (pdev) {
4480 			int mac_id;
4481 			bool dbs_enable = false;
4482 			int max_mac_rings =
4483 				 wlan_cfg_get_num_mac_rings
4484 				(pdev->wlan_cfg_ctx);
4485 
4486 			htt_srng_setup(soc->htt_handle, 0,
4487 				 pdev->rx_refill_buf_ring.hal_srng,
4488 				 RXDMA_BUF);
4489 
4490 			if (pdev->rx_refill_buf_ring2.hal_srng)
4491 				htt_srng_setup(soc->htt_handle, 0,
4492 					pdev->rx_refill_buf_ring2.hal_srng,
4493 					RXDMA_BUF);
4494 
4495 			if (soc->cdp_soc.ol_ops->
4496 				is_hw_dbs_2x2_capable) {
4497 				dbs_enable = soc->cdp_soc.ol_ops->
4498 					is_hw_dbs_2x2_capable(
4499 							(void *)soc->ctrl_psoc);
4500 			}
4501 
4502 			if (dbs_enable) {
4503 				QDF_TRACE(QDF_MODULE_ID_TXRX,
4504 				QDF_TRACE_LEVEL_ERROR,
4505 				FL("DBS enabled max_mac_rings %d"),
4506 					 max_mac_rings);
4507 			} else {
4508 				max_mac_rings = 1;
4509 				QDF_TRACE(QDF_MODULE_ID_TXRX,
4510 					 QDF_TRACE_LEVEL_ERROR,
4511 					 FL("DBS disabled, max_mac_rings %d"),
4512 					 max_mac_rings);
4513 			}
4514 
4515 			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
4516 					 FL("pdev_id %d max_mac_rings %d"),
4517 					 pdev->pdev_id, max_mac_rings);
4518 
4519 			for (mac_id = 0; mac_id < max_mac_rings; mac_id++) {
4520 				int mac_for_pdev = dp_get_mac_id_for_pdev(
4521 							mac_id, pdev->pdev_id);
4522 
4523 				QDF_TRACE(QDF_MODULE_ID_TXRX,
4524 					 QDF_TRACE_LEVEL_ERROR,
4525 					 FL("mac_id %d"), mac_for_pdev);
4526 
4527 				htt_srng_setup(soc->htt_handle, mac_for_pdev,
4528 					 pdev->rx_mac_buf_ring[mac_id]
4529 						.hal_srng,
4530 					 RXDMA_BUF);
4531 				htt_srng_setup(soc->htt_handle, mac_for_pdev,
4532 					pdev->rxdma_err_dst_ring[mac_id]
4533 						.hal_srng,
4534 					RXDMA_DST);
4535 
4536 				/* Configure monitor mode rings */
4537 				status = dp_mon_htt_srng_setup(soc, pdev,
4538 							       mac_id,
4539 							       mac_for_pdev);
4540 				if (status != QDF_STATUS_SUCCESS) {
4541 					dp_err("Failed to send htt monitor messages to target");
4542 					return status;
4543 				}
4544 
4545 			}
4546 		}
4547 	}
4548 
4549 	/*
4550 	 * Timer to reap rxdma status rings.
4551 	 * Needed until we enable ppdu end interrupts
4552 	 */
4553 	qdf_timer_init(soc->osdev, &soc->mon_reap_timer,
4554 			dp_service_mon_rings, (void *)soc,
4555 			QDF_TIMER_TYPE_WAKE_APPS);
4556 	soc->reap_timer_init = 1;
4557 	return status;
4558 }
4559 #else
4560 /* This is only for WIN */
4561 static QDF_STATUS dp_rxdma_ring_config(struct dp_soc *soc)
4562 {
4563 	int i;
4564 	int mac_id;
4565 	QDF_STATUS status = QDF_STATUS_SUCCESS;
4566 
4567 	for (i = 0; i < MAX_PDEV_CNT; i++) {
4568 		struct dp_pdev *pdev = soc->pdev_list[i];
4569 
4570 		if (!pdev)
4571 			continue;
4572 
4573 		for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
4574 			int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, i);
4575 
4576 			htt_srng_setup(soc->htt_handle, mac_for_pdev,
4577 				pdev->rx_refill_buf_ring.hal_srng, RXDMA_BUF);
4578 #ifndef DISABLE_MON_CONFIG
4579 			htt_srng_setup(soc->htt_handle, mac_for_pdev,
4580 				pdev->rxdma_mon_buf_ring[mac_id].hal_srng,
4581 				RXDMA_MONITOR_BUF);
4582 			htt_srng_setup(soc->htt_handle, mac_for_pdev,
4583 				pdev->rxdma_mon_dst_ring[mac_id].hal_srng,
4584 				RXDMA_MONITOR_DST);
4585 			htt_srng_setup(soc->htt_handle, mac_for_pdev,
4586 				pdev->rxdma_mon_status_ring[mac_id].hal_srng,
4587 				RXDMA_MONITOR_STATUS);
4588 			htt_srng_setup(soc->htt_handle, mac_for_pdev,
4589 				pdev->rxdma_mon_desc_ring[mac_id].hal_srng,
4590 				RXDMA_MONITOR_DESC);
4591 #endif
4592 			htt_srng_setup(soc->htt_handle, mac_for_pdev,
4593 				pdev->rxdma_err_dst_ring[mac_id].hal_srng,
4594 				RXDMA_DST);
4595 		}
4596 	}
4597 	return status;
4598 }
4599 #endif
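
/*
 * The two dp_rxdma_ring_config() variants reflect the two hosting
 * models: with QCA_HOST2FW_RXBUF_RING (MCL) the host programs per-MAC
 * rx_mac_buf rings and arms mon_reap_timer to poll the monitor status
 * rings, while the WIN build maps the refill, monitor and error
 * destination rings for every RXDMA ring of each pdev directly.
 */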
4600 
4601 #ifdef NO_RX_PKT_HDR_TLV
4602 static QDF_STATUS
4603 dp_rxdma_ring_sel_cfg(struct dp_soc *soc)
4604 {
4605 	int i;
4606 	int mac_id;
4607 	struct htt_rx_ring_tlv_filter htt_tlv_filter = {0};
4608 	QDF_STATUS status = QDF_STATUS_SUCCESS;
4609 
4610 	htt_tlv_filter.mpdu_start = 1;
4611 	htt_tlv_filter.msdu_start = 1;
4612 	htt_tlv_filter.mpdu_end = 1;
4613 	htt_tlv_filter.msdu_end = 1;
4614 	htt_tlv_filter.attention = 1;
4615 	htt_tlv_filter.packet = 1;
4616 	htt_tlv_filter.packet_header = 0;
4617 
4618 	htt_tlv_filter.ppdu_start = 0;
4619 	htt_tlv_filter.ppdu_end = 0;
4620 	htt_tlv_filter.ppdu_end_user_stats = 0;
4621 	htt_tlv_filter.ppdu_end_user_stats_ext = 0;
4622 	htt_tlv_filter.ppdu_end_status_done = 0;
4623 	htt_tlv_filter.enable_fp = 1;
4624 	htt_tlv_filter.enable_md = 0;
4626 	htt_tlv_filter.enable_mo = 0;
4627 
4628 	htt_tlv_filter.fp_mgmt_filter = 0;
4629 	htt_tlv_filter.fp_ctrl_filter = FILTER_CTRL_BA_REQ;
4630 	htt_tlv_filter.fp_data_filter = (FILTER_DATA_UCAST |
4631 					 FILTER_DATA_MCAST |
4632 					 FILTER_DATA_DATA);
4633 	htt_tlv_filter.mo_mgmt_filter = 0;
4634 	htt_tlv_filter.mo_ctrl_filter = 0;
4635 	htt_tlv_filter.mo_data_filter = 0;
4636 	htt_tlv_filter.md_data_filter = 0;
4637 
4638 	htt_tlv_filter.offset_valid = true;
4639 
4640 	htt_tlv_filter.rx_packet_offset = RX_PKT_TLVS_LEN;
4641 	/* Not subscribing rx_pkt_header */
4642 	htt_tlv_filter.rx_header_offset = 0;
4643 	htt_tlv_filter.rx_mpdu_start_offset =
4644 				HAL_RX_PKT_TLV_MPDU_START_OFFSET(soc->hal_soc);
4645 	htt_tlv_filter.rx_mpdu_end_offset =
4646 				HAL_RX_PKT_TLV_MPDU_END_OFFSET(soc->hal_soc);
4647 	htt_tlv_filter.rx_msdu_start_offset =
4648 				HAL_RX_PKT_TLV_MSDU_START_OFFSET(soc->hal_soc);
4649 	htt_tlv_filter.rx_msdu_end_offset =
4650 				HAL_RX_PKT_TLV_MSDU_END_OFFSET(soc->hal_soc);
4651 	htt_tlv_filter.rx_attn_offset =
4652 				HAL_RX_PKT_TLV_ATTN_OFFSET(soc->hal_soc);
4653 
4654 	for (i = 0; i < MAX_PDEV_CNT; i++) {
4655 		struct dp_pdev *pdev = soc->pdev_list[i];
4656 
4657 		if (!pdev)
4658 			continue;
4659 
4660 		for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
4661 			int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id,
4662 					pdev->pdev_id);
4663 
4664 			htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
4665 					    pdev->rx_refill_buf_ring.hal_srng,
4666 					    RXDMA_BUF, RX_BUFFER_SIZE,
4667 					    &htt_tlv_filter);
4668 		}
4669 	}
4670 	return status;
4671 }
4672 #else
4673 static QDF_STATUS
4674 dp_rxdma_ring_sel_cfg(struct dp_soc *soc)
4675 {
4676 	return QDF_STATUS_SUCCESS;
4677 }
4678 #endif
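
/*
 * With NO_RX_PKT_HDR_TLV the host subscribes to every per-packet TLV
 * except rx_packet_header, which is why rx_header_offset stays 0 while
 * the other offsets come from the HAL_RX_PKT_TLV_*_OFFSET() macros.
 * Re-enabling the header TLV would look roughly like the lines below,
 * where hal_rx_pkt_tlv_hdr_offset() is a hypothetical helper named only
 * for illustration:
 *
 *	htt_tlv_filter.packet_header = 1;
 *	htt_tlv_filter.rx_header_offset =
 *			hal_rx_pkt_tlv_hdr_offset(soc->hal_soc);
 */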
4679 
4680 /*
4681  * dp_rx_target_fst_config() - configure the RXOLE Flow Search Engine
4682  *
4683  * This function is used to configure the FSE HW block in RX OLE on a
4684  * per pdev basis. Here, we will be programming parameters related to
4685  * the Flow Search Table.
4686  *
4687  * @soc: data path SoC handle
4688  *
4689  * Return: zero on success, non-zero on failure
4690  */
4691 #ifdef WLAN_SUPPORT_RX_FLOW_TAG
4692 static QDF_STATUS
4693 dp_rx_target_fst_config(struct dp_soc *soc)
4694 {
4695 	int i;
4696 	QDF_STATUS status = QDF_STATUS_SUCCESS;
4697 
4698 	for (i = 0; i < MAX_PDEV_CNT; i++) {
4699 		struct dp_pdev *pdev = soc->pdev_list[i];
4700 
4701 		/* Flow search is not enabled if NSS offload is enabled */
4702 		if (pdev &&
4703 		    !wlan_cfg_get_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx)) {
4704 			status = dp_rx_flow_send_fst_fw_setup(pdev->soc, pdev);
4705 			if (status != QDF_STATUS_SUCCESS)
4706 				break;
4707 		}
4708 	}
4709 	return status;
4710 }
4711 #else
4712 /**
4713  * dp_rx_target_fst_config() - Configure RX OLE FSE engine in HW
4714  * @soc: SoC handle
4715  *
4716  * Return: Success
4717  */
4718 static inline QDF_STATUS
4719 dp_rx_target_fst_config(struct dp_soc *soc)
4720 {
4721 	return QDF_STATUS_SUCCESS;
4722 }
4723 
4724 #endif /* WLAN_SUPPORT_RX_FLOW_TAG */
4725 
4726 /*
4727  * dp_soc_attach_target_wifi3() - SOC initialization in the target
4728  * @cdp_soc: Opaque Datapath SOC handle
4729  *
4730  * Return: zero on success, non-zero on failure
4731  */
4732 static QDF_STATUS
4733 dp_soc_attach_target_wifi3(struct cdp_soc_t *cdp_soc)
4734 {
4735 	struct dp_soc *soc = (struct dp_soc *)cdp_soc;
4736 	QDF_STATUS status = QDF_STATUS_SUCCESS;
4737 
4738 	htt_soc_attach_target(soc->htt_handle);
4739 
4740 	status = dp_rxdma_ring_config(soc);
4741 	if (status != QDF_STATUS_SUCCESS) {
4742 		dp_err("Failed to send htt srng setup messages to target");
4743 		return status;
4744 	}
4745 
4746 	status = dp_rxdma_ring_sel_cfg(soc);
4747 	if (status != QDF_STATUS_SUCCESS) {
4748 		dp_err("Failed to send htt ring config message to target");
4749 		return status;
4750 	}
4751 
4752 	status = dp_rx_target_fst_config(soc);
4753 	if (status != QDF_STATUS_SUCCESS) {
4754 		dp_err("Failed to send htt fst setup config message to target");
4755 		return status;
4756 	}
4757 
4758 	DP_STATS_INIT(soc);
4759 
4760 	/* initialize work queue for stats processing */
4761 	qdf_create_work(0, &soc->htt_stats.work, htt_t2h_stats_handler, soc);
4762 
4763 	qdf_minidump_log(soc, sizeof(*soc), "dp_soc");
4764 
4765 	return QDF_STATUS_SUCCESS;
4766 }
4767 
4768 /*
4769  * dp_soc_get_nss_cfg_wifi3() - SOC get nss config
4770  * @cdp_soc: Datapath SOC handle
4771  */
4772 static int dp_soc_get_nss_cfg_wifi3(struct cdp_soc_t *cdp_soc)
4773 {
4774 	struct dp_soc *dsoc = (struct dp_soc *)cdp_soc;
4775 	return wlan_cfg_get_dp_soc_nss_cfg(dsoc->wlan_cfg_ctx);
4776 }
4777 
4778 /*
4779  * dp_soc_set_nss_cfg_wifi3() - SOC set nss config
4780  * @cdp_soc: Datapath SOC handle
4781  * @config: nss config
4782  */
4783 static void dp_soc_set_nss_cfg_wifi3(struct cdp_soc_t *cdp_soc, int config)
4784 {
4785 	struct dp_soc *dsoc = (struct dp_soc *)cdp_soc;
4786 	struct wlan_cfg_dp_soc_ctxt *wlan_cfg_ctx = dsoc->wlan_cfg_ctx;
4787 
4788 	wlan_cfg_set_dp_soc_nss_cfg(wlan_cfg_ctx, config);
4789 
4790 	/*
4791 	 * TODO: masked out based on the per offloaded radio
4792 	 */
4793 	switch (config) {
4794 	case dp_nss_cfg_default:
4795 		break;
4796 	case dp_nss_cfg_first_radio:
4797 		/*
4798 		 * This configuration is valid for a single-band radio which
4799 		 * is also NSS offloaded.
4800 		 */
4801 	case dp_nss_cfg_dbdc:
4802 	case dp_nss_cfg_dbtc:
4803 		wlan_cfg_set_num_tx_desc_pool(wlan_cfg_ctx, 0);
4804 		wlan_cfg_set_num_tx_ext_desc_pool(wlan_cfg_ctx, 0);
4805 		wlan_cfg_set_num_tx_desc(wlan_cfg_ctx, 0);
4806 		wlan_cfg_set_num_tx_ext_desc(wlan_cfg_ctx, 0);
4807 		break;
4808 	default:
4809 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
4810 			  "Invalid offload config %d", config);
4811 	}
4812 
4813 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
4814 		  FL("nss-wifi<0> nss config is enabled"));
4815 }
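
/*
 * For the dbdc/dbtc (and offloaded first_radio) cases the NSS offload
 * path owns the Tx data path, so the switch above sizes all host Tx
 * descriptor pools to zero.  A DBDC offload platform would therefore
 * configure, for example:
 *
 *	dp_soc_set_nss_cfg_wifi3(cdp_soc, dp_nss_cfg_dbdc);
 */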
4816 
4817 /*
4818 * dp_vdev_attach_wifi3() - attach txrx vdev
4819 * @txrx_pdev: Datapath PDEV handle
4820 * @vdev_mac_addr: MAC address of the virtual interface
4821 * @vdev_id: VDEV Id
4822 * @wlan_op_mode: VDEV operating mode
4823 * @subtype: VDEV operating subtype
4824 *
4825 * Return: DP VDEV handle on success, NULL on failure
4826 */
4827 static struct cdp_vdev *dp_vdev_attach_wifi3(struct cdp_soc_t *cdp_soc,
4828 					     uint8_t pdev_id,
4829 					     uint8_t *vdev_mac_addr,
4830 					     uint8_t vdev_id,
4831 					     enum wlan_op_mode op_mode,
4832 					     enum wlan_op_subtype subtype)
4833 {
4834 	struct dp_soc *soc = (struct dp_soc *)cdp_soc;
4835 	struct dp_pdev *pdev =
4836 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
4837 						   pdev_id);
4838 	struct dp_vdev *vdev = qdf_mem_malloc(sizeof(*vdev));
4839 
4840 	if (!pdev) {
4841 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
4842 			  FL("DP PDEV is Null for pdev id %d"), pdev_id);
4843 		qdf_mem_free(vdev);
4844 		goto fail0;
4845 	}
4846 
4847 	if (!vdev) {
4848 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
4849 			FL("DP VDEV memory allocation failed"));
4850 		goto fail0;
4851 	}
4852 
4853 	vdev->pdev = pdev;
4854 	vdev->vdev_id = vdev_id;
4855 	vdev->opmode = op_mode;
4856 	vdev->subtype = subtype;
4857 	vdev->osdev = soc->osdev;
4858 
4859 	vdev->osif_rx = NULL;
4860 	vdev->osif_rsim_rx_decap = NULL;
4861 	vdev->osif_get_key = NULL;
4862 	vdev->osif_rx_mon = NULL;
4863 	vdev->osif_tx_free_ext = NULL;
4864 	vdev->osif_vdev = NULL;
4865 
4866 	vdev->delete.pending = 0;
4867 	vdev->safemode = 0;
4868 	vdev->drop_unenc = 1;
4869 	vdev->sec_type = cdp_sec_type_none;
4870 #ifdef notyet
4871 	vdev->filters_num = 0;
4872 #endif
4873 
4874 	qdf_mem_copy(
4875 		&vdev->mac_addr.raw[0], vdev_mac_addr, QDF_MAC_ADDR_SIZE);
4876 
4877 	/* TODO: Initialize default HTT meta data that will be used in
4878 	 * TCL descriptors for packets transmitted from this VDEV
4879 	 */
4880 
4881 	TAILQ_INIT(&vdev->peer_list);
4882 	dp_peer_multipass_list_init(vdev);
4883 
4884 	if ((soc->intr_mode == DP_INTR_POLL) &&
4885 	    wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx) != 0) {
4886 		if ((pdev->vdev_count == 0) ||
4887 		    (wlan_op_mode_monitor == vdev->opmode))
4888 			qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
4889 	}
4890 
4891 	soc->vdev_id_map[vdev_id] = vdev;
4892 
4893 	if (wlan_op_mode_monitor == vdev->opmode) {
4894 		pdev->monitor_vdev = vdev;
4895 		return (struct cdp_vdev *)vdev;
4896 	}
4897 
4898 	vdev->tx_encap_type = wlan_cfg_pkt_type(soc->wlan_cfg_ctx);
4899 	vdev->rx_decap_type = wlan_cfg_pkt_type(soc->wlan_cfg_ctx);
4900 	vdev->dscp_tid_map_id = 0;
4901 	vdev->mcast_enhancement_en = 0;
4902 	vdev->raw_mode_war = wlan_cfg_get_raw_mode_war(soc->wlan_cfg_ctx);
4903 	vdev->prev_tx_enq_tstamp = 0;
4904 	vdev->prev_rx_deliver_tstamp = 0;
4905 
4906 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
4907 	/* add this vdev into the pdev's list */
4908 	TAILQ_INSERT_TAIL(&pdev->vdev_list, vdev, vdev_list_elem);
4909 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
4910 	pdev->vdev_count++;
4911 
4912 	if (wlan_op_mode_sta != vdev->opmode)
4913 		vdev->ap_bridge_enabled = true;
4914 	else
4915 		vdev->ap_bridge_enabled = false;
4916 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
4917 		  "%s: wlan_cfg_ap_bridge_enabled %d",
4918 		  __func__, vdev->ap_bridge_enabled);
4919 
4920 	dp_tx_vdev_attach(vdev);
4921 
4922 	if (pdev->vdev_count == 1)
4923 		dp_lro_hash_setup(soc, pdev);
4924 
4925 	dp_info("Created vdev %pK (%pM)", vdev, vdev->mac_addr.raw);
4926 	DP_STATS_INIT(vdev);
4927 
4928 	if (wlan_op_mode_sta == vdev->opmode)
4929 		dp_peer_create_wifi3((struct cdp_soc_t *)soc, vdev_id,
4930 				     vdev->mac_addr.raw);
4931 
4932 	return (struct cdp_vdev *)vdev;
4933 
4934 fail0:
4935 	return NULL;
4936 }
4937 
4938 /**
4939  * dp_vdev_register_wifi3() - Register VDEV operations from osif layer
4940  * @soc: Datapath soc handle
4941  * @vdev_id: id of Datapath VDEV handle
4942  * @osif_vdev: OSIF vdev handle
4943  * @txrx_ops: Tx and Rx operations
4944  *
4945  * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_FAILURE on failure
4946  */
4947 static QDF_STATUS dp_vdev_register_wifi3(struct cdp_soc_t *soc,
4948 					 uint8_t vdev_id,
4949 					 ol_osif_vdev_handle osif_vdev,
4950 					 struct ol_txrx_ops *txrx_ops)
4951 {
4952 	struct dp_vdev *vdev =
4953 		dp_get_vdev_from_soc_vdev_id_wifi3((struct dp_soc *)soc,
4954 						   vdev_id);
4955 
4956 	if (!vdev)
4957 		return QDF_STATUS_E_FAILURE;
4958 
4959 	vdev->osif_vdev = osif_vdev;
4960 	vdev->osif_rx = txrx_ops->rx.rx;
4961 	vdev->osif_rx_stack = txrx_ops->rx.rx_stack;
4962 	vdev->osif_rx_flush = txrx_ops->rx.rx_flush;
4963 	vdev->osif_gro_flush = txrx_ops->rx.rx_gro_flush;
4964 	vdev->osif_rsim_rx_decap = txrx_ops->rx.rsim_rx_decap;
4965 	vdev->osif_get_key = txrx_ops->get_key;
4966 	vdev->osif_rx_mon = txrx_ops->rx.mon;
4967 	vdev->osif_tx_free_ext = txrx_ops->tx.tx_free_ext;
4968 	vdev->tx_comp = txrx_ops->tx.tx_comp;
4969 #ifdef notyet
4970 #if ATH_SUPPORT_WAPI
4971 	vdev->osif_check_wai = txrx_ops->rx.wai_check;
4972 #endif
4973 #endif
4974 #ifdef UMAC_SUPPORT_PROXY_ARP
4975 	vdev->osif_proxy_arp = txrx_ops->proxy_arp;
4976 #endif
4977 	vdev->me_convert = txrx_ops->me_convert;
4978 
4979 	/* TODO: Enable the following once Tx code is integrated */
4980 	if (vdev->mesh_vdev)
4981 		txrx_ops->tx.tx = dp_tx_send_mesh;
4982 	else
4983 		txrx_ops->tx.tx = dp_tx_send;
4984 
4985 	txrx_ops->tx.tx_exception = dp_tx_send_exception;
4986 
4987 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW,
4988 		"DP Vdev Register success");
4989 
4990 	return QDF_STATUS_SUCCESS;
4991 }
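
/*
 * A minimal registration sketch from an OSIF shim; every osif_*-named
 * symbol below is illustrative and not part of this driver:
 *
 *	struct ol_txrx_ops ops = { 0 };
 *
 *	ops.rx.rx = osif_rx_deliver;
 *	ops.rx.rx_stack = osif_rx_stack_deliver;
 *	ops.get_key = osif_get_key;
 *	if (dp_vdev_register_wifi3(soc, vdev_id, osif_vdev, &ops) !=
 *	    QDF_STATUS_SUCCESS)
 *		return;
 *
 * On return, ops.tx.tx points at dp_tx_send() (or dp_tx_send_mesh() for
 * a mesh vdev) and ops.tx.tx_exception at dp_tx_send_exception(), ready
 * for the shim to use on the transmit path.
 */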
4992 
4993 /**
4994  * dp_peer_flush_ast_entry() - Forcibly flush all AST entries of a peer
4995  * @soc: Datapath soc handle
4996  * @peer: Datapath peer handle
4997  * @peer_id: Peer ID
4998  * @vdev_id: Vdev ID
4999  *
5000  * Return: void
5001  */
5002 static void dp_peer_flush_ast_entry(struct dp_soc *soc,
5003 				    struct dp_peer *peer,
5004 				    uint16_t peer_id,
5005 				    uint8_t vdev_id)
5006 {
5007 	struct dp_ast_entry *ase, *tmp_ase;
5008 
5009 	if (soc->is_peer_map_unmap_v2) {
5010 		DP_PEER_ITERATE_ASE_LIST(peer, ase, tmp_ase) {
5011 			dp_rx_peer_unmap_handler(soc,
5012 						 peer_id,
5013 						 vdev_id,
5014 						 ase->mac_addr.raw,
5015 						 1);
5016 		}
5017 	}
5018 }
5019 
5020 /**
5021  * dp_vdev_flush_peers() - Forcibly flush peers of vdev
5022  * @vdev: Datapath VDEV handle
5023  * @unmap_only: Flag to indicate "only unmap"
5024  *
5025  * Return: void
5026  */
5027 static void dp_vdev_flush_peers(struct cdp_vdev *vdev_handle, bool unmap_only)
5028 {
5029 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
5030 	struct dp_pdev *pdev = vdev->pdev;
5031 	struct dp_soc *soc = pdev->soc;
5032 	struct dp_peer *peer;
5033 	uint16_t *peer_ids;
5034 	struct dp_peer **peer_array = NULL;
5035 	uint8_t i = 0, j = 0;
5036 	uint8_t m = 0, n = 0;
5037 
5038 	peer_ids = qdf_mem_malloc(soc->max_peers * sizeof(peer_ids[0]));
5039 	if (!peer_ids) {
5040 		dp_err("DP alloc failure - unable to flush peers");
5041 		return;
5042 	}
5043 
5044 	if (!unmap_only) {
5045 		peer_array = qdf_mem_malloc(
5046 				soc->max_peers * sizeof(struct dp_peer *));
5047 		if (!peer_array) {
5048 			qdf_mem_free(peer_ids);
5049 			dp_err("DP alloc failure - unable to flush peers");
5050 			return;
5051 		}
5052 	}
5053 
5054 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
5055 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
5056 		if (!unmap_only && n < soc->max_peers)
5057 			peer_array[n++] = peer;
5058 
5059 		for (i = 0; i < MAX_NUM_PEER_ID_PER_PEER; i++)
5060 			if (peer->peer_ids[i] != HTT_INVALID_PEER)
5061 				if (j < soc->max_peers)
5062 					peer_ids[j++] = peer->peer_ids[i];
5063 	}
5064 	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
5065 
5066 	/*
5067 	 * Even if a peer's id is invalid, the peer still needs to be
5068 	 * flushed when its valid flag is set; this covers the NAN + SSR case.
5069 	 */
5070 	if (!unmap_only) {
5071 		for (m = 0; m < n ; m++) {
5072 			peer = peer_array[m];
5073 
5074 			dp_info("peer: %pM is getting deleted",
5075 				peer->mac_addr.raw);
5076 			/* only if peer valid is true */
5077 			if (peer->valid)
5078 				dp_peer_delete_wifi3((struct cdp_soc_t *)soc,
5079 						     vdev->vdev_id,
5080 						     peer->mac_addr.raw, 0);
5081 		}
5082 		qdf_mem_free(peer_array);
5083 	}
5084 
5085 	for (i = 0; i < j ; i++) {
5086 		peer = __dp_peer_find_by_id(soc, peer_ids[i]);
5087 
5088 		if (!peer)
5089 			continue;
5090 
5091 		dp_info("peer: %pM is getting unmapped",
5092 			peer->mac_addr.raw);
5093 		/* free AST entries of peer */
5094 		dp_peer_flush_ast_entry(soc, peer,
5095 					peer_ids[i],
5096 					vdev->vdev_id);
5097 
5098 		dp_rx_peer_unmap_handler(soc, peer_ids[i],
5099 					 vdev->vdev_id,
5100 					 peer->mac_addr.raw, 0);
5101 	}
5102 
5103 	qdf_mem_free(peer_ids);
5104 	dp_info("Flushed peers for vdev object %pK", vdev);
5105 }
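
/*
 * dp_vdev_flush_peers() works in two passes on purpose: peer pointers
 * and peer ids are first harvested under peer_ref_mutex, and only after
 * the lock is dropped are dp_peer_delete_wifi3() and
 * dp_rx_peer_unmap_handler() invoked, since both may reacquire
 * peer_ref_mutex and reshape vdev->peer_list while they run.
 */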
5106 
5107 /*
5108  * dp_vdev_detach_wifi3() - Detach txrx vdev
5109  * @cdp_soc: Datapath soc handle
5110  * @vdev_id: VDEV Id
5111  * @callback: Callback OL_IF on completion of detach
5112  * @cb_context:	Callback context
5113  * Return: QDF_STATUS
5114  */
5115 static QDF_STATUS dp_vdev_detach_wifi3(struct cdp_soc_t *cdp_soc,
5116 				       uint8_t vdev_id,
5117 				       ol_txrx_vdev_delete_cb callback,
5118 				       void *cb_context)
5119 {
5120 	struct dp_soc *soc = (struct dp_soc *)cdp_soc;
5121 	struct dp_pdev *pdev;
5122 	struct dp_neighbour_peer *peer = NULL;
5123 	struct dp_neighbour_peer *temp_peer = NULL;
5124 	struct dp_vdev *vdev = dp_get_vdev_from_soc_vdev_id_wifi3(soc, vdev_id);
5125 
5126 	if (!vdev)
5127 		return QDF_STATUS_E_FAILURE;
5128 
5129 	pdev = vdev->pdev;
5130 
5131 	soc->vdev_id_map[vdev->vdev_id] = NULL;
5132 
5133 	if (wlan_op_mode_sta == vdev->opmode)
5134 		dp_peer_delete_wifi3((struct cdp_soc_t *)soc, vdev->vdev_id,
5135 				     vdev->vap_self_peer->mac_addr.raw, 0);
5136 
5137 	/*
5138 	 * If the Target is hung, flush all peers before detaching the
5139 	 * vdev; this frees all references held due to missing unmap
5140 	 * commands from the Target.
5141 	 */
5142 	if (!hif_is_target_ready(HIF_GET_SOFTC(soc->hif_handle)))
5143 		dp_vdev_flush_peers((struct cdp_vdev *)vdev, false);
5144 
5145 	/*
5146 	 * Use peer_ref_mutex while accessing peer_list, in case
5147 	 * a peer is in the process of being removed from the list.
5148 	 */
5149 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
5150 	/* check that the vdev has no peers allocated */
5151 	if (!TAILQ_EMPTY(&vdev->peer_list)) {
5152 		/* debug print - will be removed later */
5153 		dp_warn("not deleting vdev object %pK (%pM) until deletion finishes for all its peers",
5154 			vdev, vdev->mac_addr.raw);
5155 		/* indicate that the vdev needs to be deleted */
5156 		vdev->delete.pending = 1;
5157 		vdev->delete.callback = callback;
5158 		vdev->delete.context = cb_context;
5159 		qdf_spin_unlock_bh(&soc->peer_ref_mutex);
5160 		return QDF_STATUS_E_FAILURE;
5161 	}
5162 	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
5163 
5164 	if (wlan_op_mode_monitor == vdev->opmode)
5165 		goto free_vdev;
5166 
5167 	qdf_spin_lock_bh(&pdev->neighbour_peer_mutex);
5168 	if (!soc->hw_nac_monitor_support) {
5169 		TAILQ_FOREACH(peer, &pdev->neighbour_peers_list,
5170 			      neighbour_peer_list_elem) {
5171 			QDF_ASSERT(peer->vdev != vdev);
5172 		}
5173 	} else {
5174 		TAILQ_FOREACH_SAFE(peer, &pdev->neighbour_peers_list,
5175 				   neighbour_peer_list_elem, temp_peer) {
5176 			if (peer->vdev == vdev) {
5177 				TAILQ_REMOVE(&pdev->neighbour_peers_list, peer,
5178 					     neighbour_peer_list_elem);
5179 				qdf_mem_free(peer);
5180 			}
5181 		}
5182 	}
5183 	qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex);
5184 
5185 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
5186 	dp_tx_vdev_detach(vdev);
5187 	dp_rx_vdev_detach(vdev);
5188 	/* remove the vdev from its parent pdev's list */
5189 	TAILQ_REMOVE(&pdev->vdev_list, vdev, vdev_list_elem);
5190 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
5191 
5192 free_vdev:
5193 	if (wlan_op_mode_monitor == vdev->opmode)
5194 		pdev->monitor_vdev = NULL;
5195 
5196 	dp_info("deleting vdev object %pK (%pM)", vdev, vdev->mac_addr.raw);
5197 	qdf_mem_free(vdev);
5198 
5199 	if (callback)
5200 		callback(cb_context);
5201 
5202 	return QDF_STATUS_SUCCESS;
5203 }
5204 
5205 #ifdef FEATURE_AST
5206 /*
5207  * dp_peer_delete_ast_entries(): Delete all AST entries for a peer
5208  * @soc: datapath soc handle
5209  * @peer: datapath peer handle
5210  *
5211  * Delete the AST entries belonging to a peer
5212  */
5213 static inline void dp_peer_delete_ast_entries(struct dp_soc *soc,
5214 					      struct dp_peer *peer)
5215 {
5216 	struct dp_ast_entry *ast_entry, *temp_ast_entry;
5217 
5218 	DP_PEER_ITERATE_ASE_LIST(peer, ast_entry, temp_ast_entry)
5219 		dp_peer_del_ast(soc, ast_entry);
5220 
5221 	peer->self_ast_entry = NULL;
5222 }
5223 #else
5224 static inline void dp_peer_delete_ast_entries(struct dp_soc *soc,
5225 					      struct dp_peer *peer)
5226 {
5227 }
5228 #endif
5229 #if ATH_SUPPORT_WRAP
5230 static inline struct dp_peer *dp_peer_can_reuse(struct dp_vdev *vdev,
5231 						uint8_t *peer_mac_addr)
5232 {
5233 	struct dp_peer *peer;
5234 
5235 	peer = dp_peer_find_hash_find(vdev->pdev->soc, peer_mac_addr,
5236 				      0, vdev->vdev_id);
5237 	if (!peer)
5238 		return NULL;
5239 
5240 	if (peer->bss_peer)
5241 		return peer;
5242 
5243 	dp_peer_unref_delete(peer);
5244 	return NULL;
5245 }
5246 #else
5247 static inline struct dp_peer *dp_peer_can_reuse(struct dp_vdev *vdev,
5248 						uint8_t *peer_mac_addr)
5249 {
5250 	struct dp_peer *peer;
5251 
5252 	peer = dp_peer_find_hash_find(vdev->pdev->soc, peer_mac_addr,
5253 				      0, vdev->vdev_id);
5254 	if (!peer)
5255 		return NULL;
5256 
5257 	if (peer->bss_peer && (peer->vdev->vdev_id == vdev->vdev_id))
5258 		return peer;
5259 
5260 	dp_peer_unref_delete(peer);
5261 	return NULL;
5262 }
5263 #endif
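
/*
 * Both dp_peer_can_reuse() flavours only hand back a bss peer that
 * dp_peer_find_hash_find() located for this vdev; the non-WRAP build
 * additionally re-checks peer->vdev->vdev_id before agreeing to reuse.
 * Any candidate that is rejected is released via dp_peer_unref_delete()
 * so the reference taken by the hash lookup is not leaked.
 */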
5264 
5265 #ifdef FEATURE_AST
5266 static inline void dp_peer_ast_handle_roam_del(struct dp_soc *soc,
5267 					       struct dp_pdev *pdev,
5268 					       uint8_t *peer_mac_addr)
5269 {
5270 	struct dp_ast_entry *ast_entry;
5271 
5272 	qdf_spin_lock_bh(&soc->ast_lock);
5273 	if (soc->ast_override_support)
5274 		ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, peer_mac_addr,
5275 							    pdev->pdev_id);
5276 	else
5277 		ast_entry = dp_peer_ast_hash_find_soc(soc, peer_mac_addr);
5278 
5279 	if (ast_entry && ast_entry->next_hop && !ast_entry->delete_in_progress)
5280 		dp_peer_del_ast(soc, ast_entry);
5281 
5282 	qdf_spin_unlock_bh(&soc->ast_lock);
5283 }
5284 #endif
5285 
5286 #ifdef PEER_CACHE_RX_PKTS
5287 static inline void dp_peer_rx_bufq_resources_init(struct dp_peer *peer)
5288 {
5289 	qdf_spinlock_create(&peer->bufq_info.bufq_lock);
5290 	peer->bufq_info.thresh = DP_RX_CACHED_BUFQ_THRESH;
5291 	qdf_list_create(&peer->bufq_info.cached_bufq, DP_RX_CACHED_BUFQ_THRESH);
5292 }
5293 #else
5294 static inline void dp_peer_rx_bufq_resources_init(struct dp_peer *peer)
5295 {
5296 }
5297 #endif
5298 
5299 /*
5300  * dp_peer_create_wifi3() - attach txrx peer
5301  * @soc_hdl: Datapath soc handle
5302  * @vdev_id: id of vdev
5303  * @peer_mac_addr: Peer MAC address
5304  *
5305  * Return: DP peer handle on success, NULL on failure
5306  */
5307 static void *dp_peer_create_wifi3(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
5308 				  uint8_t *peer_mac_addr)
5309 {
5310 	struct dp_peer *peer;
5311 	int i;
5312 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
5313 	struct dp_pdev *pdev;
5314 	struct cdp_peer_cookie peer_cookie;
5315 	enum cdp_txrx_ast_entry_type ast_type = CDP_TXRX_AST_TYPE_STATIC;
5316 	struct dp_vdev *vdev = dp_get_vdev_from_soc_vdev_id_wifi3(soc, vdev_id);
5317 
5318 	if (!vdev || !peer_mac_addr)
5319 		return NULL;
5320 
5321 	pdev = vdev->pdev;
5322 	soc = pdev->soc;
5323 
5324 	/*
5325 	 * If a peer entry with given MAC address already exists,
5326 	 * reuse the peer and reset the state of peer.
5327 	 */
5328 	peer = dp_peer_can_reuse(vdev, peer_mac_addr);
5329 
5330 	if (peer) {
5331 		qdf_atomic_init(&peer->is_default_route_set);
5332 		dp_peer_cleanup(vdev, peer, true);
5333 
5334 		qdf_spin_lock_bh(&soc->ast_lock);
5335 		dp_peer_delete_ast_entries(soc, peer);
5336 		peer->delete_in_progress = false;
5337 		qdf_spin_unlock_bh(&soc->ast_lock);
5338 
5339 		if ((vdev->opmode == wlan_op_mode_sta) &&
5340 		    !qdf_mem_cmp(peer_mac_addr, &vdev->mac_addr.raw[0],
5341 		     QDF_MAC_ADDR_SIZE)) {
5342 			ast_type = CDP_TXRX_AST_TYPE_SELF;
5343 		}
5344 		dp_peer_add_ast(soc, peer, peer_mac_addr, ast_type, 0);
5345 		/*
5346 		 * Control path maintains a node count which is incremented
5347 		 * for every new peer create command. Since a new peer is not
5348 		 * being created and the earlier reference is reused here, a
5349 		 * peer_unref_delete event is sent to the control path to
5350 		 * increment the count back.
5351 		 */
5352 		if (soc->cdp_soc.ol_ops->peer_unref_delete) {
5353 			soc->cdp_soc.ol_ops->peer_unref_delete(
5354 				soc->ctrl_psoc,
5355 				pdev->pdev_id,
5356 				peer->mac_addr.raw, vdev->mac_addr.raw,
5357 				vdev->opmode);
5358 		}
5359 
5360 		dp_local_peer_id_alloc(pdev, peer);
5361 
5362 		qdf_spinlock_create(&peer->peer_info_lock);
5363 		dp_peer_rx_bufq_resources_init(peer);
5364 
5365 		DP_STATS_INIT(peer);
5366 		DP_STATS_UPD(peer, rx.avg_rssi, INVALID_RSSI);
5367 
5368 		return (void *)peer;
5369 	} else {
5370 		/*
5371 		 * When a STA roams from RPTR AP to ROOT AP and vice versa, we
5372 		 * need to remove the AST entry which was earlier added as a WDS
5373 		 * entry.
5374 		 * If an AST entry exists but no peer entry exists with the
5375 		 * given MAC address, we can deduce it is a WDS entry.
5376 		 */
5377 		dp_peer_ast_handle_roam_del(soc, pdev, peer_mac_addr);
5378 	}
5379 
5380 #ifdef notyet
5381 	peer = (struct dp_peer *)qdf_mempool_alloc(soc->osdev,
5382 		soc->mempool_ol_ath_peer);
5383 #else
5384 	peer = (struct dp_peer *)qdf_mem_malloc(sizeof(*peer));
5385 #endif
5386 
5387 	if (!peer)
5388 		return NULL; /* failure */
5389 
5390 	qdf_mem_zero(peer, sizeof(struct dp_peer));
5391 
5392 	TAILQ_INIT(&peer->ast_entry_list);
5393 
5394 	/* store provided params */
5395 	peer->vdev = vdev;
5396 
5397 	if ((vdev->opmode == wlan_op_mode_sta) &&
5398 	    !qdf_mem_cmp(peer_mac_addr, &vdev->mac_addr.raw[0],
5399 			 QDF_MAC_ADDR_SIZE)) {
5400 		ast_type = CDP_TXRX_AST_TYPE_SELF;
5401 	}
5402 	dp_peer_add_ast(soc, peer, peer_mac_addr, ast_type, 0);
5403 	qdf_spinlock_create(&peer->peer_info_lock);
5404 
5405 	dp_peer_rx_bufq_resources_init(peer);
5406 
5407 	qdf_mem_copy(
5408 		&peer->mac_addr.raw[0], peer_mac_addr, QDF_MAC_ADDR_SIZE);
5409 
5410 	/* initialize the peer_id */
5411 	for (i = 0; i < MAX_NUM_PEER_ID_PER_PEER; i++)
5412 		peer->peer_ids[i] = HTT_INVALID_PEER;
5413 
5414 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
5415 
5416 	qdf_atomic_init(&peer->ref_cnt);
5417 
5418 	/* keep one reference for attach */
5419 	qdf_atomic_inc(&peer->ref_cnt);
5420 
5421 	/* add this peer into the vdev's list */
5422 	if (wlan_op_mode_sta == vdev->opmode)
5423 		TAILQ_INSERT_HEAD(&vdev->peer_list, peer, peer_list_elem);
5424 	else
5425 		TAILQ_INSERT_TAIL(&vdev->peer_list, peer, peer_list_elem);
5426 
5427 	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
5428 
5429 	/* TODO: See if hash based search is required */
5430 	dp_peer_find_hash_add(soc, peer);
5431 
5432 	/* Initialize the peer state */
5433 	peer->state = OL_TXRX_PEER_STATE_DISC;
5434 
5435 	dp_info("vdev %pK created peer %pK (%pM) ref_cnt: %d",
5436 		vdev, peer, peer->mac_addr.raw,
5437 		qdf_atomic_read(&peer->ref_cnt));
5438 	/*
5439 	 * For every peer MAP message, check and set bss_peer
5440 	 */
5441 	if (qdf_mem_cmp(peer->mac_addr.raw, vdev->mac_addr.raw,
5442 			QDF_MAC_ADDR_SIZE) == 0 &&
5443 			(wlan_op_mode_sta != vdev->opmode)) {
5444 		dp_info("vdev bss_peer!!");
5445 		peer->bss_peer = 1;
5446 		vdev->vap_bss_peer = peer;
5447 	}
5448 
5449 	if (wlan_op_mode_sta == vdev->opmode &&
5450 	    qdf_mem_cmp(peer->mac_addr.raw, vdev->mac_addr.raw,
5451 			QDF_MAC_ADDR_SIZE) == 0) {
5452 		vdev->vap_self_peer = peer;
5453 	}
5454 
5455 	for (i = 0; i < DP_MAX_TIDS; i++)
5456 		qdf_spinlock_create(&peer->rx_tid[i].tid_lock);
5457 
5458 	peer->valid = 1;
5459 	dp_local_peer_id_alloc(pdev, peer);
5460 	DP_STATS_INIT(peer);
5461 	DP_STATS_UPD(peer, rx.avg_rssi, INVALID_RSSI);
5462 
5463 	qdf_mem_copy(peer_cookie.mac_addr, peer->mac_addr.raw,
5464 		     QDF_MAC_ADDR_SIZE);
5465 	peer_cookie.ctx = NULL;
5466 	peer_cookie.pdev_id = pdev->pdev_id;
5467 	peer_cookie.cookie = pdev->next_peer_cookie++;
5468 #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
5469 	dp_wdi_event_handler(WDI_EVENT_PEER_CREATE, pdev->soc,
5470 			     (void *)&peer_cookie,
5471 			     peer->peer_ids[0], WDI_NO_VAL, pdev->pdev_id);
5472 #endif
5473 	if (soc->wlanstats_enabled) {
5474 		if (!peer_cookie.ctx) {
5475 			pdev->next_peer_cookie--;
5476 			qdf_err("Failed to initialize peer rate stats");
5477 		} else {
5478 			peer->wlanstats_ctx = (struct cdp_peer_rate_stats_ctx *)
5479 						peer_cookie.ctx;
5480 		}
5481 	}
5482 	return (void *)peer;
5483 }
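
/*
 * Illustrative usage sketch (not driver code): dp_peer_create_wifi3() is
 * static and reached through the registered cdp ops. Note the reuse path
 * above: if a peer with the same MAC already exists, that object is reset
 * and returned instead of allocating a new one. example_attach_peer is a
 * made-up name and the cdp_peer_create() convergence wrapper is assumed
 * from cdp_txrx_cmn.h:
 *
 *	static QDF_STATUS example_attach_peer(struct cdp_soc_t *soc,
 *					      uint8_t vdev_id, uint8_t *mac)
 *	{
 *		void *peer = cdp_peer_create(soc, vdev_id, mac);
 *
 *		return peer ? QDF_STATUS_SUCCESS : QDF_STATUS_E_FAILURE;
 *	}
 */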
5484 
5485 /*
5486  * dp_vdev_get_default_reo_hash() - get reo dest ring and hash values for a vdev
5487  * @vdev: Datapath VDEV handle
5488  * @reo_dest: pointer to default reo_dest ring for vdev to be populated
5489  * @hash_based: pointer to hash value (enabled/disabled) to be populated
5490  *
5491  * Return: None
5492  */
5493 static
5494 void dp_vdev_get_default_reo_hash(struct dp_vdev *vdev,
5495 				  enum cdp_host_reo_dest_ring *reo_dest,
5496 				  bool *hash_based)
5497 {
5498 	struct dp_soc *soc;
5499 	struct dp_pdev *pdev;
5500 
5501 	pdev = vdev->pdev;
5502 	soc = pdev->soc;
5503 	/*
5504 	 * hash based steering is disabled for Radios which are offloaded
5505 	 * to NSS
5506 	 */
5507 	if (!wlan_cfg_get_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx))
5508 		*hash_based = wlan_cfg_is_rx_hash_enabled(soc->wlan_cfg_ctx);
5509 
5510 	/*
5511 	 * Below line of code will ensure the proper reo_dest ring is chosen
5512 	 * for cases where toeplitz hash cannot be generated (ex: non TCP/UDP)
5513 	 */
5514 	*reo_dest = pdev->reo_dest;
5515 }
5516 
5517 #ifdef IPA_OFFLOAD
5518 /**
5519  * dp_is_vdev_subtype_p2p() - Check if the subtype for vdev is P2P
5520  * @vdev: Virtual device
5521  *
5522  * Return: true if the vdev is of subtype P2P
5523  *	   false if the vdev is of any other subtype
5524  */
5525 static inline bool dp_is_vdev_subtype_p2p(struct dp_vdev *vdev)
5526 {
5527 	if (vdev->subtype == wlan_op_subtype_p2p_device ||
5528 	    vdev->subtype == wlan_op_subtype_p2p_cli ||
5529 	    vdev->subtype == wlan_op_subtype_p2p_go)
5530 		return true;
5531 
5532 	return false;
5533 }
5534 
5535 /*
5536  * dp_peer_setup_get_reo_hash() - get reo dest ring and hash values for a peer
5537  * @vdev: Datapath VDEV handle
5538  * @reo_dest: pointer to default reo_dest ring for vdev to be populated
5539  * @hash_based: pointer to hash value (enabled/disabled) to be populated
5540  *
5541  * If IPA is enabled in the ini, disable hash-based steering for SAP mode
5542  * and use the default reo_dest ring for RX. Use config values for other modes.
5543  * Return: None
5544  */
5545 static void dp_peer_setup_get_reo_hash(struct dp_vdev *vdev,
5546 				       enum cdp_host_reo_dest_ring *reo_dest,
5547 				       bool *hash_based)
5548 {
5549 	struct dp_soc *soc;
5550 	struct dp_pdev *pdev;
5551 
5552 	pdev = vdev->pdev;
5553 	soc = pdev->soc;
5554 
5555 	dp_vdev_get_default_reo_hash(vdev, reo_dest, hash_based);
5556 
5557 	/* For P2P-GO interfaces we do not need to change the REO
5558 	 * configuration even if IPA config is enabled
5559 	 */
5560 	if (dp_is_vdev_subtype_p2p(vdev))
5561 		return;
5562 
5563 	/*
5564 	 * If IPA is enabled, disable hash-based flow steering and set
5565 	 * reo_dest_ring_4 as the REO ring to receive packets on.
5566 	 * IPA is configured to reap reo_dest_ring_4.
5567 	 *
5568 	 * Note - REO DST indexes are from 0 - 3, while cdp_host_reo_dest_ring
5569 	 * enum values are from 1 - 4.
5570 	 * Hence, *reo_dest = IPA_REO_DEST_RING_IDX + 1
5571 	 */
5572 	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) {
5573 		if (vdev->opmode == wlan_op_mode_ap) {
5574 			*reo_dest = IPA_REO_DEST_RING_IDX + 1;
5575 			*hash_based = 0;
5576 		} else if (vdev->opmode == wlan_op_mode_sta &&
5577 			   dp_ipa_is_mdm_platform()) {
5578 			*reo_dest = IPA_REO_DEST_RING_IDX + 1;
5579 		}
5580 	}
5581 }
5582 
5583 #else
5584 
5585 /*
5586  * dp_peer_setup_get_reo_hash() - get reo dest ring and hash values for a peer
5587  * @vdev: Datapath VDEV handle
5588  * @reo_dest: pointer to default reo_dest ring for vdev to be populated
5589  * @hash_based: pointer to hash value (enabled/disabled) to be populated
5590  *
5591  * Use system config values for hash based steering.
5592  * Return: None
5593  */
5594 
5595 static void dp_peer_setup_get_reo_hash(struct dp_vdev *vdev,
5596 				       enum cdp_host_reo_dest_ring *reo_dest,
5597 				       bool *hash_based)
5598 {
5599 	dp_vdev_get_default_reo_hash(vdev, reo_dest, hash_based);
5600 }
5601 #endif /* IPA_OFFLOAD */
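
/*
 * Worked example of the REO index mapping described above (illustrative):
 * hardware REO destination indices run 0 - 3 while enum
 * cdp_host_reo_dest_ring runs 1 - 4. Assuming IPA_REO_DEST_RING_IDX is 3,
 * the assignment
 *
 *	*reo_dest = IPA_REO_DEST_RING_IDX + 1;
 *
 * yields the cdp enum value 4 (reo_dest_ring_4), i.e. the same physical
 * ring IPA is configured to reap at HW index 3.
 */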
5602 
5603 /*
5604  * dp_peer_setup_wifi3() - initialize the peer
5605  * @soc_hdl: soc handle object
5606  * @vdev_id: id of the vdev object
5607  * @peer_mac: Peer's mac address
5608  *
5609  * Return: QDF_STATUS
5610  */
5611 static QDF_STATUS
5612 dp_peer_setup_wifi3(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
5613 		    uint8_t *peer_mac)
5614 {
5615 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
5616 	struct dp_pdev *pdev;
5617 	bool hash_based = 0;
5618 	enum cdp_host_reo_dest_ring reo_dest;
5619 	QDF_STATUS status = QDF_STATUS_SUCCESS;
5620 	struct dp_vdev *vdev =
5621 			dp_get_vdev_from_soc_vdev_id_wifi3(soc, vdev_id);
5622 	struct dp_peer *peer =
5623 			dp_peer_find_hash_find(soc, peer_mac, 0, vdev_id);
5624 
5625 	if (!vdev || !peer || peer->delete_in_progress) {
5626 		status = QDF_STATUS_E_FAILURE;
5627 		goto fail;
5628 	}
5629 
5630 	pdev = vdev->pdev;
5631 	dp_peer_setup_get_reo_hash(vdev, &reo_dest, &hash_based);
5632 
5633 	dp_info("pdev: %d vdev :%d opmode:%u hash-based-steering:%d default-reo_dest:%u",
5634 		pdev->pdev_id, vdev->vdev_id,
5635 		vdev->opmode, hash_based, reo_dest);
5636 
5637 
5638 	/*
5639 	 * There are corner cases where AD1 = AD2 = "VAP's address",
5640 	 * i.e. both devices have the same MAC address. In these
5641 	 * cases we want such packets to be processed by the NULL Q
5642 	 * handler, which is the REO2TCL ring. For this reason we should
5643 	 * not set up REO queues and the default route for the bss_peer.
5644 	 */
5645 	if (peer->bss_peer && vdev->opmode == wlan_op_mode_ap) {
5646 		status = QDF_STATUS_E_FAILURE;
5647 		goto fail;
5648 	}
5649 
5650 	if (soc->cdp_soc.ol_ops->peer_set_default_routing) {
5651 		/* TODO: Check the destination ring number to be passed to FW */
5652 		soc->cdp_soc.ol_ops->peer_set_default_routing(
5653 				soc->ctrl_psoc,
5654 				peer->vdev->pdev->pdev_id,
5655 				peer->mac_addr.raw,
5656 				peer->vdev->vdev_id, hash_based, reo_dest);
5657 	}
5658 
5659 	qdf_atomic_set(&peer->is_default_route_set, 1);
5660 
5661 	dp_peer_rx_init(pdev, peer);
5662 	dp_peer_tx_init(pdev, peer);
5663 
5664 	dp_peer_ppdu_delayed_ba_init(peer);
5665 
5666 fail:
5667 	if (peer)
5668 		dp_peer_unref_delete(peer);
5669 	return status;
5670 }
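
/*
 * Illustrative call flow (hypothetical, for documentation only): peer
 * bring-up is a two-step sequence - create the txrx peer, then run the
 * setup op so hash-based steering and the default route are programmed.
 * example_bring_up_peer is a made-up name; the cdp_peer_create() and
 * cdp_peer_setup() convergence wrappers are assumed from cdp_txrx_cmn.h:
 *
 *	static QDF_STATUS example_bring_up_peer(struct cdp_soc_t *soc,
 *						uint8_t vdev_id, uint8_t *mac)
 *	{
 *		if (!cdp_peer_create(soc, vdev_id, mac))
 *			return QDF_STATUS_E_FAILURE;
 *
 *		return cdp_peer_setup(soc, vdev_id, mac);
 *	}
 */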
5671 
5672 /*
5673  * dp_cp_peer_del_resp_handler - Handle the peer delete response
5674  * @soc_hdl: Datapath SOC handle
5675  * @vdev_id: id of virtual device object
5676  * @mac_addr: Mac address of the peer
5677  *
5678  * Return: QDF_STATUS
5679  */
5680 static QDF_STATUS dp_cp_peer_del_resp_handler(struct cdp_soc_t *soc_hdl,
5681 					      uint8_t vdev_id,
5682 					      uint8_t *mac_addr)
5683 {
5684 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
5685 	struct dp_ast_entry  *ast_entry = NULL;
5686 	txrx_ast_free_cb cb = NULL;
5687 	void *cookie;
5688 	struct dp_vdev *vdev = dp_get_vdev_from_soc_vdev_id_wifi3(soc, vdev_id);
5689 
5690 	if (!vdev)
5691 		return QDF_STATUS_E_FAILURE;
5692 
5693 	qdf_spin_lock_bh(&soc->ast_lock);
5694 
5695 	if (soc->ast_override_support)
5696 		ast_entry =
5697 			dp_peer_ast_hash_find_by_pdevid(soc, mac_addr,
5698 							vdev->pdev->pdev_id);
5699 	else
5700 		ast_entry = dp_peer_ast_hash_find_soc(soc, mac_addr);
5701 
5702 	/* In the case of qwrap there are multiple BSS peers
5703 	 * with the same MAC address.
5704 	 *
5705 	 * The AST entry for this MAC address is created for only
5706 	 * one of those peers, hence it may be NULL here.
5707 	 */
5708 	if (!ast_entry || ast_entry->peer || !ast_entry->delete_in_progress) {
5709 		qdf_spin_unlock_bh(&soc->ast_lock);
5710 		return QDF_STATUS_E_FAILURE;
5711 	}
5712 
5713 	if (ast_entry->is_mapped)
5714 		soc->ast_table[ast_entry->ast_idx] = NULL;
5715 
5716 	DP_STATS_INC(soc, ast.deleted, 1);
5717 	dp_peer_ast_hash_remove(soc, ast_entry);
5718 
5719 	cb = ast_entry->callback;
5720 	cookie = ast_entry->cookie;
5721 	ast_entry->callback = NULL;
5722 	ast_entry->cookie = NULL;
5723 
5724 	soc->num_ast_entries--;
5725 	qdf_spin_unlock_bh(&soc->ast_lock);
5726 
5727 	if (cb) {
5728 		cb(soc->ctrl_psoc,
5729 		   dp_soc_to_cdp_soc(soc),
5730 		   cookie,
5731 		   CDP_TXRX_AST_DELETED);
5732 	}
5733 	qdf_mem_free(ast_entry);
5734 
5735 	return QDF_STATUS_SUCCESS;
5736 }
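
/*
 * Sketch of a matching AST free callback (hypothetical, illustration only):
 * dp_cp_peer_del_resp_handler() detaches the entry's callback and cookie
 * under ast_lock and invokes the callback outside the lock. The parameter
 * list mirrors the call site above; example_ast_free_cb and the cookie
 * handling are invented for this example:
 *
 *	static void example_ast_free_cb(struct cdp_ctrl_objmgr_psoc *psoc,
 *					struct cdp_soc *dp_soc,
 *					void *cookie,
 *					enum cdp_ast_free_status status)
 *	{
 *		if (status == CDP_TXRX_AST_DELETED)
 *			qdf_mem_free(cookie);
 *	}
 */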
5737 
5738 /*
5739  * dp_set_vdev_tx_encap_type() - set the encap type of the vdev
5740  * @vdev_handle: virtual device object
5741  * @val: HTT packet type (enum htt_cmn_pkt_type) to set
5742  *
5743  * Return: void
5744  */
5745 static void dp_set_vdev_tx_encap_type(struct cdp_vdev *vdev_handle,
5746 	 enum htt_cmn_pkt_type val)
5747 {
5748 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
5749 	vdev->tx_encap_type = val;
5750 }
5751 
5752 /*
5753  * dp_set_vdev_rx_decap_type() - set the decap type of the vdev
5754  * @vdev_handle: virtual device object
5755  * @val: HTT packet type (enum htt_cmn_pkt_type) to set
5756  *
5757  * Return: void
5758  */
5759 static void dp_set_vdev_rx_decap_type(struct cdp_vdev *vdev_handle,
5760 	 enum htt_cmn_pkt_type val)
5761 {
5762 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
5763 	vdev->rx_decap_type = val;
5764 }
5765 
5766 /*
5767  * dp_set_ba_aging_timeout() - set ba aging timeout per AC
5768  * @txrx_soc: cdp soc handle
5769  * @ac: Access category
5770  * @value: timeout value in millisec
5771  *
5772  * Return: void
5773  */
5774 static void dp_set_ba_aging_timeout(struct cdp_soc_t *txrx_soc,
5775 				    uint8_t ac, uint32_t value)
5776 {
5777 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
5778 
5779 	hal_set_ba_aging_timeout(soc->hal_soc, ac, value);
5780 }
5781 
5782 /*
5783  * dp_get_ba_aging_timeout() - get ba aging timeout per AC
5784  * @txrx_soc: cdp soc handle
5785  * @ac: access category
5786  * @value: timeout value in millisec
5787  *
5788  * Return: void
5789  */
5790 static void dp_get_ba_aging_timeout(struct cdp_soc_t *txrx_soc,
5791 				    uint8_t ac, uint32_t *value)
5792 {
5793 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
5794 
5795 	hal_get_ba_aging_timeout(soc->hal_soc, ac, value);
5796 }
5797 
5798 /*
5799  * dp_set_pdev_reo_dest() - set the reo destination ring for this pdev
5800  * @pdev_handle: physical device object
5801  * @val: reo destination ring index (1 - 4)
5802  *
5803  * Return: void
5804  */
5805 static void dp_set_pdev_reo_dest(struct cdp_pdev *pdev_handle,
5806 	 enum cdp_host_reo_dest_ring val)
5807 {
5808 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
5809 
5810 	if (pdev)
5811 		pdev->reo_dest = val;
5812 }
5813 
5814 /*
5815  * dp_get_pdev_reo_dest() - get the reo destination for this pdev
5816  * @pdev_handle: physical device object
5817  *
5818  * Return: reo destination ring index
5819  */
5820 static enum cdp_host_reo_dest_ring
5821 dp_get_pdev_reo_dest(struct cdp_pdev *pdev_handle)
5822 {
5823 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
5824 
5825 	if (pdev)
5826 		return pdev->reo_dest;
5827 	else
5828 		return cdp_host_reo_dest_ring_unknown;
5829 }
5830 
5831 /*
5832  * dp_set_filter_neighbour_peers() - set filter neighbour peers for smart mesh
5833  * @pdev_handle: device object
5834  * @val: value to be set
5835  *
5836  * Return: 0 on success
5837  */
5838 static int dp_set_filter_neighbour_peers(struct cdp_pdev *pdev_handle,
5839 	 uint32_t val)
5840 {
5841 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
5842 
5843 	/* Enable/Disable smart mesh filtering. This flag will be checked
5844 	 * during rx processing to check if packets are from NAC clients.
5845 	 */
5846 	pdev->filter_neighbour_peers = val;
5847 	return 0;
5848 }
5849 
5850 /*
5851  * dp_update_filter_neighbour_peers() - set neighbour peers(nac clients)
5852  * address for smart mesh filtering
5853  * @vdev_handle: virtual device object
5854  * @cmd: Add/Del command
5855  * @macaddr: nac client mac address
5856  *
5857  * Return: 1 on success, 0 on failure
5858  */
5859 static int dp_update_filter_neighbour_peers(struct cdp_vdev *vdev_handle,
5860 					    uint32_t cmd, uint8_t *macaddr)
5861 {
5862 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
5863 	struct dp_pdev *pdev = vdev->pdev;
5864 	struct dp_neighbour_peer *peer = NULL;
5865 
5866 	if (!macaddr)
5867 		goto fail0;
5868 
5869 	/* Store address of NAC (neighbour peer) which will be checked
5870 	 * against TA of received packets.
5871 	 */
5872 	if (cmd == DP_NAC_PARAM_ADD) {
5873 		peer = (struct dp_neighbour_peer *) qdf_mem_malloc(
5874 				sizeof(*peer));
5875 
5876 		if (!peer) {
5877 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
5878 				FL("DP neighbour peer node memory allocation failed"));
5879 			goto fail0;
5880 		}
5881 
5882 		qdf_mem_copy(&peer->neighbour_peers_macaddr.raw[0],
5883 			macaddr, QDF_MAC_ADDR_SIZE);
5884 		peer->vdev = vdev;
5885 
5886 		qdf_spin_lock_bh(&pdev->neighbour_peer_mutex);
5887 
5888 		/* add this neighbour peer into the list */
5889 		TAILQ_INSERT_TAIL(&pdev->neighbour_peers_list, peer,
5890 				neighbour_peer_list_elem);
5891 		qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex);
5892 
5893 		/* first neighbour */
5894 		if (!pdev->neighbour_peers_added) {
5895 			pdev->neighbour_peers_added = true;
5896 			dp_ppdu_ring_cfg(pdev);
5897 		}
5898 		return 1;
5899 
5900 	} else if (cmd == DP_NAC_PARAM_DEL) {
5901 		qdf_spin_lock_bh(&pdev->neighbour_peer_mutex);
5902 		TAILQ_FOREACH(peer, &pdev->neighbour_peers_list,
5903 				neighbour_peer_list_elem) {
5904 			if (!qdf_mem_cmp(&peer->neighbour_peers_macaddr.raw[0],
5905 				macaddr, QDF_MAC_ADDR_SIZE)) {
5906 				/* delete this peer from the list */
5907 				TAILQ_REMOVE(&pdev->neighbour_peers_list,
5908 					peer, neighbour_peer_list_elem);
5909 				qdf_mem_free(peer);
5910 				break;
5911 			}
5912 		}
5913 		/* last neighbour deleted */
5914 		if (TAILQ_EMPTY(&pdev->neighbour_peers_list)) {
5915 			pdev->neighbour_peers_added = false;
5916 			dp_ppdu_ring_cfg(pdev);
5917 		}
5918 
5919 		qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex);
5920 
5921 		if (!pdev->mcopy_mode && !pdev->neighbour_peers_added &&
5922 		    !pdev->enhanced_stats_en)
5923 			dp_ppdu_ring_reset(pdev);
5924 		return 1;
5925 
5926 	}
5927 
5928 fail0:
5929 	return 0;
5930 }
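
/*
 * Usage sketch (illustrative): smart-mesh NAC filtering is driven through
 * DP_NAC_PARAM_ADD/DP_NAC_PARAM_DEL. The first ADD also configures the
 * PPDU ring via dp_ppdu_ring_cfg() and the last DEL tears it back down;
 * the function returns 1 on success and 0 on failure:
 *
 *	if (!dp_update_filter_neighbour_peers(vdev_handle,
 *					      DP_NAC_PARAM_ADD, nac_mac))
 *		return QDF_STATUS_E_FAILURE;
 *
 *	(... capture traffic from the NAC ...)
 *
 *	dp_update_filter_neighbour_peers(vdev_handle,
 *					 DP_NAC_PARAM_DEL, nac_mac);
 */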
5931 
5932 /*
5933  * dp_get_sec_type() - Get the security type
5934  * @peer:		Datapath peer handle
5935  * @sec_idx:    Security id (mcast, ucast)
5936  *
5937  * Return: sec_type - security type of the peer
5938  */
5939 static int dp_get_sec_type(struct cdp_peer *peer, uint8_t sec_idx)
5940 {
5941 	struct dp_peer *dpeer = (struct dp_peer *)peer;
5942 
5943 	return dpeer->security[sec_idx].sec_type;
5944 }
5945 
5946 /*
5947  * dp_peer_authorize() - authorize txrx peer
5948  * @peer_handle:		Datapath peer handle
5949  * @authorize: 1 to authorize the peer, 0 to deauthorize
5950  * Return: None
5951  */
5952 static void dp_peer_authorize(struct cdp_peer *peer_handle, uint32_t authorize)
5953 {
5954 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
5955 	struct dp_soc *soc;
5956 
5957 	if (peer) {
5958 		soc = peer->vdev->pdev->soc;
5959 		qdf_spin_lock_bh(&soc->peer_ref_mutex);
5960 		peer->authorize = authorize ? 1 : 0;
5961 		qdf_spin_unlock_bh(&soc->peer_ref_mutex);
5962 	}
5963 }
5964 
5965 /*
5966  * dp_vdev_reset_peer() - Update peer related members in vdev
5967  *			   as the peer is about to be freed
5968  * @vdev: datapath vdev handle
5969  * @peer: datapath peer handle
5970  *
5971  * Return: None
5972  */
5973 static void dp_vdev_reset_peer(struct dp_vdev *vdev,
5974 			       struct dp_peer *peer)
5975 {
5976 	struct dp_peer *bss_peer = NULL;
5977 
5978 	if (!vdev) {
5979 		dp_err("vdev is NULL");
5980 	} else {
5981 		if (vdev->vap_bss_peer == peer)
5982 			vdev->vap_bss_peer = NULL;
5983 
5984 		if (vdev->vap_bss_peer) {
5985 			bss_peer = vdev->vap_bss_peer;
5986 			DP_UPDATE_STATS(vdev, peer);
5987 		}
5988 	}
5989 }
5990 
5991 /*
5992  * dp_peer_release_mem() - free dp peer handle memory
5993  * @soc: datapath soc handle
5994  * @pdev: datapath pdev handle
5995  * @peer: datapath peer handle
5996  * @vdev_opmode: Vdev operation mode
5997  * @vdev_mac_addr: Vdev Mac address
5998  *
5999  * Return: None
6000  */
6001 static void dp_peer_release_mem(struct dp_soc *soc,
6002 				struct dp_pdev *pdev,
6003 				struct dp_peer *peer,
6004 				enum wlan_op_mode vdev_opmode,
6005 				uint8_t *vdev_mac_addr)
6006 {
6007 	if (soc->cdp_soc.ol_ops->peer_unref_delete)
6008 		soc->cdp_soc.ol_ops->peer_unref_delete(
6009 				soc->ctrl_psoc,
6010 				pdev->pdev_id,
6011 				peer->mac_addr.raw, vdev_mac_addr,
6012 				vdev_opmode);
6013 
6014 	/*
6015 	 * Peer AST list has to be empty here
6016 	 */
6017 	DP_AST_ASSERT(TAILQ_EMPTY(&peer->ast_entry_list));
6018 
6019 	qdf_mem_free(peer);
6020 }
6021 
6022 /**
6023  * dp_delete_pending_vdev() - check and process vdev delete
6024  * @pdev: DP specific pdev pointer
6025  * @vdev: DP specific vdev pointer
6026  * @vdev_id: vdev id corresponding to vdev
6027  *
6028  * This API does following:
6029  * 1) It releases tx flow pools buffers as vdev is
6030  *    going down and no peers are associated.
6031  * 2) It also detaches vdev before cleaning vdev (struct dp_vdev) memory
6032  */
6033 static void dp_delete_pending_vdev(struct dp_pdev *pdev, struct dp_vdev *vdev,
6034 				   uint8_t vdev_id)
6035 {
6036 	ol_txrx_vdev_delete_cb vdev_delete_cb = NULL;
6037 	void *vdev_delete_context = NULL;
6038 
6039 	vdev_delete_cb = vdev->delete.callback;
6040 	vdev_delete_context = vdev->delete.context;
6041 
6042 	dp_info("deleting vdev object %pK (%pM)- its last peer is done",
6043 		vdev, vdev->mac_addr.raw);
6044 	/* all peers are gone, go ahead and delete it */
6045 	dp_tx_flow_pool_unmap_handler(pdev, vdev_id,
6046 			FLOW_TYPE_VDEV, vdev_id);
6047 	dp_tx_vdev_detach(vdev);
6048 
6049 	pdev->soc->vdev_id_map[vdev_id] = NULL;
6050 
6051 	if (wlan_op_mode_monitor == vdev->opmode) {
6052 		pdev->monitor_vdev = NULL;
6053 	} else {
6054 		qdf_spin_lock_bh(&pdev->vdev_list_lock);
6055 		TAILQ_REMOVE(&pdev->vdev_list, vdev, vdev_list_elem);
6056 		qdf_spin_unlock_bh(&pdev->vdev_list_lock);
6057 	}
6058 
6059 	dp_info("deleting vdev object %pK (%pM)",
6060 		vdev, vdev->mac_addr.raw);
6061 	qdf_mem_free(vdev);
6062 	vdev = NULL;
6063 
6064 	if (vdev_delete_cb)
6065 		vdev_delete_cb(vdev_delete_context);
6066 }
6067 
6068 /*
6069  * dp_peer_unref_delete() - unref and delete peer
6070  * @peer_handle:		Datapath peer handle
6071  *
6072  */
6073 void dp_peer_unref_delete(struct dp_peer *peer)
6074 {
6075 	struct dp_vdev *vdev = peer->vdev;
6076 	struct dp_pdev *pdev = vdev->pdev;
6077 	struct dp_soc *soc = pdev->soc;
6078 	struct dp_peer *tmppeer;
6079 	int found = 0;
6080 	uint16_t peer_id;
6081 	uint16_t vdev_id;
6082 	bool vdev_delete = false;
6083 	struct cdp_peer_cookie peer_cookie;
6084 	enum wlan_op_mode vdev_opmode;
6085 	uint8_t vdev_mac_addr[QDF_MAC_ADDR_SIZE];
6086 
6087 
6088 	/*
6089 	 * Hold the lock all the way from checking if the peer ref count
6090 	 * is zero until the peer references are removed from the hash
6091 	 * table and vdev list (if the peer ref count is zero).
6092 	 * This protects against a new HL tx operation starting to use the
6093 	 * peer object just after this function concludes it's done being used.
6094 	 * Furthermore, the lock needs to be held while checking whether the
6095 	 * vdev's list of peers is empty, to make sure that list is not modified
6096 	 * concurrently with the empty check.
6097 	 */
6098 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
6099 	if (qdf_atomic_dec_and_test(&peer->ref_cnt)) {
6100 		peer_id = peer->peer_ids[0];
6101 		vdev_id = vdev->vdev_id;
6102 
6103 		/*
6104 		 * Make sure that the reference to the peer in
6105 		 * peer object map is removed
6106 		 */
6107 		if (peer_id != HTT_INVALID_PEER)
6108 			soc->peer_id_to_obj_map[peer_id] = NULL;
6109 
6110 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
6111 			  "Deleting peer %pK (%pM)", peer, peer->mac_addr.raw);
6112 
6113 		/* remove the reference to the peer from the hash table */
6114 		dp_peer_find_hash_remove(soc, peer);
6115 
6116 		qdf_spin_lock_bh(&soc->ast_lock);
6117 		if (peer->self_ast_entry) {
6118 			dp_peer_del_ast(soc, peer->self_ast_entry);
6119 			peer->self_ast_entry = NULL;
6120 		}
6121 		qdf_spin_unlock_bh(&soc->ast_lock);
6122 
6123 		TAILQ_FOREACH(tmppeer, &peer->vdev->peer_list, peer_list_elem) {
6124 			if (tmppeer == peer) {
6125 				found = 1;
6126 				break;
6127 			}
6128 		}
6129 
6130 		if (found) {
6131 			TAILQ_REMOVE(&peer->vdev->peer_list, peer,
6132 				peer_list_elem);
6133 		} else {
6134 			/* Ignoring the remove operation as peer not found */
6135 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
6136 				  "peer:%pK not found in vdev:%pK peerlist:%pK",
6137 				  peer, vdev, &peer->vdev->peer_list);
6138 		}
6139 
6140 		/* send peer destroy event to upper layer */
6141 		qdf_mem_copy(peer_cookie.mac_addr, peer->mac_addr.raw,
6142 			     QDF_MAC_ADDR_SIZE);
6143 		/* pass the stats context in the WDI event */
6144 		peer_cookie.ctx = (struct cdp_stats_cookie *)
6145 					peer->wlanstats_ctx;
6146 #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
6147 		dp_wdi_event_handler(WDI_EVENT_PEER_DESTROY,
6148 				     pdev->soc,
6149 				     (void *)&peer_cookie,
6150 				     peer->peer_ids[0],
6151 				     WDI_NO_VAL,
6152 				     pdev->pdev_id);
6153 #endif
6154 		peer->wlanstats_ctx = NULL;
6155 
6156 		/* cleanup the peer data */
6157 		dp_peer_cleanup(vdev, peer, false);
6158 		/* reset this peer related info in vdev */
6159 		dp_vdev_reset_peer(vdev, peer);
6160 		/* save vdev related member in case vdev freed */
6161 		vdev_opmode = vdev->opmode;
6162 		qdf_mem_copy(vdev_mac_addr, vdev->mac_addr.raw,
6163 			     QDF_MAC_ADDR_SIZE);
6164 		/*
6165 		 * check whether the parent vdev is pending for deleting
6166 		 * and no peers left.
6167 		 */
6168 		if (vdev->delete.pending && TAILQ_EMPTY(&vdev->peer_list))
6169 			vdev_delete = true;
6170 		/*
6171 		 * Now that there are no references to the peer, we can
6172 		 * release the peer reference lock.
6173 		 */
6174 		qdf_spin_unlock_bh(&soc->peer_ref_mutex);
6175 
6176 		/*
6177 		 * Invoke soc.ol_ops->peer_unref_delete out of
6178 		 * peer_ref_mutex to avoid a potential deadlock.
6179 		 */
6180 		dp_peer_release_mem(soc, pdev, peer,
6181 				    vdev_opmode,
6182 				    vdev_mac_addr);
6183 		/*
6184 		 * Delete the vdev if it was waiting for all its peers
6185 		 * to be deleted and this was the last one.
6186 		 */
6187 		if (vdev_delete)
6188 			dp_delete_pending_vdev(pdev, vdev, vdev_id);
6189 
6190 	} else {
6191 		qdf_spin_unlock_bh(&soc->peer_ref_mutex);
6192 	}
6193 }
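
/*
 * Reference-count pairing (illustrative): dp_peer_find_hash_find() returns
 * the peer with ref_cnt already incremented, so every successful lookup
 * must be balanced by exactly one dp_peer_unref_delete(), which frees the
 * peer (and possibly a pending vdev) once the count reaches zero:
 *
 *	struct dp_peer *peer;
 *
 *	peer = dp_peer_find_hash_find(soc, mac, 0, vdev_id);
 *	if (!peer)
 *		return QDF_STATUS_E_FAILURE;
 *
 *	(... use peer ...)
 *
 *	dp_peer_unref_delete(peer);
 */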
6194 
6195 #ifdef PEER_CACHE_RX_PKTS
6196 static inline void dp_peer_rx_bufq_resources_deinit(struct dp_peer *peer)
6197 {
6198 	dp_rx_flush_rx_cached(peer, true);
6199 	qdf_list_destroy(&peer->bufq_info.cached_bufq);
6200 	qdf_spinlock_destroy(&peer->bufq_info.bufq_lock);
6201 }
6202 #else
6203 static inline void dp_peer_rx_bufq_resources_deinit(struct dp_peer *peer)
6204 {
6205 }
6206 #endif
6207 
6208 /*
6209  * dp_peer_delete_wifi3() - Detach txrx peer
6210  * @soc: soc handle
6211  * @vdev_id: id of the vdev the peer belongs to
6212  * @peer_mac: MAC address of the peer
6213  * @bitmap: bitmap indicating special handling of request
6214  * Return: QDF_STATUS
6215  */
6216 static QDF_STATUS dp_peer_delete_wifi3(struct cdp_soc_t *soc, uint8_t vdev_id,
6217 				       uint8_t *peer_mac, uint32_t bitmap)
6218 {
6219 	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)soc,
6220 						      peer_mac, 0, vdev_id);
6221 
6222 	/* Peer can be null for monitor vap mac address */
6223 	if (!peer) {
6224 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
6225 			  "%s: Invalid peer\n", __func__);
6226 		return QDF_STATUS_E_FAILURE;
6227 	}
6228 
6229 	peer->valid = 0;
6230 
6231 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
6232 		FL("peer %pK (%pM)"),  peer, peer->mac_addr.raw);
6233 
6234 	dp_local_peer_id_free(peer->vdev->pdev, peer);
6235 
6236 	dp_peer_rx_bufq_resources_deinit(peer);
6237 
6238 	qdf_spinlock_destroy(&peer->peer_info_lock);
6239 	dp_peer_multipass_list_remove(peer);
6240 
6241 	/*
6242 	 * Remove the reference added during peer_attach.
6243 	 * The peer will still be left allocated until the
6244 	 * PEER_UNMAP message arrives to remove the other
6245 	 * reference, added by the PEER_MAP message.
6246 	 */
6247 	dp_peer_unref_delete(peer);
6248 	/*
6249 	 * Remove the reference taken above
6250 	 */
6251 	dp_peer_unref_delete(peer);
6252 
6253 	return QDF_STATUS_SUCCESS;
6254 }
6255 
6256 /*
6257  * dp_get_vdev_mac_addr_wifi3() - Get the MAC address of a vdev
6258  * @soc_hdl: Datapath soc handle
6259  * @vdev_id: virtual interface id
6260  *
6261  * Return: MAC address on success, NULL on failure.
6262  *
6263  */
6264 static uint8_t *dp_get_vdev_mac_addr_wifi3(struct cdp_soc_t *soc_hdl,
6265 					 uint8_t vdev_id)
6266 {
6267 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
6268 	struct dp_vdev *vdev = dp_get_vdev_from_soc_vdev_id_wifi3(soc, vdev_id);
6269 
6270 	if (!vdev)
6271 		return NULL;
6272 
6273 	return vdev->mac_addr.raw;
6274 }
6275 
6276 /*
6277  * dp_vdev_set_wds() - Enable/disable WDS on the vdev
6278  * @soc: DP soc handle
6279  * @vdev_id: id of DP VDEV handle
6280  * @val: 1 to enable WDS, 0 to disable
6281  *
6282  * Return: QDF_STATUS
6283  */
6284 static int dp_vdev_set_wds(struct cdp_soc_t *soc, uint8_t vdev_id, uint32_t val)
6285 {
6286 	struct dp_vdev *vdev =
6287 		dp_get_vdev_from_soc_vdev_id_wifi3((struct dp_soc *)soc,
6288 						   vdev_id);
6289 
6290 	if (!vdev)
6291 		return QDF_STATUS_E_FAILURE;
6292 
6293 	vdev->wds_enabled = val;
6294 	return QDF_STATUS_SUCCESS;
6295 }
6296 
6297 /*
6298  * dp_get_mon_vdev_from_pdev_wifi3() - Get vdev id of monitor mode
6299  * @soc_hdl: datapath soc handle
6300  * @pdev_id: physical device instance id
6301  *
6302  * Return: virtual interface id
6303  */
6304 static uint8_t dp_get_mon_vdev_from_pdev_wifi3(struct cdp_soc_t *soc_hdl,
6305 					       uint8_t pdev_id)
6306 {
6307 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
6308 	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
6309 
6310 	if (qdf_unlikely(!pdev || !pdev->monitor_vdev))
6311 		return -EINVAL;
6312 
6313 	return pdev->monitor_vdev->vdev_id;
6314 }
6315 
6316 static int dp_get_opmode(struct cdp_soc_t *soc_hdl, uint8_t vdev_id)
6317 {
6318 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
6319 	struct dp_vdev *vdev = dp_get_vdev_from_soc_vdev_id_wifi3(soc, vdev_id);
6320 
6321 	if (!vdev) {
6322 		dp_err("vdev for id %d is NULL", vdev_id);
6323 		return -EINVAL;
6324 	}
6325 
6326 	return vdev->opmode;
6327 }
6328 
6329 /**
6330  * dp_get_os_rx_handles_from_vdev_wifi3() - Get os rx handles for a vdev
6331  * @soc_hdl: ol_txrx_soc_handle handle
6332  * @vdev_id: vdev id for which os rx handles are needed
6333  * @stack_fn_p: pointer to stack function pointer
6334  * @osif_handle_p: pointer to ol_osif_vdev_handle
6335  *
6336  * Return: void
6337  */
6338 static
6339 void dp_get_os_rx_handles_from_vdev_wifi3(struct cdp_soc_t *soc_hdl,
6340 					  uint8_t vdev_id,
6341 					  ol_txrx_rx_fp *stack_fn_p,
6342 					  ol_osif_vdev_handle *osif_vdev_p)
6343 {
6344 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
6345 	struct dp_vdev *vdev = dp_get_vdev_from_soc_vdev_id_wifi3(soc, vdev_id);
6346 
6347 	if (!vdev)
6348 		return;
6349 
6350 	*stack_fn_p = vdev->osif_rx_stack;
6351 	*osif_vdev_p = vdev->osif_vdev;
6352 }
6353 
6354 /**
6355  * dp_get_ctrl_pdev_from_vdev() - Get control pdev of vdev
6356  * @soc_hdl: datapath soc handle
6357  * @vdev_id: virtual device/interface id
6358  *
6359  * Return: Handle to control pdev
6360  */
6361 static struct cdp_cfg *dp_get_ctrl_pdev_from_vdev_wifi3(
6362 						struct cdp_soc_t *soc_hdl,
6363 						uint8_t vdev_id)
6364 {
6365 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
6366 	struct dp_vdev *vdev = dp_get_vdev_from_soc_vdev_id_wifi3(soc, vdev_id);
6367 	struct dp_pdev *pdev;
6368 
6369 	if (!vdev || !vdev->pdev)
6370 		return NULL;
6371 
6372 	pdev = vdev->pdev;
6373 	return (struct cdp_cfg *)pdev->wlan_cfg_ctx;
6374 }
6375 
6376 /**
6377  * dp_monitor_mode_ring_config() - Send the tlv config to fw for monitor buffer
6378  *                                 ring based on target
6379  * @soc: soc handle
6380  * @mac_for_pdev: pdev_id
6381  * @pdev: physical device handle
6382  * @ring_num: mac id
6383  * @htt_tlv_filter: tlv filter
6384  *
6385  * Return: zero on success, non-zero on failure
6386  */
6387 static inline
6388 QDF_STATUS dp_monitor_mode_ring_config(struct dp_soc *soc, uint8_t mac_for_pdev,
6389 				       struct dp_pdev *pdev, uint8_t ring_num,
6390 				       struct htt_rx_ring_tlv_filter htt_tlv_filter)
6391 {
6392 	QDF_STATUS status;
6393 
6394 	if (soc->wlan_cfg_ctx->rxdma1_enable)
6395 		status = htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
6396 					     pdev->rxdma_mon_buf_ring[ring_num]
6397 					     .hal_srng,
6398 					     RXDMA_MONITOR_BUF, RX_BUFFER_SIZE,
6399 					     &htt_tlv_filter);
6400 	else
6401 		status = htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
6402 					     pdev->rx_mac_buf_ring[ring_num]
6403 					     .hal_srng,
6404 					     RXDMA_BUF, RX_BUFFER_SIZE,
6405 					     &htt_tlv_filter);
6406 
6407 	return status;
6408 }
6409 
6410 static inline void
6411 dp_pdev_disable_mcopy_code(struct dp_pdev *pdev)
6412 {
6413 	pdev->mcopy_mode = 0;
6414 	qdf_nbuf_queue_free(&pdev->rx_ppdu_buf_q);
6415 }
6416 
6417 /**
6418  * dp_reset_monitor_mode() - Disable monitor mode
6419  * @pdev_handle: Datapath PDEV handle
6420  *
6421  * Return: QDF_STATUS
6422  */
6423 QDF_STATUS dp_reset_monitor_mode(struct cdp_pdev *pdev_handle)
6424 {
6425 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
6426 	struct htt_rx_ring_tlv_filter htt_tlv_filter;
6427 	struct dp_soc *soc = pdev->soc;
6428 	uint8_t pdev_id;
6429 	int mac_id;
6430 	QDF_STATUS status = QDF_STATUS_SUCCESS;
6431 
6432 	pdev_id = pdev->pdev_id;
6433 	soc = pdev->soc;
6434 
6435 	qdf_spin_lock_bh(&pdev->mon_lock);
6436 
6437 	qdf_mem_zero(&(htt_tlv_filter), sizeof(htt_tlv_filter));
6438 
6439 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
6440 		int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, pdev_id);
6441 
6442 		status = dp_monitor_mode_ring_config(soc, mac_for_pdev,
6443 						     pdev, mac_id,
6444 						     htt_tlv_filter);
6445 
6446 		if (status != QDF_STATUS_SUCCESS) {
6447 			dp_err("Failed to send tlv filter for monitor mode rings");
6448 			qdf_spin_unlock_bh(&pdev->mon_lock);
6449 			return status;
6450 		}
6451 
6452 		htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
6453 			    pdev->rxdma_mon_status_ring[mac_id].hal_srng,
6454 			    RXDMA_MONITOR_STATUS, RX_BUFFER_SIZE,
6455 			    &htt_tlv_filter);
6456 	}
6457 
6458 	pdev->monitor_vdev = NULL;
6459 	if (pdev->mcopy_mode)
6460 		dp_pdev_disable_mcopy_code(pdev);
6461 	pdev->monitor_configured = false;
6462 
6463 	qdf_spin_unlock_bh(&pdev->mon_lock);
6464 
6465 	return QDF_STATUS_SUCCESS;
6466 }
6467 
6468 /**
6469  * dp_set_nac() - set peer_nac
6470  * @soc: soc handle
6471  * @vdev_id: id of dp handle
6472  * @peer_mac: mac of datapath PEER handle
6473  *
6474  * Return: void
6475  */
6476 static void dp_set_nac(struct cdp_soc_t *soc, uint8_t vdev_id,
6477 		       uint8_t *peer_mac)
6478 {
6479 	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)soc,
6480 						       peer_mac, 0, vdev_id);
6481 
6482 	if (!peer || peer->delete_in_progress)
6483 		goto fail;
6484 
6485 	peer->nac = 1;
6486 
6487 fail:
6488 	if (peer)
6489 		dp_peer_unref_delete(peer);
6490 
6491 	return;
6492 }
6493 
6494 /**
6495  * dp_get_tx_pending() - read pending tx
6496  * @pdev_handle: Datapath PDEV handle
6497  *
6498  * Return: outstanding tx
6499  */
6500 static int dp_get_tx_pending(struct cdp_pdev *pdev_handle)
6501 {
6502 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
6503 
6504 	return qdf_atomic_read(&pdev->num_tx_outstanding);
6505 }
6506 
6507 /**
6508  * dp_get_peer_mac_from_peer_id() - get peer mac
6509  * @soc: Datapath SOC handle
6510  * @peer_id: Peer ID
6511  * @peer_mac: MAC addr of PEER
6512  *
6513  * Return: QDF_STATUS
6514  */
6515 static QDF_STATUS dp_get_peer_mac_from_peer_id(struct cdp_soc_t *soc,
6516 					       uint32_t peer_id,
6517 					       uint8_t *peer_mac)
6518 {
6519 	struct dp_peer *peer;
6520 
6521 	if (soc && peer_mac) {
6522 		peer = dp_peer_find_by_id((struct dp_soc *)soc,
6523 					  (uint16_t)peer_id);
6524 		if (peer) {
6525 			qdf_mem_copy(peer_mac, peer->mac_addr.raw,
6526 				     QDF_MAC_ADDR_SIZE);
6527 			dp_peer_unref_del_find_by_id(peer);
6528 			return QDF_STATUS_SUCCESS;
6529 		}
6530 	}
6531 
6532 	return QDF_STATUS_E_FAILURE;
6533 }
6534 
6535 /**
6536  * dp_pdev_configure_monitor_rings() - configure monitor rings
6537  * @pdev: Datapath PDEV handle
6538  *
6539  * Return: QDF_STATUS
6540  */
6541 QDF_STATUS dp_pdev_configure_monitor_rings(struct dp_pdev *pdev)
6542 {
6543 	struct htt_rx_ring_tlv_filter htt_tlv_filter;
6544 	struct dp_soc *soc;
6545 	uint8_t pdev_id;
6546 	int mac_id;
6547 	QDF_STATUS status = QDF_STATUS_SUCCESS;
6548 
6549 	pdev_id = pdev->pdev_id;
6550 	soc = pdev->soc;
6551 
6552 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
6553 		"MODE[%x] FP[%02x|%02x|%02x] MO[%02x|%02x|%02x]",
6554 		pdev->mon_filter_mode, pdev->fp_mgmt_filter,
6555 		pdev->fp_ctrl_filter, pdev->fp_data_filter,
6556 		pdev->mo_mgmt_filter, pdev->mo_ctrl_filter,
6557 		pdev->mo_data_filter);
6558 
6559 	qdf_mem_zero(&(htt_tlv_filter), sizeof(htt_tlv_filter));
6560 
6561 	htt_tlv_filter.mpdu_start = 1;
6562 	htt_tlv_filter.msdu_start = 1;
6563 	htt_tlv_filter.packet = 1;
6564 	htt_tlv_filter.msdu_end = 1;
6565 	htt_tlv_filter.mpdu_end = 1;
6566 	htt_tlv_filter.packet_header = 1;
6567 	htt_tlv_filter.attention = 1;
6568 	htt_tlv_filter.ppdu_start = 0;
6569 	htt_tlv_filter.ppdu_end = 0;
6570 	htt_tlv_filter.ppdu_end_user_stats = 0;
6571 	htt_tlv_filter.ppdu_end_user_stats_ext = 0;
6572 	htt_tlv_filter.ppdu_end_status_done = 0;
6573 	htt_tlv_filter.header_per_msdu = 1;
6574 	htt_tlv_filter.enable_fp =
6575 		(pdev->mon_filter_mode & MON_FILTER_PASS) ? 1 : 0;
6576 	htt_tlv_filter.enable_md = 0;
6577 	htt_tlv_filter.enable_mo =
6578 		(pdev->mon_filter_mode & MON_FILTER_OTHER) ? 1 : 0;
6579 	htt_tlv_filter.fp_mgmt_filter = pdev->fp_mgmt_filter;
6580 	htt_tlv_filter.fp_ctrl_filter = pdev->fp_ctrl_filter;
6581 
6582 	if (pdev->mcopy_mode) {
6583 		htt_tlv_filter.fp_data_filter = 0;
6584 		htt_tlv_filter.mo_data_filter = 0;
6585 	} else {
6586 		htt_tlv_filter.fp_data_filter = pdev->fp_data_filter;
6587 		htt_tlv_filter.mo_data_filter = pdev->mo_data_filter;
6588 	}
6589 	htt_tlv_filter.mo_mgmt_filter = pdev->mo_mgmt_filter;
6590 	htt_tlv_filter.mo_ctrl_filter = pdev->mo_ctrl_filter;
6591 	htt_tlv_filter.offset_valid = false;
6592 
6593 	if ((pdev->rx_enh_capture_mode == CDP_RX_ENH_CAPTURE_MPDU) ||
6594 	    (pdev->rx_enh_capture_mode == CDP_RX_ENH_CAPTURE_MPDU_MSDU)) {
6595 		htt_tlv_filter.fp_mgmt_filter = 0;
6596 		htt_tlv_filter.fp_ctrl_filter = 0;
6597 		htt_tlv_filter.fp_data_filter = 0;
6598 		htt_tlv_filter.mo_mgmt_filter = 0;
6599 		htt_tlv_filter.mo_ctrl_filter = 0;
6600 		htt_tlv_filter.mo_data_filter = 0;
6601 	}
6602 
6603 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
6604 		int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, pdev_id);
6605 
6606 		status = dp_monitor_mode_ring_config(soc, mac_for_pdev,
6607 						     pdev, mac_id,
6608 						     htt_tlv_filter);
6609 
6610 		if (status != QDF_STATUS_SUCCESS) {
6611 			dp_err("Failed to send tlv filter for monitor mode rings");
6612 			return status;
6613 		}
6614 	}
6615 
6616 	qdf_mem_zero(&(htt_tlv_filter), sizeof(htt_tlv_filter));
6617 
6618 	htt_tlv_filter.mpdu_start = 1;
6619 	htt_tlv_filter.msdu_start = 0;
6620 	htt_tlv_filter.packet = 0;
6621 	htt_tlv_filter.msdu_end = 0;
6622 	htt_tlv_filter.mpdu_end = 0;
6623 	if ((pdev->rx_enh_capture_mode == CDP_RX_ENH_CAPTURE_MPDU) ||
6624 	    (pdev->rx_enh_capture_mode == CDP_RX_ENH_CAPTURE_MPDU_MSDU)) {
6625 		htt_tlv_filter.mpdu_end = 1;
6626 	}
6627 	htt_tlv_filter.attention = 0;
6628 	htt_tlv_filter.ppdu_start = 1;
6629 	htt_tlv_filter.ppdu_end = 1;
6630 	htt_tlv_filter.ppdu_end_user_stats = 1;
6631 	htt_tlv_filter.ppdu_end_user_stats_ext = 1;
6632 	htt_tlv_filter.ppdu_end_status_done = 1;
6633 	htt_tlv_filter.enable_fp = 1;
6634 	htt_tlv_filter.enable_md = 0;
6635 	htt_tlv_filter.enable_mo = 1;
6636 	if (pdev->mcopy_mode ||
6637 	    (pdev->rx_enh_capture_mode != CDP_RX_ENH_CAPTURE_DISABLED)) {
6638 		htt_tlv_filter.packet_header = 1;
6639 		if (pdev->rx_enh_capture_mode == CDP_RX_ENH_CAPTURE_MPDU) {
6640 			htt_tlv_filter.header_per_msdu = 0;
6641 			htt_tlv_filter.enable_mo = 0;
6642 		} else if (pdev->rx_enh_capture_mode ==
6643 			   CDP_RX_ENH_CAPTURE_MPDU_MSDU) {
6644 			bool is_rx_mon_proto_flow_tag_enabled =
6645 			    wlan_cfg_is_rx_mon_protocol_flow_tag_enabled(
6646 						    soc->wlan_cfg_ctx);
6647 			htt_tlv_filter.header_per_msdu = 1;
6648 			htt_tlv_filter.enable_mo = 0;
6649 			if (pdev->is_rx_enh_capture_trailer_enabled ||
6650 			    is_rx_mon_proto_flow_tag_enabled)
6651 				htt_tlv_filter.msdu_end = 1;
6652 		}
6653 	}
6654 
6655 	htt_tlv_filter.fp_mgmt_filter = FILTER_MGMT_ALL;
6656 	htt_tlv_filter.fp_ctrl_filter = FILTER_CTRL_ALL;
6657 	htt_tlv_filter.fp_data_filter = FILTER_DATA_ALL;
6658 	htt_tlv_filter.mo_mgmt_filter = FILTER_MGMT_ALL;
6659 	htt_tlv_filter.mo_ctrl_filter = FILTER_CTRL_ALL;
6660 	htt_tlv_filter.mo_data_filter = FILTER_DATA_ALL;
6661 	htt_tlv_filter.offset_valid = false;
6662 
6663 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
6664 		int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id,
6665 						pdev->pdev_id);
6666 
6667 		htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
6668 			pdev->rxdma_mon_status_ring[mac_id].hal_srng,
6669 			RXDMA_MONITOR_STATUS, RX_BUFFER_SIZE, &htt_tlv_filter);
6670 	}
6671 
6672 	return status;
6673 }
6674 
6675 /**
6676  * dp_vdev_set_monitor_mode() - Set DP VDEV to monitor mode
6677  * @vdev_id: id of the Datapath VDEV handle
6678  * @special_monitor: Flag to denote if it is smart monitor mode
6679  *
6680  * Return: 0 on success, not 0 on failure
6681  */
6682 static QDF_STATUS dp_vdev_set_monitor_mode(struct cdp_soc_t *soc,
6683 					   uint8_t vdev_id,
6684 					   uint8_t special_monitor)
6685 {
6686 	struct dp_pdev *pdev;
6687 	struct dp_vdev *vdev =
6688 		dp_get_vdev_from_soc_vdev_id_wifi3((struct dp_soc *)soc,
6689 						   vdev_id);
6690 
6691 	if (!vdev)
6692 		return QDF_STATUS_E_FAILURE;
6693 
6694 	pdev = vdev->pdev;
6695 	pdev->monitor_vdev = vdev;
6696 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_WARN,
6697 		  "pdev=%pK, pdev_id=%d, soc=%pK vdev=%pK\n",
6698 		  pdev, pdev->pdev_id, pdev->soc, vdev);
6699 
6700 	/*
6701 	 * Do not configure the monitor buf ring and filter for smart
6702 	 * and lite monitor modes.
6703 	 * For smart monitor, filters are added along with the first NAC.
6704 	 * For lite monitor, the required configuration is done through
6705 	 * dp_set_pdev_param.
6706 	 */
6707 	if (special_monitor)
6708 		return QDF_STATUS_SUCCESS;
6709 
6710 	/*Check if current pdev's monitor_vdev exists */
6711 	if (pdev->monitor_configured) {
6712 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
6713 			  "monitor vap already created vdev=%pK\n", vdev);
6714 		return QDF_STATUS_E_RESOURCES;
6715 	}
6716 
6717 	pdev->monitor_configured = true;
6718 
6719 	dp_mon_buf_delayed_replenish(pdev);
6720 
6721 	return dp_pdev_configure_monitor_rings(pdev);
6722 }
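
/*
 * Usage sketch (illustrative): passing special_monitor == 0 enables full
 * monitor mode (delayed buffer replenish plus ring/filter programming via
 * dp_pdev_configure_monitor_rings()); a non-zero value selects smart/lite
 * monitor, whose filters are programmed later:
 *
 *	status = dp_vdev_set_monitor_mode(soc, vdev_id, 0);
 *	status = dp_vdev_set_monitor_mode(soc, vdev_id, 1);
 */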
6723 
6724 /**
6725  * dp_pdev_set_advance_monitor_filter() - Set DP PDEV monitor filter
6726  * @pdev_handle: Datapath PDEV handle
6727  * @filter_val: Flag to select Filter for monitor mode
6728  * Return: 0 on success, not 0 on failure
6729  */
6730 static QDF_STATUS
6731 dp_pdev_set_advance_monitor_filter(struct cdp_pdev *pdev_handle,
6732 				   struct cdp_monitor_filter *filter_val)
6733 {
6734 	/* Many monitor VAPs can exist in a system but only one can be up at
6735 	 * any time
6736 	 */
6737 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
6738 	struct dp_vdev *vdev = pdev->monitor_vdev;
6739 	struct htt_rx_ring_tlv_filter htt_tlv_filter;
6740 	struct dp_soc *soc;
6741 	uint8_t pdev_id;
6742 	int mac_id;
6743 	QDF_STATUS status = QDF_STATUS_SUCCESS;
6744 
6745 	pdev_id = pdev->pdev_id;
6746 	soc = pdev->soc;
6747 
6748 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_WARN,
6749 		"pdev=%pK, pdev_id=%d, soc=%pK vdev=%pK",
6750 		pdev, pdev_id, soc, vdev);
6751 
6752 	/*Check if current pdev's monitor_vdev exists */
6753 	if (!pdev->monitor_vdev) {
6754 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
6755 			"vdev=%pK", vdev);
6756 		qdf_assert(vdev);
6757 	}
6758 
6759 	/* update filter mode, type in pdev structure */
6760 	pdev->mon_filter_mode = filter_val->mode;
6761 	pdev->fp_mgmt_filter = filter_val->fp_mgmt;
6762 	pdev->fp_ctrl_filter = filter_val->fp_ctrl;
6763 	pdev->fp_data_filter = filter_val->fp_data;
6764 	pdev->mo_mgmt_filter = filter_val->mo_mgmt;
6765 	pdev->mo_ctrl_filter = filter_val->mo_ctrl;
6766 	pdev->mo_data_filter = filter_val->mo_data;
6767 
6768 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
6769 		"MODE[%x] FP[%02x|%02x|%02x] MO[%02x|%02x|%02x]",
6770 		pdev->mon_filter_mode, pdev->fp_mgmt_filter,
6771 		pdev->fp_ctrl_filter, pdev->fp_data_filter,
6772 		pdev->mo_mgmt_filter, pdev->mo_ctrl_filter,
6773 		pdev->mo_data_filter);
6774 
6775 	qdf_mem_zero(&(htt_tlv_filter), sizeof(htt_tlv_filter));
6776 
6777 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
6778 		int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, pdev_id);
6779 
6780 		status = dp_monitor_mode_ring_config(soc, mac_for_pdev,
6781 						     pdev, mac_id,
6782 						     htt_tlv_filter);
6783 
6784 		if (status != QDF_STATUS_SUCCESS) {
6785 			dp_err("Failed to send tlv filter for monitor mode rings");
6786 			return status;
6787 		}
6788 
6789 		htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
6790 			pdev->rxdma_mon_status_ring[mac_id].hal_srng,
6791 			RXDMA_MONITOR_STATUS, RX_BUFFER_SIZE, &htt_tlv_filter);
6792 	}
6793 
6794 	htt_tlv_filter.mpdu_start = 1;
6795 	htt_tlv_filter.msdu_start = 1;
6796 	htt_tlv_filter.packet = 1;
6797 	htt_tlv_filter.msdu_end = 1;
6798 	htt_tlv_filter.mpdu_end = 1;
6799 	htt_tlv_filter.packet_header = 1;
6800 	htt_tlv_filter.attention = 1;
6801 	htt_tlv_filter.ppdu_start = 0;
6802 	htt_tlv_filter.ppdu_end = 0;
6803 	htt_tlv_filter.ppdu_end_user_stats = 0;
6804 	htt_tlv_filter.ppdu_end_user_stats_ext = 0;
6805 	htt_tlv_filter.ppdu_end_status_done = 0;
6806 	htt_tlv_filter.header_per_msdu = 1;
6807 	htt_tlv_filter.enable_fp =
6808 		(pdev->mon_filter_mode & MON_FILTER_PASS) ? 1 : 0;
6809 	htt_tlv_filter.enable_md = 0;
6810 	htt_tlv_filter.enable_mo =
6811 		(pdev->mon_filter_mode & MON_FILTER_OTHER) ? 1 : 0;
6812 	htt_tlv_filter.fp_mgmt_filter = pdev->fp_mgmt_filter;
6813 	htt_tlv_filter.fp_ctrl_filter = pdev->fp_ctrl_filter;
6814 	if (pdev->mcopy_mode)
6815 		htt_tlv_filter.fp_data_filter = 0;
6816 	else
6817 		htt_tlv_filter.fp_data_filter = pdev->fp_data_filter;
6818 	htt_tlv_filter.mo_mgmt_filter = pdev->mo_mgmt_filter;
6819 	htt_tlv_filter.mo_ctrl_filter = pdev->mo_ctrl_filter;
6820 	htt_tlv_filter.mo_data_filter = pdev->mo_data_filter;
6821 	htt_tlv_filter.offset_valid = false;
6822 
6823 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
6824 		int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, pdev_id);
6825 
6826 		status = dp_monitor_mode_ring_config(soc, mac_for_pdev,
6827 						     pdev, mac_id,
6828 						     htt_tlv_filter);
6829 
6830 		if (status != QDF_STATUS_SUCCESS) {
6831 			dp_err("Failed to send tlv filter for monitor mode rings");
6832 			return status;
6833 		}
6834 	}
6835 
6836 	qdf_mem_zero(&(htt_tlv_filter), sizeof(htt_tlv_filter));
6837 
6838 	htt_tlv_filter.mpdu_start = 1;
6839 	htt_tlv_filter.msdu_start = 0;
6840 	htt_tlv_filter.packet = 0;
6841 	htt_tlv_filter.msdu_end = 0;
6842 	htt_tlv_filter.mpdu_end = 0;
6843 	htt_tlv_filter.attention = 0;
6844 	htt_tlv_filter.ppdu_start = 1;
6845 	htt_tlv_filter.ppdu_end = 1;
6846 	htt_tlv_filter.ppdu_end_user_stats = 1;
6847 	htt_tlv_filter.ppdu_end_user_stats_ext = 1;
6848 	htt_tlv_filter.ppdu_end_status_done = 1;
6849 	htt_tlv_filter.enable_fp = 1;
6850 	htt_tlv_filter.enable_md = 0;
6851 	htt_tlv_filter.enable_mo = 1;
6852 	if (pdev->mcopy_mode) {
6853 		htt_tlv_filter.packet_header = 1;
6854 	}
6855 	htt_tlv_filter.fp_mgmt_filter = FILTER_MGMT_ALL;
6856 	htt_tlv_filter.fp_ctrl_filter = FILTER_CTRL_ALL;
6857 	htt_tlv_filter.fp_data_filter = FILTER_DATA_ALL;
6858 	htt_tlv_filter.mo_mgmt_filter = FILTER_MGMT_ALL;
6859 	htt_tlv_filter.mo_ctrl_filter = FILTER_CTRL_ALL;
6860 	htt_tlv_filter.mo_data_filter = FILTER_DATA_ALL;
6861 	htt_tlv_filter.offset_valid = false;
6862 
6863 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
6864 		int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id,
6865 						pdev->pdev_id);
6866 
6867 		htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
6868 			pdev->rxdma_mon_status_ring[mac_id].hal_srng,
6869 			RXDMA_MONITOR_STATUS, RX_BUFFER_SIZE, &htt_tlv_filter);
6870 	}
6871 
6872 	return QDF_STATUS_SUCCESS;
6873 }
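
/*
 * Usage sketch (illustrative): a caller fills struct cdp_monitor_filter
 * and hands it to dp_pdev_set_advance_monitor_filter(). The field names
 * below match the accesses above; the MON_FILTER_* and FILTER_*_ALL values
 * are the ones already used in this file:
 *
 *	struct cdp_monitor_filter filter = {
 *		.mode = MON_FILTER_PASS | MON_FILTER_OTHER,
 *		.fp_mgmt = FILTER_MGMT_ALL,
 *		.fp_ctrl = FILTER_CTRL_ALL,
 *		.fp_data = FILTER_DATA_ALL,
 *		.mo_mgmt = FILTER_MGMT_ALL,
 *		.mo_ctrl = FILTER_CTRL_ALL,
 *		.mo_data = FILTER_DATA_ALL,
 *	};
 *
 *	status = dp_pdev_set_advance_monitor_filter(pdev_handle, &filter);
 */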
6874 
6875 /**
6876  * dp_pdev_set_monitor_channel() - set monitor channel num in pdev
6877  * @pdev_handle: Datapath PDEV handle
6878  * @chan_num: channel number to be set
6879  * Return: None
6880  */
6881 static
6882 void dp_pdev_set_monitor_channel(struct cdp_pdev *pdev_handle, int chan_num)
6883 {
6884 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
6885 
6886 	pdev->mon_chan_num = chan_num;
6887 }
6888 
6889 /**
6890  * dp_deliver_tx_mgmt() - Deliver mgmt frame for tx capture
6891  * @pdev_handle: Datapath PDEV handle
6892  * @nbuf: Management frame buffer
6893  */
6894 static void
6895 dp_deliver_tx_mgmt(struct cdp_pdev *pdev_handle, qdf_nbuf_t nbuf)
6896 {
6897 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
6898 
6899 	dp_deliver_mgmt_frm(pdev, nbuf);
6900 }
6901 
6902 /**
6903  * dp_mon_set_bsscolor() - sets bsscolor for tx capture
6904  * @pdev_handle: Datapath PDEV handle
6905  * @bsscolor: new bsscolor
6906  */
6907 static void
6908 dp_mon_set_bsscolor(struct cdp_pdev *pdev_handle, uint8_t bsscolor)
6909 {
6910 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
6911 
6912 	pdev->rx_mon_recv_status.bsscolor = bsscolor;
6913 }
6914 
6915 /**
6916  * dp_get_pdev_id_frm_pdev() - get pdev_id
6917  * @pdev_handle: Datapath PDEV handle
6918  *
6919  * Return: pdev_id
6920  */
6921 static
6922 uint8_t dp_get_pdev_id_frm_pdev(struct cdp_pdev *pdev_handle)
6923 {
6924 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
6925 
6926 	return pdev->pdev_id;
6927 }
6928 
6929 /**
6930  * dp_get_delay_stats_flag() - get delay stats flag
6931  * @pdev_handle: Datapath PDEV handle
6932  *
6933  * Return: false if the flag is disabled else true
6934  */
6935 static
6936 bool dp_get_delay_stats_flag(struct cdp_pdev *pdev_handle)
6937 {
6938 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
6939 
6940 	return pdev->delay_stats_flag;
6941 }
6942 
6943 /**
6944  * dp_pdev_set_chan_noise_floor() - set channel noise floor
6945  * @pdev_handle: Datapath PDEV handle
6946  * @chan_noise_floor: Channel Noise Floor
6947  *
6948  * Return: void
6949  */
6950 static
6951 void dp_pdev_set_chan_noise_floor(struct cdp_pdev *pdev_handle,
6952 				  int16_t chan_noise_floor)
6953 {
6954 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
6955 
6956 	pdev->chan_noise_floor = chan_noise_floor;
6957 }
6958 
6959 /**
6960  * dp_vdev_get_filter_ucast_data() - get DP VDEV monitor ucast filter
6961  * @vdev_handle: Datapath VDEV handle
6962  * Return: true on ucast filter flag set
6963  */
6964 static bool dp_vdev_get_filter_ucast_data(struct cdp_vdev *vdev_handle)
6965 {
6966 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
6967 	struct dp_pdev *pdev;
6968 
6969 	pdev = vdev->pdev;
6970 
6971 	if ((pdev->fp_data_filter & FILTER_DATA_UCAST) ||
6972 	    (pdev->mo_data_filter & FILTER_DATA_UCAST))
6973 		return true;
6974 
6975 	return false;
6976 }
6977 
6978 /**
6979  * dp_vdev_get_filter_mcast_data() - get DP VDEV monitor mcast filter
6980  * @vdev_handle: Datapath VDEV handle
6981  * Return: true on mcast filter flag set
6982  */
6983 static bool dp_vdev_get_filter_mcast_data(struct cdp_vdev *vdev_handle)
6984 {
6985 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
6986 	struct dp_pdev *pdev;
6987 
6988 	pdev = vdev->pdev;
6989 
6990 	if ((pdev->fp_data_filter & FILTER_DATA_MCAST) ||
6991 	    (pdev->mo_data_filter & FILTER_DATA_MCAST))
6992 		return true;
6993 
6994 	return false;
6995 }
6996 
6997 /**
6998  * dp_vdev_get_filter_non_data() - get DP VDEV monitor non_data filter
6999  * @vdev_handle: Datapath VDEV handle
7000  * Return: true on non data filter flag set
7001  */
7002 static bool dp_vdev_get_filter_non_data(struct cdp_vdev *vdev_handle)
7003 {
7004 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
7005 	struct dp_pdev *pdev;
7006 
7007 	pdev = vdev->pdev;
7008 
7009 	if ((pdev->fp_mgmt_filter & FILTER_MGMT_ALL) ||
7010 	    (pdev->mo_mgmt_filter & FILTER_MGMT_ALL)) {
7011 		if ((pdev->fp_ctrl_filter & FILTER_CTRL_ALL) ||
7012 		    (pdev->mo_ctrl_filter & FILTER_CTRL_ALL)) {
7013 			return true;
7014 		}
7015 	}
7016 
7017 	return false;
7018 }
7019 
7020 #ifdef MESH_MODE_SUPPORT
7021 void dp_peer_set_mesh_mode(struct cdp_vdev *vdev_hdl, uint32_t val)
7022 {
7023 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_hdl;
7024 
7025 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
7026 		FL("val %d"), val);
7027 	vdev->mesh_vdev = val;
7028 }
7029 
7030 /*
7031  * dp_peer_set_mesh_rx_filter() - to set the mesh rx filter
7032  * @vdev_hdl: virtual device object
7033  * @val: value to be set
7034  *
7035  * Return: void
7036  */
7037 void dp_peer_set_mesh_rx_filter(struct cdp_vdev *vdev_hdl, uint32_t val)
7038 {
7039 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_hdl;
7040 
7041 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
7042 		FL("val %d"), val);
7043 	vdev->mesh_rx_filter = val;
7044 }
7045 #endif
7046 
7047 bool dp_check_pdev_exists(struct dp_soc *soc, struct dp_pdev *data)
7048 {
7049 	uint8_t pdev_count;
7050 
7051 	for (pdev_count = 0; pdev_count < MAX_PDEV_CNT; pdev_count++) {
7052 		if (soc->pdev_list[pdev_count] &&
7053 		    soc->pdev_list[pdev_count] == data)
7054 			return true;
7055 	}
7056 	return false;
7057 }
7058 
7059 /**
7060  * dp_rx_bar_stats_cb(): BAR received stats callback
7061  * @soc: SOC handle
7062  * @cb_ctxt: Call back context
7063  * @reo_status: Reo status
7064  *
7065  * return: void
7066  */
7067 void dp_rx_bar_stats_cb(struct dp_soc *soc, void *cb_ctxt,
7068 	union hal_reo_status *reo_status)
7069 {
7070 	struct dp_pdev *pdev = (struct dp_pdev *)cb_ctxt;
7071 	struct hal_reo_queue_status *queue_status = &(reo_status->queue_status);
7072 
7073 	if (!dp_check_pdev_exists(soc, pdev)) {
7074 		dp_err_rl("pdev doesn't exist");
7075 		return;
7076 	}
7077 
7078 	if (!qdf_atomic_read(&soc->cmn_init_done))
7079 		return;
7080 
7081 	if (queue_status->header.status != HAL_REO_CMD_SUCCESS) {
7082 		DP_PRINT_STATS("REO stats failure %d",
7083 			       queue_status->header.status);
7084 		qdf_atomic_set(&(pdev->stats_cmd_complete), 1);
7085 		return;
7086 	}
7087 
7088 	pdev->stats.rx.bar_recv_cnt += queue_status->bar_rcvd_cnt;
7089 	qdf_atomic_set(&(pdev->stats_cmd_complete), 1);
7091 }
7092 
7093 /**
7094  * dp_aggregate_vdev_stats(): Consolidate stats at VDEV level
 * @vdev: DP VDEV handle
 * @vdev_stats: buffer to hold the consolidated vdev stats
 *
7097  * return: void
7098  */
7099 void dp_aggregate_vdev_stats(struct dp_vdev *vdev,
7100 			     struct cdp_vdev_stats *vdev_stats)
7101 {
7102 	struct dp_peer *peer = NULL;
7103 	struct dp_soc *soc = NULL;
7104 
7105 	if (!vdev || !vdev->pdev)
7106 		return;
7107 
7108 	soc = vdev->pdev->soc;
7109 
7110 	qdf_mem_copy(vdev_stats, &vdev->stats, sizeof(vdev->stats));
7111 
7112 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem)
7113 		dp_update_vdev_stats(vdev_stats, peer);
7114 
7115 #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
7116 	dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, vdev->pdev->soc,
7117 			     vdev_stats, vdev->vdev_id,
7118 			     UPDATE_VDEV_STATS, vdev->pdev->pdev_id);
7119 #endif
7120 }
7121 
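/**
 * dp_aggregate_pdev_stats(): Consolidate stats at PDEV level
 * @pdev: DP PDEV handle
 *
 * return: void
 */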
7122 void dp_aggregate_pdev_stats(struct dp_pdev *pdev)
7123 {
7124 	struct dp_vdev *vdev = NULL;
7125 	struct dp_soc *soc;
7126 	struct cdp_vdev_stats *vdev_stats =
7127 			qdf_mem_malloc(sizeof(struct cdp_vdev_stats));
7128 
7129 	if (!vdev_stats) {
7130 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "DP alloc failure - unable to allocate vdev stats");
7132 		return;
7133 	}
7134 
7135 	qdf_mem_zero(&pdev->stats.tx, sizeof(pdev->stats.tx));
7136 	qdf_mem_zero(&pdev->stats.rx, sizeof(pdev->stats.rx));
7137 	qdf_mem_zero(&pdev->stats.tx_i, sizeof(pdev->stats.tx_i));
7138 
7139 	if (pdev->mcopy_mode)
7140 		DP_UPDATE_STATS(pdev, pdev->invalid_peer);
7141 
7142 	soc = pdev->soc;
7143 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
7144 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
7145 	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
7147 		dp_aggregate_vdev_stats(vdev, vdev_stats);
7148 		dp_update_pdev_stats(pdev, vdev_stats);
7149 		dp_update_pdev_ingress_stats(pdev, vdev);
7150 	}
7151 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
7152 	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
7153 	qdf_mem_free(vdev_stats);
7154 
7155 #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
7156 	dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, pdev->soc, &pdev->stats,
7157 			     pdev->pdev_id, UPDATE_PDEV_STATS, pdev->pdev_id);
7158 #endif
7159 }
7160 
7161 /**
7162  * dp_vdev_getstats() - get vdev packet level stats
7163  * @vdev_handle: Datapath VDEV handle
7164  * @stats: cdp network device stats structure
7165  *
7166  * Return: QDF_STATUS
7167  */
7168 static QDF_STATUS dp_vdev_getstats(struct cdp_vdev *vdev_handle,
7169 				   struct cdp_dev_stats *stats)
7170 {
7171 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
7172 	struct dp_pdev *pdev;
7173 	struct dp_soc *soc;
7174 	struct cdp_vdev_stats *vdev_stats;
7175 
7176 	if (!vdev)
7177 		return QDF_STATUS_E_FAILURE;
7178 
7179 	pdev = vdev->pdev;
7180 	if (!pdev)
7181 		return QDF_STATUS_E_FAILURE;
7182 
7183 	soc = pdev->soc;
7184 
7185 	vdev_stats = qdf_mem_malloc(sizeof(struct cdp_vdev_stats));
7186 
7187 	if (!vdev_stats) {
7188 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "DP alloc failure - unable to allocate vdev stats");
7190 		return QDF_STATUS_E_FAILURE;
7191 	}
7192 
7193 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
7194 	dp_aggregate_vdev_stats(vdev, vdev_stats);
7195 	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
7196 
7197 	stats->tx_packets = vdev_stats->tx_i.rcvd.num;
7198 	stats->tx_bytes = vdev_stats->tx_i.rcvd.bytes;
7199 
7200 	stats->tx_errors = vdev_stats->tx.tx_failed +
7201 		vdev_stats->tx_i.dropped.dropped_pkt.num;
7202 	stats->tx_dropped = stats->tx_errors;
7203 
7204 	stats->rx_packets = vdev_stats->rx.unicast.num +
7205 		vdev_stats->rx.multicast.num +
7206 		vdev_stats->rx.bcast.num;
7207 	stats->rx_bytes = vdev_stats->rx.unicast.bytes +
7208 		vdev_stats->rx.multicast.bytes +
7209 		vdev_stats->rx.bcast.bytes;
7210 
7211 	qdf_mem_free(vdev_stats);
7212 
7213 	return QDF_STATUS_SUCCESS;
7214 }
7215 
7217 /**
7218  * dp_pdev_getstats() - get pdev packet level stats
7219  * @pdev_handle: Datapath PDEV handle
7220  * @stats: cdp network device stats structure
7221  *
 * Return: void
7223  */
7224 static void dp_pdev_getstats(struct cdp_pdev *pdev_handle,
7225 			     struct cdp_dev_stats *stats)
7226 {
7227 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
7228 
7229 	dp_aggregate_pdev_stats(pdev);
7230 
7231 	stats->tx_packets = pdev->stats.tx_i.rcvd.num;
7232 	stats->tx_bytes = pdev->stats.tx_i.rcvd.bytes;
7233 
7234 	stats->tx_errors = pdev->stats.tx.tx_failed +
7235 		pdev->stats.tx_i.dropped.dropped_pkt.num;
7236 	stats->tx_dropped = stats->tx_errors;
7237 
7238 	stats->rx_packets = pdev->stats.rx.unicast.num +
7239 		pdev->stats.rx.multicast.num +
7240 		pdev->stats.rx.bcast.num;
7241 	stats->rx_bytes = pdev->stats.rx.unicast.bytes +
7242 		pdev->stats.rx.multicast.bytes +
7243 		pdev->stats.rx.bcast.bytes;
7244 	stats->rx_errors = pdev->stats.err.desc_alloc_fail +
7245 		pdev->stats.err.ip_csum_err +
7246 		pdev->stats.err.tcp_udp_csum_err +
7247 		pdev->stats.rx.err.mic_err +
7248 		pdev->stats.rx.err.decrypt_err +
7249 		pdev->stats.err.rxdma_error +
7250 		pdev->stats.err.reo_error;
7251 	stats->rx_dropped = pdev->stats.dropped.msdu_not_done +
7252 		pdev->stats.dropped.mec +
7253 		pdev->stats.dropped.mesh_filter +
7254 		pdev->stats.dropped.wifi_parse +
7255 		pdev->stats.dropped.mon_rx_drop +
7256 		pdev->stats.dropped.mon_radiotap_update_err;
7257 }
7258 
7259 /**
7260  * dp_get_device_stats() - get interface level packet stats
7261  * @soc: soc handle
 * @id: vdev_id or pdev_id based on type
7263  * @stats: cdp network device stats structure
7264  * @type: device type pdev/vdev
7265  *
7266  * Return: QDF_STATUS
7267  */
7268 static QDF_STATUS dp_get_device_stats(struct cdp_soc_t *soc, uint8_t id,
7269 				      struct cdp_dev_stats *stats,
7270 				      uint8_t type)
7271 {
7272 	switch (type) {
7273 	case UPDATE_VDEV_STATS:
7274 		return dp_vdev_getstats(
7275 			(struct cdp_vdev *)dp_get_vdev_from_soc_vdev_id_wifi3(
7276 			 (struct dp_soc *)soc, id), stats);
7277 	case UPDATE_PDEV_STATS:
7278 		{
7279 			struct dp_pdev *pdev =
7280 				dp_get_pdev_from_soc_pdev_id_wifi3(
7281 						(struct dp_soc *)soc,
7282 						 id);
7283 			if (pdev) {
7284 				dp_pdev_getstats((struct cdp_pdev *)pdev,
7285 						 stats);
7286 				return QDF_STATUS_SUCCESS;
7287 			}
7288 		}
7289 		break;
7290 	default:
7291 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			"apstats cannot be updated for this input type %d",
			type);
7294 		break;
7295 	}
7296 
7297 	return QDF_STATUS_E_FAILURE;
7298 }
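
/*
 * Illustrative usage sketch (not called anywhere in this file; the soc
 * handle and the ids are assumed to come from the caller): both device
 * types are served by the same entry point, selected via the type
 * argument.
 *
 *	struct cdp_dev_stats dev_stats;
 *
 *	dp_get_device_stats(soc, vdev_id, &dev_stats, UPDATE_VDEV_STATS);
 *	dp_get_device_stats(soc, pdev_id, &dev_stats, UPDATE_PDEV_STATS);
 */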
7299 
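/**
 * dp_srng_get_str_from_hal_ring_type() - Return the printable name of a ring
 * @ring_type: hal ring type
 *
 * Return: ring name string, "Invalid" for an unknown ring type
 */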
7300 const
7301 char *dp_srng_get_str_from_hal_ring_type(enum hal_ring_type ring_type)
7302 {
7303 	switch (ring_type) {
7304 	case REO_DST:
7305 		return "Reo_dst";
7306 	case REO_EXCEPTION:
7307 		return "Reo_exception";
7308 	case REO_CMD:
7309 		return "Reo_cmd";
7310 	case REO_REINJECT:
7311 		return "Reo_reinject";
7312 	case REO_STATUS:
7313 		return "Reo_status";
7314 	case WBM2SW_RELEASE:
7315 		return "wbm2sw_release";
7316 	case TCL_DATA:
7317 		return "tcl_data";
7318 	case TCL_CMD:
7319 		return "tcl_cmd";
7320 	case TCL_STATUS:
7321 		return "tcl_status";
7322 	case SW2WBM_RELEASE:
7323 		return "sw2wbm_release";
7324 	case RXDMA_BUF:
7325 		return "Rxdma_buf";
7326 	case RXDMA_DST:
7327 		return "Rxdma_dst";
7328 	case RXDMA_MONITOR_BUF:
7329 		return "Rxdma_monitor_buf";
7330 	case RXDMA_MONITOR_DESC:
7331 		return "Rxdma_monitor_desc";
7332 	case RXDMA_MONITOR_STATUS:
7333 		return "Rxdma_monitor_status";
7334 	default:
7335 		dp_err("Invalid ring type");
7336 		break;
7337 	}
7338 	return "Invalid";
7339 }
7340 
7341 /*
 * dp_print_napi_stats(): print NAPI stats
 * @soc: soc handle
7344  */
7345 void dp_print_napi_stats(struct dp_soc *soc)
7346 {
7347 	hif_print_napi_stats(soc->hif_handle);
7348 }
7349 
7350 /**
7351  * dp_txrx_host_stats_clr(): Reinitialize the txrx stats
7352  * @vdev: DP_VDEV handle
7353  *
7354  * Return: QDF_STATUS
7355  */
7356 static inline QDF_STATUS
7357 dp_txrx_host_stats_clr(struct dp_vdev *vdev)
7358 {
7359 	struct dp_peer *peer = NULL;
7360 
7361 	if (!vdev || !vdev->pdev)
7362 		return QDF_STATUS_E_FAILURE;
7363 
7364 	DP_STATS_CLR(vdev->pdev);
7365 	DP_STATS_CLR(vdev->pdev->soc);
7366 	DP_STATS_CLR(vdev);
7367 
7368 	hif_clear_napi_stats(vdev->pdev->soc->hif_handle);
7369 
7370 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
7371 		if (!peer)
7372 			return QDF_STATUS_E_FAILURE;
7373 		DP_STATS_CLR(peer);
7374 
7375 #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
7376 		dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, vdev->pdev->soc,
7377 				     &peer->stats,  peer->peer_ids[0],
7378 				     UPDATE_PEER_STATS, vdev->pdev->pdev_id);
7379 #endif
7380 	}
7381 
7382 #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
7383 	dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, vdev->pdev->soc,
7384 			     &vdev->stats,  vdev->vdev_id,
7385 			     UPDATE_VDEV_STATS, vdev->pdev->pdev_id);
7386 #endif
7387 	return QDF_STATUS_SUCCESS;
7388 }
7389 
7390 /*
7391  * dp_get_host_peer_stats()- function to print peer stats
7392  * @soc: dp_soc handle
7393  * @mac_addr: mac address of the peer
7394  *
7395  * Return: QDF_STATUS
7396  */
7397 static QDF_STATUS
7398 dp_get_host_peer_stats(struct cdp_soc_t *soc, uint8_t *mac_addr)
7399 {
7400 	QDF_STATUS status = QDF_STATUS_SUCCESS;
7401 	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)soc,
7402 						      mac_addr, 0,
7403 						      DP_VDEV_ALL);
7404 	if (!peer || peer->delete_in_progress) {
7405 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
7406 			  "%s: Invalid peer\n", __func__);
7407 		status = QDF_STATUS_E_FAILURE;
7408 		goto fail;
7409 	}
7410 
7411 	dp_print_peer_stats(peer);
7412 	dp_peer_rxtid_stats(peer, dp_rx_tid_stats_cb, NULL);
7413 fail:
7414 	if (peer)
7415 		dp_peer_unref_delete(peer);
7416 
7417 	return status;
7418 }
7419 
7420 /**
7421  * dp_txrx_stats_help() - Helper function for Txrx_Stats
7422  *
7423  * Return: None
7424  */
7425 static void dp_txrx_stats_help(void)
7426 {
7427 	dp_info("Command: iwpriv wlan0 txrx_stats <stats_option> <mac_id>");
7428 	dp_info("stats_option:");
7429 	dp_info("  1 -- HTT Tx Statistics");
7430 	dp_info("  2 -- HTT Rx Statistics");
7431 	dp_info("  3 -- HTT Tx HW Queue Statistics");
7432 	dp_info("  4 -- HTT Tx HW Sched Statistics");
7433 	dp_info("  5 -- HTT Error Statistics");
7434 	dp_info("  6 -- HTT TQM Statistics");
7435 	dp_info("  7 -- HTT TQM CMDQ Statistics");
7436 	dp_info("  8 -- HTT TX_DE_CMN Statistics");
7437 	dp_info("  9 -- HTT Tx Rate Statistics");
7438 	dp_info(" 10 -- HTT Rx Rate Statistics");
7439 	dp_info(" 11 -- HTT Peer Statistics");
7440 	dp_info(" 12 -- HTT Tx SelfGen Statistics");
7441 	dp_info(" 13 -- HTT Tx MU HWQ Statistics");
7442 	dp_info(" 14 -- HTT RING_IF_INFO Statistics");
7443 	dp_info(" 15 -- HTT SRNG Statistics");
7444 	dp_info(" 16 -- HTT SFM Info Statistics");
7445 	dp_info(" 17 -- HTT PDEV_TX_MU_MIMO_SCHED INFO Statistics");
7446 	dp_info(" 18 -- HTT Peer List Details");
7447 	dp_info(" 20 -- Clear Host Statistics");
7448 	dp_info(" 21 -- Host Rx Rate Statistics");
7449 	dp_info(" 22 -- Host Tx Rate Statistics");
7450 	dp_info(" 23 -- Host Tx Statistics");
7451 	dp_info(" 24 -- Host Rx Statistics");
7452 	dp_info(" 25 -- Host AST Statistics");
7453 	dp_info(" 26 -- Host SRNG PTR Statistics");
7454 	dp_info(" 27 -- Host Mon Statistics");
7455 	dp_info(" 28 -- Host REO Queue Statistics");
7456 	dp_info(" 29 -- Host Soc cfg param Statistics");
7457 	dp_info(" 30 -- Host pdev cfg param Statistics");
7458 }
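
/*
 * Example invocation (illustrative; "wlan0" is an assumed interface name):
 * dumping host Rx statistics, option 24 in the table above, for mac_id 0:
 *
 *	iwpriv wlan0 txrx_stats 24 0
 */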
7459 
7460 /**
7461  * dp_print_host_stats()- Function to print the stats aggregated at host
 * @vdev: DP_VDEV handle
 * @req: host stats request containing the stats type
 *
 * Return: 0 on success, prints error message in case of failure
7466  */
7467 static int
7468 dp_print_host_stats(struct dp_vdev *vdev,
7469 		    struct cdp_txrx_stats_req *req)
7470 {
7471 	struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;
7472 	enum cdp_host_txrx_stats type =
7473 			dp_stats_mapping_table[req->stats][STATS_HOST];
7474 
7475 	dp_aggregate_pdev_stats(pdev);
7476 
7477 	switch (type) {
7478 	case TXRX_CLEAR_STATS:
7479 		dp_txrx_host_stats_clr(vdev);
7480 		break;
7481 	case TXRX_RX_RATE_STATS:
7482 		dp_print_rx_rates(vdev);
7483 		break;
7484 	case TXRX_TX_RATE_STATS:
7485 		dp_print_tx_rates(vdev);
7486 		break;
7487 	case TXRX_TX_HOST_STATS:
7488 		dp_print_pdev_tx_stats(pdev);
7489 		dp_print_soc_tx_stats(pdev->soc);
7490 		break;
7491 	case TXRX_RX_HOST_STATS:
7492 		dp_print_pdev_rx_stats(pdev);
7493 		dp_print_soc_rx_stats(pdev->soc);
7494 		break;
7495 	case TXRX_AST_STATS:
7496 		dp_print_ast_stats(pdev->soc);
7497 		dp_print_peer_table(vdev);
7498 		break;
7499 	case TXRX_SRNG_PTR_STATS:
7500 		dp_print_ring_stats(pdev);
7501 		break;
7502 	case TXRX_RX_MON_STATS:
7503 		dp_print_pdev_rx_mon_stats(pdev);
7504 		break;
7505 	case TXRX_REO_QUEUE_STATS:
7506 		dp_get_host_peer_stats((struct cdp_soc_t *)pdev->soc,
7507 				       req->peer_addr);
7508 		break;
7509 	case TXRX_SOC_CFG_PARAMS:
7510 		dp_print_soc_cfg_params(pdev->soc);
7511 		break;
7512 	case TXRX_PDEV_CFG_PARAMS:
7513 		dp_print_pdev_cfg_params(pdev);
7514 		break;
7515 	case TXRX_NAPI_STATS:
		dp_print_napi_stats(pdev->soc);
		break;
7517 	case TXRX_SOC_INTERRUPT_STATS:
7518 		dp_print_soc_interrupt_stats(pdev->soc);
7519 		break;
7520 	default:
7521 		dp_info("Wrong Input For TxRx Host Stats");
7522 		dp_txrx_stats_help();
7523 		break;
7524 	}
7525 	return 0;
7526 }
7527 
7528 /*
7529  * dp_ppdu_ring_reset()- Reset PPDU Stats ring
7530  * @pdev: DP_PDEV handle
7531  *
7532  * Return: void
7533  */
7534 static void
7535 dp_ppdu_ring_reset(struct dp_pdev *pdev)
7536 {
7537 	struct htt_rx_ring_tlv_filter htt_tlv_filter;
7538 	int mac_id;
7539 
7540 	qdf_mem_zero(&(htt_tlv_filter), sizeof(htt_tlv_filter));
7541 
7542 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
7543 		int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id,
7544 							pdev->pdev_id);
7545 
7546 		htt_h2t_rx_ring_cfg(pdev->soc->htt_handle, mac_for_pdev,
7547 			pdev->rxdma_mon_status_ring[mac_id].hal_srng,
7548 			RXDMA_MONITOR_STATUS, RX_BUFFER_SIZE, &htt_tlv_filter);
7549 	}
7550 }
7551 
7552 /*
7553  * dp_ppdu_ring_cfg()- Configure PPDU Stats ring
7554  * @pdev: DP_PDEV handle
7555  *
7556  * Return: void
7557  */
7558 static void
7559 dp_ppdu_ring_cfg(struct dp_pdev *pdev)
7560 {
7561 	struct htt_rx_ring_tlv_filter htt_tlv_filter = {0};
7562 	int mac_id;
7563 
7564 	htt_tlv_filter.mpdu_start = 1;
7565 	htt_tlv_filter.msdu_start = 0;
7566 	htt_tlv_filter.packet = 0;
7567 	htt_tlv_filter.msdu_end = 0;
7568 	htt_tlv_filter.mpdu_end = 0;
7569 	htt_tlv_filter.attention = 0;
7570 	htt_tlv_filter.ppdu_start = 1;
7571 	htt_tlv_filter.ppdu_end = 1;
7572 	htt_tlv_filter.ppdu_end_user_stats = 1;
7573 	htt_tlv_filter.ppdu_end_user_stats_ext = 1;
7574 	htt_tlv_filter.ppdu_end_status_done = 1;
7575 	htt_tlv_filter.enable_fp = 1;
7576 	htt_tlv_filter.enable_md = 0;
7577 	if (pdev->neighbour_peers_added &&
7578 	    pdev->soc->hw_nac_monitor_support) {
7579 		htt_tlv_filter.enable_md = 1;
7580 		htt_tlv_filter.packet_header = 1;
7581 	}
7582 	if (pdev->mcopy_mode) {
7583 		htt_tlv_filter.packet_header = 1;
7584 		htt_tlv_filter.enable_mo = 1;
7585 	}
7586 	htt_tlv_filter.fp_mgmt_filter = FILTER_MGMT_ALL;
7587 	htt_tlv_filter.fp_ctrl_filter = FILTER_CTRL_ALL;
7588 	htt_tlv_filter.fp_data_filter = FILTER_DATA_ALL;
7589 	htt_tlv_filter.mo_mgmt_filter = FILTER_MGMT_ALL;
7590 	htt_tlv_filter.mo_ctrl_filter = FILTER_CTRL_ALL;
7591 	htt_tlv_filter.mo_data_filter = FILTER_DATA_ALL;
7592 	if (pdev->neighbour_peers_added &&
7593 	    pdev->soc->hw_nac_monitor_support)
7594 		htt_tlv_filter.md_data_filter = FILTER_DATA_ALL;
7595 
7596 	htt_tlv_filter.offset_valid = false;
7597 
7598 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
7599 		int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id,
7600 						pdev->pdev_id);
7601 
7602 		htt_h2t_rx_ring_cfg(pdev->soc->htt_handle, mac_for_pdev,
7603 			pdev->rxdma_mon_status_ring[mac_id].hal_srng,
7604 			RXDMA_MONITOR_STATUS, RX_BUFFER_SIZE, &htt_tlv_filter);
7605 	}
7606 }
7607 
7608 /*
 * is_ppdu_txrx_capture_enabled() - check whether pktlog and debug_sniffer
 *                              (tx sniffer/M-copy) modes are all disabled
 * @pdev: dp pdev handle
 *
 * Return: true if pktlog ppdu stats, tx sniffer and M-copy mode are all
 * disabled, false otherwise
7614  */
7615 static inline bool is_ppdu_txrx_capture_enabled(struct dp_pdev *pdev)
7616 {
	return !pdev->pktlog_ppdu_stats && !pdev->tx_sniffer_enable &&
	       !pdev->mcopy_mode;
7622 }
7623 
7624 /*
 * dp_set_bpr_enable() - API to enable/disable BPR feature
 * @pdev: DP_PDEV handle
 * @val: Provided value
 *
 * Return: QDF_STATUS_SUCCESS for success, nonzero for failure
7630  */
7631 static QDF_STATUS
7632 dp_set_bpr_enable(struct dp_pdev *pdev, int val)
7633 {
7634 	switch (val) {
7635 	case CDP_BPR_DISABLE:
7636 		pdev->bpr_enable = CDP_BPR_DISABLE;
7637 		if (!pdev->pktlog_ppdu_stats && !pdev->enhanced_stats_en &&
7638 		    !pdev->tx_sniffer_enable && !pdev->mcopy_mode) {
7639 			dp_h2t_cfg_stats_msg_send(pdev, 0, pdev->pdev_id);
7640 		} else if (pdev->enhanced_stats_en &&
7641 			   !pdev->tx_sniffer_enable && !pdev->mcopy_mode &&
7642 			   !pdev->pktlog_ppdu_stats) {
7643 			dp_h2t_cfg_stats_msg_send(pdev,
7644 						  DP_PPDU_STATS_CFG_ENH_STATS,
7645 						  pdev->pdev_id);
7646 		}
7647 		break;
7648 	case CDP_BPR_ENABLE:
7649 		pdev->bpr_enable = CDP_BPR_ENABLE;
7650 		if (!pdev->enhanced_stats_en && !pdev->tx_sniffer_enable &&
7651 		    !pdev->mcopy_mode && !pdev->pktlog_ppdu_stats) {
7652 			dp_h2t_cfg_stats_msg_send(pdev,
7653 						  DP_PPDU_STATS_CFG_BPR,
7654 						  pdev->pdev_id);
7655 		} else if (pdev->enhanced_stats_en &&
7656 			   !pdev->tx_sniffer_enable && !pdev->mcopy_mode &&
7657 			   !pdev->pktlog_ppdu_stats) {
7658 			dp_h2t_cfg_stats_msg_send(pdev,
7659 						  DP_PPDU_STATS_CFG_BPR_ENH,
7660 						  pdev->pdev_id);
7661 		} else if (pdev->pktlog_ppdu_stats) {
7662 			dp_h2t_cfg_stats_msg_send(pdev,
7663 						  DP_PPDU_STATS_CFG_BPR_PKTLOG,
7664 						  pdev->pdev_id);
7665 		}
7666 		break;
7667 	default:
7668 		break;
7669 	}
7670 
7671 	return QDF_STATUS_SUCCESS;
7672 }
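
/*
 * Informal summary of the HTT configuration chosen by dp_set_bpr_enable()
 * above, with tx_sniffer and M-copy both disabled (derived from the
 * branches, for reference only):
 *
 *	val      enh_stats  pktlog_ppdu  cfg message sent
 *	DISABLE  0          0            0 (stop PPDU stats)
 *	DISABLE  1          0            DP_PPDU_STATS_CFG_ENH_STATS
 *	ENABLE   0          0            DP_PPDU_STATS_CFG_BPR
 *	ENABLE   1          0            DP_PPDU_STATS_CFG_BPR_ENH
 *	ENABLE   x          1            DP_PPDU_STATS_CFG_BPR_PKTLOG
 */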
7673 
7674 /*
7675  * dp_pdev_tid_stats_ingress_inc
7676  * @pdev: pdev handle
 * @val: amount to increment by
7678  *
7679  * Return: void
7680  */
7681 static void
7682 dp_pdev_tid_stats_ingress_inc(struct dp_pdev *pdev, uint32_t val)
7683 {
7684 	pdev->stats.tid_stats.ingress_stack += val;
7685 }
7686 
7687 /*
7688  * dp_pdev_tid_stats_osif_drop
7689  * @pdev: pdev handle
 * @val: amount to increment by
7691  *
7692  * Return: void
7693  */
7694 static void
7695 dp_pdev_tid_stats_osif_drop(struct dp_pdev *pdev, uint32_t val)
7696 {
7697 	pdev->stats.tid_stats.osif_drop += val;
7698 }
7699 
7701 /*
7702  * dp_config_debug_sniffer()- API to enable/disable debug sniffer
7703  * @pdev: DP_PDEV handle
7704  * @val: user provided value
7705  *
 * Return: QDF_STATUS_SUCCESS on success, nonzero error status on failure
7707  */
7708 static QDF_STATUS
7709 dp_config_debug_sniffer(struct cdp_pdev *pdev_handle, int val)
7710 {
7711 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
7712 	QDF_STATUS status = QDF_STATUS_SUCCESS;
7713 
7714 	if (pdev->mcopy_mode)
7715 		dp_reset_monitor_mode(pdev_handle);
7716 
7717 	switch (val) {
7718 	case 0:
7719 		pdev->tx_sniffer_enable = 0;
7720 
7721 		pdev->monitor_configured = false;
7722 
7723 		if (!pdev->pktlog_ppdu_stats && !pdev->enhanced_stats_en &&
7724 		    !pdev->bpr_enable) {
7725 			dp_h2t_cfg_stats_msg_send(pdev, 0, pdev->pdev_id);
7726 			dp_ppdu_ring_reset(pdev);
7727 		} else if (pdev->enhanced_stats_en && !pdev->bpr_enable) {
7728 			dp_h2t_cfg_stats_msg_send(pdev,
7729 				DP_PPDU_STATS_CFG_ENH_STATS, pdev->pdev_id);
7730 		} else if (!pdev->enhanced_stats_en && pdev->bpr_enable) {
7731 			dp_h2t_cfg_stats_msg_send(pdev,
7732 						  DP_PPDU_STATS_CFG_BPR_ENH,
7733 						  pdev->pdev_id);
7734 		} else {
7735 			dp_h2t_cfg_stats_msg_send(pdev,
7736 						  DP_PPDU_STATS_CFG_BPR,
7737 						  pdev->pdev_id);
7738 		}
7739 		break;
7740 
7741 	case 1:
7742 		pdev->tx_sniffer_enable = 1;
7743 		pdev->monitor_configured = false;
7744 
7745 		if (!pdev->pktlog_ppdu_stats)
7746 			dp_h2t_cfg_stats_msg_send(pdev,
7747 				DP_PPDU_STATS_CFG_SNIFFER, pdev->pdev_id);
7748 		break;
7749 	case 2:
7750 		if (pdev->monitor_vdev) {
7751 			status = QDF_STATUS_E_RESOURCES;
7752 			break;
7753 		}
7754 
7755 		pdev->mcopy_mode = 1;
7756 		dp_pdev_configure_monitor_rings(pdev);
7757 		pdev->monitor_configured = true;
7758 		pdev->tx_sniffer_enable = 0;
7759 
7760 		if (!pdev->pktlog_ppdu_stats)
7761 			dp_h2t_cfg_stats_msg_send(pdev,
7762 				DP_PPDU_STATS_CFG_SNIFFER, pdev->pdev_id);
7763 		break;
7764 
7765 	default:
7766 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
7767 			"Invalid value");
7768 		break;
7769 	}
7770 	return status;
7771 }
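
/*
 * Illustrative usage sketch (the pdev handle is assumed): val selects the
 * sniffer mode - 0 disables tx sniffer and M-copy, 1 enables the tx
 * sniffer and 2 enables M-copy mode.
 *
 *	QDF_STATUS status;
 *
 *	status = dp_config_debug_sniffer(pdev_handle, 2);
 *	if (QDF_IS_STATUS_ERROR(status))
 *		dp_err("M-copy enable failed");
 */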
7772 
7773 /*
 * dp_enable_enhanced_stats()- API to enable enhanced statistics
7775  * @soc_handle: DP_SOC handle
7776  * @pdev_id: id of DP_PDEV handle
7777  *
7778  * Return: QDF_STATUS
7779  */
7780 static QDF_STATUS
7781 dp_enable_enhanced_stats(struct cdp_soc_t *soc, uint8_t pdev_id)
7782 {
7783 	struct dp_pdev *pdev =
7784 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
7785 						   pdev_id);
7786 
7787 	if (!pdev)
7788 		return QDF_STATUS_E_FAILURE;
7789 
7790 	if (pdev->enhanced_stats_en == 0)
7791 		dp_cal_client_timer_start(pdev->cal_client_ctx);
7792 
7793 	pdev->enhanced_stats_en = 1;
7794 
7795 	if (!pdev->mcopy_mode && !pdev->neighbour_peers_added &&
7796 	    !pdev->monitor_vdev)
7797 		dp_ppdu_ring_cfg(pdev);
7798 
7799 	if (is_ppdu_txrx_capture_enabled(pdev) && !pdev->bpr_enable) {
		dp_h2t_cfg_stats_msg_send(pdev, DP_PPDU_STATS_CFG_ENH_STATS,
					  pdev->pdev_id);
7801 	} else if (is_ppdu_txrx_capture_enabled(pdev) && pdev->bpr_enable) {
7802 		dp_h2t_cfg_stats_msg_send(pdev,
7803 					  DP_PPDU_STATS_CFG_BPR_ENH,
7804 					  pdev->pdev_id);
7805 	}
7806 
7807 	return QDF_STATUS_SUCCESS;
7808 }
7809 
7810 /*
 * dp_disable_enhanced_stats()- API to disable enhanced statistics
 * @soc: the soc handle
 * @pdev_id: pdev_id of pdev
 *
 * Return: QDF_STATUS
7816  */
7817 static QDF_STATUS
7818 dp_disable_enhanced_stats(struct cdp_soc_t *soc, uint8_t pdev_id)
7819 {
7820 	struct dp_pdev *pdev =
7821 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
7822 						   pdev_id);
7823 
7824 	if (!pdev)
7825 		return QDF_STATUS_E_FAILURE;
7826 
7827 	if (pdev->enhanced_stats_en == 1)
7828 		dp_cal_client_timer_stop(pdev->cal_client_ctx);
7829 
7830 	pdev->enhanced_stats_en = 0;
7831 
7832 	if (is_ppdu_txrx_capture_enabled(pdev) && !pdev->bpr_enable) {
7833 		dp_h2t_cfg_stats_msg_send(pdev, 0, pdev->pdev_id);
7834 	} else if (is_ppdu_txrx_capture_enabled(pdev) && pdev->bpr_enable) {
7835 		dp_h2t_cfg_stats_msg_send(pdev,
7836 					  DP_PPDU_STATS_CFG_BPR,
7837 					  pdev->pdev_id);
7838 	}
7839 
7840 	if (!pdev->mcopy_mode && !pdev->neighbour_peers_added &&
7841 	    !pdev->monitor_vdev)
7842 		dp_ppdu_ring_reset(pdev);
7843 
7844 	return QDF_STATUS_SUCCESS;
7845 }
7846 
7847 /*
7848  * dp_get_fw_peer_stats()- function to print peer stats
7849  * @soc: soc handle
7850  * @pdev_id : id of the pdev handle
7851  * @mac_addr: mac address of the peer
7852  * @cap: Type of htt stats requested
7853  * @is_wait: if set, wait on completion from firmware response
7854  *
 * Currently supports only MAC ID based requests, where cap is one of:
7856  *	1: HTT_PEER_STATS_REQ_MODE_NO_QUERY
7857  *	2: HTT_PEER_STATS_REQ_MODE_QUERY_TQM
7858  *	3: HTT_PEER_STATS_REQ_MODE_FLUSH_TQM
7859  *
7860  * Return: QDF_STATUS
7861  */
7862 static QDF_STATUS
7863 dp_get_fw_peer_stats(struct cdp_soc_t *soc, uint8_t pdev_id,
7864 		     uint8_t *mac_addr,
7865 		     uint32_t cap, uint32_t is_wait)
7866 {
7867 	int i;
7868 	uint32_t config_param0 = 0;
7869 	uint32_t config_param1 = 0;
7870 	uint32_t config_param2 = 0;
7871 	uint32_t config_param3 = 0;
7872 	struct dp_pdev *pdev =
7873 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
7874 						   pdev_id);
7875 
7876 	if (!pdev)
7877 		return QDF_STATUS_E_FAILURE;
7878 
7879 	HTT_DBG_EXT_STATS_PEER_INFO_IS_MAC_ADDR_SET(config_param0, 1);
7880 	config_param0 |= (1 << (cap + 1));
7881 
7882 	for (i = 0; i < HTT_PEER_STATS_MAX_TLV; i++) {
7883 		config_param1 |= (1 << i);
7884 	}
7885 
7886 	config_param2 |= (mac_addr[0] & 0x000000ff);
7887 	config_param2 |= ((mac_addr[1] << 8) & 0x0000ff00);
7888 	config_param2 |= ((mac_addr[2] << 16) & 0x00ff0000);
7889 	config_param2 |= ((mac_addr[3] << 24) & 0xff000000);
7890 
7891 	config_param3 |= (mac_addr[4] & 0x000000ff);
7892 	config_param3 |= ((mac_addr[5] << 8) & 0x0000ff00);
7893 
7894 	if (is_wait) {
7895 		qdf_event_reset(&pdev->fw_peer_stats_event);
7896 		dp_h2t_ext_stats_msg_send(pdev, HTT_DBG_EXT_STATS_PEER_INFO,
7897 					  config_param0, config_param1,
7898 					  config_param2, config_param3,
7899 					  0, 1, 0);
7900 		qdf_wait_single_event(&pdev->fw_peer_stats_event,
7901 				      DP_FW_PEER_STATS_CMP_TIMEOUT_MSEC);
7902 	} else {
7903 		dp_h2t_ext_stats_msg_send(pdev, HTT_DBG_EXT_STATS_PEER_INFO,
7904 					  config_param0, config_param1,
7905 					  config_param2, config_param3,
7906 					  0, 0, 0);
7907 	}
7908 
7909 	return QDF_STATUS_SUCCESS;
7911 }
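
/*
 * Worked example of the MAC address packing above (address illustrative):
 * for peer MAC 00:03:7f:12:34:56,
 *
 *	config_param2 = mac[3] << 24 | mac[2] << 16 | mac[1] << 8 | mac[0]
 *		      = 0x127f0300
 *	config_param3 = mac[5] << 8 | mac[4]
 *		      = 0x5634
 *
 * i.e. the first four octets land in config_param2 and the remaining two
 * in the low half of config_param3.
 */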
7912 
7913 /* This struct definition will be removed from here
 * once it gets added to the FW headers */
7915 struct httstats_cmd_req {
7916     uint32_t    config_param0;
7917     uint32_t    config_param1;
7918     uint32_t    config_param2;
7919     uint32_t    config_param3;
7920     int cookie;
7921     u_int8_t    stats_id;
7922 };
7923 
7924 /*
 * dp_get_htt_stats: function to process the httstats request
7926  * @soc: DP soc handle
7927  * @pdev_id: id of pdev handle
7928  * @data: pointer to request data
7929  * @data_len: length for request data
7930  *
7931  * return: QDF_STATUS
7932  */
7933 static QDF_STATUS
7934 dp_get_htt_stats(struct cdp_soc_t *soc, uint8_t pdev_id, void *data,
7935 		 uint32_t data_len)
7936 {
7937 	struct httstats_cmd_req *req = (struct httstats_cmd_req *)data;
7938 	struct dp_pdev *pdev =
7939 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
7940 						   pdev_id);
7941 
7942 	if (!pdev)
7943 		return QDF_STATUS_E_FAILURE;
7944 
7945 	QDF_ASSERT(data_len == sizeof(struct httstats_cmd_req));
7946 	dp_h2t_ext_stats_msg_send(pdev, req->stats_id,
7947 				req->config_param0, req->config_param1,
7948 				req->config_param2, req->config_param3,
7949 				req->cookie, 0, 0);
7950 
7951 	return QDF_STATUS_SUCCESS;
7952 }
7953 
7954 /*
7955  * dp_set_pdev_param: function to set parameters in pdev
7956  * @pdev_handle: DP pdev handle
7957  * @param: parameter type to be set
7958  * @val: value of parameter to be set
7959  *
 * Return: QDF_STATUS_SUCCESS on success, error status on failure
7961  */
7962 static QDF_STATUS dp_set_pdev_param(struct cdp_pdev *pdev_handle,
7963 				    enum cdp_pdev_param_type param,
7964 				    uint32_t val)
7965 {
7966 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
7967 	switch (param) {
7968 	case CDP_CONFIG_DEBUG_SNIFFER:
7969 		return dp_config_debug_sniffer(pdev_handle, val);
7970 	case CDP_CONFIG_BPR_ENABLE:
7971 		return dp_set_bpr_enable(pdev, val);
7972 	case CDP_CONFIG_PRIMARY_RADIO:
7973 		pdev->is_primary = val;
7974 		break;
7975 	case CDP_CONFIG_CAPTURE_LATENCY:
7976 		if (val == 1)
7977 			pdev->latency_capture_enable = true;
7978 		else
7979 			pdev->latency_capture_enable = false;
7980 		break;
7981 	case CDP_INGRESS_STATS:
7982 		dp_pdev_tid_stats_ingress_inc(pdev, val);
7983 		break;
7984 	case CDP_OSIF_DROP:
7985 		dp_pdev_tid_stats_osif_drop(pdev, val);
7986 		break;
7987 	case CDP_CONFIG_ENH_RX_CAPTURE:
7988 		return dp_config_enh_rx_capture(pdev_handle, val);
7989 	case CDP_CONFIG_TX_CAPTURE:
7990 		return dp_config_enh_tx_capture(pdev_handle, val);
7991 	default:
7992 		return QDF_STATUS_E_INVAL;
7993 	}
7994 	return QDF_STATUS_SUCCESS;
7995 }
7996 
7997 /*
7998  * dp_calculate_delay_stats: function to get rx delay stats
7999  * @vdev_handle: DP vdev handle
8000  * @nbuf: skb
8001  *
8002  * Return: void
8003  */
8004 static void dp_calculate_delay_stats(struct cdp_vdev *vdev_handle,
8005 				     qdf_nbuf_t nbuf)
8006 {
8007 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
8008 
8009 	dp_rx_compute_delay(vdev, nbuf);
8010 }
8011 
8012 /*
8013  * dp_get_vdev_param: function to get parameters from vdev
 * @vdev_handle: DP vdev handle
 * @param: parameter type to get value
 *
 * return: parameter value
8017  */
8018 static uint32_t dp_get_vdev_param(struct cdp_vdev *vdev_handle,
8019 				  enum cdp_vdev_param_type param)
8020 {
8021 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
8022 	uint32_t val;
8023 
8024 	switch (param) {
8025 	case CDP_ENABLE_WDS:
8026 		val = vdev->wds_enabled;
8027 		break;
8028 	case CDP_ENABLE_MEC:
8029 		val = vdev->mec_enabled;
8030 		break;
8031 	case CDP_ENABLE_DA_WAR:
8032 		val = vdev->pdev->soc->da_war_enabled;
8033 		break;
8034 	default:
8035 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
8036 			  "param value %d is wrong\n",
8037 			  param);
8038 		val = -1;
8039 		break;
8040 	}
8041 
8042 	return val;
8043 }
8044 
8045 /*
8046  * dp_set_vdev_param: function to set parameters in vdev
 * @vdev_handle: DP vdev handle
 * @param: parameter type to be set
 * @val: value of parameter to be set
8049  *
8050  * return: void
8051  */
8052 static void dp_set_vdev_param(struct cdp_vdev *vdev_handle,
8053 		enum cdp_vdev_param_type param, uint32_t val)
8054 {
8055 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
8056 	switch (param) {
8057 	case CDP_ENABLE_WDS:
8058 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
8059 			  "wds_enable %d for vdev(%pK) id(%d)\n",
8060 			  val, vdev, vdev->vdev_id);
8061 		vdev->wds_enabled = val;
8062 		break;
8063 	case CDP_ENABLE_MEC:
8064 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
8065 			  "mec_enable %d for vdev(%pK) id(%d)\n",
8066 			  val, vdev, vdev->vdev_id);
8067 		vdev->mec_enabled = val;
8068 		break;
8069 	case CDP_ENABLE_DA_WAR:
8070 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
8071 			  "da_war_enable %d for vdev(%pK) id(%d)\n",
8072 			  val, vdev, vdev->vdev_id);
8073 		vdev->pdev->soc->da_war_enabled = val;
8074 		dp_wds_flush_ast_table_wifi3(((struct cdp_soc_t *)
8075 					     vdev->pdev->soc));
8076 		break;
8077 	case CDP_ENABLE_NAWDS:
8078 		vdev->nawds_enabled = val;
8079 		break;
8080 	case CDP_ENABLE_MCAST_EN:
8081 		vdev->mcast_enhancement_en = val;
8082 		break;
8083 	case CDP_ENABLE_PROXYSTA:
8084 		vdev->proxysta_vdev = val;
8085 		break;
8086 	case CDP_UPDATE_TDLS_FLAGS:
8087 		vdev->tdls_link_connected = val;
8088 		break;
8089 	case CDP_CFG_WDS_AGING_TIMER:
8090 		if (val == 0)
8091 			qdf_timer_stop(&vdev->pdev->soc->ast_aging_timer);
8092 		else if (val != vdev->wds_aging_timer_val)
8093 			qdf_timer_mod(&vdev->pdev->soc->ast_aging_timer, val);
8094 
8095 		vdev->wds_aging_timer_val = val;
8096 		break;
8097 	case CDP_ENABLE_AP_BRIDGE:
8098 		if (wlan_op_mode_sta != vdev->opmode)
8099 			vdev->ap_bridge_enabled = val;
8100 		else
8101 			vdev->ap_bridge_enabled = false;
8102 		break;
8103 	case CDP_ENABLE_CIPHER:
8104 		vdev->sec_type = val;
8105 		break;
8106 	case CDP_ENABLE_QWRAP_ISOLATION:
8107 		vdev->isolation_vdev = val;
8108 		break;
8109 	case CDP_UPDATE_MULTIPASS:
8110 		vdev->multipass_en = val;
8111 		break;
8112 	default:
8113 		break;
8114 	}
8115 
8116 	dp_tx_vdev_update_search_flags(vdev);
8117 }
8118 
8119 /**
8120  * dp_peer_set_nawds: set nawds bit in peer
8121  * @peer_handle: pointer to peer
8122  * @value: enable/disable nawds
8123  *
8124  * return: void
8125  */
8126 static void dp_peer_set_nawds(struct cdp_peer *peer_handle, uint8_t value)
8127 {
8128 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
8129 	peer->nawds_enabled = value;
8130 }
8131 
8132 /**
8133  * dp_peer_set_tx_capture_enabled: Set tx_cap_enabled bit in peer
8134  * @peer_handle: Peer handle
8135  * @value: Enable/disable setting for tx_cap_enabled
8136  *
8137  * Return: None
8138  */
8139 static void
8140 dp_peer_set_tx_capture_enabled(struct cdp_peer *peer_handle, bool value)
8141 {
8142 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
8143 
8144 	peer->tx_cap_enabled = value;
8145 }
8146 
8147 /**
8148  * dp_peer_set_rx_capture_enabled: Set rx_cap_enabled bit in peer
8149  * @peer_handle: Peer handle
8150  * @value: Enable/disable setting for rx_cap_enabled
8151  *
8152  * Return: None
8153  */
8154 static void
8155 dp_peer_set_rx_capture_enabled(struct cdp_peer *peer_handle, bool value)
8156 {
8157 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
8158 
8159 	peer->rx_cap_enabled = value;
8160 }
8161 
8162 /**
8163  * dp_peer_update_pkt_capture_params: Set Rx & Tx Capture flags for a peer
 * @pdev: DP PDEV handle
 * @is_rx_pkt_cap_enable: enable/disable Rx packet capture in monitor mode
 * @is_tx_pkt_cap_enable: enable/disable Tx packet capture in monitor mode
 * @peer_mac: MAC address for which the above need to be enabled/disabled
 *
 * Return: QDF_STATUS_SUCCESS if Rx & Tx capture is updated for the peer,
 * QDF_STATUS_E_FAILURE otherwise
8169  */
8170 QDF_STATUS
8171 dp_peer_update_pkt_capture_params(struct cdp_pdev *pdev,
8172 				  bool is_rx_pkt_cap_enable,
8173 				  bool is_tx_pkt_cap_enable,
8174 				  uint8_t *peer_mac)
8176 {
8177 	struct dp_peer *peer;
8178 
8179 	peer = (struct dp_peer *)dp_find_peer_by_addr(pdev,
8180 			peer_mac);
8181 
8182 	if (!peer) {
8183 		dp_err("Invalid Peer");
8184 		return QDF_STATUS_E_FAILURE;
8185 	}
8186 
8187 	dp_peer_set_rx_capture_enabled((struct cdp_peer *)peer,
8188 				       is_rx_pkt_cap_enable);
8189 	dp_peer_set_tx_capture_enabled((struct cdp_peer *)peer,
8190 				       is_tx_pkt_cap_enable);
8191 	return QDF_STATUS_SUCCESS;
8192 }
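
/*
 * Illustrative usage sketch (the pdev handle and the peer MAC are
 * assumed): enabling Rx-only capture for a single peer in monitor mode.
 *
 *	uint8_t mac[QDF_MAC_ADDR_SIZE] = {0x00, 0x03, 0x7f, 0xaa, 0xbb, 0xcc};
 *
 *	dp_peer_update_pkt_capture_params(pdev, true, false, mac);
 */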
8193 
8194 /*
8195  * dp_set_vdev_dscp_tid_map_wifi3(): Update Map ID selected for particular vdev
8196  * @soc: DP_SOC handle
8197  * @vdev_id: id of DP_VDEV handle
 * @map_id: ID of map that needs to be updated
8199  *
8200  * Return: QDF_STATUS
8201  */
8202 static QDF_STATUS dp_set_vdev_dscp_tid_map_wifi3(ol_txrx_soc_handle soc,
8203 						 uint8_t vdev_id,
8204 						 uint8_t map_id)
8205 {
8206 	struct dp_vdev *vdev =
8207 		dp_get_vdev_from_soc_vdev_id_wifi3((struct dp_soc *)soc,
8208 						   vdev_id);
8209 	if (vdev) {
8210 		vdev->dscp_tid_map_id = map_id;
8211 		return QDF_STATUS_SUCCESS;
8212 	}
8213 
8214 	return QDF_STATUS_E_FAILURE;
8215 }
8216 
8217 #ifdef DP_RATETABLE_SUPPORT
8218 static int dp_txrx_get_ratekbps(int preamb, int mcs,
8219 				int htflag, int gintval)
8220 {
8221 	uint32_t rix;
8222 	uint16_t ratecode;
8223 
8224 	return dp_getrateindex((uint32_t)gintval, (uint16_t)mcs, 1,
8225 			       (uint8_t)preamb, 1, &rix, &ratecode);
8226 }
8227 #else
8228 static int dp_txrx_get_ratekbps(int preamb, int mcs,
8229 				int htflag, int gintval)
8230 {
8231 	return 0;
8232 }
8233 #endif
8234 
8235 /* dp_txrx_get_pdev_stats - Returns cdp_pdev_stats
8236  * @soc: DP soc handle
8237  * @pdev_id: id of DP pdev handle
8238  * @pdev_stats: buffer to copy to
8239  *
 * return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_FAILURE on failure
8241  */
8242 static QDF_STATUS
8243 dp_txrx_get_pdev_stats(struct cdp_soc_t *soc, uint8_t pdev_id,
8244 		       struct cdp_pdev_stats *pdev_stats)
8245 {
8246 	struct dp_pdev *pdev =
8247 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
8248 						   pdev_id);
8249 	if (!pdev)
8250 		return QDF_STATUS_E_FAILURE;
8251 
8252 	dp_aggregate_pdev_stats(pdev);
8253 
8254 	qdf_mem_copy(pdev_stats, &pdev->stats, sizeof(struct cdp_pdev_stats));
8255 	return QDF_STATUS_SUCCESS;
8256 }
8257 
8258 /* dp_txrx_update_vdev_me_stats(): Update vdev ME stats sent from CDP
8259  * @vdev_handle: DP vdev handle
8260  * @buf: buffer containing specific stats structure
8261  *
8262  * Returns: void
8263  */
8264 static void dp_txrx_update_vdev_me_stats(struct dp_vdev *vdev,
8265 					 void *buf)
8266 {
8267 	struct cdp_tx_ingress_stats *host_stats = NULL;
8268 
8269 	if (!buf) {
8270 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
8271 			  "Invalid host stats buf");
8272 		return;
8273 	}
8274 	host_stats = (struct cdp_tx_ingress_stats *)buf;
8275 
8276 	DP_STATS_INC_PKT(vdev, tx_i.mcast_en.mcast_pkt,
8277 			 host_stats->mcast_en.mcast_pkt.num,
8278 			 host_stats->mcast_en.mcast_pkt.bytes);
8279 	DP_STATS_INC(vdev, tx_i.mcast_en.dropped_map_error,
8280 		     host_stats->mcast_en.dropped_map_error);
8281 	DP_STATS_INC(vdev, tx_i.mcast_en.dropped_self_mac,
8282 		     host_stats->mcast_en.dropped_self_mac);
8283 	DP_STATS_INC(vdev, tx_i.mcast_en.dropped_send_fail,
8284 		     host_stats->mcast_en.dropped_send_fail);
8285 	DP_STATS_INC(vdev, tx_i.mcast_en.ucast,
8286 		     host_stats->mcast_en.ucast);
8287 	DP_STATS_INC(vdev, tx_i.mcast_en.fail_seg_alloc,
8288 		     host_stats->mcast_en.fail_seg_alloc);
8289 	DP_STATS_INC(vdev, tx_i.mcast_en.clone_fail,
8290 		     host_stats->mcast_en.clone_fail);
8291 }
8292 
8293 /* dp_txrx_update_vdev_host_stats(): Update stats sent through CDP
8294  * @soc: DP soc handle
8295  * @vdev_id: id of DP vdev handle
8296  * @buf: buffer containing specific stats structure
8297  * @stats_id: stats type
8298  *
8299  * Returns: QDF_STATUS
8300  */
8301 static QDF_STATUS dp_txrx_update_vdev_host_stats(struct cdp_soc_t *soc,
8302 						 uint8_t vdev_id,
8303 						 void *buf,
8304 						 uint16_t stats_id)
8305 {
8306 	struct dp_vdev *vdev =
8307 		dp_get_vdev_from_soc_vdev_id_wifi3((struct dp_soc *)soc,
8308 						   vdev_id);
8309 	if (!vdev) {
8310 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
8311 			  "Invalid vdev handle");
8312 		return QDF_STATUS_E_FAILURE;
8313 	}
8314 	switch (stats_id) {
8315 	case DP_VDEV_STATS_PKT_CNT_ONLY:
8316 		break;
8317 	case DP_VDEV_STATS_TX_ME:
8318 		dp_txrx_update_vdev_me_stats(vdev, buf);
8319 		break;
8320 	default:
8321 		qdf_info("Invalid stats_id %d", stats_id);
8322 		break;
8323 	}
8324 
8325 	return QDF_STATUS_SUCCESS;
8326 }
8327 
8328 /* dp_txrx_get_peer_stats - will return cdp_peer_stats
8329  * @soc: soc handle
8330  * @vdev_id: id of vdev handle
8331  * @peer_mac: mac of DP_PEER handle
8332  * @peer_stats: buffer to copy to
8333  * return : status success/failure
8334  */
8335 static QDF_STATUS
8336 dp_txrx_get_peer_stats(struct cdp_soc_t *soc, uint8_t vdev_id,
8337 		       uint8_t *peer_mac, struct cdp_peer_stats *peer_stats)
8338 {
8339 	QDF_STATUS status = QDF_STATUS_SUCCESS;
8340 	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)soc,
8341 						       peer_mac, 0, vdev_id);
8342 
8343 	if (!peer || peer->delete_in_progress) {
8344 		status = QDF_STATUS_E_FAILURE;
8345 		goto fail;
	}

	qdf_mem_copy(peer_stats, &peer->stats,
		     sizeof(struct cdp_peer_stats));
8349 
8350 fail:
8351 	if (peer)
8352 		dp_peer_unref_delete(peer);
8353 
8354 	return status;
8355 }
8356 
8357 /* dp_txrx_reset_peer_stats - reset cdp_peer_stats for particular peer
8358  * @soc: soc handle
8359  * @vdev_id: id of vdev handle
8360  * @peer_mac: mac of DP_PEER handle
8361  *
8362  * return : QDF_STATUS
8363  */
8364 static QDF_STATUS
8365 dp_txrx_reset_peer_stats(struct cdp_soc_t *soc, uint8_t vdev_id,
8366 			 uint8_t *peer_mac)
8367 {
8368 	QDF_STATUS status = QDF_STATUS_SUCCESS;
8369 	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)soc,
8370 						       peer_mac, 0, vdev_id);
8371 
8372 	if (!peer || peer->delete_in_progress) {
8373 		status = QDF_STATUS_E_FAILURE;
8374 		goto fail;
8375 	}
8376 
8377 	qdf_mem_zero(&peer->stats, sizeof(peer->stats));
8378 
8379 fail:
8380 	if (peer)
8381 		dp_peer_unref_delete(peer);
8382 
8383 	return status;
8384 }
8385 
8386 /* dp_txrx_get_vdev_stats - Update buffer with cdp_vdev_stats
 * @soc: DP soc handle
 * @vdev_id: id of DP vdev handle
 * @buf: buffer for vdev stats
 * @is_aggregate: whether to aggregate per-peer stats into the buffer
 *
 * return: 0 on success, 1 on failure
8391  */
8392 static int dp_txrx_get_vdev_stats(struct cdp_soc_t *soc, uint8_t vdev_id,
8393 				   void *buf, bool is_aggregate)
8394 {
8395 	struct cdp_vdev_stats *vdev_stats;
8396 	struct dp_pdev *pdev;
8397 	struct dp_vdev *vdev =
8398 		dp_get_vdev_from_soc_vdev_id_wifi3((struct dp_soc *)soc,
8399 						   vdev_id);
8400 
8401 	if (!vdev)
8402 		return 1;
8403 
8404 	pdev = vdev->pdev;
8405 	if (!pdev)
8406 		return 1;
8407 
8408 	vdev_stats = (struct cdp_vdev_stats *)buf;
8409 
8410 	if (is_aggregate) {
8411 		qdf_spin_lock_bh(&((struct dp_soc *)soc)->peer_ref_mutex);
8412 		dp_aggregate_vdev_stats(vdev, buf);
8413 		qdf_spin_unlock_bh(&((struct dp_soc *)soc)->peer_ref_mutex);
8414 	} else {
8415 		qdf_mem_copy(vdev_stats, &vdev->stats, sizeof(vdev->stats));
8416 	}
8417 
8418 	return 0;
8419 }
8420 
8421 /*
 * dp_get_total_per(): get total PER (packet error rate)
8423  * @soc: DP soc handle
8424  * @pdev_id: id of DP_PDEV handle
8425  *
8426  * Return: % error rate using retries per packet and success packets
8427  */
8428 static int dp_get_total_per(struct cdp_soc_t *soc, uint8_t pdev_id)
8429 {
8430 	struct dp_pdev *pdev =
8431 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
8432 						   pdev_id);
8433 
8434 	if (!pdev)
8435 		return 0;
8436 
8437 	dp_aggregate_pdev_stats(pdev);
8438 	if ((pdev->stats.tx.tx_success.num + pdev->stats.tx.retries) == 0)
8439 		return 0;
8440 	return ((pdev->stats.tx.retries * 100) /
8441 		((pdev->stats.tx.tx_success.num) + (pdev->stats.tx.retries)));
8442 }
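
/*
 * Worked example for the PER formula above (numbers illustrative): with
 * tx_success.num = 900 and retries = 100,
 *
 *	PER = (100 * 100) / (900 + 100) = 10
 *
 * i.e. a 10% packet error rate; the guard above returns 0 before any
 * frame has been transmitted.
 */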
8443 
8444 /*
8445  * dp_txrx_stats_publish(): publish pdev stats into a buffer
8446  * @soc: DP soc handle
8447  * @pdev_id: id of DP_PDEV handle
8448  * @buf: to hold pdev_stats
8449  *
8450  * Return: int
8451  */
8452 static int
8453 dp_txrx_stats_publish(struct cdp_soc_t *soc, uint8_t pdev_id,
8454 		      struct cdp_stats_extd *buf)
8455 {
8456 	struct cdp_txrx_stats_req req = {0,};
8457 	struct dp_pdev *pdev =
8458 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
8459 						   pdev_id);
8460 
8461 	if (!pdev)
8462 		return TXRX_STATS_LEVEL_OFF;
8463 
8464 	dp_aggregate_pdev_stats(pdev);
8465 	req.stats = (enum cdp_stats)HTT_DBG_EXT_STATS_PDEV_TX;
8466 	req.cookie_val = 1;
8467 	dp_h2t_ext_stats_msg_send(pdev, req.stats, req.param0,
8468 				req.param1, req.param2, req.param3, 0,
8469 				req.cookie_val, 0);
8470 
8471 	msleep(DP_MAX_SLEEP_TIME);
8472 
8473 	req.stats = (enum cdp_stats)HTT_DBG_EXT_STATS_PDEV_RX;
8474 	req.cookie_val = 1;
8475 	dp_h2t_ext_stats_msg_send(pdev, req.stats, req.param0,
8476 				req.param1, req.param2, req.param3, 0,
8477 				req.cookie_val, 0);
8478 
8479 	msleep(DP_MAX_SLEEP_TIME);
8480 	qdf_mem_copy(buf, &pdev->stats, sizeof(struct cdp_stats_extd));
8481 
8482 	return TXRX_STATS_LEVEL;
8483 }
8484 
8485 /**
8486  * dp_set_pdev_dscp_tid_map_wifi3(): update dscp tid map in pdev
8487  * @soc: soc handle
8488  * @pdev_id: id of DP_PDEV handle
8489  * @map_id: ID of map that needs to be updated
8490  * @tos: index value in map
8491  * @tid: tid value passed by the user
8492  *
8493  * Return: QDF_STATUS
8494  */
8495 static QDF_STATUS
8496 dp_set_pdev_dscp_tid_map_wifi3(struct cdp_soc_t *soc_handle,
8497 			       uint8_t pdev_id,
8498 			       uint8_t map_id,
8499 			       uint8_t tos, uint8_t tid)
8500 {
8501 	uint8_t dscp;
8502 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
8503 	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
8504 
8505 	if (!pdev)
8506 		return QDF_STATUS_E_FAILURE;
8507 
8508 	dscp = (tos >> DP_IP_DSCP_SHIFT) & DP_IP_DSCP_MASK;
8509 	pdev->dscp_tid_map[map_id][dscp] = tid;
8510 
8511 	if (map_id < soc->num_hw_dscp_tid_map)
8512 		hal_tx_update_dscp_tid(soc->hal_soc, tid,
8513 				       map_id, dscp);
8514 	else
8515 		return QDF_STATUS_E_FAILURE;
8516 
8517 	return QDF_STATUS_SUCCESS;
8518 }
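
/*
 * Worked example of the DSCP extraction above, assuming DP_IP_DSCP_SHIFT
 * is 2 and DP_IP_DSCP_MASK is 0x3f: for tos = 0xb8 (the EF codepoint),
 *
 *	dscp = (0xb8 >> 2) & 0x3f = 46
 *
 * so dp_set_pdev_dscp_tid_map_wifi3(soc, pdev_id, map_id, 0xb8, 6) would
 * steer EF-marked traffic to TID 6 (values illustrative).
 */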
8519 
8520 /**
8521  * dp_hmmc_tid_override_en_wifi3(): Function to enable hmmc tid override.
8522  * @pdev_handle: pdev handle
8523  * @val: hmmc-dscp flag value
8524  *
8525  * Return: void
8526  */
8527 static void dp_hmmc_tid_override_en_wifi3(struct cdp_pdev *pdev_handle,
8528 					  bool val)
8529 {
8530 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
8531 
8532 	pdev->hmmc_tid_override_en = val;
8533 }
8534 
8535 /**
8536  * dp_set_hmmc_tid_val_wifi3(): Function to set hmmc tid value.
8537  * @pdev_handle: pdev handle
8538  * @tid: tid value
8539  *
8540  * Return: void
8541  */
8542 static void dp_set_hmmc_tid_val_wifi3(struct cdp_pdev *pdev_handle,
8543 				      uint8_t tid)
8544 {
8545 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
8546 
8547 	pdev->hmmc_tid = tid;
8548 }
8549 
8550 /**
8551  * dp_fw_stats_process(): Process TxRX FW stats request
 * @vdev: DP VDEV handle
8553  * @req: stats request
8554  *
8555  * return: int
8556  */
8557 static int dp_fw_stats_process(struct dp_vdev *vdev,
8558 			       struct cdp_txrx_stats_req *req)
8559 {
8560 	struct dp_pdev *pdev = NULL;
8561 	uint32_t stats = req->stats;
8562 	uint8_t mac_id = req->mac_id;
8563 
8564 	if (!vdev) {
8565 		DP_TRACE(NONE, "VDEV not found");
8566 		return 1;
8567 	}
8568 	pdev = vdev->pdev;
8569 
8570 	/*
	 * For the HTT_DBG_EXT_STATS_RESET command, the FW expects
	 * config_param0 to config_param3 to be set according to the
	 * following rule:
8573 	 *
8574 	 * PARAM:
8575 	 *   - config_param0 : start_offset (stats type)
8576 	 *   - config_param1 : stats bmask from start offset
8577 	 *   - config_param2 : stats bmask from start offset + 32
8578 	 *   - config_param3 : stats bmask from start offset + 64
8579 	 */
8580 	if (req->stats == CDP_TXRX_STATS_0) {
8581 		req->param0 = HTT_DBG_EXT_STATS_PDEV_TX;
8582 		req->param1 = 0xFFFFFFFF;
8583 		req->param2 = 0xFFFFFFFF;
8584 		req->param3 = 0xFFFFFFFF;
8585 	} else if (req->stats == (uint8_t)HTT_DBG_EXT_STATS_PDEV_TX_MU) {
8586 		req->param0 = HTT_DBG_EXT_STATS_SET_VDEV_MASK(vdev->vdev_id);
8587 	}
8588 
8589 	return dp_h2t_ext_stats_msg_send(pdev, stats, req->param0,
8590 				req->param1, req->param2, req->param3,
8591 				0, 0, mac_id);
8592 }
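
/*
 * Illustrative sketch of the reset-command parameter layout described in
 * the comment above: to reset only the first 32 stats TLVs starting at
 * HTT_DBG_EXT_STATS_PDEV_TX, a request could be populated as
 *
 *	req->param0 = HTT_DBG_EXT_STATS_PDEV_TX;   (start_offset)
 *	req->param1 = 0xFFFFFFFF;                  (bits 0..31 from offset)
 *	req->param2 = 0;                           (bits 32..63)
 *	req->param3 = 0;                           (bits 64..95)
 */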
8593 
8594 /**
8595  * dp_txrx_stats_request - function to map to firmware and host stats
 * @soc_handle: soc handle
8597  * @vdev_id: virtual device ID
8598  * @req: stats request
8599  *
8600  * Return: QDF_STATUS
8601  */
8602 static
8603 QDF_STATUS dp_txrx_stats_request(struct cdp_soc_t *soc_handle,
8604 				 uint8_t vdev_id,
8605 				 struct cdp_txrx_stats_req *req)
8606 {
8607 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_handle);
8608 	int host_stats;
8609 	int fw_stats;
8610 	enum cdp_stats stats;
8611 	int num_stats;
8612 	struct dp_vdev *vdev = dp_get_vdev_from_soc_vdev_id_wifi3(soc,
8613 								  vdev_id);
8614 
8615 	if (!vdev || !req) {
8616 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
8617 				"Invalid vdev/req instance");
8618 		return QDF_STATUS_E_INVAL;
8619 	}
8620 
8621 	if (req->mac_id >= WLAN_CFG_MAC_PER_TARGET) {
8622 		dp_err("Invalid mac id request");
8623 		return QDF_STATUS_E_INVAL;
8624 	}
8625 
8626 	stats = req->stats;
8627 	if (stats >= CDP_TXRX_MAX_STATS)
8628 		return QDF_STATUS_E_INVAL;
8629 
8630 	/*
	 * DP_CURR_FW_STATS_AVAIL: number of FW stats currently available;
	 *			it has to be updated if new FW HTT stats are added
8633 	 */
8634 	if (stats > CDP_TXRX_STATS_HTT_MAX)
8635 		stats = stats + DP_CURR_FW_STATS_AVAIL - DP_HTT_DBG_EXT_STATS_MAX;
8636 
8637 	num_stats  = QDF_ARRAY_SIZE(dp_stats_mapping_table);
8638 
8639 	if (stats >= num_stats) {
8640 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
8641 			  "%s: Invalid stats option: %d", __func__, stats);
8642 		return QDF_STATUS_E_INVAL;
8643 	}
8644 
8645 	req->stats = stats;
8646 	fw_stats = dp_stats_mapping_table[stats][STATS_FW];
8647 	host_stats = dp_stats_mapping_table[stats][STATS_HOST];
8648 
8649 	dp_info("stats: %u fw_stats_type: %d host_stats: %d",
8650 		stats, fw_stats, host_stats);
8651 
8652 	if (fw_stats != TXRX_FW_STATS_INVALID) {
8653 		/* update request with FW stats type */
8654 		req->stats = fw_stats;
8655 		return dp_fw_stats_process(vdev, req);
8656 	}
8657 
8658 	if ((host_stats != TXRX_HOST_STATS_INVALID) &&
8659 			(host_stats <= TXRX_HOST_STATS_MAX))
8660 		return dp_print_host_stats(vdev, req);
8661 	else
8662 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
8663 				"Wrong Input for TxRx Stats");
8664 
8665 	return QDF_STATUS_SUCCESS;
8666 }
8667 
8668 /*
 * dp_txrx_dump_stats() - Dump statistics
 * @psoc: soc handle
 * @value: Statistics option
 * @level: verbosity level
8671  */
8672 static QDF_STATUS dp_txrx_dump_stats(struct cdp_soc_t *psoc, uint16_t value,
8673 				     enum qdf_stats_verbosity_level level)
8674 {
	struct dp_soc *soc = (struct dp_soc *)psoc;
8677 	QDF_STATUS status = QDF_STATUS_SUCCESS;
8678 
8679 	if (!soc) {
8680 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
8681 			"%s: soc is NULL", __func__);
8682 		return QDF_STATUS_E_INVAL;
8683 	}
8684 
8685 	switch (value) {
8686 	case CDP_TXRX_PATH_STATS:
8687 		dp_txrx_path_stats(soc);
8688 		dp_print_soc_interrupt_stats(soc);
8689 		break;
8690 
8691 	case CDP_RX_RING_STATS:
8692 		dp_print_per_ring_stats(soc);
8693 		break;
8694 
8695 	case CDP_TXRX_TSO_STATS:
8696 		dp_print_tso_stats(soc, level);
8697 		break;
8698 
8699 	case CDP_DUMP_TX_FLOW_POOL_INFO:
8700 		if (level == QDF_STATS_VERBOSITY_LEVEL_HIGH)
8701 			cdp_dump_flow_pool_info((struct cdp_soc_t *)soc);
8702 		break;
8703 
8704 	case CDP_DP_NAPI_STATS:
8705 		dp_print_napi_stats(soc);
8706 		break;
8707 
8708 	case CDP_TXRX_DESC_STATS:
8709 		/* TODO: NOT IMPLEMENTED */
8710 		break;
8711 
8712 	default:
8713 		status = QDF_STATUS_E_INVAL;
8714 		break;
8715 	}
8716 
8717 	return status;
8719 }
8720 
8721 /**
8722  * dp_txrx_clear_dump_stats() - clear dumpStats
 * @soc_hdl: soc handle
 * @pdev_id: id of pdev handle
 * @value: stats option
8725  *
8726  * Return: 0 - Success, non-zero - failure
8727  */
8728 static
8729 QDF_STATUS dp_txrx_clear_dump_stats(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
8730 				    uint8_t value)
8731 {
8732 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
8733 	QDF_STATUS status = QDF_STATUS_SUCCESS;
8734 
8735 	if (!soc) {
8736 		dp_err("%s: soc is NULL", __func__);
8737 		return QDF_STATUS_E_INVAL;
8738 	}
8739 
8740 	switch (value) {
8741 	case CDP_TXRX_TSO_STATS:
8742 		dp_txrx_clear_tso_stats(soc);
8743 		break;
8744 
8745 	default:
8746 		status = QDF_STATUS_E_INVAL;
8747 		break;
8748 	}
8749 
8750 	return status;
8751 }
8752 
8753 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
8754 /**
8755  * dp_update_flow_control_parameters() - API to store datapath
8756  *                            config parameters
8757  * @soc: soc handle
 * @params: ini parameter handle
8759  *
8760  * Return: void
8761  */
8762 static inline
8763 void dp_update_flow_control_parameters(struct dp_soc *soc,
8764 				struct cdp_config_params *params)
8765 {
8766 	soc->wlan_cfg_ctx->tx_flow_stop_queue_threshold =
8767 					params->tx_flow_stop_queue_threshold;
8768 	soc->wlan_cfg_ctx->tx_flow_start_queue_offset =
8769 					params->tx_flow_start_queue_offset;
8770 }
8771 #else
8772 static inline
8773 void dp_update_flow_control_parameters(struct dp_soc *soc,
8774 				struct cdp_config_params *params)
8775 {
8776 }
8777 #endif
8778 
8779 #ifdef WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT
8780 /* Max packet limit for TX Comp packet loop (dp_tx_comp_handler) */
8781 #define DP_TX_COMP_LOOP_PKT_LIMIT_MAX 1024
8782 
8783 /* Max packet limit for RX REAP Loop (dp_rx_process) */
8784 #define DP_RX_REAP_LOOP_PKT_LIMIT_MAX 1024
8785 
8786 static
8787 void dp_update_rx_soft_irq_limit_params(struct dp_soc *soc,
8788 					struct cdp_config_params *params)
8789 {
8790 	soc->wlan_cfg_ctx->tx_comp_loop_pkt_limit =
8791 				params->tx_comp_loop_pkt_limit;
8792 
8793 	if (params->tx_comp_loop_pkt_limit < DP_TX_COMP_LOOP_PKT_LIMIT_MAX)
8794 		soc->wlan_cfg_ctx->tx_comp_enable_eol_data_check = true;
8795 	else
8796 		soc->wlan_cfg_ctx->tx_comp_enable_eol_data_check = false;
8797 
8798 	soc->wlan_cfg_ctx->rx_reap_loop_pkt_limit =
8799 				params->rx_reap_loop_pkt_limit;
8800 
8801 	if (params->rx_reap_loop_pkt_limit < DP_RX_REAP_LOOP_PKT_LIMIT_MAX)
8802 		soc->wlan_cfg_ctx->rx_enable_eol_data_check = true;
8803 	else
8804 		soc->wlan_cfg_ctx->rx_enable_eol_data_check = false;
8805 
8806 	soc->wlan_cfg_ctx->rx_hp_oos_update_limit =
8807 				params->rx_hp_oos_update_limit;
8808 
8809 	dp_info("tx_comp_loop_pkt_limit %u tx_comp_enable_eol_data_check %u rx_reap_loop_pkt_limit %u rx_enable_eol_data_check %u rx_hp_oos_update_limit %u",
8810 		soc->wlan_cfg_ctx->tx_comp_loop_pkt_limit,
8811 		soc->wlan_cfg_ctx->tx_comp_enable_eol_data_check,
8812 		soc->wlan_cfg_ctx->rx_reap_loop_pkt_limit,
8813 		soc->wlan_cfg_ctx->rx_enable_eol_data_check,
8814 		soc->wlan_cfg_ctx->rx_hp_oos_update_limit);
8815 }
8816 #else
8817 static inline
8818 void dp_update_rx_soft_irq_limit_params(struct dp_soc *soc,
8819 					struct cdp_config_params *params)
8820 { }
8821 #endif /* WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT */
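
/*
 * Illustrative configuration sketch (field values are examples only): a
 * caller populating cdp_config_params so that both loop limits stay below
 * the 1024 maxima above and the end-of-loop data checks remain enabled.
 *
 *	struct cdp_config_params params = {0};
 *
 *	params.tx_comp_loop_pkt_limit = 64;
 *	params.rx_reap_loop_pkt_limit = 128;
 *	params.rx_hp_oos_update_limit = 0;
 *	dp_update_config_parameters(psoc, &params);
 */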
8822 
8823 /**
8824  * dp_update_config_parameters() - API to store datapath
8825  *                            config parameters
 * @psoc: soc handle
 * @params: ini parameter handle
8828  *
8829  * Return: status
8830  */
8831 static
8832 QDF_STATUS dp_update_config_parameters(struct cdp_soc *psoc,
8833 				struct cdp_config_params *params)
8834 {
8835 	struct dp_soc *soc = (struct dp_soc *)psoc;
8836 
	if (!soc) {
8838 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
8839 				"%s: Invalid handle", __func__);
8840 		return QDF_STATUS_E_INVAL;
8841 	}
8842 
8843 	soc->wlan_cfg_ctx->tso_enabled = params->tso_enable;
8844 	soc->wlan_cfg_ctx->lro_enabled = params->lro_enable;
8845 	soc->wlan_cfg_ctx->rx_hash = params->flow_steering_enable;
8846 	soc->wlan_cfg_ctx->tcp_udp_checksumoffload =
8847 				params->tcp_udp_checksumoffload;
8848 	soc->wlan_cfg_ctx->napi_enabled = params->napi_enable;
8849 	soc->wlan_cfg_ctx->ipa_enabled = params->ipa_enable;
8850 	soc->wlan_cfg_ctx->gro_enabled = params->gro_enable;
8851 
8852 	dp_update_rx_soft_irq_limit_params(soc, params);
8853 	dp_update_flow_control_parameters(soc, params);
8854 
8855 	return QDF_STATUS_SUCCESS;
8856 }
8857 
8858 static struct cdp_wds_ops dp_ops_wds = {
8859 	.vdev_set_wds = dp_vdev_set_wds,
8860 #ifdef WDS_VENDOR_EXTENSION
8861 	.txrx_set_wds_rx_policy = dp_txrx_set_wds_rx_policy,
8862 	.txrx_wds_peer_tx_policy_update = dp_txrx_peer_wds_tx_policy_update,
8863 #endif
8864 };
8865 
8866 /*
8867  * dp_txrx_data_tx_cb_set(): set the callback for non standard tx
 * @soc_hdl: datapath soc handle
 * @vdev_id: virtual interface id
 * @callback: callback function
 * @ctxt: callback context
 *
 * Return: void
8873  */
8874 static void
8875 dp_txrx_data_tx_cb_set(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
8876 		       ol_txrx_data_tx_cb callback, void *ctxt)
8877 {
8878 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
8879 	struct dp_vdev *vdev = dp_get_vdev_from_soc_vdev_id_wifi3(soc, vdev_id);
8880 
8881 	if (!vdev)
8882 		return;
8883 
8884 	vdev->tx_non_std_data_callback.func = callback;
8885 	vdev->tx_non_std_data_callback.ctxt = ctxt;
8886 }
8887 
8888 /**
8889  * dp_pdev_get_dp_txrx_handle() - get dp handle from pdev
8890  * @soc: datapath soc handle
8891  * @pdev_id: id of datapath pdev handle
8892  *
8893  * Return: opaque pointer to dp txrx handle
8894  */
8895 static void *dp_pdev_get_dp_txrx_handle(struct cdp_soc_t *soc, uint8_t pdev_id)
8896 {
8897 	struct dp_pdev *pdev =
8898 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
8899 						   pdev_id);
8900 	if (qdf_unlikely(!pdev))
8901 		return NULL;
8902 
8903 	return pdev->dp_txrx_handle;
8904 }
8905 
8906 /**
8907  * dp_pdev_set_dp_txrx_handle() - set dp handle in pdev
8908  * @soc: datapath soc handle
8909  * @pdev_id: id of datapath pdev handle
8910  * @dp_txrx_hdl: opaque pointer for dp_txrx_handle
8911  *
8912  * Return: void
8913  */
8914 static void
8915 dp_pdev_set_dp_txrx_handle(struct cdp_soc_t *soc, uint8_t pdev_id,
8916 			   void *dp_txrx_hdl)
8917 {
8918 	struct dp_pdev *pdev =
8919 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
8920 						   pdev_id);
8921 
8922 	if (!pdev)
8923 		return;
8924 
8925 	pdev->dp_txrx_handle = dp_txrx_hdl;
8926 }
8927 
8928 /**
8929  * dp_soc_get_dp_txrx_handle() - get context for external-dp from dp soc
8930  * @soc_handle: datapath soc handle
8931  *
8932  * Return: opaque pointer to external dp (non-core DP)
8933  */
8934 static void *dp_soc_get_dp_txrx_handle(struct cdp_soc *soc_handle)
8935 {
8936 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
8937 
8938 	return soc->external_txrx_handle;
8939 }
8940 
8941 /**
8942  * dp_soc_set_dp_txrx_handle() - set external dp handle in soc
8943  * @soc_handle: datapath soc handle
8944  * @txrx_handle: opaque pointer to external dp (non-core DP)
8945  *
8946  * Return: void
8947  */
8948 static void
8949 dp_soc_set_dp_txrx_handle(struct cdp_soc *soc_handle, void *txrx_handle)
8950 {
8951 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
8952 
8953 	soc->external_txrx_handle = txrx_handle;
8954 }
8955 
8956 /**
8957  * dp_soc_map_pdev_to_lmac() - Save pdev_id to lmac_id mapping
8958  * @soc_hdl: datapath soc handle
8959  * @pdev_id: id of the datapath pdev handle
8960  * @lmac_id: lmac id
8961  *
8962  * Return: QDF_STATUS
8963  */
8964 static QDF_STATUS
8965 dp_soc_map_pdev_to_lmac(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
8966 			uint32_t lmac_id)
8967 {
8968 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
8969 	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc,
8970 								  pdev_id);
8971 
8972 	if (qdf_unlikely(!pdev))
8973 		return QDF_STATUS_E_FAILURE;
8974 
8975 	pdev->lmac_id = lmac_id;
8976 	wlan_cfg_set_hw_macid(soc->wlan_cfg_ctx,
8977 			      pdev_id,
8978 			      (lmac_id + 1));
8979 
8980 	return QDF_STATUS_SUCCESS;
8981 }
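
/*
 * Example (illustrative sketch): lmac_id is stored zero-based in the pdev
 * while wlan_cfg records hw_macid as lmac_id + 1, per the call above.
 *
 *	dp_soc_map_pdev_to_lmac(soc_hdl, 0, 0);
 *	(pdev 0 -> lmac 0, wlan_cfg hw_macid 1)
 */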
8982 
8983 /**
8984  * dp_soc_set_pdev_status_down() - set pdev down/up status
8985  * @soc: datapath soc handle
8986  * @pdev_id: id of datapath pdev handle
8987  * @is_pdev_down: pdev down/up status
8988  *
8989  * Return: QDF_STATUS
8990  */
8991 static QDF_STATUS
8992 dp_soc_set_pdev_status_down(struct cdp_soc_t *soc, uint8_t pdev_id,
8993 			    bool is_pdev_down)
8994 {
8995 	struct dp_pdev *pdev =
8996 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
8997 						   pdev_id);
8998 	if (!pdev)
8999 		return QDF_STATUS_E_FAILURE;
9000 
9001 	pdev->is_pdev_down = is_pdev_down;
9002 	return QDF_STATUS_SUCCESS;
9003 }
9004 
9005 /**
9006  * dp_get_cfg_capabilities() - get dp capabilities
9007  * @soc_handle: datapath soc handle
9008  * @dp_caps: enum for dp capabilities
9009  *
9010  * Return: bool to determine if dp caps is enabled
9011  */
9012 static bool
9013 dp_get_cfg_capabilities(struct cdp_soc_t *soc_handle,
9014 			enum cdp_capabilities dp_caps)
9015 {
9016 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
9017 
9018 	return wlan_cfg_get_dp_caps(soc->wlan_cfg_ctx, dp_caps);
9019 }
9020 
9021 #ifdef FEATURE_AST
9022 static QDF_STATUS
9023 dp_peer_teardown_wifi3(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
9024 		       uint8_t *peer_mac)
9025 {
9026 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
9027 	QDF_STATUS status = QDF_STATUS_SUCCESS;
9028 	struct dp_peer *peer =
9029 			dp_peer_find_hash_find(soc, peer_mac, 0, vdev_id);
9030 
9031 	/* Peer can be null for monitor vap mac address */
9032 	if (!peer || peer->delete_in_progress) {
9033 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
9034 			  "%s: Invalid peer\n", __func__);
9035 		status = QDF_STATUS_E_FAILURE;
9036 		goto fail;
9037 	}
9038 	/*
9039 	 * For a BSS peer, a new peer is not created on alloc_node if a peer
9040 	 * with the same address already exists; instead the refcnt of the
9041 	 * existing peer is increased. Correspondingly, in the delete path
9042 	 * only the refcnt is decreased, and the peer is deleted only when
9043 	 * all references are dropped. So delete_in_progress should not be
9044 	 * set for bss_peer unless only 3 references remain (peer map, peer
9045 	 * hash table and the above local reference).
9046 	 */
9047 	if (peer->bss_peer && (qdf_atomic_read(&peer->ref_cnt) > 3)) {
9048 		status = QDF_STATUS_E_FAILURE;
9049 		goto fail;
9050 	}
9051 
9052 	qdf_spin_lock_bh(&soc->ast_lock);
9053 	peer->delete_in_progress = true;
9054 	dp_peer_delete_ast_entries(soc, peer);
9055 	qdf_spin_unlock_bh(&soc->ast_lock);
9056 
9057 fail:
9058 	if (peer)
9059 		dp_peer_unref_delete(peer);
9060 	return status;
9061 }
9062 #endif
9063 
9064 #ifdef ATH_SUPPORT_NAC_RSSI
9065 /**
9066  * dp_vdev_get_neighbour_rssi(): Get stored RSSI for the configured NAC
9067  * @vdev_hdl: DP vdev handle
9068  * @mac_addr: neighbour peer mac address
9069  * @rssi: pointer to hold the rssi value
9070  * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_FAILURE otherwise.
9071  */
9072 static QDF_STATUS dp_vdev_get_neighbour_rssi(struct cdp_vdev *vdev_hdl,
9073 					     char *mac_addr, uint8_t *rssi) {
9074 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_hdl;
9075 	struct dp_pdev *pdev = vdev->pdev;
9076 	struct dp_neighbour_peer *peer = NULL;
9077 	QDF_STATUS status = QDF_STATUS_E_FAILURE;
9078 
9079 	*rssi = 0;
9080 	qdf_spin_lock_bh(&pdev->neighbour_peer_mutex);
9081 	TAILQ_FOREACH(peer, &pdev->neighbour_peers_list,
9082 		      neighbour_peer_list_elem) {
9083 		if (qdf_mem_cmp(&peer->neighbour_peers_macaddr.raw[0],
9084 				mac_addr, QDF_MAC_ADDR_SIZE) == 0) {
9085 			*rssi = peer->rssi;
9086 			status = QDF_STATUS_SUCCESS;
9087 			break;
9088 		}
9089 	}
9090 	qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex);
9091 	return status;
9092 }
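
/*
 * Example (illustrative sketch): reading back the cached RSSI for a
 * configured neighbour (NAC) peer; "nac_mac" is a hypothetical
 * caller-side mac address buffer.
 *
 *	uint8_t rssi = 0;
 *
 *	if (dp_vdev_get_neighbour_rssi(vdev_hdl, nac_mac, &rssi) ==
 *	    QDF_STATUS_SUCCESS)
 *		report rssi to the caller;
 */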
9093 
9094 static QDF_STATUS dp_config_for_nac_rssi(struct cdp_vdev *vdev_handle,
9095 		enum cdp_nac_param_cmd cmd, char *bssid, char *client_macaddr,
9096 		uint8_t chan_num)
9097 {
9098 
9099 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
9100 	struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;
9101 	struct dp_soc *soc = (struct dp_soc *) vdev->pdev->soc;
9102 
9103 	pdev->nac_rssi_filtering = 1;
9104 	/* Store address of NAC (neighbour peer) which will be checked
9105 	 * against TA of received packets.
9106 	 */
9107 
9108 	if (cmd == CDP_NAC_PARAM_ADD) {
9109 		dp_update_filter_neighbour_peers(vdev_handle, DP_NAC_PARAM_ADD,
9110 						 client_macaddr);
9111 	} else if (cmd == CDP_NAC_PARAM_DEL) {
9112 		dp_update_filter_neighbour_peers(vdev_handle,
9113 						 DP_NAC_PARAM_DEL,
9114 						 client_macaddr);
9115 	}
9116 
9117 	if (soc->cdp_soc.ol_ops->config_bssid_in_fw_for_nac_rssi)
9118 		soc->cdp_soc.ol_ops->config_bssid_in_fw_for_nac_rssi
9119 			(soc->ctrl_psoc, pdev->pdev_id,
9120 			 vdev->vdev_id, cmd, bssid, client_macaddr);
9121 
9122 	return QDF_STATUS_SUCCESS;
9123 }
9124 #endif
9125 
9126 /**
9127  * dp_enable_peer_based_pktlog() - Set flag for peer-based pktlog
9128  * filtering
9129  * @txrx_pdev_handle: cdp_pdev handle
9130  * @mac_addr: mac address of the peer for which filtering is set
9131  * @enb_dsb: Enable or disable peer-based filtering
9132  * Return: QDF_STATUS
9133  */
9134 static int
9135 dp_enable_peer_based_pktlog(
9136 	struct cdp_pdev *txrx_pdev_handle,
9137 	char *mac_addr, uint8_t enb_dsb)
9138 {
9139 	struct dp_peer *peer;
9140 	struct dp_pdev *pdev = (struct dp_pdev *)txrx_pdev_handle;
9141 
9142 	peer = (struct dp_peer *)dp_find_peer_by_addr(txrx_pdev_handle,
9143 			mac_addr);
9144 
9145 	if (!peer) {
9146 		dp_err("Invalid Peer");
9147 		return QDF_STATUS_E_FAILURE;
9148 	}
9149 
9150 	peer->peer_based_pktlog_filter = enb_dsb;
9151 	pdev->dp_peer_based_pktlog = enb_dsb;
9152 
9153 	return QDF_STATUS_SUCCESS;
9154 }
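
/*
 * Example (illustrative sketch): enabling pktlog capture for a single
 * peer and disabling it afterwards; enb_dsb = 1 enables the per-peer
 * filter, 0 disables it.
 *
 *	dp_enable_peer_based_pktlog(txrx_pdev_handle, peer_mac, 1);
 *	... capture window ...
 *	dp_enable_peer_based_pktlog(txrx_pdev_handle, peer_mac, 0);
 */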
9155 
9156 #ifndef WLAN_SUPPORT_RX_TAG_STATISTICS
9157 /**
9158  * dp_dump_pdev_rx_protocol_tag_stats - dump the number of packets tagged for
9159  * a given protocol type (RX_PROTOCOL_TAG_ALL indicates all protocols)
9160  * @pdev_handle: cdp_pdev handle
9161  * @protocol_type: protocol type for which stats should be displayed
9162  *
9163  * Return: none
9164  */
9165 static inline void
9166 dp_dump_pdev_rx_protocol_tag_stats(struct cdp_pdev *pdev_handle,
9167 				   uint16_t protocol_type)
9168 {
9169 }
9170 #endif /* WLAN_SUPPORT_RX_TAG_STATISTICS */
9171 
9172 #ifndef WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG
9173 /**
9174  * dp_update_pdev_rx_protocol_tag - Add/remove a protocol tag that should be
9175  * applied to packets of the desired protocol type
9176  * @pdev_handle: cdp_pdev handle
9177  * @enable_rx_protocol_tag: bitmask that indicates which protocol types
9178  * are enabled for tagging. Zero disables the feature, non-zero enables
9179  * it.
9180  * @protocol_type: new protocol type for which the tag is being added
9181  * @tag: user configured tag for the new protocol
9182  *
9183  * Return: QDF_STATUS_SUCCESS
9184  */
9185 static inline QDF_STATUS
9186 dp_update_pdev_rx_protocol_tag(struct cdp_pdev *pdev_handle,
9187 			       uint32_t enable_rx_protocol_tag,
9188 			       uint16_t protocol_type,
9189 			       uint16_t tag)
9190 {
9191 	return QDF_STATUS_SUCCESS;
9192 }
9193 #endif /* WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG */
9194 
9195 #ifndef WLAN_SUPPORT_RX_FLOW_TAG
9196 /**
9197  * dp_set_rx_flow_tag - add/delete a flow
9198  * @pdev_handle: cdp_pdev handle
9199  * @flow_info: flow tuple that is to be added to/deleted from flow search table
9200  *
9201  * Return: Success
9202  */
9203 static inline QDF_STATUS
9204 dp_set_rx_flow_tag(struct cdp_pdev *pdev_handle,
9205 		   struct cdp_rx_flow_info *flow_info)
9206 {
9207 	return QDF_STATUS_SUCCESS;
9208 }
9209 /**
9210  * dp_dump_rx_flow_tag_stats - dump the number of packets tagged for
9211  * given flow 5-tuple
9212  * @pdev_handle: cdp_pdev handle
9213  * @flow_info: flow 5-tuple for which stats should be displayed
9214  *
9215  * Return: Success
9216  */
9217 static inline QDF_STATUS
9218 dp_dump_rx_flow_tag_stats(struct cdp_pdev *pdev_handle,
9219 			  struct cdp_rx_flow_info *flow_info)
9220 {
9221 	return QDF_STATUS_SUCCESS;
9222 }
9223 #endif /* WLAN_SUPPORT_RX_FLOW_TAG */
9224 
9225 static QDF_STATUS dp_peer_map_attach_wifi3(struct cdp_soc_t *soc_hdl,
9226 					   uint32_t max_peers,
9227 					   uint32_t max_ast_index,
9228 					   bool peer_map_unmap_v2)
9229 {
9230 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
9231 
9232 	soc->max_peers = max_peers;
9233 
9234 	qdf_print("%s max_peers %u, max_ast_index: %u",
9235 		  __func__, max_peers, max_ast_index);
9236 	wlan_cfg_set_max_ast_idx(soc->wlan_cfg_ctx, max_ast_index);
9237 
9238 	if (dp_peer_find_attach(soc))
9239 		return QDF_STATUS_E_FAILURE;
9240 
9241 	soc->is_peer_map_unmap_v2 = peer_map_unmap_v2;
9242 
9243 	return QDF_STATUS_SUCCESS;
9244 }
9245 
9246 static QDF_STATUS dp_set_rate_stats_cap(struct cdp_soc_t *soc_hdl,
9247 					uint8_t val)
9248 {
9249 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
9250 
9251 	soc->wlanstats_enabled = val;
9252 
9253 	return QDF_STATUS_SUCCESS;
9254 }
9255 
9256 static void dp_soc_set_rate_stats_ctx(struct cdp_soc_t *soc_handle,
9257 				      void *stats_ctx)
9258 {
9259 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
9260 
9261 	soc->rate_stats_ctx = (struct cdp_soc_rate_stats_ctx *)stats_ctx;
9262 }
9263 
9264 #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
9265 static QDF_STATUS dp_flush_rate_stats_req(struct cdp_soc_t *soc_hdl,
9266 					  uint8_t pdev_id)
9267 {
9268 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
9269 	struct dp_vdev *vdev = NULL;
9270 	struct dp_peer *peer = NULL;
9271 	struct dp_pdev *pdev =
9272 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
9273 						   pdev_id);
9274 
9275 	if (!pdev)
9276 		return QDF_STATUS_E_FAILURE;
9277 
9278 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
9279 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
9280 	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
9281 		TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
9282 			if (peer && !peer->bss_peer)
9283 				dp_wdi_event_handler(
9284 					WDI_EVENT_FLUSH_RATE_STATS_REQ,
9285 					soc, peer->wlanstats_ctx,
9286 					peer->peer_ids[0],
9287 					WDI_NO_VAL, pdev_id);
9288 		}
9289 	}
9290 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
9291 	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
9292 
9293 	return QDF_STATUS_SUCCESS;
9294 }
9295 #else
9296 static inline QDF_STATUS
9297 dp_flush_rate_stats_req(struct cdp_soc_t *soc_hdl,
9298 			uint8_t pdev_id)
9299 {
9300 	return QDF_STATUS_SUCCESS;
9301 }
9302 #endif
9303 
9304 #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
9305 static QDF_STATUS dp_peer_flush_rate_stats(struct cdp_soc_t *soc,
9306 					   uint8_t pdev_id,
9307 					   void *buf)
9308 {
9309 	dp_wdi_event_handler(WDI_EVENT_PEER_FLUSH_RATE_STATS,
9310 			     (struct dp_soc *)soc, buf, HTT_INVALID_PEER,
9311 			     WDI_NO_VAL, pdev_id);
9312 	return QDF_STATUS_SUCCESS;
9313 }
9314 #else
9315 static inline QDF_STATUS
9316 dp_peer_flush_rate_stats(struct cdp_soc_t *soc,
9317 			 uint8_t pdev_id,
9318 			 void *buf)
9319 {
9320 	return QDF_STATUS_SUCCESS;
9321 }
9322 #endif
9323 
9324 static void *dp_soc_get_rate_stats_ctx(struct cdp_soc_t *soc_handle)
9325 {
9326 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
9327 
9328 	return soc->rate_stats_ctx;
9329 }
9330 
9331 /*
9332  * dp_get_cfg() - get dp cfg
9333  * @soc: cdp soc handle
9334  * @cfg: cfg enum
9335  *
9336  * Return: cfg value
9337  */
9338 static uint32_t dp_get_cfg(struct cdp_soc_t *soc, enum cdp_dp_cfg cfg)
9339 {
9340 	struct dp_soc *dpsoc = (struct dp_soc *)soc;
9341 	uint32_t value = 0;
9342 
9343 	switch (cfg) {
9344 	case cfg_dp_enable_data_stall:
9345 		value = dpsoc->wlan_cfg_ctx->enable_data_stall_detection;
9346 		break;
9347 	case cfg_dp_enable_ip_tcp_udp_checksum_offload:
9348 		value = dpsoc->wlan_cfg_ctx->tcp_udp_checksumoffload;
9349 		break;
9350 	case cfg_dp_tso_enable:
9351 		value = dpsoc->wlan_cfg_ctx->tso_enabled;
9352 		break;
9353 	case cfg_dp_lro_enable:
9354 		value = dpsoc->wlan_cfg_ctx->lro_enabled;
9355 		break;
9356 	case cfg_dp_gro_enable:
9357 		value = dpsoc->wlan_cfg_ctx->gro_enabled;
9358 		break;
9359 	case cfg_dp_tx_flow_start_queue_offset:
9360 		value = dpsoc->wlan_cfg_ctx->tx_flow_start_queue_offset;
9361 		break;
9362 	case cfg_dp_tx_flow_stop_queue_threshold:
9363 		value = dpsoc->wlan_cfg_ctx->tx_flow_stop_queue_threshold;
9364 		break;
9365 	case cfg_dp_disable_intra_bss_fwd:
9366 		value = dpsoc->wlan_cfg_ctx->disable_intra_bss_fwd;
9367 		break;
9368 	default:
9369 		value = 0;
9370 	}
9371 
9372 	return value;
9373 }
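
/*
 * Example (illustrative sketch): querying one of the knobs handled in the
 * switch above; unknown enum values fall through to the default case and
 * read back as 0.
 *
 *	uint32_t tso_enabled = dp_get_cfg(soc, cfg_dp_tso_enable);
 */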
9374 
9375 #ifdef PEER_FLOW_CONTROL
9376 /**
9377  * dp_tx_flow_ctrl_configure_pdev() - Configure flow control params
9378  * @soc_handle: datapath soc handle
9379  * @pdev_id: id of datapath pdev handle
9380  * @param: ol ath params
9381  * @value: value of the flag
9382  * @buff: Buffer to be passed
9383  *
9384  * Implemented this function same as legacy function. In legacy code, single
9385  * This function mirrors the legacy implementation, where a single
9386  * function both displays stats and updates pdev params.
9387  * Return: 0 for success. nonzero for failure.
9388  */
9389 static uint32_t dp_tx_flow_ctrl_configure_pdev(struct cdp_soc_t *soc_handle,
9390 					       uint8_t pdev_id,
9391 					       enum _ol_ath_param_t param,
9392 					       uint32_t value, void *buff)
9393 {
9394 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
9395 	struct dp_pdev *pdev =
9396 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
9397 						   pdev_id);
9398 
9399 	if (qdf_unlikely(!pdev))
9400 		return 1;
9401 
9402 	soc = pdev->soc;
9403 	if (!soc)
9404 		return 1;
9405 
9406 	switch (param) {
9407 #ifdef QCA_ENH_V3_STATS_SUPPORT
9408 	case OL_ATH_PARAM_VIDEO_DELAY_STATS_FC:
9409 		if (value)
9410 			pdev->delay_stats_flag = true;
9411 		else
9412 			pdev->delay_stats_flag = false;
9413 		break;
9414 	case OL_ATH_PARAM_VIDEO_STATS_FC:
9415 		qdf_print("------- TID Stats ------\n");
9416 		dp_pdev_print_tid_stats(pdev);
9417 		qdf_print("------ Delay Stats ------\n");
9418 		dp_pdev_print_delay_stats(pdev);
9419 		break;
9420 #endif
9421 	case OL_ATH_PARAM_TOTAL_Q_SIZE:
9422 		{
9423 			uint32_t tx_min, tx_max;
9424 
9425 			tx_min = wlan_cfg_get_min_tx_desc(soc->wlan_cfg_ctx);
9426 			tx_max = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
9427 
9428 			if (!buff) {
9429 				if ((value >= tx_min) && (value <= tx_max)) {
9430 					pdev->num_tx_allowed = value;
9431 				} else {
9432 					QDF_TRACE(QDF_MODULE_ID_DP,
9433 						  QDF_TRACE_LEVEL_INFO,
9434 						  "Failed to update num_tx_allowed, Q_min = %d Q_max = %d",
9435 						  tx_min, tx_max);
9436 					break;
9437 				}
9438 			} else {
9439 				*(int *)buff = pdev->num_tx_allowed;
9440 			}
9441 		}
9442 		break;
9443 	default:
9444 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
9445 			  "%s: not handled param %d ", __func__, param);
9446 		break;
9447 	}
9448 
9449 	return 0;
9450 }
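
/*
 * Example (illustrative sketch): the same entry point both updates and
 * reads OL_ATH_PARAM_TOTAL_Q_SIZE, selected by whether buff is NULL, per
 * the legacy convention described above.
 *
 *	int q_size;
 *
 *	dp_tx_flow_ctrl_configure_pdev(soc_handle, pdev_id,
 *				       OL_ATH_PARAM_TOTAL_Q_SIZE,
 *				       1024, NULL);        (set to 1024)
 *	dp_tx_flow_ctrl_configure_pdev(soc_handle, pdev_id,
 *				       OL_ATH_PARAM_TOTAL_Q_SIZE,
 *				       0, &q_size);        (read back)
 */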
9451 #endif
9452 
9453 /**
9454  * dp_set_pdev_pcp_tid_map_wifi3(): update pcp tid map in pdev
9455  * @psoc: dp soc handle
9456  * @pdev_id: id of DP_PDEV handle
9457  * @pcp: pcp value
9458  * @tid: tid value passed by the user
9459  *
9460  * Return: QDF_STATUS_SUCCESS on success
9461  */
9462 static QDF_STATUS dp_set_pdev_pcp_tid_map_wifi3(ol_txrx_soc_handle psoc,
9463 						uint8_t pdev_id,
9464 						uint8_t pcp, uint8_t tid)
9465 {
9466 	struct dp_soc *soc = (struct dp_soc *)psoc;
9467 
9468 	soc->pcp_tid_map[pcp] = tid;
9469 
9470 	hal_tx_update_pcp_tid_map(soc->hal_soc, pcp, tid);
9471 	return QDF_STATUS_SUCCESS;
9472 }
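
/*
 * Example (illustrative sketch): mapping 802.1p PCP 5 to TID 5 in the
 * soc-wide table and in HW via the call above; note that pdev_id is
 * currently unused by this function.
 *
 *	dp_set_pdev_pcp_tid_map_wifi3(psoc, pdev_id, 5, 5);
 */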
9473 
9474 /**
9475  * dp_set_pdev_tidmap_prty_wifi3(): update tidmap priority in pdev
9476  * @pdev_handle: DP_PDEV handle
9477  * @prio: tidmap priority value passed by the user
9478  *
9479  * Return: QDF_STATUS_SUCCESS on success
9480  */
9481 static QDF_STATUS dp_set_pdev_tidmap_prty_wifi3(struct cdp_pdev *pdev_handle,
9482 						uint8_t prio)
9483 {
9484 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
9485 	struct dp_soc *soc = pdev->soc;
9486 
9487 	soc->tidmap_prty = prio;
9488 
9489 	hal_tx_set_tidmap_prty(soc->hal_soc, prio);
9490 	return QDF_STATUS_SUCCESS;
9491 }
9492 
9493 /**
9494  * dp_set_vdev_pcp_tid_map_wifi3(): update pcp tid map in vdev
9495  * @soc: DP soc handle
9496  * @vdev_id: id of DP_VDEV handle
9497  * @pcp: pcp value
9498  * @tid: tid value passed by the user
9499  *
9500  * Return: QDF_STATUS_SUCCESS on success
9501  */
9502 static QDF_STATUS dp_set_vdev_pcp_tid_map_wifi3(struct cdp_soc_t *soc,
9503 						uint8_t vdev_id,
9504 						uint8_t pcp, uint8_t tid)
9505 {
9506 	struct dp_vdev *vdev =
9507 		dp_get_vdev_from_soc_vdev_id_wifi3((struct dp_soc *)soc,
9508 						   vdev_id);
9509 
9510 	if (!vdev)
9511 		return QDF_STATUS_E_FAILURE;
9512 
9513 	vdev->pcp_tid_map[pcp] = tid;
9514 
9515 	return QDF_STATUS_SUCCESS;
9516 }
9517 
9518 /**
9519  * dp_set_vdev_tidmap_tbl_id_wifi3(): update tidmap tbl id in vdev
9520  * @vdev_handle: DP_VDEV handle
9521  * @mapid: map_id value passed by the user
9522  *
9523  * Return: QDF_STATUS_SUCCESS on success
9524  */
9525 static QDF_STATUS dp_set_vdev_tidmap_tbl_id_wifi3(struct cdp_vdev *vdev_handle,
9526 						  uint8_t mapid)
9527 {
9528 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
9529 
9530 	vdev->tidmap_tbl_id = mapid;
9531 
9532 	return QDF_STATUS_SUCCESS;
9533 }
9534 
9535 /**
9536  * dp_set_vdev_tidmap_prty_wifi3(): update tidmap priority in vdev
9537  * @vdev_handle: DP_VDEV handle
9538  * @prio: tidmap priority value passed by the user
9539  *
9540  * Return: QDF_STATUS_SUCCESS on success
9541  */
9542 static QDF_STATUS dp_set_vdev_tidmap_prty_wifi3(struct cdp_vdev *vdev_handle,
9543 						uint8_t prio)
9544 {
9545 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
9546 
9547 	vdev->tidmap_prty = prio;
9548 
9549 	return QDF_STATUS_SUCCESS;
9550 }
9551 
9552 static struct cdp_cmn_ops dp_ops_cmn = {
9553 	.txrx_soc_attach_target = dp_soc_attach_target_wifi3,
9554 	.txrx_vdev_attach = dp_vdev_attach_wifi3,
9555 	.txrx_vdev_detach = dp_vdev_detach_wifi3,
9556 	.txrx_pdev_attach = dp_pdev_attach_wifi3,
9557 	.txrx_pdev_detach = dp_pdev_detach_wifi3,
9558 	.txrx_pdev_deinit = dp_pdev_deinit_wifi3,
9559 	.txrx_peer_create = dp_peer_create_wifi3,
9560 	.txrx_peer_setup = dp_peer_setup_wifi3,
9561 #ifdef FEATURE_AST
9562 	.txrx_peer_teardown = dp_peer_teardown_wifi3,
9563 #else
9564 	.txrx_peer_teardown = NULL,
9565 #endif
9566 	.txrx_peer_add_ast = dp_peer_add_ast_wifi3,
9567 	.txrx_peer_update_ast = dp_peer_update_ast_wifi3,
9568 	.txrx_peer_get_ast_info_by_soc = dp_peer_get_ast_info_by_soc_wifi3,
9569 	.txrx_peer_get_ast_info_by_pdev =
9570 		dp_peer_get_ast_info_by_pdevid_wifi3,
9571 	.txrx_peer_ast_delete_by_soc =
9572 		dp_peer_ast_entry_del_by_soc,
9573 	.txrx_peer_ast_delete_by_pdev =
9574 		dp_peer_ast_entry_del_by_pdev,
9575 	.txrx_peer_delete = dp_peer_delete_wifi3,
9576 	.txrx_vdev_register = dp_vdev_register_wifi3,
9577 	.txrx_soc_detach = dp_soc_detach_wifi3,
9578 	.txrx_soc_deinit = dp_soc_deinit_wifi3,
9579 	.txrx_soc_init = dp_soc_init_wifi3,
9580 	.txrx_tso_soc_attach = dp_tso_soc_attach,
9581 	.txrx_tso_soc_detach = dp_tso_soc_detach,
9582 	.txrx_get_vdev_mac_addr = dp_get_vdev_mac_addr_wifi3,
9583 	.txrx_get_mon_vdev_from_pdev = dp_get_mon_vdev_from_pdev_wifi3,
9584 	.txrx_get_ctrl_pdev_from_vdev = dp_get_ctrl_pdev_from_vdev_wifi3,
9585 	.txrx_ath_getstats = dp_get_device_stats,
9586 	.addba_requestprocess = dp_addba_requestprocess_wifi3,
9587 	.addba_responsesetup = dp_addba_responsesetup_wifi3,
9588 	.addba_resp_tx_completion = dp_addba_resp_tx_completion_wifi3,
9589 	.delba_process = dp_delba_process_wifi3,
9590 	.set_addba_response = dp_set_addba_response,
9591 	.flush_cache_rx_queue = NULL,
9592 	/* TODO: get API's for dscp-tid need to be added*/
9593 	.set_vdev_dscp_tid_map = dp_set_vdev_dscp_tid_map_wifi3,
9594 	.set_pdev_dscp_tid_map = dp_set_pdev_dscp_tid_map_wifi3,
9595 	.hmmc_tid_override_en = dp_hmmc_tid_override_en_wifi3,
9596 	.set_hmmc_tid_val = dp_set_hmmc_tid_val_wifi3,
9597 	.txrx_get_total_per = dp_get_total_per,
9598 	.txrx_stats_request = dp_txrx_stats_request,
9599 	.txrx_set_monitor_mode = dp_vdev_set_monitor_mode,
9600 	.txrx_get_pdev_id_frm_pdev = dp_get_pdev_id_frm_pdev,
9601 	.txrx_get_vow_config_frm_pdev = dp_get_delay_stats_flag,
9602 	.txrx_pdev_set_chan_noise_floor = dp_pdev_set_chan_noise_floor,
9603 	.txrx_set_nac = dp_set_nac,
9604 	.txrx_get_tx_pending = dp_get_tx_pending,
9605 	.txrx_set_pdev_tx_capture = dp_config_debug_sniffer,
9606 	.txrx_get_peer_mac_from_peer_id = dp_get_peer_mac_from_peer_id,
9607 	.display_stats = dp_txrx_dump_stats,
9608 	.txrx_soc_set_nss_cfg = dp_soc_set_nss_cfg_wifi3,
9609 	.txrx_soc_get_nss_cfg = dp_soc_get_nss_cfg_wifi3,
9610 	.txrx_intr_attach = dp_soc_interrupt_attach_wrapper,
9611 	.txrx_intr_detach = dp_soc_interrupt_detach,
9612 	.set_pn_check = dp_set_pn_check_wifi3,
9613 	.update_config_parameters = dp_update_config_parameters,
9614 	/* TODO: Add other functions */
9615 	.txrx_data_tx_cb_set = dp_txrx_data_tx_cb_set,
9616 	.get_dp_txrx_handle = dp_pdev_get_dp_txrx_handle,
9617 	.set_dp_txrx_handle = dp_pdev_set_dp_txrx_handle,
9618 	.get_soc_dp_txrx_handle = dp_soc_get_dp_txrx_handle,
9619 	.set_soc_dp_txrx_handle = dp_soc_set_dp_txrx_handle,
9620 	.map_pdev_to_lmac = dp_soc_map_pdev_to_lmac,
9621 	.set_pdev_status_down = dp_soc_set_pdev_status_down,
9622 	.txrx_set_ba_aging_timeout = dp_set_ba_aging_timeout,
9623 	.txrx_get_ba_aging_timeout = dp_get_ba_aging_timeout,
9624 	.tx_send = dp_tx_send,
9625 	.txrx_peer_reset_ast = dp_wds_reset_ast_wifi3,
9626 	.txrx_peer_reset_ast_table = dp_wds_reset_ast_table_wifi3,
9627 	.txrx_peer_flush_ast_table = dp_wds_flush_ast_table_wifi3,
9628 	.txrx_peer_map_attach = dp_peer_map_attach_wifi3,
9629 	.txrx_get_os_rx_handles_from_vdev =
9630 					dp_get_os_rx_handles_from_vdev_wifi3,
9631 	.delba_tx_completion = dp_delba_tx_completion_wifi3,
9632 	.get_dp_capabilities = dp_get_cfg_capabilities,
9633 	.txrx_get_cfg = dp_get_cfg,
9634 	.set_rate_stats_ctx = dp_soc_set_rate_stats_ctx,
9635 	.get_rate_stats_ctx = dp_soc_get_rate_stats_ctx,
9636 	.txrx_peer_flush_rate_stats = dp_peer_flush_rate_stats,
9637 	.txrx_flush_rate_stats_request = dp_flush_rate_stats_req,
9638 
9639 	.set_pdev_pcp_tid_map = dp_set_pdev_pcp_tid_map_wifi3,
9640 	.set_pdev_tidmap_prty = dp_set_pdev_tidmap_prty_wifi3,
9641 	.set_vdev_pcp_tid_map = dp_set_vdev_pcp_tid_map_wifi3,
9642 	.set_vdev_tidmap_prty = dp_set_vdev_tidmap_prty_wifi3,
9643 	.set_vdev_tidmap_tbl_id = dp_set_vdev_tidmap_tbl_id_wifi3,
9644 
9645 	.txrx_cp_peer_del_response = dp_cp_peer_del_resp_handler,
9646 #ifdef QCA_MULTIPASS_SUPPORT
9647 	.set_vlan_groupkey = dp_set_vlan_groupkey,
9648 #endif
9649 };
9650 
9651 static struct cdp_ctrl_ops dp_ops_ctrl = {
9652 	.txrx_peer_authorize = dp_peer_authorize,
9653 	.txrx_set_vdev_rx_decap_type = dp_set_vdev_rx_decap_type,
9654 	.txrx_set_tx_encap_type = dp_set_vdev_tx_encap_type,
9655 #ifdef MESH_MODE_SUPPORT
9656 	.txrx_set_mesh_mode  = dp_peer_set_mesh_mode,
9657 	.txrx_set_mesh_rx_filter = dp_peer_set_mesh_rx_filter,
9658 #endif
9659 	.txrx_set_vdev_param = dp_set_vdev_param,
9660 	.txrx_peer_set_nawds = dp_peer_set_nawds,
9661 	.txrx_set_pdev_reo_dest = dp_set_pdev_reo_dest,
9662 	.txrx_get_pdev_reo_dest = dp_get_pdev_reo_dest,
9663 	.txrx_set_filter_neighbour_peers = dp_set_filter_neighbour_peers,
9664 	.txrx_update_filter_neighbour_peers =
9665 		dp_update_filter_neighbour_peers,
9666 	.txrx_get_sec_type = dp_get_sec_type,
9667 	/* TODO: Add other functions */
9668 	.txrx_wdi_event_sub = dp_wdi_event_sub,
9669 	.txrx_wdi_event_unsub = dp_wdi_event_unsub,
9670 #ifdef WDI_EVENT_ENABLE
9671 	.txrx_get_pldev = dp_get_pldev,
9672 #endif
9673 	.txrx_set_pdev_param = dp_set_pdev_param,
9674 #ifdef ATH_SUPPORT_NAC_RSSI
9675 	.txrx_vdev_config_for_nac_rssi = dp_config_for_nac_rssi,
9676 	.txrx_vdev_get_neighbour_rssi = dp_vdev_get_neighbour_rssi,
9677 #endif
9678 	.set_key = dp_set_michael_key,
9679 	.txrx_get_vdev_param = dp_get_vdev_param,
9680 	.enable_peer_based_pktlog = dp_enable_peer_based_pktlog,
9681 	.calculate_delay_stats = dp_calculate_delay_stats,
9682 #ifdef WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG
9683 	.txrx_update_pdev_rx_protocol_tag = dp_update_pdev_rx_protocol_tag,
9684 #ifdef WLAN_SUPPORT_RX_TAG_STATISTICS
9685 	.txrx_dump_pdev_rx_protocol_tag_stats =
9686 				dp_dump_pdev_rx_protocol_tag_stats,
9687 #endif /* WLAN_SUPPORT_RX_TAG_STATISTICS */
9688 #endif /* WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG */
9689 #ifdef WLAN_SUPPORT_RX_FLOW_TAG
9690 	.txrx_set_rx_flow_tag = dp_set_rx_flow_tag,
9691 	.txrx_dump_rx_flow_tag_stats = dp_dump_rx_flow_tag_stats,
9692 #endif /* WLAN_SUPPORT_RX_FLOW_TAG */
9693 #ifdef QCA_MULTIPASS_SUPPORT
9694 	.txrx_peer_set_vlan_id = dp_peer_set_vlan_id,
9695 #endif /*QCA_MULTIPASS_SUPPORT*/
9696 #if defined(WLAN_TX_PKT_CAPTURE_ENH) || defined(WLAN_RX_PKT_CAPTURE_ENH)
9697 	.txrx_update_peer_pkt_capture_params =
9698 		 dp_peer_update_pkt_capture_params,
9699 #endif /* WLAN_TX_PKT_CAPTURE_ENH || WLAN_RX_PKT_CAPTURE_ENH */
9700 };
9701 
9702 static struct cdp_me_ops dp_ops_me = {
9703 #ifdef ATH_SUPPORT_IQUE
9704 	.tx_me_alloc_descriptor = dp_tx_me_alloc_descriptor,
9705 	.tx_me_free_descriptor = dp_tx_me_free_descriptor,
9706 	.tx_me_convert_ucast = dp_tx_me_send_convert_ucast,
9707 #endif
9708 };
9709 
9710 static struct cdp_mon_ops dp_ops_mon = {
9711 	.txrx_monitor_set_filter_ucast_data = NULL,
9712 	.txrx_monitor_set_filter_mcast_data = NULL,
9713 	.txrx_monitor_set_filter_non_data = NULL,
9714 	.txrx_monitor_get_filter_ucast_data = dp_vdev_get_filter_ucast_data,
9715 	.txrx_monitor_get_filter_mcast_data = dp_vdev_get_filter_mcast_data,
9716 	.txrx_monitor_get_filter_non_data = dp_vdev_get_filter_non_data,
9717 	.txrx_reset_monitor_mode = dp_reset_monitor_mode,
9718 	/* Added support for HK advance filter */
9719 	.txrx_set_advance_monitor_filter = dp_pdev_set_advance_monitor_filter,
9720 	.txrx_monitor_record_channel = dp_pdev_set_monitor_channel,
9721 	.txrx_deliver_tx_mgmt = dp_deliver_tx_mgmt,
9722 	.txrx_set_bsscolor = dp_mon_set_bsscolor,
9723 };
9724 
9725 static struct cdp_host_stats_ops dp_ops_host_stats = {
9726 	.txrx_per_peer_stats = dp_get_host_peer_stats,
9727 	.get_fw_peer_stats = dp_get_fw_peer_stats,
9728 	.get_htt_stats = dp_get_htt_stats,
9729 	.txrx_enable_enhanced_stats = dp_enable_enhanced_stats,
9730 	.txrx_disable_enhanced_stats = dp_disable_enhanced_stats,
9731 	.txrx_stats_publish = dp_txrx_stats_publish,
9732 	.txrx_get_vdev_stats  = dp_txrx_get_vdev_stats,
9733 	.txrx_get_peer_stats = dp_txrx_get_peer_stats,
9734 	.txrx_reset_peer_stats = dp_txrx_reset_peer_stats,
9735 	.txrx_get_pdev_stats = dp_txrx_get_pdev_stats,
9736 	.txrx_get_ratekbps = dp_txrx_get_ratekbps,
9737 	.configure_rate_stats = dp_set_rate_stats_cap,
9738 	.txrx_update_vdev_stats = dp_txrx_update_vdev_host_stats,
9739 	/* TODO */
9740 };
9741 
9742 static struct cdp_raw_ops dp_ops_raw = {
9743 	/* TODO */
9744 };
9745 
9746 #ifdef PEER_FLOW_CONTROL
9747 static struct cdp_pflow_ops dp_ops_pflow = {
9748 	dp_tx_flow_ctrl_configure_pdev,
9749 };
9750 #endif /* PEER_FLOW_CONTROL */
9751 
9752 #ifdef FEATURE_RUNTIME_PM
9753 /**
9754  * dp_runtime_suspend() - ensure DP is ready to runtime suspend
9755  * @soc_hdl: Datapath soc handle
9756  * @pdev_id: id of data path pdev handle
9757  *
9758  * DP is ready to runtime suspend if there are no pending TX packets.
9759  *
9760  * Return: QDF_STATUS
9761  */
9762 static QDF_STATUS dp_runtime_suspend(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
9763 {
9764 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
9765 	struct dp_pdev *pdev;
9766 
9767 	pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
9768 	if (!pdev) {
9769 		dp_err("pdev is NULL");
9770 		return QDF_STATUS_E_INVAL;
9771 	}
9772 
9773 	/* Abort if there are any pending TX packets */
9774 	if (dp_get_tx_pending(dp_pdev_to_cdp_pdev(pdev)) > 0) {
9775 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
9776 			  FL("Abort suspend due to pending TX packets"));
9777 		return QDF_STATUS_E_AGAIN;
9778 	}
9779 
9780 	if (soc->intr_mode == DP_INTR_POLL)
9781 		qdf_timer_stop(&soc->int_timer);
9782 
9783 	return QDF_STATUS_SUCCESS;
9784 }
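
/*
 * Example (illustrative sketch): a runtime-PM caller treats
 * QDF_STATUS_E_AGAIN as "TX still draining" and retries (or aborts) the
 * suspend attempt rather than powering down with frames in flight.
 *
 *	if (dp_runtime_suspend(soc_hdl, pdev_id) == QDF_STATUS_E_AGAIN)
 *		defer and retry the suspend later;
 */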
9785 
9786 /**
9787  * dp_flush_ring_hptp() - Update ring shadow register HP/TP address
9788  *			  on runtime resume
9789  * @soc: DP soc context
9790  * @hal_srng: srng whose shadow HP/TP registers need to be flushed
9791  *
9792  * Return: None
9793  */
9794 static
9795 void dp_flush_ring_hptp(struct dp_soc *soc, hal_ring_handle_t hal_srng)
9796 {
9797 	if (hal_srng && hal_srng_get_clear_event(hal_srng,
9798 						 HAL_SRNG_FLUSH_EVENT)) {
9799 		/* Acquire the lock */
9800 		hal_srng_access_start(soc->hal_soc, hal_srng);
9801 
9802 		hal_srng_access_end(soc->hal_soc, hal_srng);
9803 
9804 		hal_srng_set_flush_last_ts(hal_srng);
9805 	}
9806 }
9807 
9808 /**
9809  * dp_runtime_resume() - ensure DP is ready to runtime resume
9810  * @soc_hdl: Datapath soc handle
9811  * @pdev_id: id of data path pdev handle
9812  *
9813  * Resume DP for runtime PM.
9814  *
9815  * Return: QDF_STATUS
9816  */
9817 static QDF_STATUS dp_runtime_resume(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
9818 {
9819 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
9820 	int i;
9821 
9822 	if (soc->intr_mode == DP_INTR_POLL)
9823 		qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
9824 
9825 	for (i = 0; i < MAX_TCL_DATA_RINGS; i++) {
9826 		dp_flush_ring_hptp(soc, soc->tcl_data_ring[i].hal_srng);
9827 	}
9828 
9829 	dp_flush_ring_hptp(soc, soc->reo_cmd_ring.hal_srng);
9830 
9831 	return QDF_STATUS_SUCCESS;
9832 }
9833 #endif /* FEATURE_RUNTIME_PM */
9834 
9835 /**
9836  * dp_tx_get_success_ack_stats() - get tx success completion count
9837  * @soc_hdl: Datapath soc handle
9838  * @vdev_id: vdev identifier
9839  *
9840  * Return: tx success ack count
9841  */
9842 static uint32_t dp_tx_get_success_ack_stats(struct cdp_soc_t *soc_hdl,
9843 					    uint8_t vdev_id)
9844 {
9845 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
9846 	struct cdp_vdev_stats *vdev_stats = NULL;
9847 	uint32_t tx_success;
9848 	struct dp_vdev *vdev =
9849 		(struct dp_vdev *)dp_get_vdev_from_soc_vdev_id_wifi3(soc,
9850 								     vdev_id);
9851 
9852 	if (!vdev) {
9853 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
9854 			  FL("Invalid vdev id %d"), vdev_id);
9855 		return 0;
9856 	}
9857 
9858 	vdev_stats = qdf_mem_malloc_atomic(sizeof(struct cdp_vdev_stats));
9859 	if (!vdev_stats) {
9860 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
9861 			  "DP alloc failure - unable to get alloc vdev stats");
9862 		return 0;
9863 	}
9864 
9865 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
9866 	dp_aggregate_vdev_stats(vdev, vdev_stats);
9867 	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
9868 
9869 	tx_success = vdev_stats->tx.tx_success.num;
9870 	qdf_mem_free(vdev_stats);
9871 
9872 	return tx_success;
9873 }
9874 
9875 #ifdef WLAN_SUPPORT_DATA_STALL
9876 /**
9877  * dp_register_data_stall_detect_cb() - register data stall callback
9878  * @soc_hdl: Datapath soc handle
9879  * @pdev_id: id of data path pdev handle
9880  * @data_stall_detect_callback: data stall callback function
9881  *
9882  * Return: QDF_STATUS Enumeration
9883  */
9884 static
9885 QDF_STATUS dp_register_data_stall_detect_cb(
9886 			struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
9887 			data_stall_detect_cb data_stall_detect_callback)
9888 {
9889 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
9890 	struct dp_pdev *pdev;
9891 
9892 	pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
9893 	if (!pdev) {
9894 		dp_err("pdev NULL!");
9895 		return QDF_STATUS_E_INVAL;
9896 	}
9897 
9898 	pdev->data_stall_detect_callback = data_stall_detect_callback;
9899 	return QDF_STATUS_SUCCESS;
9900 }
9901 
9902 /**
9903  * dp_deregister_data_stall_detect_cb() - de-register data stall callback
9904  * @soc_hdl: Datapath soc handle
9905  * @pdev_id: id of data path pdev handle
9906  * @data_stall_detect_callback: data stall callback function
9907  *
9908  * Return: QDF_STATUS Enumeration
9909  */
9910 static
9911 QDF_STATUS dp_deregister_data_stall_detect_cb(
9912 			struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
9913 			data_stall_detect_cb data_stall_detect_callback)
9914 {
9915 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
9916 	struct dp_pdev *pdev;
9917 
9918 	pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
9919 	if (!pdev) {
9920 		dp_err("pdev NULL!");
9921 		return QDF_STATUS_E_INVAL;
9922 	}
9923 
9924 	pdev->data_stall_detect_callback = NULL;
9925 	return QDF_STATUS_SUCCESS;
9926 }
9927 
9928 /**
9929  * dp_txrx_post_data_stall_event() - post data stall event
9930  * @soc_hdl: Datapath soc handle
9931  * @indicator: Module triggering data stall
9932  * @data_stall_type: data stall event type
9933  * @pdev_id: pdev id
9934  * @vdev_id_bitmap: vdev id bitmap
9935  * @recovery_type: data stall recovery type
9936  *
9937  * Return: None
9938  */
9939 static void
9940 dp_txrx_post_data_stall_event(struct cdp_soc_t *soc_hdl,
9941 			      enum data_stall_log_event_indicator indicator,
9942 			      enum data_stall_log_event_type data_stall_type,
9943 			      uint32_t pdev_id, uint32_t vdev_id_bitmap,
9944 			      enum data_stall_log_recovery_type recovery_type)
9945 {
9946 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
9947 	struct data_stall_event_info data_stall_info;
9948 	struct dp_pdev *pdev;
9949 
9950 	pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
9951 	if (!pdev) {
9952 		dp_err("pdev NULL!");
9953 		return;
9954 	}
9955 
9956 	if (!pdev->data_stall_detect_callback) {
9957 		dp_err("data stall cb not registered!");
9958 		return;
9959 	}
9960 
9961 	dp_info("data_stall_type: %x pdev_id: %d",
9962 		data_stall_type, pdev_id);
9963 
9964 	data_stall_info.indicator = indicator;
9965 	data_stall_info.data_stall_type = data_stall_type;
9966 	data_stall_info.vdev_id_bitmap = vdev_id_bitmap;
9967 	data_stall_info.pdev_id = pdev_id;
9968 	data_stall_info.recovery_type = recovery_type;
9969 
9970 	pdev->data_stall_detect_callback(&data_stall_info);
9971 }
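
/*
 * Example (illustrative sketch): typical data-stall flow. The host layer
 * registers a callback once, and a detector module later posts events
 * into it; "my_stall_cb" is a hypothetical function of type
 * data_stall_detect_cb.
 *
 *	dp_register_data_stall_detect_cb(soc_hdl, pdev_id, my_stall_cb);
 *	...
 *	dp_txrx_post_data_stall_event(soc_hdl, indicator, stall_type,
 *				      pdev_id, vdev_id_bitmap,
 *				      recovery_type);
 */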
9972 #endif /* WLAN_SUPPORT_DATA_STALL */
9973 
9974 #ifdef DP_PEER_EXTENDED_API
9975 static struct cdp_misc_ops dp_ops_misc = {
9976 #ifdef FEATURE_WLAN_TDLS
9977 	.tx_non_std = dp_tx_non_std,
9978 #endif /* FEATURE_WLAN_TDLS */
9979 	.get_opmode = dp_get_opmode,
9980 #ifdef FEATURE_RUNTIME_PM
9981 	.runtime_suspend = dp_runtime_suspend,
9982 	.runtime_resume = dp_runtime_resume,
9983 #endif /* FEATURE_RUNTIME_PM */
9984 	.pkt_log_init = dp_pkt_log_init,
9985 	.pkt_log_con_service = dp_pkt_log_con_service,
9986 	.get_num_rx_contexts = dp_get_num_rx_contexts,
9987 	.get_tx_ack_stats = dp_tx_get_success_ack_stats,
9988 #ifdef WLAN_SUPPORT_DATA_STALL
9989 	.txrx_data_stall_cb_register = dp_register_data_stall_detect_cb,
9990 	.txrx_data_stall_cb_deregister = dp_deregister_data_stall_detect_cb,
9991 	.txrx_post_data_stall_event = dp_txrx_post_data_stall_event,
9992 #endif
9993 };
9994 #endif
9995 
9996 #ifdef DP_FLOW_CTL
9997 static struct cdp_flowctl_ops dp_ops_flowctl = {
9998 	/* WIFI 3.0 DP implement as required. */
9999 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
10000 	.flow_pool_map_handler = dp_tx_flow_pool_map,
10001 	.flow_pool_unmap_handler = dp_tx_flow_pool_unmap,
10002 	.register_pause_cb = dp_txrx_register_pause_cb,
10003 	.dump_flow_pool_info = dp_tx_dump_flow_pool_info,
10004 	.tx_desc_thresh_reached = dp_tx_desc_thresh_reached,
10005 #endif /* QCA_LL_TX_FLOW_CONTROL_V2 */
10006 };
10007 
10008 static struct cdp_lflowctl_ops dp_ops_l_flowctl = {
10009 	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
10010 };
10011 #endif
10012 
10013 #ifdef IPA_OFFLOAD
10014 static struct cdp_ipa_ops dp_ops_ipa = {
10015 	.ipa_get_resource = dp_ipa_get_resource,
10016 	.ipa_set_doorbell_paddr = dp_ipa_set_doorbell_paddr,
10017 	.ipa_op_response = dp_ipa_op_response,
10018 	.ipa_register_op_cb = dp_ipa_register_op_cb,
10019 	.ipa_get_stat = dp_ipa_get_stat,
10020 	.ipa_tx_data_frame = dp_tx_send_ipa_data_frame,
10021 	.ipa_enable_autonomy = dp_ipa_enable_autonomy,
10022 	.ipa_disable_autonomy = dp_ipa_disable_autonomy,
10023 	.ipa_setup = dp_ipa_setup,
10024 	.ipa_cleanup = dp_ipa_cleanup,
10025 	.ipa_setup_iface = dp_ipa_setup_iface,
10026 	.ipa_cleanup_iface = dp_ipa_cleanup_iface,
10027 	.ipa_enable_pipes = dp_ipa_enable_pipes,
10028 	.ipa_disable_pipes = dp_ipa_disable_pipes,
10029 	.ipa_set_perf_level = dp_ipa_set_perf_level,
10030 	.ipa_rx_intrabss_fwd = dp_ipa_rx_intrabss_fwd
10031 };
10032 #endif
10033 
10034 #ifdef DP_POWER_SAVE
10035 static QDF_STATUS dp_bus_suspend(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
10036 {
10037 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
10038 	struct cdp_pdev *pdev = (struct cdp_pdev *)
10039 		dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
10040 	int timeout = SUSPEND_DRAIN_WAIT;
10041 	int drain_wait_delay = 50; /* 50 ms */
10042 
10043 	if (qdf_unlikely(!pdev)) {
10044 		dp_err("pdev is NULL");
10045 		return QDF_STATUS_E_INVAL;
10046 	}
10047 
10048 	/* Abort if there are any pending TX packets */
10049 	while (dp_get_tx_pending(pdev) > 0) {
10050 		qdf_sleep(drain_wait_delay);
10051 		if (timeout <= 0) {
10052 			dp_err("TX frames are pending, abort suspend");
10053 			return QDF_STATUS_E_TIMEOUT;
10054 		}
10055 		timeout = timeout - drain_wait_delay;
10056 	}
10057 
10058 	if (soc->intr_mode == DP_INTR_POLL)
10059 		qdf_timer_stop(&soc->int_timer);
10060 
10061 	return QDF_STATUS_SUCCESS;
10062 }
10063 
10064 static QDF_STATUS dp_bus_resume(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
10065 {
10066 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
10067 
10068 	if (soc->intr_mode == DP_INTR_POLL)
10069 		qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
10070 
10071 	return QDF_STATUS_SUCCESS;
10072 }
10073 
10074 static struct cdp_bus_ops dp_ops_bus = {
10075 	.bus_suspend = dp_bus_suspend,
10076 	.bus_resume = dp_bus_resume
10077 };
10078 #endif
10079 
10080 #ifdef DP_FLOW_CTL
10081 static struct cdp_throttle_ops dp_ops_throttle = {
10082 	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
10083 };
10084 
10085 static struct cdp_cfg_ops dp_ops_cfg = {
10086 	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
10087 };
10088 #endif
10089 
10090 #ifdef DP_PEER_EXTENDED_API
10091 static struct cdp_ocb_ops dp_ops_ocb = {
10092 	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
10093 };
10094 
10095 static struct cdp_mob_stats_ops dp_ops_mob_stats = {
10096 	.clear_stats = dp_txrx_clear_dump_stats,
10097 };
10098 
10099 /*
10100  * dp_peer_get_ref_find_by_addr - find peer by mac address, incrementing its ref count
10101  * @dev: physical device instance
10102  * @peer_mac_addr: peer mac address
10103  * @debug_id: to track enum peer access
10104  *
10105  * Return: peer instance pointer
10106  */
10107 static inline void *
10108 dp_peer_get_ref_find_by_addr(struct cdp_pdev *dev, uint8_t *peer_mac_addr,
10109 			     enum peer_debug_id_type debug_id)
10110 {
10111 	struct dp_pdev *pdev = (struct dp_pdev *)dev;
10112 	struct dp_peer *peer;
10113 
10114 	peer = dp_peer_find_hash_find(pdev->soc, peer_mac_addr, 0, DP_VDEV_ALL);
10115 
10116 	if (!peer)
10117 		return NULL;
10118 
10119 	dp_info_rl("peer %pK mac: %pM", peer, peer->mac_addr.raw);
10120 
10121 	return peer;
10122 }
10123 
10124 /*
10125  * dp_peer_release_ref - release peer ref count
10126  * @peer: peer handle
10127  * @debug_id: to track enum peer access
10128  *
10129  * Return: None
10130  */
10131 static inline
10132 void dp_peer_release_ref(void *peer, enum peer_debug_id_type debug_id)
10133 {
10134 	dp_peer_unref_delete(peer);
10135 }
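
/*
 * Example (illustrative sketch): every successful
 * dp_peer_get_ref_find_by_addr() must be balanced by a matching
 * dp_peer_release_ref() once the caller is done with the peer.
 *
 *	void *peer = dp_peer_get_ref_find_by_addr(dev, mac, debug_id);
 *
 *	if (peer) {
 *		... use peer ...
 *		dp_peer_release_ref(peer, debug_id);
 *	}
 */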
10136 
10137 static struct cdp_peer_ops dp_ops_peer = {
10138 	.register_peer = dp_register_peer,
10139 	.clear_peer = dp_clear_peer,
10140 	.find_peer_by_addr = dp_find_peer_by_addr,
10141 	.find_peer_by_addr_and_vdev = dp_find_peer_by_addr_and_vdev,
10142 	.peer_get_ref_by_addr = dp_peer_get_ref_find_by_addr,
10143 	.peer_release_ref = dp_peer_release_ref,
10144 	.peer_state_update = dp_peer_state_update,
10145 	.get_vdevid = dp_get_vdevid,
10146 	.get_vdev_by_peer_addr = dp_get_vdev_by_peer_addr,
10147 	.peer_get_peer_mac_addr = dp_peer_get_peer_mac_addr,
10148 	.get_vdev_for_peer = dp_get_vdev_for_peer,
10149 	.get_peer_state = dp_get_peer_state,
10150 };
10151 #endif
10152 
10153 static struct cdp_ops dp_txrx_ops = {
10154 	.cmn_drv_ops = &dp_ops_cmn,
10155 	.ctrl_ops = &dp_ops_ctrl,
10156 	.me_ops = &dp_ops_me,
10157 	.mon_ops = &dp_ops_mon,
10158 	.host_stats_ops = &dp_ops_host_stats,
10159 	.wds_ops = &dp_ops_wds,
10160 	.raw_ops = &dp_ops_raw,
10161 #ifdef PEER_FLOW_CONTROL
10162 	.pflow_ops = &dp_ops_pflow,
10163 #endif /* PEER_FLOW_CONTROL */
10164 #ifdef DP_PEER_EXTENDED_API
10165 	.misc_ops = &dp_ops_misc,
10166 	.ocb_ops = &dp_ops_ocb,
10167 	.peer_ops = &dp_ops_peer,
10168 	.mob_stats_ops = &dp_ops_mob_stats,
10169 #endif
10170 #ifdef DP_FLOW_CTL
10171 	.cfg_ops = &dp_ops_cfg,
10172 	.flowctl_ops = &dp_ops_flowctl,
10173 	.l_flowctl_ops = &dp_ops_l_flowctl,
10174 	.throttle_ops = &dp_ops_throttle,
10175 #endif
10176 #ifdef IPA_OFFLOAD
10177 	.ipa_ops = &dp_ops_ipa,
10178 #endif
10179 #ifdef DP_POWER_SAVE
10180 	.bus_ops = &dp_ops_bus,
10181 #endif
10182 };
10183 
10184 /*
10185  * dp_soc_set_txrx_ring_map() - Set the default tx to cpu ring map
10186  * @soc: DP soc handle
10187  *
10188  * Return: Void
10189  */
10190 void dp_soc_set_txrx_ring_map(struct dp_soc *soc)
10191 {
10192 	uint32_t i;
10193 	for (i = 0; i < WLAN_CFG_INT_NUM_CONTEXTS; i++) {
10194 		soc->tx_ring_map[i] = dp_cpu_ring_map[DP_NSS_DEFAULT_MAP][i];
10195 	}
10196 }
10197 
10198 #if defined(QCA_WIFI_QCA8074) || defined(QCA_WIFI_QCA6018)
10199 
10200 #ifndef QCA_MEM_ATTACH_ON_WIFI3
10201 
10202 /**
10203  * dp_soc_attach_wifi3() - Attach txrx SOC
10204  * @ctrl_psoc: Opaque SOC handle from control plane
10205  * @hif_handle: Opaque HIF handle
10206  * @htc_handle: Opaque HTC handle
10207  * @qdf_osdev: QDF device
10208  * @ol_ops: Offload Operations
10209  * @device_id: Device ID
10210  *
10211  * Return: DP SOC handle on success, NULL on failure
10212  */
10213 struct cdp_soc_t *
10214 dp_soc_attach_wifi3(struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
10215 		    struct hif_opaque_softc *hif_handle,
10216 		    HTC_HANDLE htc_handle, qdf_device_t qdf_osdev,
10217 		    struct ol_if_ops *ol_ops, uint16_t device_id)
10218 {
10219 	struct dp_soc *dp_soc =  NULL;
10220 
10221 	dp_soc = dp_soc_attach(ctrl_psoc, htc_handle, qdf_osdev,
10222 			       ol_ops, device_id);
10223 	if (!dp_soc)
10224 		return NULL;
10225 
10226 	if (!dp_soc_init(dp_soc, htc_handle, hif_handle))
10227 		return NULL;
10228 
10229 	return dp_soc_to_cdp_soc_t(dp_soc);
10230 }
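
/*
 * Example (illustrative sketch): with QCA_MEM_ATTACH_ON_WIFI3 undefined,
 * the wrapper above runs both phases back to back; the sequence is
 * equivalent to:
 *
 *	dp_soc = dp_soc_attach(ctrl_psoc, htc_handle, qdf_osdev,
 *			       ol_ops, device_id);
 *	if (dp_soc)
 *		dp_soc_init(dp_soc, htc_handle, hif_handle);
 */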
10231 #else
10232 
10233 /**
10234  * dp_soc_attach_wifi3() - Attach txrx SOC
10235  * @ctrl_psoc: Opaque SOC handle from control plane
10236  * @hif_handle: Opaque HIF handle
10237  * @htc_handle: Opaque HTC handle
10238  * @qdf_osdev: QDF device
10239  * @ol_ops: Offload Operations
10240  * @device_id: Device ID
10241  *
10242  * Return: DP SOC handle on success, NULL on failure
10243  */
10244 struct cdp_soc_t *
10245 dp_soc_attach_wifi3(struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
10246 		    struct hif_opaque_softc *hif_handle,
10247 		    HTC_HANDLE htc_handle, qdf_device_t qdf_osdev,
10248 		    struct ol_if_ops *ol_ops, uint16_t device_id)
10249 {
10250 	struct dp_soc *dp_soc = NULL;
10251 
10252 	dp_soc = dp_soc_attach(ctrl_psoc, htc_handle, qdf_osdev,
10253 			       ol_ops, device_id);
10254 	return dp_soc_to_cdp_soc_t(dp_soc);
10255 }
10256 
10257 #endif
10258 
10259 /**
10260  * dp_soc_attach() - Attach txrx SOC
10261  * @ctrl_psoc: Opaque SOC handle from control plane
10262  * @htc_handle: Opaque HTC handle
10263  * @qdf_osdev: QDF device
10264  * @ol_ops: Offload Operations
10265  * @device_id: Device ID
10266  *
10267  * Return: DP SOC handle on success, NULL on failure
10268  */
10269 static struct dp_soc *
10270 dp_soc_attach(struct cdp_ctrl_objmgr_psoc *ctrl_psoc, HTC_HANDLE htc_handle,
10271 	      qdf_device_t qdf_osdev,
10272 	      struct ol_if_ops *ol_ops, uint16_t device_id)
10273 {
10274 	int int_ctx;
10275 	struct dp_soc *soc =  NULL;
10276 	struct htt_soc *htt_soc;
10277 
10278 	soc = qdf_mem_malloc(sizeof(*soc));
10279 
10280 	if (!soc) {
10281 		dp_err("DP SOC memory allocation failed");
10282 		goto fail0;
10283 	}
10284 
10285 	int_ctx = 0;
10286 	soc->device_id = device_id;
10287 	soc->cdp_soc.ops = &dp_txrx_ops;
10288 	soc->cdp_soc.ol_ops = ol_ops;
10289 	soc->ctrl_psoc = ctrl_psoc;
10290 	soc->osdev = qdf_osdev;
10291 	soc->num_hw_dscp_tid_map = HAL_MAX_HW_DSCP_TID_MAPS;
10292 
10293 	wlan_set_srng_cfg(&soc->wlan_srng_cfg);
10294 	qdf_mem_zero(&soc->vdev_id_map, sizeof(soc->vdev_id_map));
10295 
10296 	soc->wlan_cfg_ctx = wlan_cfg_soc_attach(soc->ctrl_psoc);
10297 	if (!soc->wlan_cfg_ctx) {
10298 		dp_err("wlan_cfg_ctx failed");
10299 		goto fail1;
10300 	}
10301 
10302 	dp_soc_set_interrupt_mode(soc);
10303 	htt_soc = htt_soc_attach(soc, htc_handle);
10304 
10305 	if (!htt_soc)
10306 		goto fail1;
10307 
10308 	soc->htt_handle = htt_soc;
10309 
10310 	if (htt_soc_htc_prealloc(htt_soc) != QDF_STATUS_SUCCESS)
10311 		goto fail2;
10312 
10313 	return soc;
10314 fail2:
10315 	htt_soc_detach(htt_soc);
10316 fail1:
10317 	qdf_mem_free(soc);
10318 fail0:
10319 	return NULL;
10320 }
10321 
10322 /**
10323  * dp_soc_init() - Initialize txrx SOC
10324  * @dp_soc: Opaque DP SOC handle
10325  * @dpsoc: Opaque DP SOC handle
10326  * @hif_handle: Opaque HIF handle
10327  *
10328  * Return: DP SOC handle on success, NULL on failure
10329  */
10330 void *dp_soc_init(struct dp_soc *dpsoc, HTC_HANDLE htc_handle,
10331 		  struct hif_opaque_softc *hif_handle)
10332 {
10333 	int target_type;
10334 	struct dp_soc *soc = (struct dp_soc *)dpsoc;
10335 	struct htt_soc *htt_soc = soc->htt_handle;
10336 
10337 	htt_set_htc_handle(htt_soc, htc_handle);
10338 	soc->hif_handle = hif_handle;
10339 
10340 	soc->hal_soc = hif_get_hal_handle(soc->hif_handle);
10341 	if (!soc->hal_soc)
10342 		return NULL;
10343 
10344 	htt_soc_initialize(soc->htt_handle, soc->ctrl_psoc,
10345 			   htt_get_htc_handle(htt_soc),
10346 			   soc->hal_soc, soc->osdev);
10347 	target_type = hal_get_target_type(soc->hal_soc);
10348 	switch (target_type) {
10349 	case TARGET_TYPE_QCA6290:
10350 		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
10351 					       REO_DST_RING_SIZE_QCA6290);
10352 		soc->ast_override_support = 1;
10353 		soc->da_war_enabled = false;
10354 		break;
10355 #if defined(QCA_WIFI_QCA6390) || defined(QCA_WIFI_QCA6490)
10356 	case TARGET_TYPE_QCA6390:
10357 	case TARGET_TYPE_QCA6490:
10358 		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
10359 					       REO_DST_RING_SIZE_QCA6290);
10360 		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, true);
10361 		soc->ast_override_support = 1;
10362 		if (soc->cdp_soc.ol_ops->get_con_mode &&
10363 		    soc->cdp_soc.ol_ops->get_con_mode() ==
10364 		    QDF_GLOBAL_MONITOR_MODE) {
10365 			int int_ctx;
10366 
10367 			for (int_ctx = 0; int_ctx < WLAN_CFG_INT_NUM_CONTEXTS; int_ctx++) {
10368 				soc->wlan_cfg_ctx->int_rx_ring_mask[int_ctx] = 0;
10369 				soc->wlan_cfg_ctx->int_rxdma2host_ring_mask[int_ctx] = 0;
10370 			}
10371 		}
10372 		soc->wlan_cfg_ctx->rxdma1_enable = 0;
10373 		break;
10374 #endif /* QCA_WIFI_QCA6390 || QCA_WIFI_QCA6490 */
10375 
10376 	case TARGET_TYPE_QCA8074:
10377 		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
10378 					       REO_DST_RING_SIZE_QCA8074);
10379 		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, true);
10380 		soc->da_war_enabled = true;
10381 		soc->is_rx_fse_full_cache_invalidate_war_enabled = true;
10382 		break;
10383 	case TARGET_TYPE_QCA8074V2:
10384 	case TARGET_TYPE_QCA6018:
10385 		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
10386 					       REO_DST_RING_SIZE_QCA8074);
10387 		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, false);
10388 		soc->hw_nac_monitor_support = 1;
10389 		soc->ast_override_support = 1;
10390 		soc->per_tid_basize_max_tid = 8;
10391 		soc->num_hw_dscp_tid_map = HAL_MAX_HW_DSCP_TID_V2_MAPS;
10392 		soc->da_war_enabled = false;
10393 		soc->is_rx_fse_full_cache_invalidate_war_enabled = true;
10394 		break;
10395 	case TARGET_TYPE_QCN9000:
10396 		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
10397 					       REO_DST_RING_SIZE_QCN9000);
10398 		soc->ast_override_support = 1;
10399 		soc->da_war_enabled = false;
10400 		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, false);
10401 		soc->hw_nac_monitor_support = 1;
10402 		soc->per_tid_basize_max_tid = 8;
10403 		soc->num_hw_dscp_tid_map = HAL_MAX_HW_DSCP_TID_V2_MAPS;
10404 		break;
10405 	default:
10406 		qdf_print("%s: Unknown tgt type %d\n", __func__, target_type);
10407 		qdf_assert_always(0);
10408 		break;
10409 	}
10410 
10411 	wlan_cfg_set_rx_hash(soc->wlan_cfg_ctx,
10412 			     cfg_get(soc->ctrl_psoc, CFG_DP_RX_HASH));
10413 	soc->cce_disable = false;
10414 
10415 	qdf_atomic_init(&soc->num_tx_outstanding);
10416 	soc->num_tx_allowed =
10417 		wlan_cfg_get_dp_soc_tx_device_limit(soc->wlan_cfg_ctx);
10418 
10419 	if (soc->cdp_soc.ol_ops->get_dp_cfg_param) {
10420 		int ret = soc->cdp_soc.ol_ops->get_dp_cfg_param(soc->ctrl_psoc,
10421 				CDP_CFG_MAX_PEER_ID);
10422 
10423 		if (ret != -EINVAL) {
10424 			wlan_cfg_set_max_peer_id(soc->wlan_cfg_ctx, ret);
10425 		}
10426 
10427 		ret = soc->cdp_soc.ol_ops->get_dp_cfg_param(soc->ctrl_psoc,
10428 				CDP_CFG_CCE_DISABLE);
10429 		if (ret == 1)
10430 			soc->cce_disable = true;
10431 	}
10432 
10433 	qdf_spinlock_create(&soc->peer_ref_mutex);
10434 	qdf_spinlock_create(&soc->ast_lock);
10435 
10436 	qdf_spinlock_create(&soc->reo_desc_freelist_lock);
10437 	qdf_list_create(&soc->reo_desc_freelist, REO_DESC_FREELIST_SIZE);
10438 
10439 	/* fill the tx/rx cpu ring map*/
10440 	dp_soc_set_txrx_ring_map(soc);
10441 
10442 	qdf_spinlock_create(&soc->htt_stats.lock);
10443 	/* initialize work queue for stats processing */
10444 	qdf_create_work(0, &soc->htt_stats.work, htt_t2h_stats_handler, soc);
10445 
10446 	return soc;
10447 
10448 }
10449 
10450 /**
10451  * dp_soc_init_wifi3() - Initialize txrx SOC
10452  * @dp_soc: Opaque DP SOC handle
10453  * @soc: Opaque DP SOC handle
10454  * @hif_handle: Opaque HIF handle
10455  * @htc_handle: Opaque HTC handle
10456  * @qdf_osdev: QDF device (Unused)
10457  * @ol_ops: Offload Operations (Unused)
10458  * @device_id: Device ID (Unused)
10459  *
10460  * Return: DP SOC handle on success, NULL on failure
10461  */
10462 void *dp_soc_init_wifi3(struct cdp_soc_t *soc,
10463 			struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
10464 			struct hif_opaque_softc *hif_handle,
10465 			HTC_HANDLE htc_handle, qdf_device_t qdf_osdev,
10466 			struct ol_if_ops *ol_ops, uint16_t device_id)
10467 {
10468 	return dp_soc_init((struct dp_soc *)soc, htc_handle, hif_handle);
10469 }
10470 
10471 #endif
10472 
10473 /*
10474  * dp_get_pdev_for_mac_id() -  Return pdev for mac_id
10475  *
10476  * @soc: handle to DP soc
10477  * @mac_id: MAC id
10478  *
10479  * Return: pdev corresponding to the MAC id
10480  */
10481 void *dp_get_pdev_for_mac_id(struct dp_soc *soc, uint32_t mac_id)
10482 {
10483 	if (wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx))
10484 		return soc->pdev_list[mac_id];
10485 
10486 	/* Typically for MCL, as there is only 1 PDEV */
10487 	return soc->pdev_list[0];
10488 }
10489 
10490 /*
10491  * dp_is_hw_dbs_enable() - Check DBS capability and adjust MAC rings
10492  * @soc:		DP SoC context
10493  * @max_mac_rings:	No of MAC rings; reduced to 1 if DBS is disabled
10494  *
10495  * Return: None
10496  */
10497 static
10498 void dp_is_hw_dbs_enable(struct dp_soc *soc,
10499 				int *max_mac_rings)
10500 {
10501 	bool dbs_enable = false;
10502 	if (soc->cdp_soc.ol_ops->is_hw_dbs_2x2_capable)
10503 		dbs_enable = soc->cdp_soc.ol_ops->
10504 		is_hw_dbs_2x2_capable((void *)soc->ctrl_psoc);
10505 
10506 	*max_mac_rings = (dbs_enable) ? (*max_mac_rings) : 1;
10507 }
10508 
10509 /*
10510  * dp_is_soc_reinit() - Check if soc reinit is true
10511  * @soc: DP SoC context
10512  *
10513  * Return: true or false
10514  */
10515 bool dp_is_soc_reinit(struct dp_soc *soc)
10516 {
10517 	return soc->dp_soc_reinit;
10518 }
10519 
10520 /*
10521  * dp_set_pktlog_wifi3() - enable/disable pktlog for the given WDI event
10522  * @pdev: Datapath PDEV handle
10523  * @event: which event's notifications are being subscribed to
10524  * @enable: WDI event subscribe or not. (True or False)
10525  *
10526  * Return: 0 on success
10527  */
10528 #ifdef WDI_EVENT_ENABLE
10529 int dp_set_pktlog_wifi3(struct dp_pdev *pdev, uint32_t event,
10530 		bool enable)
10531 {
10532 	struct dp_soc *soc = NULL;
10533 	struct htt_rx_ring_tlv_filter htt_tlv_filter = {0};
10534 	int max_mac_rings = wlan_cfg_get_num_mac_rings
10535 					(pdev->wlan_cfg_ctx);
10536 	uint8_t mac_id = 0;
10537 
10538 	soc = pdev->soc;
10539 	dp_is_hw_dbs_enable(soc, &max_mac_rings);
10540 
10541 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
10542 			FL("Max_mac_rings %d "),
10543 			max_mac_rings);
10544 
10545 	if (enable) {
10546 		switch (event) {
10547 		case WDI_EVENT_RX_DESC:
10548 			if (pdev->monitor_vdev) {
10549 				/* Nothing needs to be done if monitor mode is
10550 				 * enabled
10551 				 */
10552 				return 0;
10553 			}
10554 			if (pdev->rx_pktlog_mode != DP_RX_PKTLOG_FULL) {
10555 				pdev->rx_pktlog_mode = DP_RX_PKTLOG_FULL;
10556 				htt_tlv_filter.mpdu_start = 1;
10557 				htt_tlv_filter.msdu_start = 1;
10558 				htt_tlv_filter.msdu_end = 1;
10559 				htt_tlv_filter.mpdu_end = 1;
10560 				htt_tlv_filter.packet_header = 1;
10561 				htt_tlv_filter.attention = 1;
10562 				htt_tlv_filter.ppdu_start = 1;
10563 				htt_tlv_filter.ppdu_end = 1;
10564 				htt_tlv_filter.ppdu_end_user_stats = 1;
10565 				htt_tlv_filter.ppdu_end_user_stats_ext = 1;
10566 				htt_tlv_filter.ppdu_end_status_done = 1;
10567 				htt_tlv_filter.enable_fp = 1;
10568 				htt_tlv_filter.fp_mgmt_filter = FILTER_MGMT_ALL;
10569 				htt_tlv_filter.fp_ctrl_filter = FILTER_CTRL_ALL;
10570 				htt_tlv_filter.fp_data_filter = FILTER_DATA_ALL;
10571 				htt_tlv_filter.mo_mgmt_filter = FILTER_MGMT_ALL;
10572 				htt_tlv_filter.mo_ctrl_filter = FILTER_CTRL_ALL;
10573 				htt_tlv_filter.mo_data_filter = FILTER_DATA_ALL;
10574 				htt_tlv_filter.offset_valid = false;
10575 
10576 				for (mac_id = 0; mac_id < max_mac_rings;
10577 								mac_id++) {
10578 					int mac_for_pdev =
10579 						dp_get_mac_id_for_pdev(mac_id,
10580 								pdev->pdev_id);
10581 
10582 					htt_h2t_rx_ring_cfg(soc->htt_handle,
10583 					 mac_for_pdev,
10584 					 pdev->rxdma_mon_status_ring[mac_id]
10585 					 .hal_srng,
10586 					 RXDMA_MONITOR_STATUS,
10587 					 RX_BUFFER_SIZE,
10588 					 &htt_tlv_filter);
10589 
10590 				}
10591 
10592 				if (soc->reap_timer_init)
10593 					qdf_timer_mod(&soc->mon_reap_timer,
10594 					DP_INTR_POLL_TIMER_MS);
10595 			}
10596 			break;
10597 
10598 		case WDI_EVENT_LITE_RX:
10599 			if (pdev->monitor_vdev) {
10600 				/* Nothing needs to be done if monitor mode is
10601 				 * enabled
10602 				 */
10603 				return 0;
10604 			}
10605 
10606 			if (pdev->rx_pktlog_mode != DP_RX_PKTLOG_LITE) {
10607 				pdev->rx_pktlog_mode = DP_RX_PKTLOG_LITE;
10608 
10609 				htt_tlv_filter.ppdu_start = 1;
10610 				htt_tlv_filter.ppdu_end = 1;
10611 				htt_tlv_filter.ppdu_end_user_stats = 1;
10612 				htt_tlv_filter.ppdu_end_user_stats_ext = 1;
10613 				htt_tlv_filter.ppdu_end_status_done = 1;
10614 				htt_tlv_filter.mpdu_start = 1;
10615 				htt_tlv_filter.enable_fp = 1;
10616 				htt_tlv_filter.fp_mgmt_filter = FILTER_MGMT_ALL;
10617 				htt_tlv_filter.fp_ctrl_filter = FILTER_CTRL_ALL;
10618 				htt_tlv_filter.fp_data_filter = FILTER_DATA_ALL;
10619 				htt_tlv_filter.mo_mgmt_filter = FILTER_MGMT_ALL;
10620 				htt_tlv_filter.mo_ctrl_filter = FILTER_CTRL_ALL;
10621 				htt_tlv_filter.mo_data_filter = FILTER_DATA_ALL;
10622 				htt_tlv_filter.offset_valid = false;
10623 
10624 				for (mac_id = 0; mac_id < max_mac_rings;
10625 								mac_id++) {
10626 					int mac_for_pdev =
10627 						dp_get_mac_id_for_pdev(mac_id,
10628 								pdev->pdev_id);
10629 
10630 					htt_h2t_rx_ring_cfg(soc->htt_handle,
10631 					mac_for_pdev,
10632 					pdev->rxdma_mon_status_ring[mac_id]
10633 					.hal_srng,
10634 					RXDMA_MONITOR_STATUS,
10635 					RX_BUFFER_SIZE_PKTLOG_LITE,
10636 					&htt_tlv_filter);
10637 				}
10638 
10639 				if (soc->reap_timer_init)
10640 					qdf_timer_mod(&soc->mon_reap_timer,
10641 					DP_INTR_POLL_TIMER_MS);
10642 			}
10643 			break;
10644 
10645 		case WDI_EVENT_LITE_T2H:
10646 			if (pdev->monitor_vdev) {
10647 				/* Nothing needs to be done if monitor mode is
10648 				 * enabled
10649 				 */
10650 				return 0;
10651 			}
10652 
10653 			for (mac_id = 0; mac_id < max_mac_rings; mac_id++) {
10654 				int mac_for_pdev = dp_get_mac_id_for_pdev(
10655 							mac_id, pdev->pdev_id);
10656 
10657 				pdev->pktlog_ppdu_stats = true;
10658 				dp_h2t_cfg_stats_msg_send(pdev,
10659 					DP_PPDU_TXLITE_STATS_BITMASK_CFG,
10660 					mac_for_pdev);
10661 			}
10662 			break;
10663 
10664 		default:
10665 			/* Nothing needs to be done for other pktlog types */
10666 			break;
10667 		}
10668 	} else {
10669 		switch (event) {
10670 		case WDI_EVENT_RX_DESC:
10671 		case WDI_EVENT_LITE_RX:
10672 			if (pdev->monitor_vdev) {
10673 				/* Nothing needs to be done if monitor mode is
10674 				 * enabled
10675 				 */
10676 				return 0;
10677 			}
10678 			if (pdev->rx_pktlog_mode != DP_RX_PKTLOG_DISABLED) {
10679 				pdev->rx_pktlog_mode = DP_RX_PKTLOG_DISABLED;
10680 
10681 				for (mac_id = 0; mac_id < max_mac_rings;
10682 								mac_id++) {
10683 					int mac_for_pdev =
10684 						dp_get_mac_id_for_pdev(mac_id,
10685 								pdev->pdev_id);
10686 
10687 					htt_h2t_rx_ring_cfg(soc->htt_handle,
10688 					  mac_for_pdev,
10689 					  pdev->rxdma_mon_status_ring[mac_id]
10690 					  .hal_srng,
10691 					  RXDMA_MONITOR_STATUS,
10692 					  RX_BUFFER_SIZE,
10693 					  &htt_tlv_filter);
10694 				}
10695 
10696 				if (soc->reap_timer_init)
10697 					qdf_timer_stop(&soc->mon_reap_timer);
10698 			}
10699 			break;
10700 		case WDI_EVENT_LITE_T2H:
10701 			if (pdev->monitor_vdev) {
10702 				/* Nothing needs to be done if monitor mode is
10703 				 * enabled
10704 				 */
10705 				return 0;
10706 			}
10707 			/* Pass value 0 to disable
10708 			 * HTT_H2T_MSG_TYPE_PPDU_STATS_CFG in the FW. Use the
10709 			 * proper macros once they are defined in the htt header.
10710 			 */
10711 			for (mac_id = 0; mac_id < max_mac_rings; mac_id++) {
10712 				int mac_for_pdev =
10713 						dp_get_mac_id_for_pdev(mac_id,
10714 								pdev->pdev_id);
10715 
10716 				pdev->pktlog_ppdu_stats = false;
10717 				if (!pdev->enhanced_stats_en && !pdev->tx_sniffer_enable && !pdev->mcopy_mode) {
10718 					dp_h2t_cfg_stats_msg_send(pdev, 0,
10719 								mac_for_pdev);
10720 				} else if (pdev->tx_sniffer_enable || pdev->mcopy_mode) {
10721 					dp_h2t_cfg_stats_msg_send(pdev, DP_PPDU_STATS_CFG_SNIFFER,
10722 								mac_for_pdev);
10723 				} else if (pdev->enhanced_stats_en) {
10724 					dp_h2t_cfg_stats_msg_send(pdev, DP_PPDU_STATS_CFG_ENH_STATS,
10725 								mac_for_pdev);
10726 				}
10727 			}
10728 
10729 			break;
10730 		default:
10731 			/* Nothing needs to be done for other pktlog types */
10732 			break;
10733 		}
10734 	}
10735 	return 0;
10736 }
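
/*
 * Illustrative sketch (not part of the driver): a hypothetical wrapper
 * showing the expected call pattern for dp_set_pktlog_wifi3(). Passing
 * enable = true programs the lite RX TLV filter for WDI_EVENT_LITE_RX;
 * passing false later restores DP_RX_PKTLOG_DISABLED.
 */
static inline int dp_example_toggle_lite_rx_pktlog(struct dp_pdev *pdev,
						   bool enable)
{
	/* Returns 0 on success for both subscribe and unsubscribe */
	return dp_set_pktlog_wifi3(pdev, WDI_EVENT_LITE_RX, enable);
}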
10737 #endif
10738 
10739 /**
10740  * dp_bucket_index() - Return delay bucket index for a measured delay
10741  *
10742  * @delay: delay measured
10743  * @array: array of bucket boundaries for this delay type
10744  *
10745  * Return: index of the bucket that @delay falls into
10746  */
10747 static uint8_t dp_bucket_index(uint32_t delay, uint16_t *array)
10748 {
10749 	uint8_t i = CDP_DELAY_BUCKET_0;
10750 	/* stop before the last entry so array[i + 1] stays in bounds */
10751 	for (; i < CDP_DELAY_BUCKET_MAX - 1; i++) {
10752 		if (delay >= array[i] && delay <= array[i + 1])
10753 			return i;
10754 	}
10755 
10756 	return (CDP_DELAY_BUCKET_MAX - 1);
10757 }
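
/*
 * Worked example (illustrative only): with the fw-to-hw boundary array
 * {0, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 250, 500}, a measured
 * delay of 35 ms satisfies 30 <= 35 <= 40 at index 3 (counting from
 * CDP_DELAY_BUCKET_0), so bucket 3 is returned. A delay of 600 ms
 * exceeds every boundary and falls through to the final catch-all
 * bucket, CDP_DELAY_BUCKET_MAX - 1.
 */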
10758 
10759 /**
10760  * dp_fill_delay_buckets() - Fill delay statistics bucket for each
10761  *				type of delay
10762  *
10763  * @pdev: pdev handle
10764  * @delay: delay in ms
10765  * @tid: tid value
10766  * @mode: type of delay statistics (CDP_DELAY_STATS_*)
10767  * @ring_id: ring number
10768  * Return: pointer to cdp_delay_stats structure, NULL for invalid mode
10769  */
10770 static struct cdp_delay_stats *
10771 dp_fill_delay_buckets(struct dp_pdev *pdev, uint32_t delay,
10772 		      uint8_t tid, uint8_t mode, uint8_t ring_id)
10773 {
10774 	uint8_t delay_index = 0;
10775 	struct cdp_tid_tx_stats *tstats =
10776 		&pdev->stats.tid_stats.tid_tx_stats[ring_id][tid];
10777 	struct cdp_tid_rx_stats *rstats =
10778 		&pdev->stats.tid_stats.tid_rx_stats[ring_id][tid];
10779 	/*
10780 	 * cdp_fw_to_hw_delay_range
10781 	 * Fw to hw delay ranges in milliseconds
10782 	 */
10783 	uint16_t cdp_fw_to_hw_delay[CDP_DELAY_BUCKET_MAX] = {
10784 		0, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 250, 500};
10785 
10786 	/*
10787 	 * cdp_sw_enq_delay_range
10788 	 * Software enqueue delay ranges in milliseconds
10789 	 */
10790 	uint16_t cdp_sw_enq_delay[CDP_DELAY_BUCKET_MAX] = {
10791 		0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12};
10792 
10793 	/*
10794 	 * cdp_intfrm_delay_range
10795 	 * Interframe delay ranges in milliseconds
10796 	 */
10797 	uint16_t cdp_intfrm_delay[CDP_DELAY_BUCKET_MAX] = {
10798 		0, 5, 10, 15, 20, 25, 30, 35, 40, 45, 50, 55, 60};
10799 
10800 	/*
10801 	 * Update delay stats in proper bucket
10802 	 */
10803 	switch (mode) {
10804 	/* Software Enqueue delay ranges */
10805 	case CDP_DELAY_STATS_SW_ENQ:
10806 
10807 		delay_index = dp_bucket_index(delay, cdp_sw_enq_delay);
10808 		tstats->swq_delay.delay_bucket[delay_index]++;
10809 		return &tstats->swq_delay;
10810 
10811 	/* Tx Completion delay ranges */
10812 	case CDP_DELAY_STATS_FW_HW_TRANSMIT:
10813 
10814 		delay_index = dp_bucket_index(delay, cdp_fw_to_hw_delay);
10815 		tstats->hwtx_delay.delay_bucket[delay_index]++;
10816 		return &tstats->hwtx_delay;
10817 
10818 	/* Interframe tx delay ranges */
10819 	case CDP_DELAY_STATS_TX_INTERFRAME:
10820 
10821 		delay_index = dp_bucket_index(delay, cdp_intfrm_delay);
10822 		tstats->intfrm_delay.delay_bucket[delay_index]++;
10823 		return &tstats->intfrm_delay;
10824 
10825 	/* Interframe rx delay ranges */
10826 	case CDP_DELAY_STATS_RX_INTERFRAME:
10827 
10828 		delay_index = dp_bucket_index(delay, cdp_intfrm_delay);
10829 		rstats->intfrm_delay.delay_bucket[delay_index]++;
10830 		return &rstats->intfrm_delay;
10831 
10832 	/* Ring reap to indication to network stack */
10833 	case CDP_DELAY_STATS_REAP_STACK:
10834 
10835 		delay_index = dp_bucket_index(delay, cdp_intfrm_delay);
10836 		rstats->to_stack_delay.delay_bucket[delay_index]++;
10837 		return &rstats->to_stack_delay;
10838 	default:
10839 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
10840 			  "%s Incorrect delay mode: %d", __func__, mode);
10841 	}
10842 
10843 	return NULL;
10844 }
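
/*
 * Illustrative sketch (not part of the driver): a hypothetical helper
 * showing how the per-TID delay structures filled above can be read
 * back. Only fields this file already touches (delay_bucket[] and
 * min/avg/max) are used; the casts and trace format are assumptions
 * about the field widths.
 */
static void dp_example_print_swq_delay(struct dp_pdev *pdev, uint8_t tid,
				       uint8_t ring_id)
{
	struct cdp_tid_tx_stats *tstats =
		&pdev->stats.tid_stats.tid_tx_stats[ring_id][tid];
	struct cdp_delay_stats *dstats = &tstats->swq_delay;
	uint8_t i;

	for (i = 0; i < CDP_DELAY_BUCKET_MAX; i++)
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			  "swq delay bucket[%d]: %u", i,
			  (uint32_t)dstats->delay_bucket[i]);

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
		  "swq delay min/avg/max: %u/%u/%u ms",
		  (uint32_t)dstats->min_delay,
		  (uint32_t)dstats->avg_delay,
		  (uint32_t)dstats->max_delay);
}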
10845 
10846 /**
10847  * dp_update_delay_stats() - Update delay statistics in structure
10848  *				and fill min, max and avg delay
10849  *
10850  * @pdev: pdev handle
10851  * @delay: delay in ms
10852  * @tid: tid value
10853  * @mode: type of delay statistics (CDP_DELAY_STATS_*)
10854  * @ring_id: ring number
10855  * Return: none
10856  */
10857 void dp_update_delay_stats(struct dp_pdev *pdev, uint32_t delay,
10858 			   uint8_t tid, uint8_t mode, uint8_t ring_id)
10859 {
10860 	struct cdp_delay_stats *dstats = NULL;
10861 
10862 	/*
10863 	 * Delay ranges are different for different delay modes
10864 	 * Get the correct index to update delay bucket
10865 	 */
10866 	dstats = dp_fill_delay_buckets(pdev, delay, tid, mode, ring_id);
10867 	if (qdf_unlikely(!dstats))
10868 		return;
10869 
10870 	if (delay != 0) {
10871 		/*
10872 		 * Compute minimum, average and maximum
10873 		 * delay
10874 		 */
10875 		if (delay < dstats->min_delay)
10876 			dstats->min_delay = delay;
10877 
10878 		if (delay > dstats->max_delay)
10879 			dstats->max_delay = delay;
10880 
10881 		/*
10882 		 * Running average (EWMA with weight 1/2), not a true mean
10883 		 */
10884 		if (!dstats->avg_delay)
10885 			dstats->avg_delay = delay;
10886 		else
10887 			dstats->avg_delay = ((delay + dstats->avg_delay) / 2);
10888 	}
10889 }
10890
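/*
 * Illustrative sketch (not part of the driver): a hypothetical caller
 * measuring a software-enqueue delay and feeding it into the stats
 * machinery above. The enqueue_ts parameter and the use of
 * qdf_system_ticks()/qdf_system_ticks_to_msecs() as the clock source
 * are assumptions for the example.
 */
static inline void dp_example_track_swq_delay(struct dp_pdev *pdev,
					      uint8_t tid, uint8_t ring_id,
					      qdf_time_t enqueue_ts)
{
	uint32_t delay_ms =
		qdf_system_ticks_to_msecs(qdf_system_ticks() - enqueue_ts);

	/* Updates the bucket counters and min/avg/max for this TID/ring */
	dp_update_delay_stats(pdev, delay_ms, tid, CDP_DELAY_STATS_SW_ENQ,
			      ring_id);
}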