/*
 * Copyright (c) 2016-2020 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include <qdf_types.h>
#include <qdf_lock.h>
#include <qdf_net_types.h>
#include <qdf_lro.h>
#include <qdf_module.h>
#include <hal_hw_headers.h>
#include <hal_api.h>
#include <hif.h>
#include <htt.h>
#include <wdi_event.h>
#include <queue.h>
#include "dp_types.h"
#include "dp_internal.h"
#include "dp_tx.h"
#include "dp_tx_desc.h"
#include "dp_rx.h"
#include "dp_rx_mon.h"
#ifdef DP_RATETABLE_SUPPORT
#include "dp_ratetable.h"
#endif
#include <cdp_txrx_handle.h>
#include <wlan_cfg.h>
#include "cdp_txrx_cmn_struct.h"
#include "cdp_txrx_stats_struct.h"
#include "cdp_txrx_cmn_reg.h"
#include <qdf_util.h>
#include "dp_peer.h"
#include "htt_stats.h"
#include "dp_htt.h"
#include "htt_ppdu_stats.h"
#include "qdf_mem.h"   /* qdf_mem_malloc, qdf_mem_free */
#include "cfg_ucfg_api.h"
#ifdef QCA_LL_TX_FLOW_CONTROL_V2
#include "cdp_txrx_flow_ctrl_v2.h"
#else
static inline void
cdp_dump_flow_pool_info(struct cdp_soc_t *soc)
{
}
#endif
#include "dp_ipa.h"
#include "dp_cal_client_api.h"
#ifdef FEATURE_WDS
#include "dp_txrx_wds.h"
#endif
#ifdef ATH_SUPPORT_IQUE
#include "dp_txrx_me.h"
#endif
#if defined(DP_CON_MON)
#ifndef REMOVE_PKT_LOG
#include <pktlog_ac_api.h>
#include <pktlog_ac.h>
#endif
#endif

#ifdef WLAN_FEATURE_DP_EVENT_HISTORY
/*
 * If WLAN_CFG_INT_NUM_CONTEXTS is changed, HIF_NUM_INT_CONTEXTS
 * should also be updated accordingly
 */
QDF_COMPILE_TIME_ASSERT(num_intr_grps,
			HIF_NUM_INT_CONTEXTS == WLAN_CFG_INT_NUM_CONTEXTS);

/*
 * HIF_EVENT_HIST_MAX should always be a power of 2
 */
QDF_COMPILE_TIME_ASSERT(hif_event_history_size,
			(HIF_EVENT_HIST_MAX & (HIF_EVENT_HIST_MAX - 1)) == 0);
#endif /* WLAN_FEATURE_DP_EVENT_HISTORY */

/*
 * If WLAN_CFG_INT_NUM_CONTEXTS is changed,
 * WLAN_CFG_INT_NUM_CONTEXTS_MAX should also be updated
 */
QDF_COMPILE_TIME_ASSERT(wlan_cfg_num_int_ctxs,
			WLAN_CFG_INT_NUM_CONTEXTS_MAX >=
			WLAN_CFG_INT_NUM_CONTEXTS);
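
/*
 * Illustrative note (not driver code): the (n & (n - 1)) == 0 check used
 * in the assert above is the standard power-of-two test. Subtracting 1
 * from a power of two flips exactly the bits below its single set bit, so
 * the AND is zero only in that case. A minimal sketch:
 *
 *	static inline bool is_power_of_two(uint32_t n)
 *	{
 *		// 16 & 15 == 0 (power of two); 12 & 11 != 0 (not)
 *		return n && ((n & (n - 1)) == 0);
 *	}
 */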

#ifdef WLAN_RX_PKT_CAPTURE_ENH
#include "dp_rx_mon_feature.h"
#else
/*
 * dp_config_enh_rx_capture() - API to enable/disable enhanced Rx capture
 * @pdev_handle: DP_PDEV handle
 * @val: user-provided value
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS
dp_config_enh_rx_capture(struct cdp_pdev *pdev_handle, uint8_t val)
{
	return QDF_STATUS_E_INVAL;
}
#endif /* WLAN_RX_PKT_CAPTURE_ENH */

#ifdef WLAN_TX_PKT_CAPTURE_ENH
#include "dp_tx_capture.h"
#else
/*
 * dp_config_enh_tx_capture() - API to enable/disable enhanced Tx capture
 * @pdev_handle: DP_PDEV handle
 * @val: user-provided value
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS
dp_config_enh_tx_capture(struct cdp_pdev *pdev_handle, uint8_t val)
{
	return QDF_STATUS_E_INVAL;
}
#endif

void *dp_soc_init(struct dp_soc *soc, HTC_HANDLE htc_handle,
		  struct hif_opaque_softc *hif_handle);
static void dp_pdev_detach(struct cdp_pdev *txrx_pdev, int force);
static struct dp_soc *
dp_soc_attach(struct cdp_ctrl_objmgr_psoc *ctrl_psoc, HTC_HANDLE htc_handle,
	      qdf_device_t qdf_osdev,
	      struct ol_if_ops *ol_ops, uint16_t device_id);
static void dp_pktlogmod_exit(struct dp_pdev *handle);
static inline void *dp_peer_create_wifi3(struct cdp_soc_t *soc_hdl,
					 uint8_t vdev_id,
					 uint8_t *peer_mac_addr);
static QDF_STATUS dp_peer_delete_wifi3(struct cdp_soc_t *soc, uint8_t vdev_id,
				       uint8_t *peer_mac, uint32_t bitmap);
static void dp_ppdu_ring_reset(struct dp_pdev *pdev);
static void dp_ppdu_ring_cfg(struct dp_pdev *pdev);
static void dp_vdev_flush_peers(struct cdp_vdev *vdev_handle,
				bool unmap_only);
#ifdef ENABLE_VERBOSE_DEBUG
bool is_dp_verbose_debug_enabled;
#endif

#if defined(WLAN_CFR_ENABLE) && defined(WLAN_ENH_CFR_ENABLE)
static void dp_cfr_filter(struct cdp_soc_t *soc_hdl,
			  uint8_t pdev_id,
			  bool enable,
			  struct cdp_monitor_filter *filter_val);
#endif
static uint8_t dp_soc_ring_if_nss_offloaded(struct dp_soc *soc,
					    enum hal_ring_type ring_type,
					    int ring_num);
#define DP_INTR_POLL_TIMER_MS	10
/* Generic AST entry aging timer value */
#define DP_AST_AGING_TIMER_DEFAULT_MS	1000
#define DP_MCS_LENGTH (6*MAX_MCS)

#define DP_CURR_FW_STATS_AVAIL 19
#define DP_HTT_DBG_EXT_STATS_MAX 256
#define DP_MAX_SLEEP_TIME 100
#ifndef QCA_WIFI_3_0_EMU
#define SUSPEND_DRAIN_WAIT 500
#else
#define SUSPEND_DRAIN_WAIT 3000
#endif

#ifdef IPA_OFFLOAD
/* Exclude IPA rings from the interrupt context */
#define TX_RING_MASK_VAL	0xb
#define RX_RING_MASK_VAL	0x7
#else
#define TX_RING_MASK_VAL	0xF
#define RX_RING_MASK_VAL	0xF
#endif

#define STR_MAXLEN	64

#define RNG_ERR		"SRNG setup failed for"

/* Threshold for peer's cached buf queue beyond which frames are dropped */
#define DP_RX_CACHED_BUFQ_THRESH 64

/**
 * default_dscp_tid_map - Default DSCP-TID mapping
 *
 * DSCP        TID
 * 000000      0
 * 001000      1
 * 010000      2
 * 011000      3
 * 100000      4
 * 101000      5
 * 110000      6
 * 111000      7
 */
static uint8_t default_dscp_tid_map[DSCP_TID_MAP_MAX] = {
	0, 0, 0, 0, 0, 0, 0, 0,
	1, 1, 1, 1, 1, 1, 1, 1,
	2, 2, 2, 2, 2, 2, 2, 2,
	3, 3, 3, 3, 3, 3, 3, 3,
	4, 4, 4, 4, 4, 4, 4, 4,
	5, 5, 5, 5, 5, 5, 5, 5,
	6, 6, 6, 6, 6, 6, 6, 6,
	7, 7, 7, 7, 7, 7, 7, 7,
};

/**
 * default_pcp_tid_map - Default PCP-TID mapping
 *
 * PCP     TID
 * 000      0
 * 001      1
 * 010      2
 * 011      3
 * 100      4
 * 101      5
 * 110      6
 * 111      7
 */
static uint8_t default_pcp_tid_map[PCP_TID_MAP_MAX] = {
	0, 1, 2, 3, 4, 5, 6, 7,
};
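
/*
 * Illustrative sketch (not driver code) of how these two tables are
 * typically consumed: the DSCP index is the upper six bits of the IP TOS
 * byte and the PCP index is the 3-bit priority field of the 802.1Q TCI;
 * the local variable names below are hypothetical.
 *
 *	uint8_t dscp = tos >> 2;			// 6-bit DSCP
 *	uint8_t tid = default_dscp_tid_map[dscp];
 *
 *	uint8_t pcp = (vlan_tci >> 13) & 0x7;		// 3-bit PCP
 *	uint8_t tid_pcp = default_pcp_tid_map[pcp];
 */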

/**
 * @brief CPU to Tx ring map
 */
uint8_t
dp_cpu_ring_map[DP_NSS_CPU_RING_MAP_MAX][WLAN_CFG_INT_NUM_CONTEXTS_MAX] = {
	{0x0, 0x1, 0x2, 0x0, 0x0, 0x1, 0x2, 0x0, 0x0, 0x1, 0x2},
	{0x1, 0x2, 0x1, 0x2, 0x1, 0x2, 0x1, 0x2, 0x1, 0x2, 0x1},
	{0x0, 0x2, 0x0, 0x2, 0x0, 0x2, 0x0, 0x2, 0x0, 0x2, 0x0},
	{0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2},
	{0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3},
#ifdef WLAN_TX_PKT_CAPTURE_ENH
	{0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1}
#endif
};
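
/*
 * Illustrative sketch (not driver code): each row above is one NSS-offload
 * configuration, and a row is typically consumed by copying it out per
 * interrupt context so Tx traffic for that context lands on the chosen TCL
 * ring. "map_id" and "tx_ring_map" are hypothetical names here.
 *
 *	for (i = 0; i < WLAN_CFG_INT_NUM_CONTEXTS_MAX; i++)
 *		tx_ring_map[i] = dp_cpu_ring_map[map_id][i];
 */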

/**
 * @brief Select the type of statistics
 */
enum dp_stats_type {
	STATS_FW = 0,
	STATS_HOST = 1,
	STATS_TYPE_MAX = 2,
};

/**
 * @brief General Firmware statistics options
 */
enum dp_fw_stats {
	TXRX_FW_STATS_INVALID	= -1,
};

/**
 * dp_stats_mapping_table - Firmware and Host statistics
 * currently supported
 */
const int dp_stats_mapping_table[][STATS_TYPE_MAX] = {
	{HTT_DBG_EXT_STATS_RESET, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_PDEV_TX, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_PDEV_RX, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_PDEV_TX_HWQ, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_PDEV_TX_SCHED, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_PDEV_ERROR, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_PDEV_TQM, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_TQM_CMDQ, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_TX_DE_INFO, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_PDEV_TX_RATE, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_PDEV_RX_RATE, TXRX_HOST_STATS_INVALID},
	{TXRX_FW_STATS_INVALID, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_TX_SELFGEN_INFO, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_TX_MU_HWQ, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_RING_IF_INFO, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_SRNG_INFO, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_SFM_INFO, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_PDEV_TX_MU, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_ACTIVE_PEERS_LIST, TXRX_HOST_STATS_INVALID},
	/* Last ENUM for HTT FW STATS */
	{DP_HTT_DBG_EXT_STATS_MAX, TXRX_HOST_STATS_INVALID},
	{TXRX_FW_STATS_INVALID, TXRX_CLEAR_STATS},
	{TXRX_FW_STATS_INVALID, TXRX_RX_RATE_STATS},
	{TXRX_FW_STATS_INVALID, TXRX_TX_RATE_STATS},
	{TXRX_FW_STATS_INVALID, TXRX_TX_HOST_STATS},
	{TXRX_FW_STATS_INVALID, TXRX_RX_HOST_STATS},
	{TXRX_FW_STATS_INVALID, TXRX_AST_STATS},
	{TXRX_FW_STATS_INVALID, TXRX_SRNG_PTR_STATS},
	{TXRX_FW_STATS_INVALID, TXRX_RX_MON_STATS},
	{TXRX_FW_STATS_INVALID, TXRX_REO_QUEUE_STATS},
	{TXRX_FW_STATS_INVALID, TXRX_SOC_CFG_PARAMS},
	{TXRX_FW_STATS_INVALID, TXRX_PDEV_CFG_PARAMS},
	{TXRX_FW_STATS_INVALID, TXRX_SOC_INTERRUPT_STATS},
};
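
/*
 * Illustrative sketch (not driver code): a stats request id indexes this
 * table, and the two columns select the firmware or host handler; every
 * name other than the table and its enums is hypothetical.
 *
 *	int fw_stat = dp_stats_mapping_table[stats_id][STATS_FW];
 *	int host_stat = dp_stats_mapping_table[stats_id][STATS_HOST];
 *
 *	if (fw_stat != TXRX_FW_STATS_INVALID)
 *		;	// issue the HTT extended stats request to firmware
 *	else if (host_stat != TXRX_HOST_STATS_INVALID)
 *		;	// dump the corresponding host-maintained statistics
 */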

/* MCL specific functions */
#if defined(DP_CON_MON)
/**
 * dp_soc_get_mon_mask_for_interrupt_mode() - get mon mode mask for intr mode
 * @soc: pointer to dp_soc handle
 * @intr_ctx_num: interrupt context number for which the mon mask is needed
 *
 * For MCL, monitor mode rings are processed in a timer context (polled).
 * This function returns 0 because, in interrupt mode (softirq-based RX),
 * we do not want to process monitor mode rings in a softirq.
 *
 * So, when packet log is enabled for SAP/STA/P2P modes, regular interrupt
 * processing will not service monitor mode rings; that is done in a
 * separate timer context.
 *
 * Return: 0
 */
static inline
uint32_t dp_soc_get_mon_mask_for_interrupt_mode(struct dp_soc *soc, int intr_ctx_num)
{
	return 0;
}

/*
 * dp_service_mon_rings() - timer handler to reap monitor rings, required
 * since we do not get PPDU-end interrupts
 * @arg: SoC handle
 *
 * Return: none
 */
static void dp_service_mon_rings(void *arg)
{
	struct dp_soc *soc = (struct dp_soc *)arg;
	int ring = 0, work_done, mac_id;
	struct dp_pdev *pdev = NULL;

	for (ring = 0; ring < MAX_PDEV_CNT; ring++) {
		pdev = soc->pdev_list[ring];
		if (!pdev)
			continue;
		for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
			int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id,
								pdev->pdev_id);
			work_done = dp_mon_process(soc, mac_for_pdev,
						   QCA_NAPI_BUDGET);

			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
				  FL("Reaped %d descs from Monitor rings"),
				  work_done);
		}
	}

	qdf_timer_mod(&soc->mon_reap_timer, DP_INTR_POLL_TIMER_MS);
}

#ifndef REMOVE_PKT_LOG
/**
 * dp_pkt_log_init() - API to initialize packet log
 * @soc_hdl: Datapath soc handle
 * @pdev_id: id of data path pdev handle
 * @scn: HIF context
 *
 * Return: none
 */
void dp_pkt_log_init(struct cdp_soc_t *soc_hdl, uint8_t pdev_id, void *scn)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_pdev *handle =
		dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);

	if (!handle) {
		dp_err("pdev handle is NULL");
		return;
	}

	if (handle->pkt_log_init) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "%s: Packet log already initialized", __func__);
		return;
	}

	pktlog_sethandle(&handle->pl_dev, scn);
	pktlog_set_callback_regtype(PKTLOG_DEFAULT_CALLBACK_REGISTRATION);

	if (pktlogmod_init(scn)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "%s: pktlogmod_init failed", __func__);
		handle->pkt_log_init = false;
	} else {
		handle->pkt_log_init = true;
	}
}

/**
 * dp_pkt_log_con_service() - connect packet log service
 * @soc_hdl: Datapath soc handle
 * @pdev_id: id of data path pdev handle
 * @scn: device context
 *
 * Return: none
 */
static void dp_pkt_log_con_service(struct cdp_soc_t *soc_hdl,
				   uint8_t pdev_id, void *scn)
{
	dp_pkt_log_init(soc_hdl, pdev_id, scn);
	pktlog_htc_attach();
}

/**
 * dp_get_num_rx_contexts() - get number of RX contexts
 * @soc_hdl: cdp opaque soc handle
 *
 * Return: number of RX contexts
 */
static int dp_get_num_rx_contexts(struct cdp_soc_t *soc_hdl)
{
	int i;
	int num_rx_contexts = 0;
	struct dp_soc *soc = (struct dp_soc *)soc_hdl;

	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++)
		if (wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, i))
			num_rx_contexts++;

	return num_rx_contexts;
}

/**
 * dp_pktlogmod_exit() - API to cleanup pktlog info
 * @pdev: Pdev handle
 *
 * Return: none
 */
static void dp_pktlogmod_exit(struct dp_pdev *pdev)
{
	struct dp_soc *soc = pdev->soc;
	struct hif_opaque_softc *scn = soc->hif_handle;

	if (!scn) {
		dp_err("Invalid hif(scn) handle");
		return;
	}

	/* stop mon_reap_timer if it has been started */
	if (pdev->rx_pktlog_mode != DP_RX_PKTLOG_DISABLED &&
	    soc->reap_timer_init)
		qdf_timer_sync_cancel(&soc->mon_reap_timer);

	pktlogmod_exit(scn);
	pdev->pkt_log_init = false;
}
#endif
#else
static void dp_pktlogmod_exit(struct dp_pdev *handle) { }

/**
 * dp_soc_get_mon_mask_for_interrupt_mode() - get mon mode mask for intr mode
 * @soc: pointer to dp_soc handle
 * @intr_ctx_num: interrupt context number for which the mon mask is needed
 *
 * Return: mon mask value
 */
static inline
uint32_t dp_soc_get_mon_mask_for_interrupt_mode(struct dp_soc *soc, int intr_ctx_num)
{
	return wlan_cfg_get_rx_mon_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
}
#endif

static int dp_peer_add_ast_wifi3(struct cdp_soc_t *soc_hdl,
				 uint8_t vdev_id,
				 uint8_t *peer_mac,
				 uint8_t *mac_addr,
				 enum cdp_txrx_ast_entry_type type,
				 uint32_t flags)
{
	int ret = -1;
	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)soc_hdl,
						       peer_mac, 0, vdev_id);

	if (!peer || peer->delete_in_progress) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Peer is NULL!", __func__);
		goto fail;
	}

	ret = dp_peer_add_ast((struct dp_soc *)soc_hdl,
			      peer,
			      mac_addr,
			      type,
			      flags);
fail:
	if (peer)
		dp_peer_unref_delete(peer);

	return ret;
}

static int dp_peer_update_ast_wifi3(struct cdp_soc_t *soc_hdl,
				    uint8_t vdev_id,
				    uint8_t *peer_mac,
				    uint8_t *wds_macaddr,
				    uint32_t flags)
{
	int status = -1;
	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
	struct dp_ast_entry *ast_entry = NULL;
	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)soc_hdl,
						       peer_mac, 0, vdev_id);

	if (!peer || peer->delete_in_progress) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Peer is NULL!", __func__);
		goto fail;
	}

	qdf_spin_lock_bh(&soc->ast_lock);
	ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, wds_macaddr,
						    peer->vdev->pdev->pdev_id);

	if (ast_entry) {
		status = dp_peer_update_ast(soc,
					    peer,
					    ast_entry, flags);
	}
	qdf_spin_unlock_bh(&soc->ast_lock);

fail:
	if (peer)
		dp_peer_unref_delete(peer);

	return status;
}

/*
 * dp_wds_reset_ast_wifi3() - Delete the WDS HM/HM_SEC AST entries matching
 *			      the given peer or WDS entry MAC address
 * @soc_hdl:		Datapath SOC handle
 * @wds_macaddr:	WDS entry MAC Address
 * @peer_mac_addr:	peer MAC Address
 * @vdev_id:		id of vdev handle
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS dp_wds_reset_ast_wifi3(struct cdp_soc_t *soc_hdl,
					 uint8_t *wds_macaddr,
					 uint8_t *peer_mac_addr,
					 uint8_t vdev_id)
{
	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
	struct dp_ast_entry *ast_entry = NULL;
	struct dp_ast_entry *tmp_ast_entry;
	struct dp_peer *peer;
	struct dp_pdev *pdev;
	struct dp_vdev *vdev = dp_get_vdev_from_soc_vdev_id_wifi3(soc, vdev_id);

	if (!vdev)
		return QDF_STATUS_E_FAILURE;

	pdev = vdev->pdev;

	if (peer_mac_addr) {
		peer = dp_peer_find_hash_find(soc, peer_mac_addr,
					      0, vdev->vdev_id);
		if (!peer)
			return QDF_STATUS_E_FAILURE;

		if (peer->delete_in_progress) {
			dp_peer_unref_delete(peer);
			return QDF_STATUS_E_FAILURE;
		}

		qdf_spin_lock_bh(&soc->ast_lock);
		DP_PEER_ITERATE_ASE_LIST(peer, ast_entry, tmp_ast_entry) {
			if ((ast_entry->type == CDP_TXRX_AST_TYPE_WDS_HM) ||
			    (ast_entry->type == CDP_TXRX_AST_TYPE_WDS_HM_SEC))
				dp_peer_del_ast(soc, ast_entry);
		}
		qdf_spin_unlock_bh(&soc->ast_lock);
		dp_peer_unref_delete(peer);

		return QDF_STATUS_SUCCESS;
	} else if (wds_macaddr) {
		qdf_spin_lock_bh(&soc->ast_lock);
		ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, wds_macaddr,
							    pdev->pdev_id);

		if (ast_entry) {
			if ((ast_entry->type == CDP_TXRX_AST_TYPE_WDS_HM) ||
			    (ast_entry->type == CDP_TXRX_AST_TYPE_WDS_HM_SEC))
				dp_peer_del_ast(soc, ast_entry);
		}
		qdf_spin_unlock_bh(&soc->ast_lock);
	}

	return QDF_STATUS_SUCCESS;
}

/*
 * dp_wds_reset_ast_table_wifi3() - Delete the WDS HM/HM_SEC AST entries for
 *				    all peers on the SOC
 * @soc_hdl:		Datapath SOC handle
 * @vdev_id:		id of vdev handle
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS
dp_wds_reset_ast_table_wifi3(struct cdp_soc_t *soc_hdl,
			     uint8_t vdev_id)
{
	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
	struct dp_pdev *pdev;
	struct dp_vdev *vdev;
	struct dp_peer *peer;
	struct dp_ast_entry *ase, *temp_ase;
	int i;

	qdf_spin_lock_bh(&soc->ast_lock);

	for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) {
		pdev = soc->pdev_list[i];
		qdf_spin_lock_bh(&pdev->vdev_list_lock);
		DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
			DP_VDEV_ITERATE_PEER_LIST(vdev, peer) {
				DP_PEER_ITERATE_ASE_LIST(peer, ase, temp_ase) {
					if ((ase->type ==
						CDP_TXRX_AST_TYPE_WDS_HM) ||
					    (ase->type ==
						CDP_TXRX_AST_TYPE_WDS_HM_SEC))
						dp_peer_del_ast(soc, ase);
				}
			}
		}
		qdf_spin_unlock_bh(&pdev->vdev_list_lock);
	}

	qdf_spin_unlock_bh(&soc->ast_lock);

	return QDF_STATUS_SUCCESS;
}

/*
 * dp_wds_flush_ast_table_wifi3() - Delete all wds and hmwds ast entries
 * @soc_hdl:		Datapath SOC handle
 *
 * Return: None
 */
static void dp_wds_flush_ast_table_wifi3(struct cdp_soc_t *soc_hdl)
{
	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
	struct dp_pdev *pdev;
	struct dp_vdev *vdev;
	struct dp_peer *peer;
	struct dp_ast_entry *ase, *temp_ase;
	int i;

	qdf_spin_lock_bh(&soc->ast_lock);

	for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) {
		pdev = soc->pdev_list[i];
		qdf_spin_lock_bh(&pdev->vdev_list_lock);
		DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
			DP_VDEV_ITERATE_PEER_LIST(vdev, peer) {
				DP_PEER_ITERATE_ASE_LIST(peer, ase, temp_ase) {
					if ((ase->type ==
						CDP_TXRX_AST_TYPE_STATIC) ||
						(ase->type ==
						 CDP_TXRX_AST_TYPE_SELF) ||
						(ase->type ==
						 CDP_TXRX_AST_TYPE_STA_BSS))
						continue;
					dp_peer_del_ast(soc, ase);
				}
			}
		}
		qdf_spin_unlock_bh(&pdev->vdev_list_lock);
	}

	qdf_spin_unlock_bh(&soc->ast_lock);
}

/**
 * dp_peer_get_ast_info_by_soc_wifi3() - search the soc AST hash table
 *                                       and return ast entry information
 *                                       of the first ast entry found in the
 *                                       table with the given mac address
 *
 * @soc_hdl : data path soc handle
 * @ast_mac_addr : AST entry mac address
 * @ast_entry_info : ast entry information
 *
 * return : true if ast entry found with ast_mac_addr
 *          false if ast entry not found
 */
static bool dp_peer_get_ast_info_by_soc_wifi3
	(struct cdp_soc_t *soc_hdl,
	 uint8_t *ast_mac_addr,
	 struct cdp_ast_entry_info *ast_entry_info)
{
	struct dp_ast_entry *ast_entry = NULL;
	struct dp_soc *soc = (struct dp_soc *)soc_hdl;

	qdf_spin_lock_bh(&soc->ast_lock);

	ast_entry = dp_peer_ast_hash_find_soc(soc, ast_mac_addr);
	if (!ast_entry || !ast_entry->peer) {
		qdf_spin_unlock_bh(&soc->ast_lock);
		return false;
	}
	if (ast_entry->delete_in_progress && !ast_entry->callback) {
		qdf_spin_unlock_bh(&soc->ast_lock);
		return false;
	}
	ast_entry_info->type = ast_entry->type;
	ast_entry_info->pdev_id = ast_entry->pdev_id;
	ast_entry_info->vdev_id = ast_entry->peer->vdev->vdev_id;
	ast_entry_info->peer_id = ast_entry->peer->peer_ids[0];
	qdf_mem_copy(&ast_entry_info->peer_mac_addr[0],
		     &ast_entry->peer->mac_addr.raw[0],
		     QDF_MAC_ADDR_SIZE);
	qdf_spin_unlock_bh(&soc->ast_lock);
	return true;
}

/**
 * dp_peer_get_ast_info_by_pdevid_wifi3() - search the soc AST hash table
 *                                          and return ast entry information
 *                                          if mac address and pdev_id match
 *
 * @soc_hdl : data path soc handle
 * @ast_mac_addr : AST entry mac address
 * @pdev_id : pdev_id
 * @ast_entry_info : ast entry information
 *
 * return : true if ast entry found with ast_mac_addr
 *          false if ast entry not found
 */
static bool dp_peer_get_ast_info_by_pdevid_wifi3
		(struct cdp_soc_t *soc_hdl,
		 uint8_t *ast_mac_addr,
		 uint8_t pdev_id,
		 struct cdp_ast_entry_info *ast_entry_info)
{
	struct dp_ast_entry *ast_entry;
	struct dp_soc *soc = (struct dp_soc *)soc_hdl;

	qdf_spin_lock_bh(&soc->ast_lock);

	ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, ast_mac_addr, pdev_id);

	if (!ast_entry || !ast_entry->peer) {
		qdf_spin_unlock_bh(&soc->ast_lock);
		return false;
	}
	if (ast_entry->delete_in_progress && !ast_entry->callback) {
		qdf_spin_unlock_bh(&soc->ast_lock);
		return false;
	}
	ast_entry_info->type = ast_entry->type;
	ast_entry_info->pdev_id = ast_entry->pdev_id;
	ast_entry_info->vdev_id = ast_entry->peer->vdev->vdev_id;
	ast_entry_info->peer_id = ast_entry->peer->peer_ids[0];
	qdf_mem_copy(&ast_entry_info->peer_mac_addr[0],
		     &ast_entry->peer->mac_addr.raw[0],
		     QDF_MAC_ADDR_SIZE);
	qdf_spin_unlock_bh(&soc->ast_lock);
	return true;
}

/**
 * dp_peer_ast_entry_del_by_soc() - delete the ast entry from soc AST hash
 *                                  table with the given mac address
 *
 * @soc_handle : data path soc handle
 * @mac_addr : AST entry mac address
 * @callback : callback function to be called on ast delete response from FW
 * @cookie : argument to be passed to callback
 *
 * return : QDF_STATUS_SUCCESS if ast entry found with mac_addr and delete
 *          is sent
 *          QDF_STATUS_E_INVAL if ast entry not found
 */
static QDF_STATUS dp_peer_ast_entry_del_by_soc(struct cdp_soc_t *soc_handle,
					       uint8_t *mac_addr,
					       txrx_ast_free_cb callback,
					       void *cookie)
{
	struct dp_soc *soc = (struct dp_soc *)soc_handle;
	struct dp_ast_entry *ast_entry = NULL;
	txrx_ast_free_cb cb = NULL;
	void *arg = NULL;

	qdf_spin_lock_bh(&soc->ast_lock);
	ast_entry = dp_peer_ast_hash_find_soc(soc, mac_addr);
	if (!ast_entry) {
		qdf_spin_unlock_bh(&soc->ast_lock);
		return QDF_STATUS_E_INVAL;
	}

	if (ast_entry->callback) {
		cb = ast_entry->callback;
		arg = ast_entry->cookie;
	}

	ast_entry->callback = callback;
	ast_entry->cookie = cookie;

	/*
	 * If delete_in_progress is set, an AST delete was already sent to
	 * the target and the host is waiting for a response; do not send
	 * the delete again.
	 */
	if (!ast_entry->delete_in_progress)
		dp_peer_del_ast(soc, ast_entry);

	qdf_spin_unlock_bh(&soc->ast_lock);
	if (cb) {
		cb(soc->ctrl_psoc,
		   dp_soc_to_cdp_soc(soc),
		   arg,
		   CDP_TXRX_AST_DELETE_IN_PROGRESS);
	}
	return QDF_STATUS_SUCCESS;
}
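
/*
 * Illustrative sketch (not driver code) of the delete-with-callback
 * contract above: the registered callback fires on the firmware delete
 * response, or immediately with CDP_TXRX_AST_DELETE_IN_PROGRESS when it
 * replaces an earlier pending callback. "my_ast_free_cb" and "my_ctx" are
 * hypothetical; the signature is sketched from the invocation above.
 *
 *	static void my_ast_free_cb(struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
 *				   struct cdp_soc *cdp_soc, void *cookie,
 *				   enum cdp_ast_free_status status)
 *	{
 *		// release the per-entry bookkeeping referenced by cookie
 *	}
 *
 *	dp_peer_ast_entry_del_by_soc(soc_handle, mac, my_ast_free_cb, my_ctx);
 */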

/**
 * dp_peer_ast_entry_del_by_pdev() - delete the ast entry from soc AST hash
 *                                   table if mac address and pdev_id match
 *
 * @soc_handle : data path soc handle
 * @mac_addr : AST entry mac address
 * @pdev_id : pdev id
 * @callback : callback function to be called on ast delete response from FW
 * @cookie : argument to be passed to callback
 *
 * return : QDF_STATUS_SUCCESS if ast entry found with mac_addr and delete
 *          is sent
 *          QDF_STATUS_E_INVAL if ast entry not found
 */
static QDF_STATUS dp_peer_ast_entry_del_by_pdev(struct cdp_soc_t *soc_handle,
						uint8_t *mac_addr,
						uint8_t pdev_id,
						txrx_ast_free_cb callback,
						void *cookie)
{
	struct dp_soc *soc = (struct dp_soc *)soc_handle;
	struct dp_ast_entry *ast_entry;
	txrx_ast_free_cb cb = NULL;
	void *arg = NULL;

	qdf_spin_lock_bh(&soc->ast_lock);
	ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, mac_addr, pdev_id);

	if (!ast_entry) {
		qdf_spin_unlock_bh(&soc->ast_lock);
		return QDF_STATUS_E_INVAL;
	}

	if (ast_entry->callback) {
		cb = ast_entry->callback;
		arg = ast_entry->cookie;
	}

	ast_entry->callback = callback;
	ast_entry->cookie = cookie;

	/*
	 * If delete_in_progress is set, an AST delete was already sent to
	 * the target and the host is waiting for a response; do not send
	 * the delete again.
	 */
	if (!ast_entry->delete_in_progress)
		dp_peer_del_ast(soc, ast_entry);

	qdf_spin_unlock_bh(&soc->ast_lock);

	if (cb) {
		cb(soc->ctrl_psoc,
		   dp_soc_to_cdp_soc(soc),
		   arg,
		   CDP_TXRX_AST_DELETE_IN_PROGRESS);
	}
	return QDF_STATUS_SUCCESS;
}

/**
 * dp_srng_find_ring_in_mask() - find which ext_group a ring belongs to
 * @ring_num: ring num of the ring being queried
 * @grp_mask: the grp_mask array for the ring type in question.
 *
 * The grp_mask array is indexed by group number and the bit fields
 * correspond to ring numbers. We are finding which interrupt group a ring
 * belongs to.
 *
 * Return: the index in the grp_mask array with the ring number.
 * -QDF_STATUS_E_NOENT if no entry is found
 */
static int dp_srng_find_ring_in_mask(int ring_num, int *grp_mask)
{
	int ext_group_num;
	int mask = 1 << ring_num;

	for (ext_group_num = 0; ext_group_num < WLAN_CFG_INT_NUM_CONTEXTS;
	     ext_group_num++) {
		if (mask & grp_mask[ext_group_num])
			return ext_group_num;
	}

	return -QDF_STATUS_E_NOENT;
}
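
/*
 * Illustrative worked example (not driver code): with
 * grp_mask = {0x1, 0x6, 0x8}, querying ring 2 computes
 * mask = 1 << 2 = 0x4, which intersects grp_mask[1] = 0x6, so the ring is
 * serviced by interrupt group 1:
 *
 *	int grp_mask[] = {0x1, 0x6, 0x8};
 *	int grp = dp_srng_find_ring_in_mask(2, grp_mask);	// grp == 1
 */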

static int dp_srng_calculate_msi_group(struct dp_soc *soc,
				       enum hal_ring_type ring_type,
				       int ring_num)
{
	int *grp_mask;

	switch (ring_type) {
	case WBM2SW_RELEASE:
		/* dp_tx_comp_handler - soc->tx_comp_ring */
		if (ring_num < 3)
			grp_mask = &soc->wlan_cfg_ctx->int_tx_ring_mask[0];

		/* dp_rx_wbm_err_process - soc->rx_rel_ring */
		else if (ring_num == 3) {
			/* sw treats this as a separate ring type */
			grp_mask = &soc->wlan_cfg_ctx->
				int_rx_wbm_rel_ring_mask[0];
			ring_num = 0;
		} else {
			qdf_assert(0);
			return -QDF_STATUS_E_NOENT;
		}
	break;

	case REO_EXCEPTION:
		/* dp_rx_err_process - &soc->reo_exception_ring */
		grp_mask = &soc->wlan_cfg_ctx->int_rx_err_ring_mask[0];
	break;

	case REO_DST:
		/* dp_rx_process - soc->reo_dest_ring */
		grp_mask = &soc->wlan_cfg_ctx->int_rx_ring_mask[0];
	break;

	case REO_STATUS:
		/* dp_reo_status_ring_handler - soc->reo_status_ring */
		grp_mask = &soc->wlan_cfg_ctx->int_reo_status_ring_mask[0];
	break;

	/* dp_rx_mon_status_srng_process - pdev->rxdma_mon_status_ring */
	case RXDMA_MONITOR_STATUS:
	/* dp_rx_mon_dest_process - pdev->rxdma_mon_dst_ring */
	case RXDMA_MONITOR_DST:
		/* dp_mon_process */
		grp_mask = &soc->wlan_cfg_ctx->int_rx_mon_ring_mask[0];
	break;
	case RXDMA_DST:
		/* dp_rxdma_err_process */
		grp_mask = &soc->wlan_cfg_ctx->int_rxdma2host_ring_mask[0];
	break;

	case RXDMA_BUF:
		grp_mask = &soc->wlan_cfg_ctx->int_host2rxdma_ring_mask[0];
	break;

	case RXDMA_MONITOR_BUF:
		/* TODO: support low_thresh interrupt */
		return -QDF_STATUS_E_NOENT;
	break;

	case TCL_DATA:
	case TCL_CMD:
	case REO_CMD:
	case SW2WBM_RELEASE:
	case WBM_IDLE_LINK:
		/* normally empty SW_TO_HW rings */
		return -QDF_STATUS_E_NOENT;
	break;

	case TCL_STATUS:
	case REO_REINJECT:
		/* misc unused rings */
		return -QDF_STATUS_E_NOENT;
	break;

	case CE_SRC:
	case CE_DST:
	case CE_DST_STATUS:
		/* CE_rings - currently handled by hif */
	default:
		return -QDF_STATUS_E_NOENT;
	break;
	}

	return dp_srng_find_ring_in_mask(ring_num, grp_mask);
}

static void dp_srng_msi_setup(struct dp_soc *soc, struct hal_srng_params
			      *ring_params, int ring_type, int ring_num)
{
	int msi_group_number;
	int msi_data_count;
	int ret;
	uint32_t msi_data_start, msi_irq_start, addr_low, addr_high;

	ret = pld_get_user_msi_assignment(soc->osdev->dev, "DP",
					  &msi_data_count, &msi_data_start,
					  &msi_irq_start);

	if (ret)
		return;

	msi_group_number = dp_srng_calculate_msi_group(soc, ring_type,
						       ring_num);
	if (msi_group_number < 0) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW,
			  FL("ring not part of an ext_group; ring_type: %d, ring_num %d"),
			  ring_type, ring_num);
		ring_params->msi_addr = 0;
		ring_params->msi_data = 0;
		return;
	}

	if (msi_group_number > msi_data_count) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_WARN,
			  FL("2 msi_groups will share an msi; msi_group_num %d"),
			  msi_group_number);

		QDF_ASSERT(0);
	}

	pld_get_msi_address(soc->osdev->dev, &addr_low, &addr_high);

	ring_params->msi_addr = addr_low;
	ring_params->msi_addr |= (qdf_dma_addr_t)(((uint64_t)addr_high) << 32);
	ring_params->msi_data = (msi_group_number % msi_data_count)
		+ msi_data_start;
	ring_params->flags |= HAL_SRNG_MSI_INTR;
}
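
/*
 * Illustrative worked example (not driver code): with msi_data_count = 3
 * and msi_data_start = 40, the modulo in dp_srng_msi_setup() maps
 * interrupt group 0 -> MSI data 40, group 1 -> 41, group 2 -> 42, and
 * group 3 wraps back to 40 (two groups then share one vector):
 *
 *	msi_data = (msi_group_number % msi_data_count) + msi_data_start;
 */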

/**
 * dp_print_ast_stats() - Dump AST table contents
 * @soc: Datapath soc handle
 *
 * Return: void
 */
#ifdef FEATURE_AST
void dp_print_ast_stats(struct dp_soc *soc)
{
	uint8_t i;
	uint8_t num_entries = 0;
	struct dp_vdev *vdev;
	struct dp_pdev *pdev;
	struct dp_peer *peer;
	struct dp_ast_entry *ase, *tmp_ase;
	char type[CDP_TXRX_AST_TYPE_MAX][10] = {
			"NONE", "STATIC", "SELF", "WDS", "MEC", "HMWDS", "BSS",
			"DA", "HMWDS_SEC"};

	DP_PRINT_STATS("AST Stats:");
	DP_PRINT_STATS("	Entries Added   = %d", soc->stats.ast.added);
	DP_PRINT_STATS("	Entries Deleted = %d", soc->stats.ast.deleted);
	DP_PRINT_STATS("	Entries Agedout = %d", soc->stats.ast.aged_out);
	DP_PRINT_STATS("	Entries MAP ERR = %d", soc->stats.ast.map_err);

	DP_PRINT_STATS("AST Table:");

	qdf_spin_lock_bh(&soc->ast_lock);
	for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) {
		pdev = soc->pdev_list[i];
		qdf_spin_lock_bh(&pdev->vdev_list_lock);
		DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
			DP_VDEV_ITERATE_PEER_LIST(vdev, peer) {
				DP_PEER_ITERATE_ASE_LIST(peer, ase, tmp_ase) {
				    DP_PRINT_STATS("%6d mac_addr = %pM"
					    " peer_mac_addr = %pM"
					    " peer_id = %u"
					    " type = %s"
					    " next_hop = %d"
					    " is_active = %d"
					    " ast_idx = %d"
					    " ast_hash = %d"
					    " delete_in_progress = %d"
					    " pdev_id = %d"
					    " vdev_id = %d",
					    ++num_entries,
					    ase->mac_addr.raw,
					    ase->peer->mac_addr.raw,
					    ase->peer->peer_ids[0],
					    type[ase->type],
					    ase->next_hop,
					    ase->is_active,
					    ase->ast_idx,
					    ase->ast_hash_value,
					    ase->delete_in_progress,
					    ase->pdev_id,
					    vdev->vdev_id);
				}
			}
		}
		qdf_spin_unlock_bh(&pdev->vdev_list_lock);
	}
	qdf_spin_unlock_bh(&soc->ast_lock);
}
#else
void dp_print_ast_stats(struct dp_soc *soc)
{
	DP_PRINT_STATS("AST Stats not available. Enable FEATURE_AST");
}
#endif

/**
 * dp_print_peer_table() - Dump all Peer stats
 * @vdev: Datapath Vdev handle
 *
 * Return: void
 */
static void dp_print_peer_table(struct dp_vdev *vdev)
{
	struct dp_peer *peer = NULL;

	DP_PRINT_STATS("Dumping Peer Table Stats:");
	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
		if (!peer) {
			DP_PRINT_STATS("Invalid Peer");
			return;
		}
		DP_PRINT_STATS("    peer_mac_addr = %pM"
			       " nawds_enabled = %d"
			       " bss_peer = %d"
			       " wds_enabled = %d"
			       " tx_cap_enabled = %d"
			       " rx_cap_enabled = %d"
			       " delete in progress = %d"
			       " peer id = %d",
			       peer->mac_addr.raw,
			       peer->nawds_enabled,
			       peer->bss_peer,
			       peer->wds_enabled,
			       peer->tx_cap_enabled,
			       peer->rx_cap_enabled,
			       peer->delete_in_progress,
			       peer->peer_ids[0]);
	}
}

#ifdef WLAN_DP_PER_RING_TYPE_CONFIG
/**
 * dp_srng_configure_interrupt_thresholds() - Retrieve interrupt
 * threshold values from the wlan_srng_cfg table for each ring type
 * @soc: device handle
 * @ring_params: per ring specific parameters
 * @ring_type: Ring type
 * @ring_num: Ring number for a given ring type
 * @num_entries: number of entries in the ring
 *
 * Fill the ring params with the interrupt threshold
 * configuration parameters available in the per ring type wlan_srng_cfg
 * table.
 *
 * Return: None
 */
static void
dp_srng_configure_interrupt_thresholds(struct dp_soc *soc,
				       struct hal_srng_params *ring_params,
				       int ring_type, int ring_num,
				       int num_entries)
{
	if (ring_type == WBM2SW_RELEASE && (ring_num == 3)) {
		ring_params->intr_timer_thres_us =
				wlan_cfg_get_int_timer_threshold_other(soc->wlan_cfg_ctx);
		ring_params->intr_batch_cntr_thres_entries =
				wlan_cfg_get_int_batch_threshold_other(soc->wlan_cfg_ctx);
	} else {
		ring_params->intr_timer_thres_us =
				soc->wlan_srng_cfg[ring_type].timer_threshold;
		ring_params->intr_batch_cntr_thres_entries =
				soc->wlan_srng_cfg[ring_type].batch_count_threshold;
	}
	ring_params->low_threshold =
			soc->wlan_srng_cfg[ring_type].low_threshold;

	if (ring_params->low_threshold)
		ring_params->flags |= HAL_SRNG_LOW_THRES_INTR_ENABLE;
}
#else
static void
dp_srng_configure_interrupt_thresholds(struct dp_soc *soc,
				       struct hal_srng_params *ring_params,
				       int ring_type, int ring_num,
				       int num_entries)
{
	if (ring_type == REO_DST) {
		ring_params->intr_timer_thres_us =
			wlan_cfg_get_int_timer_threshold_rx(soc->wlan_cfg_ctx);
		ring_params->intr_batch_cntr_thres_entries =
			wlan_cfg_get_int_batch_threshold_rx(soc->wlan_cfg_ctx);
	} else if (ring_type == WBM2SW_RELEASE && (ring_num < 3)) {
		ring_params->intr_timer_thres_us =
			wlan_cfg_get_int_timer_threshold_tx(soc->wlan_cfg_ctx);
		ring_params->intr_batch_cntr_thres_entries =
			wlan_cfg_get_int_batch_threshold_tx(soc->wlan_cfg_ctx);
	} else {
		ring_params->intr_timer_thres_us =
			wlan_cfg_get_int_timer_threshold_other(soc->wlan_cfg_ctx);
		ring_params->intr_batch_cntr_thres_entries =
			wlan_cfg_get_int_batch_threshold_other(soc->wlan_cfg_ctx);
	}

	/* Enable low threshold interrupts for rx buffer rings (regular and
	 * monitor buffer rings).
	 * TODO: See if this is required for any other ring
	 */
	if ((ring_type == RXDMA_BUF) || (ring_type == RXDMA_MONITOR_BUF) ||
	    (ring_type == RXDMA_MONITOR_STATUS)) {
		/* TODO: Setting low threshold to 1/8th of ring size
		 * see if this needs to be configurable
		 */
		ring_params->low_threshold = num_entries >> 3;
		ring_params->intr_timer_thres_us =
			wlan_cfg_get_int_timer_threshold_rx(soc->wlan_cfg_ctx);
		ring_params->flags |= HAL_SRNG_LOW_THRES_INTR_ENABLE;
		ring_params->intr_batch_cntr_thres_entries = 0;
	}
}
#endif
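
/*
 * Illustrative worked example (not driver code): for an RXDMA_BUF ring
 * with num_entries = 1024, the default path above arms a low-threshold
 * interrupt at 1024 >> 3 = 128 entries, i.e. a refill is triggered once
 * fewer than one eighth of the ring remains available.
 */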

/**
 * dp_srng_setup() - Internal function to setup SRNG rings used by data path
 * @soc: datapath soc handle
 * @srng: srng handle
 * @ring_type: ring that needs to be configured
 * @ring_num: ring number for the given ring type
 * @mac_id: mac number
 * @num_entries: Total number of entries for a given ring
 * @cached: whether the ring memory should be allocated as cacheable
 *
 * Return: zero on success, non-zero on failure
 */
static int dp_srng_setup(struct dp_soc *soc, struct dp_srng *srng,
			 int ring_type, int ring_num, int mac_id,
			 uint32_t num_entries, bool cached)
{
	hal_soc_handle_t hal_soc = soc->hal_soc;
	uint32_t entry_size = hal_srng_get_entrysize(hal_soc, ring_type);
	/* TODO: See if we should get align size from hal */
	uint32_t ring_base_align = 8;
	struct hal_srng_params ring_params;
	uint32_t max_entries = hal_srng_max_entries(hal_soc, ring_type);

	/* TODO: Currently hal layer takes care of endianness related settings.
	 * See if these settings need to passed from DP layer
	 */
	ring_params.flags = 0;

	num_entries = (num_entries > max_entries) ? max_entries : num_entries;
	srng->hal_srng = NULL;
	srng->alloc_size = num_entries * entry_size;
	srng->num_entries = num_entries;

	if (!dp_is_soc_reinit(soc)) {
		if (!cached) {
			ring_params.ring_base_vaddr =
			    qdf_aligned_mem_alloc_consistent(
						soc->osdev, &srng->alloc_size,
						&srng->base_vaddr_unaligned,
						&srng->base_paddr_unaligned,
						&ring_params.ring_base_paddr,
						ring_base_align);
		} else {
			ring_params.ring_base_vaddr = qdf_aligned_malloc(
					&srng->alloc_size,
					&srng->base_vaddr_unaligned,
					&srng->base_paddr_unaligned,
					&ring_params.ring_base_paddr,
					ring_base_align);
		}

		if (!ring_params.ring_base_vaddr) {
			dp_err("alloc failed - ring_type: %d, ring_num %d",
			       ring_type, ring_num);
			return QDF_STATUS_E_NOMEM;
		}
	}

	ring_params.ring_base_paddr = (qdf_dma_addr_t)qdf_align(
			(unsigned long)(srng->base_paddr_unaligned),
			ring_base_align);

	ring_params.ring_base_vaddr = (void *)(
			(unsigned long)(srng->base_vaddr_unaligned) +
			((unsigned long)(ring_params.ring_base_paddr) -
			 (unsigned long)(srng->base_paddr_unaligned)));

	qdf_assert_always(ring_params.ring_base_vaddr);

	ring_params.num_entries = num_entries;

	dp_verbose_debug("Ring type: %d, num:%d vaddr %pK paddr %pK entries %u",
			 ring_type, ring_num,
			 (void *)ring_params.ring_base_vaddr,
			 (void *)ring_params.ring_base_paddr,
			 ring_params.num_entries);

	if (soc->intr_mode == DP_INTR_MSI) {
		dp_srng_msi_setup(soc, &ring_params, ring_type, ring_num);
		dp_verbose_debug("Using MSI for ring_type: %d, ring_num %d",
				 ring_type, ring_num);
	} else {
		ring_params.msi_data = 0;
		ring_params.msi_addr = 0;
		dp_verbose_debug("Skipping MSI for ring_type: %d, ring_num %d",
				 ring_type, ring_num);
	}

	dp_srng_configure_interrupt_thresholds(soc, &ring_params,
					       ring_type, ring_num,
					       num_entries);

	if (cached) {
		ring_params.flags |= HAL_SRNG_CACHED_DESC;
		srng->cached = 1;
	}

	srng->hal_srng = hal_srng_setup(hal_soc, ring_type, ring_num,
					mac_id, &ring_params);

	if (!srng->hal_srng) {
		if (cached) {
			qdf_mem_free(srng->base_vaddr_unaligned);
		} else {
			qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
						srng->alloc_size,
						srng->base_vaddr_unaligned,
						srng->base_paddr_unaligned, 0);
		}
	}

	return 0;
}
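
/*
 * Illustrative worked example (not driver code): the base-address fixup in
 * dp_srng_setup() keeps the virtual pointer in step with the aligned
 * physical address. With ring_base_align = 8 and an unaligned physical
 * base of 0x1003, qdf_align() rounds up to 0x1008 and the virtual base is
 * advanced by the same 5-byte offset:
 *
 *	paddr_aligned = (0x1003 + 7) & ~7UL;			// 0x1008
 *	vaddr_aligned = vaddr_unaligned + (paddr_aligned - 0x1003);
 */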

/*
 * dp_srng_deinit() - Internal function to deinit SRNG rings used by data path
 * @soc: DP SOC handle
 * @srng: source ring structure
 * @ring_type: type of ring
 * @ring_num: ring number
 *
 * Return: None
 */
static void dp_srng_deinit(struct dp_soc *soc, struct dp_srng *srng,
			   int ring_type, int ring_num)
{
	if (!srng->hal_srng) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("Ring type: %d, num:%d not setup"),
			  ring_type, ring_num);
		return;
	}

	hal_srng_cleanup(soc->hal_soc, srng->hal_srng);
	srng->hal_srng = NULL;
}

/**
 * dp_srng_cleanup() - Internal function to cleanup SRNG rings used by data
 * path. Any buffers allocated and attached to ring entries are expected to
 * be freed before calling this function.
 */
static void dp_srng_cleanup(struct dp_soc *soc, struct dp_srng *srng,
			    int ring_type, int ring_num)
{
	if (!dp_is_soc_reinit(soc)) {
		if (!srng->hal_srng && (srng->alloc_size == 0)) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  FL("Ring type: %d, num:%d not setup"),
				  ring_type, ring_num);
			return;
		}

		if (srng->hal_srng) {
			hal_srng_cleanup(soc->hal_soc, srng->hal_srng);
			srng->hal_srng = NULL;
		}
	}

	if (srng->alloc_size && srng->base_vaddr_unaligned) {
		if (!srng->cached) {
			qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
						srng->alloc_size,
						srng->base_vaddr_unaligned,
						srng->base_paddr_unaligned, 0);
		} else {
			qdf_mem_free(srng->base_vaddr_unaligned);
		}
		srng->alloc_size = 0;
		srng->base_vaddr_unaligned = NULL;
	}
	srng->hal_srng = NULL;
}

/* TODO: Need this interface from HIF */
void *hif_get_hal_handle(struct hif_opaque_softc *hif_handle);

#ifdef WLAN_FEATURE_DP_EVENT_HISTORY
int dp_srng_access_start(struct dp_intr *int_ctx, struct dp_soc *dp_soc,
			 hal_ring_handle_t hal_ring_hdl)
{
	hal_soc_handle_t hal_soc = dp_soc->hal_soc;
	uint32_t hp, tp;
	uint8_t ring_id;

	hal_get_sw_hptp(hal_soc, hal_ring_hdl, &tp, &hp);
	ring_id = hal_srng_ring_id_get(hal_ring_hdl);

	hif_record_event(dp_soc->hif_handle, int_ctx->dp_intr_id,
			 ring_id, hp, tp, HIF_EVENT_SRNG_ACCESS_START);

	return hal_srng_access_start(hal_soc, hal_ring_hdl);
}

void dp_srng_access_end(struct dp_intr *int_ctx, struct dp_soc *dp_soc,
			hal_ring_handle_t hal_ring_hdl)
{
	hal_soc_handle_t hal_soc = dp_soc->hal_soc;
	uint32_t hp, tp;
	uint8_t ring_id;

	hal_get_sw_hptp(hal_soc, hal_ring_hdl, &tp, &hp);
	ring_id = hal_srng_ring_id_get(hal_ring_hdl);

	hif_record_event(dp_soc->hif_handle, int_ctx->dp_intr_id,
			 ring_id, hp, tp, HIF_EVENT_SRNG_ACCESS_END);

	return hal_srng_access_end(hal_soc, hal_ring_hdl);
}
#endif /* WLAN_FEATURE_DP_EVENT_HISTORY */

/*
 * dp_service_srngs() - Top level interrupt handler for DP Ring interrupts
 * @dp_ctx: DP SOC handle
 * @budget: Number of frames/descriptors that can be processed in one shot
 *
 * Return: remaining budget/quota for the soc device
 */
static uint32_t dp_service_srngs(void *dp_ctx, uint32_t dp_budget)
{
	struct dp_intr *int_ctx = (struct dp_intr *)dp_ctx;
	struct dp_intr_stats *intr_stats = &int_ctx->intr_stats;
	struct dp_soc *soc = int_ctx->soc;
	int ring = 0;
	uint32_t work_done = 0;
	int budget = dp_budget;
	uint8_t tx_mask = int_ctx->tx_ring_mask;
	uint8_t rx_mask = int_ctx->rx_ring_mask;
	uint8_t rx_err_mask = int_ctx->rx_err_ring_mask;
	uint8_t rx_wbm_rel_mask = int_ctx->rx_wbm_rel_ring_mask;
	uint8_t reo_status_mask = int_ctx->reo_status_ring_mask;
	uint32_t remaining_quota = dp_budget;
	struct dp_pdev *pdev = NULL;
	int mac_id;

	dp_verbose_debug("tx %x rx %x rx_err %x rx_wbm_rel %x reo_status %x rx_mon_ring %x host2rxdma %x rxdma2host %x",
			 tx_mask, rx_mask, rx_err_mask, rx_wbm_rel_mask,
			 reo_status_mask,
			 int_ctx->rx_mon_ring_mask,
			 int_ctx->host2rxdma_ring_mask,
			 int_ctx->rxdma2host_ring_mask);

	/* Process Tx completion interrupts first to return back buffers */
	while (tx_mask) {
		if (tx_mask & 0x1) {
			work_done = dp_tx_comp_handler(int_ctx,
						       soc,
						       soc->tx_comp_ring[ring].hal_srng,
						       ring, remaining_quota);

			if (work_done) {
				intr_stats->num_tx_ring_masks[ring]++;
				dp_verbose_debug("tx mask 0x%x ring %d, budget %d, work_done %d",
						 tx_mask, ring, budget,
						 work_done);
			}

			budget -= work_done;
			if (budget <= 0)
				goto budget_done;

			remaining_quota = budget;
		}
		tx_mask = tx_mask >> 1;
		ring++;
	}

	/* Process REO Exception ring interrupt */
	if (rx_err_mask) {
		work_done = dp_rx_err_process(int_ctx, soc,
					      soc->reo_exception_ring.hal_srng,
					      remaining_quota);

		if (work_done) {
			intr_stats->num_rx_err_ring_masks++;
			dp_verbose_debug("REO Exception Ring: work_done %d budget %d",
					 work_done, budget);
		}

		budget -= work_done;
		if (budget <= 0)
			goto budget_done;

		remaining_quota = budget;
	}

	/* Process Rx WBM release ring interrupt */
	if (rx_wbm_rel_mask) {
		work_done = dp_rx_wbm_err_process(int_ctx, soc,
						  soc->rx_rel_ring.hal_srng,
						  remaining_quota);

		if (work_done) {
			intr_stats->num_rx_wbm_rel_ring_masks++;
			dp_verbose_debug("WBM Release Ring: work_done %d budget %d",
					 work_done, budget);
		}

		budget -= work_done;
		if (budget <= 0)
			goto budget_done;

		remaining_quota = budget;
	}

	/* Process Rx interrupts */
	if (rx_mask) {
		for (ring = 0; ring < soc->num_reo_dest_rings; ring++) {
			if (!(rx_mask & (1 << ring)))
				continue;
			work_done = dp_rx_process(int_ctx,
						  soc->reo_dest_ring[ring].hal_srng,
						  ring,
						  remaining_quota);
			if (work_done) {
				intr_stats->num_rx_ring_masks[ring]++;
				dp_verbose_debug("rx mask 0x%x ring %d, work_done %d budget %d",
						 rx_mask, ring,
						 work_done, budget);
				budget -= work_done;
				if (budget <= 0)
					goto budget_done;
				remaining_quota = budget;
			}
		}
	}

	if (reo_status_mask) {
		if (dp_reo_status_ring_handler(int_ctx, soc))
			int_ctx->intr_stats.num_reo_status_ring_masks++;
	}

	/* Process LMAC interrupts */
	for (ring = 0; ring < MAX_PDEV_CNT; ring++) {
		pdev = soc->pdev_list[ring];
		if (!pdev)
			continue;
		for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
			int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id,
								pdev->pdev_id);
			if (int_ctx->rx_mon_ring_mask & (1 << mac_for_pdev)) {
				work_done = dp_mon_process(soc, mac_for_pdev,
							   remaining_quota);
				if (work_done)
					intr_stats->num_rx_mon_ring_masks++;
				budget -= work_done;
				if (budget <= 0)
					goto budget_done;
				remaining_quota = budget;
			}

			if (int_ctx->rxdma2host_ring_mask &
					(1 << mac_for_pdev)) {
				work_done = dp_rxdma_err_process(int_ctx, soc,
								 mac_for_pdev,
								 remaining_quota);
				if (work_done)
					intr_stats->num_rxdma2host_ring_masks++;
				budget -= work_done;
				if (budget <= 0)
					goto budget_done;
				remaining_quota = budget;
			}

			if (int_ctx->host2rxdma_ring_mask &
						(1 << mac_for_pdev)) {
				union dp_rx_desc_list_elem_t *desc_list = NULL;
				union dp_rx_desc_list_elem_t *tail = NULL;
				struct dp_srng *rx_refill_buf_ring =
					&pdev->rx_refill_buf_ring;

				intr_stats->num_host2rxdma_ring_masks++;
				DP_STATS_INC(pdev, replenish.low_thresh_intrs,
					     1);
				dp_rx_buffers_replenish(soc, mac_for_pdev,
							rx_refill_buf_ring,
							&soc->rx_desc_buf[mac_for_pdev],
							0, &desc_list, &tail);
			}
		}
	}

	qdf_lro_flush(int_ctx->lro_ctx);
	intr_stats->num_masks++;

budget_done:
	return dp_budget - budget;
}
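
/*
 * Illustrative sketch (not driver code) of the budget bookkeeping used
 * throughout dp_service_srngs(): each ring handler consumes part of the
 * quota, and the function finally reports how much of the original budget
 * was spent.
 *
 *	budget -= work_done;		// quota left for later rings
 *	if (budget <= 0)
 *		goto budget_done;
 *	remaining_quota = budget;	// next handler's allowance
 *	...
 *	return dp_budget - budget;	// total work actually done
 */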

/*
 * dp_interrupt_timer() - timer handler to poll for DP interrupts
 * @arg: SoC Handle
 *
 * Return: none
 */
static void dp_interrupt_timer(void *arg)
{
	struct dp_soc *soc = (struct dp_soc *)arg;
	int i;

	if (qdf_atomic_read(&soc->cmn_init_done)) {
		for (i = 0;
		     i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++)
			dp_service_srngs(&soc->intr_ctx[i], 0xffff);

		qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
	}
}

/*
 * dp_soc_attach_poll() - Register handlers for DP interrupts
 * @txrx_soc: DP SOC handle
 *
 * Host driver will register for "DP_NUM_INTERRUPT_CONTEXTS" number of NAPI
 * contexts. Each NAPI context will have a tx_ring_mask, rx_ring_mask, and
 * rx_monitor_ring mask to indicate the rings that are processed by the
 * handler.
 *
 * Return: 0 for success, nonzero for failure.
 */
static QDF_STATUS dp_soc_attach_poll(struct cdp_soc_t *txrx_soc)
{
	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
	int i;

	soc->intr_mode = DP_INTR_POLL;

	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
		soc->intr_ctx[i].dp_intr_id = i;
		soc->intr_ctx[i].tx_ring_mask =
			wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, i);
		soc->intr_ctx[i].rx_ring_mask =
			wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, i);
		soc->intr_ctx[i].rx_mon_ring_mask =
			wlan_cfg_get_rx_mon_ring_mask(soc->wlan_cfg_ctx, i);
		soc->intr_ctx[i].rx_err_ring_mask =
			wlan_cfg_get_rx_err_ring_mask(soc->wlan_cfg_ctx, i);
		soc->intr_ctx[i].rx_wbm_rel_ring_mask =
			wlan_cfg_get_rx_wbm_rel_ring_mask(soc->wlan_cfg_ctx, i);
		soc->intr_ctx[i].reo_status_ring_mask =
			wlan_cfg_get_reo_status_ring_mask(soc->wlan_cfg_ctx, i);
		soc->intr_ctx[i].rxdma2host_ring_mask =
			wlan_cfg_get_rxdma2host_ring_mask(soc->wlan_cfg_ctx, i);
		soc->intr_ctx[i].soc = soc;
		soc->intr_ctx[i].lro_ctx = qdf_lro_init();
	}

	qdf_timer_init(soc->osdev, &soc->int_timer,
		       dp_interrupt_timer, (void *)soc,
		       QDF_TIMER_TYPE_WAKE_APPS);

	return QDF_STATUS_SUCCESS;
}

/**
 * dp_soc_set_interrupt_mode() - Set the interrupt mode in soc
 * @soc: DP soc handle
 *
 * Set the appropriate interrupt mode flag in the soc
 */
static void dp_soc_set_interrupt_mode(struct dp_soc *soc)
{
	uint32_t msi_base_data, msi_vector_start;
	int msi_vector_count, ret;

	soc->intr_mode = DP_INTR_LEGACY;

	if (!(soc->wlan_cfg_ctx->napi_enabled) ||
	    (soc->cdp_soc.ol_ops->get_con_mode &&
	     soc->cdp_soc.ol_ops->get_con_mode() == QDF_GLOBAL_MONITOR_MODE)) {
		soc->intr_mode = DP_INTR_POLL;
	} else {
		ret = pld_get_user_msi_assignment(soc->osdev->dev, "DP",
						  &msi_vector_count,
						  &msi_base_data,
						  &msi_vector_start);
		if (ret)
			return;

		soc->intr_mode = DP_INTR_MSI;
	}
}

static QDF_STATUS dp_soc_interrupt_attach(struct cdp_soc_t *txrx_soc);
#if defined(DP_INTR_POLL_BOTH)
/*
 * dp_soc_interrupt_attach_wrapper() - Register handlers for DP interrupts
 * @txrx_soc: DP SOC handle
 *
 * Call the appropriate attach function based on the mode of operation.
 * This is a WAR for enabling monitor mode.
 *
 * Return: 0 for success. nonzero for failure.
 */
static QDF_STATUS dp_soc_interrupt_attach_wrapper(struct cdp_soc_t *txrx_soc)
{
	struct dp_soc *soc = (struct dp_soc *)txrx_soc;

	if (!(soc->wlan_cfg_ctx->napi_enabled) ||
	    (soc->cdp_soc.ol_ops->get_con_mode &&
	     soc->cdp_soc.ol_ops->get_con_mode() ==
	     QDF_GLOBAL_MONITOR_MODE)) {
		dp_info("Poll mode");
		return dp_soc_attach_poll(txrx_soc);
	} else {
		dp_info("Interrupt mode");
		return dp_soc_interrupt_attach(txrx_soc);
	}
}
#else
#if defined(DP_INTR_POLL_BASED) && DP_INTR_POLL_BASED
static QDF_STATUS dp_soc_interrupt_attach_wrapper(struct cdp_soc_t *txrx_soc)
{
	return dp_soc_attach_poll(txrx_soc);
}
#else
static QDF_STATUS dp_soc_interrupt_attach_wrapper(struct cdp_soc_t *txrx_soc)
{
	struct dp_soc *soc = (struct dp_soc *)txrx_soc;

	if (hif_is_polled_mode_enabled(soc->hif_handle))
		return dp_soc_attach_poll(txrx_soc);
	else
		return dp_soc_interrupt_attach(txrx_soc);
}
#endif
#endif
1761 
1762 static void dp_soc_interrupt_map_calculate_integrated(struct dp_soc *soc,
1763 		int intr_ctx_num, int *irq_id_map, int *num_irq_r)
1764 {
1765 	int j;
1766 	int num_irq = 0;
1767 
1768 	int tx_mask =
1769 		wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
1770 	int rx_mask =
1771 		wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
1772 	int rx_mon_mask =
1773 		wlan_cfg_get_rx_mon_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
1774 	int rx_err_ring_mask = wlan_cfg_get_rx_err_ring_mask(
1775 					soc->wlan_cfg_ctx, intr_ctx_num);
1776 	int rx_wbm_rel_ring_mask = wlan_cfg_get_rx_wbm_rel_ring_mask(
1777 					soc->wlan_cfg_ctx, intr_ctx_num);
1778 	int reo_status_ring_mask = wlan_cfg_get_reo_status_ring_mask(
1779 					soc->wlan_cfg_ctx, intr_ctx_num);
1780 	int rxdma2host_ring_mask = wlan_cfg_get_rxdma2host_ring_mask(
1781 					soc->wlan_cfg_ctx, intr_ctx_num);
1782 	int host2rxdma_ring_mask = wlan_cfg_get_host2rxdma_ring_mask(
1783 					soc->wlan_cfg_ctx, intr_ctx_num);
1784 	int host2rxdma_mon_ring_mask = wlan_cfg_get_host2rxdma_mon_ring_mask(
1785 					soc->wlan_cfg_ctx, intr_ctx_num);
1786 
1787 	soc->intr_mode = DP_INTR_LEGACY;
1788 
1789 	for (j = 0; j < HIF_MAX_GRP_IRQ; j++) {
1790 
1791 		if (tx_mask & (1 << j)) {
1792 			irq_id_map[num_irq++] =
1793 				(wbm2host_tx_completions_ring1 - j);
1794 		}
1795 
1796 		if (rx_mask & (1 << j)) {
1797 			irq_id_map[num_irq++] =
1798 				(reo2host_destination_ring1 - j);
1799 		}
1800 
1801 		if (rxdma2host_ring_mask & (1 << j)) {
1802 			irq_id_map[num_irq++] =
1803 				rxdma2host_destination_ring_mac1 -
1804 				wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, j);
1805 		}
1806 
1807 		if (host2rxdma_ring_mask & (1 << j)) {
1808 			irq_id_map[num_irq++] =
1809 				host2rxdma_host_buf_ring_mac1 -
1810 				wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, j);
1811 		}
1812 
1813 		if (host2rxdma_mon_ring_mask & (1 << j)) {
1814 			irq_id_map[num_irq++] =
1815 				host2rxdma_monitor_ring1 -
1816 				wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, j);
1817 		}
1818 
1819 		if (rx_mon_mask & (1 << j)) {
1820 			irq_id_map[num_irq++] =
1821 				ppdu_end_interrupts_mac1 -
1822 				wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, j);
1823 			irq_id_map[num_irq++] =
1824 				rxdma2host_monitor_status_ring_mac1 -
1825 				wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, j);
1826 		}
1827 
1828 		if (rx_wbm_rel_ring_mask & (1 << j))
1829 			irq_id_map[num_irq++] = wbm2host_rx_release;
1830 
1831 		if (rx_err_ring_mask & (1 << j))
1832 			irq_id_map[num_irq++] = reo2host_exception;
1833 
1834 		if (reo_status_ring_mask & (1 << j))
1835 			irq_id_map[num_irq++] = reo2host_status;
1836 
1837 	}
1838 	*num_irq_r = num_irq;
1839 }
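/*
 * Worked example (editorial illustration only): for a context whose cfg
 * masks are tx_ring_mask = 0x3 and rx_err_ring_mask = 0x1, with all other
 * masks zero, the loop above emits:
 *
 *   j = 0: irq_id_map[0] = wbm2host_tx_completions_ring1 - 0
 *          irq_id_map[1] = reo2host_exception
 *   j = 1: irq_id_map[2] = wbm2host_tx_completions_ring1 - 1
 *
 * i.e. bit j of a mask selects ring (j + 1) of that ring class, and the
 * per-ring IRQ ids descend from the corresponding *_ring1 enumerator.
 */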
1840 
1841 static void dp_soc_interrupt_map_calculate_msi(struct dp_soc *soc,
1842 		int intr_ctx_num, int *irq_id_map, int *num_irq_r,
1843 		int msi_vector_count, int msi_vector_start)
1844 {
1845 	int tx_mask = wlan_cfg_get_tx_ring_mask(
1846 					soc->wlan_cfg_ctx, intr_ctx_num);
1847 	int rx_mask = wlan_cfg_get_rx_ring_mask(
1848 					soc->wlan_cfg_ctx, intr_ctx_num);
1849 	int rx_mon_mask = wlan_cfg_get_rx_mon_ring_mask(
1850 					soc->wlan_cfg_ctx, intr_ctx_num);
1851 	int rx_err_ring_mask = wlan_cfg_get_rx_err_ring_mask(
1852 					soc->wlan_cfg_ctx, intr_ctx_num);
1853 	int rx_wbm_rel_ring_mask = wlan_cfg_get_rx_wbm_rel_ring_mask(
1854 					soc->wlan_cfg_ctx, intr_ctx_num);
1855 	int reo_status_ring_mask = wlan_cfg_get_reo_status_ring_mask(
1856 					soc->wlan_cfg_ctx, intr_ctx_num);
1857 	int rxdma2host_ring_mask = wlan_cfg_get_rxdma2host_ring_mask(
1858 					soc->wlan_cfg_ctx, intr_ctx_num);
1859 
1860 	unsigned int vector =
1861 		(intr_ctx_num % msi_vector_count) + msi_vector_start;
1862 	int num_irq = 0;
1863 
1864 	soc->intr_mode = DP_INTR_MSI;
1865 
1866 	if (tx_mask | rx_mask | rx_mon_mask | rx_err_ring_mask |
1867 	    rx_wbm_rel_ring_mask | reo_status_ring_mask | rxdma2host_ring_mask)
1868 		irq_id_map[num_irq++] =
1869 			pld_get_msi_irq(soc->osdev->dev, vector);
1870 
1871 	*num_irq_r = num_irq;
1872 }
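/*
 * Illustration (editorial): with a hypothetical msi_vector_start = 1 and
 * msi_vector_count = 3, the round-robin above maps interrupt contexts to
 * MSI vectors as 0->1, 1->2, 2->3, 3->1, 4->2, ... All rings grouped in
 * one interrupt context share a single vector, which is why at most one
 * IRQ id is produced per context here.
 */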
1873 
1874 static void dp_soc_interrupt_map_calculate(struct dp_soc *soc, int intr_ctx_num,
1875 				    int *irq_id_map, int *num_irq)
1876 {
1877 	int msi_vector_count, ret;
1878 	uint32_t msi_base_data, msi_vector_start;
1879 
1880 	ret = pld_get_user_msi_assignment(soc->osdev->dev, "DP",
1881 					    &msi_vector_count,
1882 					    &msi_base_data,
1883 					    &msi_vector_start);
	if (ret)
		dp_soc_interrupt_map_calculate_integrated(soc, intr_ctx_num,
							  irq_id_map, num_irq);
	else
		dp_soc_interrupt_map_calculate_msi(soc, intr_ctx_num,
						   irq_id_map, num_irq,
						   msi_vector_count,
						   msi_vector_start);
1892 }
1893 
1894 /*
1895  * dp_soc_interrupt_attach() - Register handlers for DP interrupts
1896  * @txrx_soc: DP SOC handle
1897  *
 * Host driver will register for "DP_NUM_INTERRUPT_CONTEXTS" number of NAPI
 * contexts. Each NAPI context will have a tx_ring_mask, rx_ring_mask, and
 * rx_mon_ring_mask to indicate the rings that are processed by the handler.
1901  *
1902  * Return: 0 for success. nonzero for failure.
1903  */
1904 static QDF_STATUS dp_soc_interrupt_attach(struct cdp_soc_t *txrx_soc)
1905 {
1906 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
1907 
1908 	int i = 0;
1909 	int num_irq = 0;
1910 
1911 	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
1912 		int ret = 0;
1913 
1914 		/* Map of IRQ ids registered with one interrupt context */
1915 		int irq_id_map[HIF_MAX_GRP_IRQ];
1916 
1917 		int tx_mask =
1918 			wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, i);
1919 		int rx_mask =
1920 			wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, i);
1921 		int rx_mon_mask =
1922 			dp_soc_get_mon_mask_for_interrupt_mode(soc, i);
1923 		int rx_err_ring_mask =
1924 			wlan_cfg_get_rx_err_ring_mask(soc->wlan_cfg_ctx, i);
1925 		int rx_wbm_rel_ring_mask =
1926 			wlan_cfg_get_rx_wbm_rel_ring_mask(soc->wlan_cfg_ctx, i);
1927 		int reo_status_ring_mask =
1928 			wlan_cfg_get_reo_status_ring_mask(soc->wlan_cfg_ctx, i);
1929 		int rxdma2host_ring_mask =
1930 			wlan_cfg_get_rxdma2host_ring_mask(soc->wlan_cfg_ctx, i);
1931 		int host2rxdma_ring_mask =
1932 			wlan_cfg_get_host2rxdma_ring_mask(soc->wlan_cfg_ctx, i);
1933 		int host2rxdma_mon_ring_mask =
1934 			wlan_cfg_get_host2rxdma_mon_ring_mask(
1935 				soc->wlan_cfg_ctx, i);
1936 
1937 		soc->intr_ctx[i].dp_intr_id = i;
1938 		soc->intr_ctx[i].tx_ring_mask = tx_mask;
1939 		soc->intr_ctx[i].rx_ring_mask = rx_mask;
1940 		soc->intr_ctx[i].rx_mon_ring_mask = rx_mon_mask;
1941 		soc->intr_ctx[i].rx_err_ring_mask = rx_err_ring_mask;
1942 		soc->intr_ctx[i].rxdma2host_ring_mask = rxdma2host_ring_mask;
1943 		soc->intr_ctx[i].host2rxdma_ring_mask = host2rxdma_ring_mask;
1944 		soc->intr_ctx[i].rx_wbm_rel_ring_mask = rx_wbm_rel_ring_mask;
1945 		soc->intr_ctx[i].reo_status_ring_mask = reo_status_ring_mask;
1946 		soc->intr_ctx[i].host2rxdma_mon_ring_mask =
1947 			 host2rxdma_mon_ring_mask;
1948 
1949 		soc->intr_ctx[i].soc = soc;
1950 
1951 		num_irq = 0;
1952 
1953 		dp_soc_interrupt_map_calculate(soc, i, &irq_id_map[0],
1954 					       &num_irq);
1955 
1956 		ret = hif_register_ext_group(soc->hif_handle,
1957 				num_irq, irq_id_map, dp_service_srngs,
1958 				&soc->intr_ctx[i], "dp_intr",
1959 				HIF_EXEC_NAPI_TYPE, QCA_NAPI_DEF_SCALE_BIN_SHIFT);
1960 
1961 		if (ret) {
1962 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1963 			FL("failed, ret = %d"), ret);
1964 
1965 			return QDF_STATUS_E_FAILURE;
1966 		}
1967 		soc->intr_ctx[i].lro_ctx = qdf_lro_init();
1968 	}
1969 
1970 	hif_configure_ext_group_interrupts(soc->hif_handle);
1971 
1972 	return QDF_STATUS_SUCCESS;
1973 }
1974 
1975 /*
1976  * dp_soc_interrupt_detach() - Deregister any allocations done for interrupts
1977  * @txrx_soc: DP SOC handle
1978  *
1979  * Return: none
1980  */
1981 static void dp_soc_interrupt_detach(struct cdp_soc_t *txrx_soc)
1982 {
1983 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
1984 	int i;
1985 
1986 	if (soc->intr_mode == DP_INTR_POLL) {
1987 		qdf_timer_stop(&soc->int_timer);
1988 		qdf_timer_free(&soc->int_timer);
1989 	} else {
1990 		hif_deregister_exec_group(soc->hif_handle, "dp_intr");
1991 	}
1992 
1993 	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
1994 		soc->intr_ctx[i].tx_ring_mask = 0;
1995 		soc->intr_ctx[i].rx_ring_mask = 0;
1996 		soc->intr_ctx[i].rx_mon_ring_mask = 0;
1997 		soc->intr_ctx[i].rx_err_ring_mask = 0;
1998 		soc->intr_ctx[i].rx_wbm_rel_ring_mask = 0;
1999 		soc->intr_ctx[i].reo_status_ring_mask = 0;
2000 		soc->intr_ctx[i].rxdma2host_ring_mask = 0;
2001 		soc->intr_ctx[i].host2rxdma_ring_mask = 0;
2002 		soc->intr_ctx[i].host2rxdma_mon_ring_mask = 0;
2003 
2004 		qdf_lro_deinit(soc->intr_ctx[i].lro_ctx);
2005 	}
2006 }
2007 
2008 #define AVG_MAX_MPDUS_PER_TID 128
2009 #define AVG_TIDS_PER_CLIENT 2
2010 #define AVG_FLOWS_PER_TID 2
2011 #define AVG_MSDUS_PER_FLOW 128
2012 #define AVG_MSDUS_PER_MPDU 4
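/*
 * Sizing illustration (editorial; the HW parameters are hypothetical, not
 * taken from any specific target): with max_clients = 64,
 * num_mpdus_per_link_desc = 6, num_msdus_per_link_desc = 7 and
 * num_mpdu_links_per_queue_desc = 64, the formulas below yield roughly:
 *
 *   num_mpdu_link_descs    = (64 * 2 * 128) / 6     ~ 2730
 *   num_mpdu_queue_descs   = 2730 / 64              ~ 42
 *   num_tx_msdu_link_descs = (64 * 2 * 2 * 128) / 7 ~ 4681
 *   num_rx_msdu_link_descs = (64 * 2 * 128 * 4) / 6 ~ 10922
 *
 * and the sum is then rounded up to the next power of two.
 */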
2013 
/*
 * Allocate and set up the link descriptor pool that will be used by HW
 * for various link and queue descriptors, managed by WBM
 */
2018 static int dp_hw_link_desc_pool_setup(struct dp_soc *soc)
2019 {
2020 	int link_desc_size = hal_get_link_desc_size(soc->hal_soc);
2021 	int link_desc_align = hal_get_link_desc_align(soc->hal_soc);
2022 	uint32_t max_clients = wlan_cfg_get_max_clients(soc->wlan_cfg_ctx);
2023 	uint32_t num_mpdus_per_link_desc =
2024 		hal_num_mpdus_per_link_desc(soc->hal_soc);
2025 	uint32_t num_msdus_per_link_desc =
2026 		hal_num_msdus_per_link_desc(soc->hal_soc);
2027 	uint32_t num_mpdu_links_per_queue_desc =
2028 		hal_num_mpdu_links_per_queue_desc(soc->hal_soc);
2029 	uint32_t max_alloc_size = wlan_cfg_max_alloc_size(soc->wlan_cfg_ctx);
2030 	uint32_t total_link_descs, total_mem_size;
2031 	uint32_t num_mpdu_link_descs, num_mpdu_queue_descs;
2032 	uint32_t num_tx_msdu_link_descs, num_rx_msdu_link_descs;
2033 	uint32_t num_link_desc_banks;
2034 	uint32_t last_bank_size = 0;
2035 	uint32_t entry_size, num_entries;
2036 	int i;
2037 	uint32_t desc_id = 0;
2038 	qdf_dma_addr_t *baseaddr = NULL;
2039 
	/* Only Tx queue descriptors are allocated from the common link
	 * descriptor pool. Rx queue descriptors (REO queue extension
	 * descriptors) are not included here, because they are expected to
	 * be allocated contiguously with the REO queue descriptors.
	 */
2045 	num_mpdu_link_descs = (max_clients * AVG_TIDS_PER_CLIENT *
2046 		AVG_MAX_MPDUS_PER_TID) / num_mpdus_per_link_desc;
2047 
2048 	num_mpdu_queue_descs = num_mpdu_link_descs /
2049 		num_mpdu_links_per_queue_desc;
2050 
2051 	num_tx_msdu_link_descs = (max_clients * AVG_TIDS_PER_CLIENT *
2052 		AVG_FLOWS_PER_TID * AVG_MSDUS_PER_FLOW) /
2053 		num_msdus_per_link_desc;
2054 
2055 	num_rx_msdu_link_descs = (max_clients * AVG_TIDS_PER_CLIENT *
2056 		AVG_MAX_MPDUS_PER_TID * AVG_MSDUS_PER_MPDU) / 6;
2057 
2058 	num_entries = num_mpdu_link_descs + num_mpdu_queue_descs +
2059 		num_tx_msdu_link_descs + num_rx_msdu_link_descs;
2060 
2061 	/* Round up to power of 2 */
2062 	total_link_descs = 1;
2063 	while (total_link_descs < num_entries)
2064 		total_link_descs <<= 1;
2065 
2066 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
2067 		FL("total_link_descs: %u, link_desc_size: %d"),
2068 		total_link_descs, link_desc_size);
2069 	total_mem_size =  total_link_descs * link_desc_size;
2070 
2071 	total_mem_size += link_desc_align;
2072 
2073 	if (total_mem_size <= max_alloc_size) {
2074 		num_link_desc_banks = 0;
2075 		last_bank_size = total_mem_size;
2076 	} else {
2077 		num_link_desc_banks = (total_mem_size) /
2078 			(max_alloc_size - link_desc_align);
2079 		last_bank_size = total_mem_size %
2080 			(max_alloc_size - link_desc_align);
2081 	}
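	/*
	 * Bank-split illustration (editorial, assumed numbers): with a
	 * usable bank size of (max_alloc_size - link_desc_align) = 2 MB
	 * and total_mem_size = 5 MB, the division above yields
	 * num_link_desc_banks = 2 full banks plus last_bank_size = 1 MB,
	 * which is allocated separately further below.
	 */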
2082 
2083 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
2084 		FL("total_mem_size: %d, num_link_desc_banks: %u"),
2085 		total_mem_size, num_link_desc_banks);
2086 
2087 	for (i = 0; i < num_link_desc_banks; i++) {
2088 		if (!dp_is_soc_reinit(soc)) {
2089 			baseaddr = &soc->link_desc_banks[i].
2090 					base_paddr_unaligned;
2091 			soc->link_desc_banks[i].base_vaddr_unaligned =
2092 				qdf_mem_alloc_consistent(soc->osdev,
2093 							 soc->osdev->dev,
2094 							 max_alloc_size,
2095 							 baseaddr);
2096 		}
2097 		soc->link_desc_banks[i].size = max_alloc_size;
2098 
2099 		soc->link_desc_banks[i].base_vaddr = (void *)((unsigned long)(
2100 			soc->link_desc_banks[i].base_vaddr_unaligned) +
2101 			((unsigned long)(
2102 			soc->link_desc_banks[i].base_vaddr_unaligned) %
2103 			link_desc_align));
2104 
2105 		soc->link_desc_banks[i].base_paddr = (unsigned long)(
2106 			soc->link_desc_banks[i].base_paddr_unaligned) +
2107 			((unsigned long)(soc->link_desc_banks[i].base_vaddr) -
2108 			(unsigned long)(
2109 			soc->link_desc_banks[i].base_vaddr_unaligned));
2110 
2111 		if (!soc->link_desc_banks[i].base_vaddr_unaligned) {
2112 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2113 				FL("Link descriptor memory alloc failed"));
2114 			goto fail;
2115 		}
		qdf_minidump_log(soc->link_desc_banks[i].base_vaddr,
				 soc->link_desc_banks[i].size,
				 "link_desc_bank");
2124 	}
2125 
2126 	if (last_bank_size) {
		/* Allocate the last bank in case the total memory required
		 * is not an exact multiple of max_alloc_size
2129 		 */
2130 		if (!dp_is_soc_reinit(soc)) {
2131 			baseaddr = &soc->link_desc_banks[i].
2132 					base_paddr_unaligned;
2133 			soc->link_desc_banks[i].base_vaddr_unaligned =
2134 				qdf_mem_alloc_consistent(soc->osdev,
2135 							 soc->osdev->dev,
2136 							 last_bank_size,
2137 							 baseaddr);
2138 		}
2139 		soc->link_desc_banks[i].size = last_bank_size;
2140 
2141 		soc->link_desc_banks[i].base_vaddr = (void *)((unsigned long)
2142 			(soc->link_desc_banks[i].base_vaddr_unaligned) +
2143 			((unsigned long)(
2144 			soc->link_desc_banks[i].base_vaddr_unaligned) %
2145 			link_desc_align));
2146 
2147 		soc->link_desc_banks[i].base_paddr =
2148 			(unsigned long)(
2149 			soc->link_desc_banks[i].base_paddr_unaligned) +
2150 			((unsigned long)(soc->link_desc_banks[i].base_vaddr) -
2151 			(unsigned long)(
2152 			soc->link_desc_banks[i].base_vaddr_unaligned));
2153 
		if (!soc->link_desc_banks[i].base_vaddr_unaligned) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				FL("Link descriptor memory alloc failed"));
			goto fail;
		}
		qdf_minidump_log(soc->link_desc_banks[i].base_vaddr,
				 soc->link_desc_banks[i].size,
				 "link_desc_bank");
2162 	}
2163 
2164 
2165 	/* Allocate and setup link descriptor idle list for HW internal use */
2166 	entry_size = hal_srng_get_entrysize(soc->hal_soc, WBM_IDLE_LINK);
2167 	total_mem_size = entry_size * total_link_descs;
2168 
2169 	if (total_mem_size <= max_alloc_size) {
2170 		void *desc;
2171 
2172 		if (dp_srng_setup(soc, &soc->wbm_idle_link_ring,
2173 				  WBM_IDLE_LINK, 0, 0, total_link_descs, 0)) {
2174 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2175 				FL("Link desc idle ring setup failed"));
2176 			goto fail;
2177 		}
2178 
2179 		qdf_minidump_log(soc->wbm_idle_link_ring.base_vaddr_unaligned,
2180 				 soc->wbm_idle_link_ring.alloc_size,
2181 				 "wbm_idle_link_ring");
2182 
2183 		hal_srng_access_start_unlocked(soc->hal_soc,
2184 			soc->wbm_idle_link_ring.hal_srng);
2185 
2186 		for (i = 0; i < MAX_LINK_DESC_BANKS &&
2187 			soc->link_desc_banks[i].base_paddr; i++) {
2188 			uint32_t num_entries = (soc->link_desc_banks[i].size -
2189 				((unsigned long)(
2190 				soc->link_desc_banks[i].base_vaddr) -
2191 				(unsigned long)(
2192 				soc->link_desc_banks[i].base_vaddr_unaligned)))
2193 				/ link_desc_size;
2194 			unsigned long paddr = (unsigned long)(
2195 				soc->link_desc_banks[i].base_paddr);
2196 
2197 			while (num_entries && (desc = hal_srng_src_get_next(
2198 				soc->hal_soc,
2199 				soc->wbm_idle_link_ring.hal_srng))) {
2200 				hal_set_link_desc_addr(desc,
2201 					LINK_DESC_COOKIE(desc_id, i), paddr);
2202 				num_entries--;
2203 				desc_id++;
2204 				paddr += link_desc_size;
2205 			}
2206 		}
2207 		hal_srng_access_end_unlocked(soc->hal_soc,
2208 			soc->wbm_idle_link_ring.hal_srng);
2209 	} else {
2210 		uint32_t num_scatter_bufs;
2211 		uint32_t num_entries_per_buf;
2212 		uint32_t rem_entries;
2213 		uint8_t *scatter_buf_ptr;
2214 		uint16_t scatter_buf_num;
2215 		uint32_t buf_size = 0;
2216 
2217 		soc->wbm_idle_scatter_buf_size =
2218 			hal_idle_list_scatter_buf_size(soc->hal_soc);
2219 		num_entries_per_buf = hal_idle_scatter_buf_num_entries(
2220 			soc->hal_soc, soc->wbm_idle_scatter_buf_size);
2221 		num_scatter_bufs = hal_idle_list_num_scatter_bufs(
2222 					soc->hal_soc, total_mem_size,
2223 					soc->wbm_idle_scatter_buf_size);
2224 
2225 		if (num_scatter_bufs > MAX_IDLE_SCATTER_BUFS) {
2226 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
					FL("scatter buf count out of bounds"));
2228 			goto fail;
2229 		}
2230 
2231 		for (i = 0; i < num_scatter_bufs; i++) {
2232 			baseaddr = &soc->wbm_idle_scatter_buf_base_paddr[i];
2233 			if (!dp_is_soc_reinit(soc)) {
2234 				buf_size = soc->wbm_idle_scatter_buf_size;
2235 				soc->wbm_idle_scatter_buf_base_vaddr[i] =
2236 					qdf_mem_alloc_consistent(soc->osdev,
2237 								 soc->osdev->
2238 								 dev,
2239 								 buf_size,
2240 								 baseaddr);
2241 			}
2242 			if (!soc->wbm_idle_scatter_buf_base_vaddr[i]) {
2243 				QDF_TRACE(QDF_MODULE_ID_DP,
2244 					  QDF_TRACE_LEVEL_ERROR,
2245 					  FL("Scatter lst memory alloc fail"));
2246 				goto fail;
2247 			}
2248 		}
2249 
2250 		/* Populate idle list scatter buffers with link descriptor
2251 		 * pointers
2252 		 */
2253 		scatter_buf_num = 0;
2254 		scatter_buf_ptr = (uint8_t *)(
2255 			soc->wbm_idle_scatter_buf_base_vaddr[scatter_buf_num]);
2256 		rem_entries = num_entries_per_buf;
2257 
2258 		for (i = 0; i < MAX_LINK_DESC_BANKS &&
2259 			soc->link_desc_banks[i].base_paddr; i++) {
2260 			uint32_t num_link_descs =
2261 				(soc->link_desc_banks[i].size -
2262 				((unsigned long)(
2263 				soc->link_desc_banks[i].base_vaddr) -
2264 				(unsigned long)(
2265 				soc->link_desc_banks[i].base_vaddr_unaligned)))
2266 				/ link_desc_size;
2267 			unsigned long paddr = (unsigned long)(
2268 				soc->link_desc_banks[i].base_paddr);
2269 
2270 			while (num_link_descs) {
2271 				hal_set_link_desc_addr((void *)scatter_buf_ptr,
2272 					LINK_DESC_COOKIE(desc_id, i), paddr);
2273 				num_link_descs--;
2274 				desc_id++;
2275 				paddr += link_desc_size;
2276 				rem_entries--;
2277 				if (rem_entries) {
2278 					scatter_buf_ptr += entry_size;
2279 				} else {
2280 					rem_entries = num_entries_per_buf;
2281 					scatter_buf_num++;
2282 
2283 					if (scatter_buf_num >= num_scatter_bufs)
2284 						break;
2285 
2286 					scatter_buf_ptr = (uint8_t *)(
2287 						soc->wbm_idle_scatter_buf_base_vaddr[
2288 						scatter_buf_num]);
2289 				}
2290 			}
2291 		}
2292 		/* Setup link descriptor idle list in HW */
2293 		hal_setup_link_idle_list(soc->hal_soc,
2294 			soc->wbm_idle_scatter_buf_base_paddr,
2295 			soc->wbm_idle_scatter_buf_base_vaddr,
2296 			num_scatter_bufs, soc->wbm_idle_scatter_buf_size,
2297 			(uint32_t)(scatter_buf_ptr -
2298 			(uint8_t *)(soc->wbm_idle_scatter_buf_base_vaddr[
2299 			scatter_buf_num-1])), total_link_descs);
2300 	}
2301 	return 0;
2302 
2303 fail:
2304 	if (soc->wbm_idle_link_ring.hal_srng) {
2305 		dp_srng_cleanup(soc, &soc->wbm_idle_link_ring,
2306 				WBM_IDLE_LINK, 0);
2307 	}
2308 
2309 	for (i = 0; i < MAX_IDLE_SCATTER_BUFS; i++) {
2310 		if (soc->wbm_idle_scatter_buf_base_vaddr[i]) {
2311 			qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
2312 				soc->wbm_idle_scatter_buf_size,
2313 				soc->wbm_idle_scatter_buf_base_vaddr[i],
2314 				soc->wbm_idle_scatter_buf_base_paddr[i], 0);
2315 			soc->wbm_idle_scatter_buf_base_vaddr[i] = NULL;
2316 		}
2317 	}
2318 
2319 	for (i = 0; i < MAX_LINK_DESC_BANKS; i++) {
2320 		if (soc->link_desc_banks[i].base_vaddr_unaligned) {
2321 			qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
2322 				soc->link_desc_banks[i].size,
2323 				soc->link_desc_banks[i].base_vaddr_unaligned,
2324 				soc->link_desc_banks[i].base_paddr_unaligned,
2325 				0);
2326 			soc->link_desc_banks[i].base_vaddr_unaligned = NULL;
2327 		}
2328 	}
2329 	return QDF_STATUS_E_FAILURE;
2330 }
2331 
/*
 * Free the link descriptor pool that was set up for HW use
 */
2335 static void dp_hw_link_desc_pool_cleanup(struct dp_soc *soc)
2336 {
2337 	int i;
2338 
2339 	if (soc->wbm_idle_link_ring.hal_srng) {
2340 		qdf_minidump_remove(
2341 			soc->wbm_idle_link_ring.base_vaddr_unaligned);
2342 		dp_srng_cleanup(soc, &soc->wbm_idle_link_ring,
2343 			WBM_IDLE_LINK, 0);
2344 	}
2345 
2346 	for (i = 0; i < MAX_IDLE_SCATTER_BUFS; i++) {
2347 		if (soc->wbm_idle_scatter_buf_base_vaddr[i]) {
2348 			qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
2349 				soc->wbm_idle_scatter_buf_size,
2350 				soc->wbm_idle_scatter_buf_base_vaddr[i],
2351 				soc->wbm_idle_scatter_buf_base_paddr[i], 0);
2352 			soc->wbm_idle_scatter_buf_base_vaddr[i] = NULL;
2353 		}
2354 	}
2355 
2356 	for (i = 0; i < MAX_LINK_DESC_BANKS; i++) {
2357 		if (soc->link_desc_banks[i].base_vaddr_unaligned) {
2358 			qdf_minidump_remove(soc->link_desc_banks[i].base_vaddr);
2359 			qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
2360 				soc->link_desc_banks[i].size,
2361 				soc->link_desc_banks[i].base_vaddr_unaligned,
2362 				soc->link_desc_banks[i].base_paddr_unaligned,
2363 				0);
2364 			soc->link_desc_banks[i].base_vaddr_unaligned = NULL;
2365 		}
2366 	}
2367 }
2368 
2369 #ifdef IPA_OFFLOAD
2370 #define REO_DST_RING_SIZE_QCA6290 1023
2371 #ifndef QCA_WIFI_QCA8074_VP
2372 #define REO_DST_RING_SIZE_QCA8074 1023
2373 #define REO_DST_RING_SIZE_QCN9000 2048
2374 #else
2375 #define REO_DST_RING_SIZE_QCA8074 8
2376 #define REO_DST_RING_SIZE_QCN9000 8
2377 #endif /* QCA_WIFI_QCA8074_VP */
2378 
2379 #else
2380 
2381 #define REO_DST_RING_SIZE_QCA6290 1024
2382 #ifndef QCA_WIFI_QCA8074_VP
2383 #define REO_DST_RING_SIZE_QCA8074 2048
2384 #define REO_DST_RING_SIZE_QCN9000 2048
2385 #else
2386 #define REO_DST_RING_SIZE_QCA8074 8
2387 #define REO_DST_RING_SIZE_QCN9000 8
2388 #endif /* QCA_WIFI_QCA8074_VP */
2389 #endif /* IPA_OFFLOAD */
2390 
2391 #ifndef FEATURE_WDS
2392 static void dp_soc_wds_attach(struct dp_soc *soc)
2393 {
2394 }
2395 
2396 static void dp_soc_wds_detach(struct dp_soc *soc)
2397 {
2398 }
2399 #endif
2400 /*
 * dp_soc_reset_cpu_ring_map() - Reset cpu ring map
 * @soc: Datapath soc handle
 *
 * This API resets the default cpu ring map
 */
2407 static void dp_soc_reset_cpu_ring_map(struct dp_soc *soc)
2408 {
2409 	uint8_t i;
2410 	int nss_config = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
2411 
2412 	for (i = 0; i < WLAN_CFG_INT_NUM_CONTEXTS; i++) {
2413 		switch (nss_config) {
		case dp_nss_cfg_first_radio:
			/*
			 * Setting Tx ring map when only the first radio is
			 * nss offloaded
			 */
			soc->tx_ring_map[i] =
				dp_cpu_ring_map[DP_NSS_FIRST_RADIO_OFFLOADED_MAP][i];
			break;

		case dp_nss_cfg_second_radio:
			/*
			 * Setting Tx ring map when only the second radio is
			 * nss offloaded
			 */
			soc->tx_ring_map[i] =
				dp_cpu_ring_map[DP_NSS_SECOND_RADIO_OFFLOADED_MAP][i];
			break;
2427 
2428 		case dp_nss_cfg_dbdc:
2429 			/*
2430 			 * Setting Tx ring map for 2 nss offloaded radios
2431 			 */
2432 			soc->tx_ring_map[i] =
2433 				dp_cpu_ring_map[DP_NSS_DBDC_OFFLOADED_MAP][i];
2434 			break;
2435 
2436 		case dp_nss_cfg_dbtc:
2437 			/*
2438 			 * Setting Tx ring map for 3 nss offloaded radios
2439 			 */
2440 			soc->tx_ring_map[i] =
2441 				dp_cpu_ring_map[DP_NSS_DBTC_OFFLOADED_MAP][i];
2442 			break;
2443 
2444 		default:
2445 			dp_err("tx_ring_map failed due to invalid nss cfg");
2446 			break;
2447 		}
2448 	}
2449 }
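/*
 * Illustration (editorial, not normative): the host enqueues Tx from
 * CPU/context i onto TCL data ring soc->tx_ring_map[i], so the offloaded
 * maps simply exclude the ring(s) owned by the NSS firmware. E.g. for
 * dp_nss_cfg_first_radio every entry comes from
 * dp_cpu_ring_map[DP_NSS_FIRST_RADIO_OFFLOADED_MAP], which avoids the
 * TCL ring dedicated to the first (offloaded) radio.
 */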
2450 
/*
 * dp_soc_ring_if_nss_offloaded() - find if ring is offloaded to NSS
 * @soc: DP soc handle
 * @ring_type: ring type
 * @ring_num: ring number
 *
 * Return: nonzero if the ring is offloaded to NSS, 0 otherwise
 */
static uint8_t dp_soc_ring_if_nss_offloaded(struct dp_soc *soc,
					    enum hal_ring_type ring_type,
					    int ring_num)
2460 {
2461 	uint8_t nss_config = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
2462 	uint8_t status = 0;
2463 
2464 	switch (ring_type) {
2465 	case WBM2SW_RELEASE:
2466 	case REO_DST:
2467 	case RXDMA_BUF:
2468 		status = ((nss_config) & (1 << ring_num));
2469 		break;
2470 	default:
2471 		break;
2472 	}
2473 
2474 	return status;
2475 }
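/*
 * Usage sketch (editorial, hypothetical values): with nss_config = 0x1,
 * i.e. only the first radio offloaded,
 *
 *   dp_soc_ring_if_nss_offloaded(soc, REO_DST, 0)  -> nonzero
 *   dp_soc_ring_if_nss_offloaded(soc, REO_DST, 1)  -> 0
 *   dp_soc_ring_if_nss_offloaded(soc, TCL_DATA, 0) -> 0 (type not checked)
 *
 * Bit ring_num of the nss config selects the ring, and only the
 * WBM2SW_RELEASE, REO_DST and RXDMA_BUF ring types participate.
 */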
2476 
2477 /*
2478  * dp_soc_disable_mac2_intr_mask() - reset interrupt mask for WMAC2 hw rings
 * @soc: DP SoC handle
 *
 * Return: void
2482  */
2483 static void dp_soc_disable_mac2_intr_mask(struct dp_soc *soc)
2484 {
2485 	int *grp_mask = NULL;
2486 	int group_number;
2487 
2488 	grp_mask = &soc->wlan_cfg_ctx->int_host2rxdma_ring_mask[0];
2489 	group_number = dp_srng_find_ring_in_mask(0x2, grp_mask);
2490 	wlan_cfg_set_host2rxdma_ring_mask(soc->wlan_cfg_ctx,
2491 					  group_number, 0x0);
2492 
2493 	grp_mask = &soc->wlan_cfg_ctx->int_rx_mon_ring_mask[0];
2494 	group_number = dp_srng_find_ring_in_mask(0x2, grp_mask);
2495 	wlan_cfg_set_rx_mon_ring_mask(soc->wlan_cfg_ctx,
2496 				      group_number, 0x0);
2497 
2498 	grp_mask = &soc->wlan_cfg_ctx->int_rxdma2host_ring_mask[0];
2499 	group_number = dp_srng_find_ring_in_mask(0x2, grp_mask);
2500 	wlan_cfg_set_rxdma2host_ring_mask(soc->wlan_cfg_ctx,
2501 					  group_number, 0x0);
2502 
2503 	grp_mask = &soc->wlan_cfg_ctx->int_host2rxdma_mon_ring_mask[0];
2504 	group_number = dp_srng_find_ring_in_mask(0x2, grp_mask);
2505 	wlan_cfg_set_host2rxdma_mon_ring_mask(soc->wlan_cfg_ctx,
2506 					      group_number, 0x0);
2507 }
2508 
2509 /*
2510  * dp_soc_reset_intr_mask() - reset interrupt mask
 * @soc: DP SoC handle
 *
 * Return: void
2514  */
2515 static void dp_soc_reset_intr_mask(struct dp_soc *soc)
2516 {
2517 	uint8_t j;
2518 	int *grp_mask = NULL;
2519 	int group_number, mask, num_ring;
2520 
2521 	/* number of tx ring */
2522 	num_ring = wlan_cfg_num_tcl_data_rings(soc->wlan_cfg_ctx);
2523 
2524 	/*
	 * group mask for tx completion ring.
2526 	 */
2527 	grp_mask =  &soc->wlan_cfg_ctx->int_tx_ring_mask[0];
2528 
2529 	/* loop and reset the mask for only offloaded ring */
2530 	for (j = 0; j < num_ring; j++) {
2531 		if (!dp_soc_ring_if_nss_offloaded(soc, WBM2SW_RELEASE, j)) {
2532 			continue;
2533 		}
2534 
2535 		/*
2536 		 * Group number corresponding to tx offloaded ring.
2537 		 */
2538 		group_number = dp_srng_find_ring_in_mask(j, grp_mask);
2539 		if (group_number < 0) {
2540 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
					FL("ring not part of any group; ring_type: %d, ring_num %d"),
2542 					WBM2SW_RELEASE, j);
2543 			return;
2544 		}
2545 
2546 		/* reset the tx mask for offloaded ring */
2547 		mask = wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, group_number);
2548 		mask &= (~(1 << j));
2549 
2550 		/*
2551 		 * reset the interrupt mask for offloaded ring.
2552 		 */
2553 		wlan_cfg_set_tx_ring_mask(soc->wlan_cfg_ctx, group_number, mask);
2554 	}
2555 
2556 	/* number of rx rings */
2557 	num_ring = wlan_cfg_num_reo_dest_rings(soc->wlan_cfg_ctx);
2558 
2559 	/*
2560 	 * group mask for reo destination ring.
2561 	 */
2562 	grp_mask = &soc->wlan_cfg_ctx->int_rx_ring_mask[0];
2563 
2564 	/* loop and reset the mask for only offloaded ring */
2565 	for (j = 0; j < num_ring; j++) {
2566 		if (!dp_soc_ring_if_nss_offloaded(soc, REO_DST, j)) {
2567 			continue;
2568 		}
2569 
2570 		/*
2571 		 * Group number corresponding to rx offloaded ring.
2572 		 */
2573 		group_number = dp_srng_find_ring_in_mask(j, grp_mask);
2574 		if (group_number < 0) {
2575 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
					FL("ring not part of any group; ring_type: %d, ring_num %d"),
2577 					REO_DST, j);
2578 			return;
2579 		}
2580 
		/* reset the rx mask for the offloaded ring */
		mask = wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, group_number);
		mask &= (~(1 << j));

		/*
		 * clear the interrupt mask for the rx offloaded ring.
		 */
2588 		wlan_cfg_set_rx_ring_mask(soc->wlan_cfg_ctx, group_number, mask);
2589 	}
2590 
2591 	/*
2592 	 * group mask for Rx buffer refill ring
2593 	 */
2594 	grp_mask = &soc->wlan_cfg_ctx->int_host2rxdma_ring_mask[0];
2595 
2596 	/* loop and reset the mask for only offloaded ring */
2597 	for (j = 0; j < MAX_PDEV_CNT; j++) {
2598 		if (!dp_soc_ring_if_nss_offloaded(soc, RXDMA_BUF, j)) {
2599 			continue;
2600 		}
2601 
2602 		/*
2603 		 * Group number corresponding to rx offloaded ring.
2604 		 */
2605 		group_number = dp_srng_find_ring_in_mask(j, grp_mask);
2606 		if (group_number < 0) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
					FL("ring not part of any group; ring_type: %d, ring_num %d"),
					RXDMA_BUF, j);
2610 			return;
2611 		}
2612 
		/* reset the refill ring mask for the offloaded ring */
		mask = wlan_cfg_get_host2rxdma_ring_mask(soc->wlan_cfg_ctx,
				group_number);
		mask &= (~(1 << j));

		/*
		 * clear the interrupt mask for the rx offloaded ring.
		 */
2621 		wlan_cfg_set_host2rxdma_ring_mask(soc->wlan_cfg_ctx,
2622 			group_number, mask);
2623 	}
2624 }
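/*
 * Worked example (editorial, assumed numbers): if Tx ring 1 is
 * NSS-offloaded and the group owning it currently has tx_ring_mask = 0x3,
 * the clearing above computes 0x3 & ~(1 << 1) = 0x1, so the host keeps
 * servicing ring 0 but no longer takes interrupts for the offloaded
 * ring 1.
 */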
2625 
2626 #ifdef IPA_OFFLOAD
2627 /**
 * dp_reo_remap_config() - configure reo remap register value based
 *                         on nss configuration.
 *		based on the offload_radio value below, the remap
 *		configuration is applied:
 *		0 - both Radios handled by host (remap rings 1, 2, 3 & 4)
 *		1 - 1st Radio handled by NSS (remap rings 2, 3 & 4)
 *		2 - 2nd Radio handled by NSS (remap rings 1, 3 & 4)
 *		3 - both Radios handled by NSS (remap not required)
 *		4 - IPA OFFLOAD enabled (remap rings 1, 2 & 3)
 *
 * @soc: DP soc handle
 * @remap1: output parameter, reo remap 1 register value
 * @remap2: output parameter, reo remap 2 register value
 *
 * Return: bool type, true if remap is configured else false.
2641  */
2642 bool dp_reo_remap_config(struct dp_soc *soc, uint32_t *remap1, uint32_t *remap2)
2643 {
2644 	*remap1 = HAL_REO_REMAP_IX2(REO_REMAP_SW1, 16) |
2645 		  HAL_REO_REMAP_IX2(REO_REMAP_SW2, 17) |
2646 		  HAL_REO_REMAP_IX2(REO_REMAP_SW3, 18) |
2647 		  HAL_REO_REMAP_IX2(REO_REMAP_SW1, 19) |
2648 		  HAL_REO_REMAP_IX2(REO_REMAP_SW2, 20) |
2649 		  HAL_REO_REMAP_IX2(REO_REMAP_SW3, 21) |
2650 		  HAL_REO_REMAP_IX2(REO_REMAP_SW1, 22) |
2651 		  HAL_REO_REMAP_IX2(REO_REMAP_SW2, 23);
2652 
2653 	*remap2 = HAL_REO_REMAP_IX3(REO_REMAP_SW3, 24) |
2654 		  HAL_REO_REMAP_IX3(REO_REMAP_SW1, 25) |
2655 		  HAL_REO_REMAP_IX3(REO_REMAP_SW2, 26) |
2656 		  HAL_REO_REMAP_IX3(REO_REMAP_SW3, 27) |
2657 		  HAL_REO_REMAP_IX3(REO_REMAP_SW1, 28) |
2658 		  HAL_REO_REMAP_IX3(REO_REMAP_SW2, 29) |
2659 		  HAL_REO_REMAP_IX3(REO_REMAP_SW3, 30) |
2660 		  HAL_REO_REMAP_IX3(REO_REMAP_SW1, 31);
2661 
2662 	dp_debug("remap1 %x remap2 %x", *remap1, *remap2);
2663 
2664 	return true;
2665 }
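/*
 * Field-packing note (editorial): each HAL_REO_REMAP_IX2()/IX3() macro
 * places a 3-bit SW destination value into the slot for one of REO remap
 * entries 16..31, so the eight ORed terms per register fill all eight
 * slots of that register. The IPA pattern above cycles SW1/SW2/SW3 and
 * deliberately leaves SW4 unused, as that REO2SW ring is taken by the
 * IPA Rx path.
 */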
2666 #else
2667 static bool dp_reo_remap_config(struct dp_soc *soc,
2668 				uint32_t *remap1,
2669 				uint32_t *remap2)
2670 {
2671 	uint8_t offload_radio = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
2672 	uint8_t target_type;
2673 
2674 	target_type = hal_get_target_type(soc->hal_soc);
2675 
2676 	switch (offload_radio) {
2677 	case dp_nss_cfg_default:
2678 		*remap1 = HAL_REO_REMAP_IX2(REO_REMAP_SW1, 16) |
2679 			  HAL_REO_REMAP_IX2(REO_REMAP_SW2, 17) |
2680 			  HAL_REO_REMAP_IX2(REO_REMAP_SW3, 18) |
2681 			  HAL_REO_REMAP_IX2(REO_REMAP_SW4, 19) |
2682 			  HAL_REO_REMAP_IX2(REO_REMAP_SW1, 20) |
2683 			  HAL_REO_REMAP_IX2(REO_REMAP_SW2, 21) |
2684 			  HAL_REO_REMAP_IX2(REO_REMAP_SW3, 22) |
2685 			  HAL_REO_REMAP_IX2(REO_REMAP_SW4, 23);
2686 
2687 		*remap2 = HAL_REO_REMAP_IX3(REO_REMAP_SW1, 24) |
2688 			  HAL_REO_REMAP_IX3(REO_REMAP_SW2, 25) |
2689 			  HAL_REO_REMAP_IX3(REO_REMAP_SW3, 26) |
2690 			  HAL_REO_REMAP_IX3(REO_REMAP_SW4, 27) |
2691 			  HAL_REO_REMAP_IX3(REO_REMAP_SW1, 28) |
2692 			  HAL_REO_REMAP_IX3(REO_REMAP_SW2, 29) |
2693 			  HAL_REO_REMAP_IX3(REO_REMAP_SW3, 30) |
2694 			  HAL_REO_REMAP_IX3(REO_REMAP_SW4, 31);
2695 		break;
2696 	case dp_nss_cfg_first_radio:
2697 		*remap1 = HAL_REO_REMAP_IX2(REO_REMAP_SW2, 16) |
2698 			  HAL_REO_REMAP_IX2(REO_REMAP_SW3, 17) |
2699 			  HAL_REO_REMAP_IX2(REO_REMAP_SW4, 18) |
2700 			  HAL_REO_REMAP_IX2(REO_REMAP_SW2, 19) |
2701 			  HAL_REO_REMAP_IX2(REO_REMAP_SW3, 20) |
2702 			  HAL_REO_REMAP_IX2(REO_REMAP_SW4, 21) |
2703 			  HAL_REO_REMAP_IX2(REO_REMAP_SW2, 22) |
2704 			  HAL_REO_REMAP_IX2(REO_REMAP_SW3, 23);
2705 
2706 		*remap2 = HAL_REO_REMAP_IX3(REO_REMAP_SW4, 24) |
2707 			  HAL_REO_REMAP_IX3(REO_REMAP_SW2, 25) |
2708 			  HAL_REO_REMAP_IX3(REO_REMAP_SW3, 26) |
2709 			  HAL_REO_REMAP_IX3(REO_REMAP_SW4, 27) |
2710 			  HAL_REO_REMAP_IX3(REO_REMAP_SW2, 28) |
2711 			  HAL_REO_REMAP_IX3(REO_REMAP_SW3, 29) |
2712 			  HAL_REO_REMAP_IX3(REO_REMAP_SW4, 30) |
2713 			  HAL_REO_REMAP_IX3(REO_REMAP_SW2, 31);
2714 		break;
2715 	case dp_nss_cfg_second_radio:
2716 		*remap1 = HAL_REO_REMAP_IX2(REO_REMAP_SW1, 16) |
2717 			  HAL_REO_REMAP_IX2(REO_REMAP_SW3, 17) |
2718 			  HAL_REO_REMAP_IX2(REO_REMAP_SW4, 18) |
2719 			  HAL_REO_REMAP_IX2(REO_REMAP_SW1, 19) |
2720 			  HAL_REO_REMAP_IX2(REO_REMAP_SW3, 20) |
2721 			  HAL_REO_REMAP_IX2(REO_REMAP_SW4, 21) |
2722 			  HAL_REO_REMAP_IX2(REO_REMAP_SW1, 22) |
2723 			  HAL_REO_REMAP_IX2(REO_REMAP_SW3, 23);
2724 
2725 		*remap2 = HAL_REO_REMAP_IX3(REO_REMAP_SW4, 24) |
2726 			  HAL_REO_REMAP_IX3(REO_REMAP_SW1, 25) |
2727 			  HAL_REO_REMAP_IX3(REO_REMAP_SW3, 26) |
2728 			  HAL_REO_REMAP_IX3(REO_REMAP_SW4, 27) |
2729 			  HAL_REO_REMAP_IX3(REO_REMAP_SW1, 28) |
2730 			  HAL_REO_REMAP_IX3(REO_REMAP_SW3, 29) |
2731 			  HAL_REO_REMAP_IX3(REO_REMAP_SW4, 30) |
2732 			  HAL_REO_REMAP_IX3(REO_REMAP_SW1, 31);
2733 
2734 		break;
2735 	case dp_nss_cfg_dbdc:
2736 	case dp_nss_cfg_dbtc:
2737 		/* return false if both or all are offloaded to NSS */
2738 		return false;
2739 	}
2740 
2741 	dp_debug("remap1 %x remap2 %x offload_radio %u",
2742 		 *remap1, *remap2, offload_radio);
2743 	return true;
2744 }
2745 #endif /* IPA_OFFLOAD */
2746 
2747 /*
2748  * dp_reo_frag_dst_set() - configure reo register to set the
2749  *                        fragment destination ring
 * @soc: Datapath soc
 * @frag_dst_ring: output parameter to set the fragment destination ring
 *
 * Based on offload_radio below, the fragment destination ring is selected:
2754  * 0 - TCL
2755  * 1 - SW1
2756  * 2 - SW2
2757  * 3 - SW3
2758  * 4 - SW4
2759  * 5 - Release
2760  * 6 - FW
2761  * 7 - alternate select
2762  *
 * Return: void
2764  */
2765 static void dp_reo_frag_dst_set(struct dp_soc *soc, uint8_t *frag_dst_ring)
2766 {
2767 	uint8_t offload_radio = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
2768 
2769 	switch (offload_radio) {
2770 	case dp_nss_cfg_default:
2771 		*frag_dst_ring = HAL_SRNG_REO_EXCEPTION;
2772 		break;
2773 	case dp_nss_cfg_first_radio:
2774 		/*
2775 		 * This configuration is valid for single band radio which
2776 		 * is also NSS offload.
2777 		 */
2778 	case dp_nss_cfg_dbdc:
2779 	case dp_nss_cfg_dbtc:
2780 		*frag_dst_ring = HAL_SRNG_REO_ALTERNATE_SELECT;
2781 		break;
2782 	default:
2783 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2784 				FL("dp_reo_frag_dst_set invalid offload radio config"));
2785 		break;
2786 	}
2787 }
2788 
2789 #ifdef ENABLE_VERBOSE_DEBUG
2790 static void dp_enable_verbose_debug(struct dp_soc *soc)
2791 {
2792 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
2793 
2794 	soc_cfg_ctx = soc->wlan_cfg_ctx;
2795 
2796 	if (soc_cfg_ctx->per_pkt_trace & dp_verbose_debug_mask)
2797 		is_dp_verbose_debug_enabled = true;
2798 
2799 	if (soc_cfg_ctx->per_pkt_trace & hal_verbose_debug_mask)
2800 		hal_set_verbose_debug(true);
2801 	else
2802 		hal_set_verbose_debug(false);
2803 }
2804 #else
2805 static void dp_enable_verbose_debug(struct dp_soc *soc)
2806 {
2807 }
2808 #endif
2809 
2810 #ifdef WLAN_FEATURE_STATS_EXT
2811 static inline void dp_create_ext_stats_event(struct dp_soc *soc)
2812 {
2813 	qdf_event_create(&soc->rx_hw_stats_event);
2814 }
2815 #else
2816 static inline void dp_create_ext_stats_event(struct dp_soc *soc)
2817 {
2818 }
2819 #endif
2820 
2821 /*
 * dp_soc_cmn_setup() - Common SoC level initialization
 * @soc:		Datapath SOC handle
 *
 * This is an internal function used to set up common SOC data structures,
2826  * to be called from PDEV attach after receiving HW mode capabilities from FW
2827  */
2828 static int dp_soc_cmn_setup(struct dp_soc *soc)
2829 {
2830 	int i, cached;
2831 	struct hal_reo_params reo_params;
2832 	int tx_ring_size;
2833 	int tx_comp_ring_size;
2834 	int reo_dst_ring_size;
2835 	uint32_t entries;
2836 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
2837 
2838 	if (qdf_atomic_read(&soc->cmn_init_done))
2839 		return 0;
2840 
2841 	if (dp_hw_link_desc_pool_setup(soc))
2842 		goto fail1;
2843 
2844 	soc_cfg_ctx = soc->wlan_cfg_ctx;
2845 
2846 	dp_enable_verbose_debug(soc);
2847 
2848 	/* Setup SRNG rings */
2849 	/* Common rings */
2850 	entries = wlan_cfg_get_dp_soc_wbm_release_ring_size(soc_cfg_ctx);
2851 
2852 	if (dp_srng_setup(soc, &soc->wbm_desc_rel_ring, SW2WBM_RELEASE, 0, 0,
2853 			  entries, 0)) {
2854 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2855 			FL("dp_srng_setup failed for wbm_desc_rel_ring"));
2856 		goto fail1;
2857 	}
2858 
2859 	qdf_minidump_log(soc->wbm_desc_rel_ring.base_vaddr_unaligned,
2860 			 soc->wbm_desc_rel_ring.alloc_size,
2861 			 "wbm_desc_rel_ring");
2862 
2863 	soc->num_tcl_data_rings = 0;
2864 	/* Tx data rings */
2865 	if (!wlan_cfg_per_pdev_tx_ring(soc_cfg_ctx)) {
2866 		soc->num_tcl_data_rings =
2867 			wlan_cfg_num_tcl_data_rings(soc_cfg_ctx);
2868 		tx_comp_ring_size =
2869 			wlan_cfg_tx_comp_ring_size(soc_cfg_ctx);
2870 		tx_ring_size =
2871 			wlan_cfg_tx_ring_size(soc_cfg_ctx);
2872 		for (i = 0; i < soc->num_tcl_data_rings; i++) {
2873 			if (dp_srng_setup(soc, &soc->tcl_data_ring[i],
2874 					  TCL_DATA, i, 0, tx_ring_size, 0)) {
2875 				QDF_TRACE(QDF_MODULE_ID_DP,
2876 					QDF_TRACE_LEVEL_ERROR,
2877 					FL("dp_srng_setup failed for tcl_data_ring[%d]"), i);
2878 				goto fail1;
2879 			}
2880 
2881 			/* Disable cached desc if NSS offload is enabled */
2882 			cached = WLAN_CFG_DST_RING_CACHED_DESC;
2883 			if (wlan_cfg_get_dp_soc_nss_cfg(soc_cfg_ctx))
2884 				cached = 0;
2885 			/*
2886 			 * TBD: Set IPA WBM ring size with ini IPA UC tx buffer
2887 			 * count
2888 			 */
2889 			if (dp_srng_setup(soc, &soc->tx_comp_ring[i],
2890 					  WBM2SW_RELEASE, i, 0,
2891 					  tx_comp_ring_size,
2892 					  cached)) {
2893 				QDF_TRACE(QDF_MODULE_ID_DP,
2894 					QDF_TRACE_LEVEL_ERROR,
2895 					FL("dp_srng_setup failed for tx_comp_ring[%d]"), i);
2896 				goto fail1;
2897 			}
2898 		}
2899 	} else {
2900 		/* This will be incremented during per pdev ring setup */
2901 		soc->num_tcl_data_rings = 0;
2902 	}
2903 
2904 	if (dp_tx_soc_attach(soc)) {
2905 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2906 				FL("dp_tx_soc_attach failed"));
2907 		goto fail1;
2908 	}
2909 
2910 	entries = wlan_cfg_get_dp_soc_tcl_cmd_ring_size(soc_cfg_ctx);
2911 	/* TCL command and status rings */
2912 	if (dp_srng_setup(soc, &soc->tcl_cmd_ring, TCL_CMD, 0, 0,
2913 			  entries, 0)) {
2914 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2915 			FL("dp_srng_setup failed for tcl_cmd_ring"));
2916 		goto fail2;
2917 	}
2918 
2919 	entries = wlan_cfg_get_dp_soc_tcl_status_ring_size(soc_cfg_ctx);
2920 	if (dp_srng_setup(soc, &soc->tcl_status_ring, TCL_STATUS, 0, 0,
2921 			  entries, 0)) {
2922 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2923 			FL("dp_srng_setup failed for tcl_status_ring"));
2924 		goto fail2;
2925 	}
2926 
2927 	reo_dst_ring_size = wlan_cfg_get_reo_dst_ring_size(soc->wlan_cfg_ctx);
2928 
2929 	/* TBD: call dp_tx_init to setup Tx SW descriptors and MSDU extension
2930 	 * descriptors
2931 	 */
2932 
2933 	/* Rx data rings */
2934 	if (!wlan_cfg_per_pdev_rx_ring(soc_cfg_ctx)) {
2935 		soc->num_reo_dest_rings =
2936 			wlan_cfg_num_reo_dest_rings(soc_cfg_ctx);
2937 		QDF_TRACE(QDF_MODULE_ID_DP,
2938 			QDF_TRACE_LEVEL_INFO,
2939 			FL("num_reo_dest_rings %d"), soc->num_reo_dest_rings);
2940 
2941 		/* Disable cached desc if NSS offload is enabled */
2942 		cached = WLAN_CFG_DST_RING_CACHED_DESC;
2943 		if (wlan_cfg_get_dp_soc_nss_cfg(soc_cfg_ctx))
2944 			cached = 0;
2945 
2946 		for (i = 0; i < soc->num_reo_dest_rings; i++) {
2947 			if (dp_srng_setup(soc, &soc->reo_dest_ring[i], REO_DST,
2948 					  i, 0, reo_dst_ring_size, cached)) {
2949 				QDF_TRACE(QDF_MODULE_ID_DP,
2950 					  QDF_TRACE_LEVEL_ERROR,
2951 					  FL(RNG_ERR "reo_dest_ring [%d]"), i);
2952 				goto fail2;
2953 			}
2954 		}
2955 	} else {
2956 		/* This will be incremented during per pdev ring setup */
2957 		soc->num_reo_dest_rings = 0;
2958 	}
2959 
2960 	entries = wlan_cfg_get_dp_soc_rxdma_err_dst_ring_size(soc_cfg_ctx);
2961 	/* LMAC RxDMA to SW Rings configuration */
2962 	if (!wlan_cfg_per_pdev_lmac_ring(soc_cfg_ctx)) {
2963 		/* Only valid for MCL */
2964 		struct dp_pdev *pdev = soc->pdev_list[0];
2965 
2966 		for (i = 0; i < MAX_RX_MAC_RINGS; i++) {
2967 			if (dp_srng_setup(soc, &pdev->rxdma_err_dst_ring[i],
2968 					  RXDMA_DST, 0, i, entries, 0)) {
2969 				QDF_TRACE(QDF_MODULE_ID_DP,
2970 					  QDF_TRACE_LEVEL_ERROR,
2971 					  FL(RNG_ERR "rxdma_err_dst_ring"));
2972 				goto fail2;
2973 			}
2974 		}
2975 	}
2976 	/* TBD: call dp_rx_init to setup Rx SW descriptors */
2977 
2978 	/* REO reinjection ring */
2979 	entries = wlan_cfg_get_dp_soc_reo_reinject_ring_size(soc_cfg_ctx);
2980 	if (dp_srng_setup(soc, &soc->reo_reinject_ring, REO_REINJECT, 0, 0,
2981 			  entries, 0)) {
2982 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2983 			  FL("dp_srng_setup failed for reo_reinject_ring"));
2984 		goto fail2;
2985 	}
2986 
2987 
2988 	/* Rx release ring */
2989 	if (dp_srng_setup(soc, &soc->rx_rel_ring, WBM2SW_RELEASE, 3, 0,
2990 			  wlan_cfg_get_dp_soc_rx_release_ring_size(soc_cfg_ctx),
2991 			  0)) {
2992 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2993 			  FL("dp_srng_setup failed for rx_rel_ring"));
2994 		goto fail2;
2995 	}
2996 
2997 
2998 	/* Rx exception ring */
2999 	entries = wlan_cfg_get_dp_soc_reo_exception_ring_size(soc_cfg_ctx);
3000 	if (dp_srng_setup(soc, &soc->reo_exception_ring,
3001 			  REO_EXCEPTION, 0, MAX_REO_DEST_RINGS, entries, 0)) {
3002 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3003 			  FL("dp_srng_setup failed for reo_exception_ring"));
3004 		goto fail2;
3005 	}
3006 
3007 
3008 	/* REO command and status rings */
3009 	if (dp_srng_setup(soc, &soc->reo_cmd_ring, REO_CMD, 0, 0,
3010 			  wlan_cfg_get_dp_soc_reo_cmd_ring_size(soc_cfg_ctx),
3011 			  0)) {
3012 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3013 			FL("dp_srng_setup failed for reo_cmd_ring"));
3014 		goto fail2;
3015 	}
3016 
3017 	hal_reo_init_cmd_ring(soc->hal_soc, soc->reo_cmd_ring.hal_srng);
3018 	TAILQ_INIT(&soc->rx.reo_cmd_list);
3019 	qdf_spinlock_create(&soc->rx.reo_cmd_lock);
3020 
3021 	if (dp_srng_setup(soc, &soc->reo_status_ring, REO_STATUS, 0, 0,
3022 			  wlan_cfg_get_dp_soc_reo_status_ring_size(soc_cfg_ctx),
3023 			  0)) {
3024 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3025 			FL("dp_srng_setup failed for reo_status_ring"));
3026 		goto fail2;
3027 	}
3028 
3029 	/*
	 * Skip registering hw ring interrupts for WMAC2 on IPQ6018,
	 * as WMAC2 is not present on the IPQ6018 platform.
3032 	 */
3033 	if (hal_get_target_type(soc->hal_soc) == TARGET_TYPE_QCA6018) {
3034 		dp_soc_disable_mac2_intr_mask(soc);
3035 	}
3036 
3037 	/* Reset the cpu ring map if radio is NSS offloaded */
3038 	if (wlan_cfg_get_dp_soc_nss_cfg(soc_cfg_ctx)) {
3039 		dp_soc_reset_cpu_ring_map(soc);
3040 		dp_soc_reset_intr_mask(soc);
3041 	}
3042 
3043 	/* Setup HW REO */
3044 	qdf_mem_zero(&reo_params, sizeof(reo_params));
3045 
3046 	if (wlan_cfg_is_rx_hash_enabled(soc_cfg_ctx)) {
3047 
3048 		/*
3049 		 * Reo ring remap is not required if both radios
3050 		 * are offloaded to NSS
3051 		 */
3052 		if (!dp_reo_remap_config(soc,
3053 					&reo_params.remap1,
3054 					&reo_params.remap2))
3055 			goto out;
3056 
3057 		reo_params.rx_hash_enabled = true;
3058 	}
3059 
3060 	/* setup the global rx defrag waitlist */
3061 	TAILQ_INIT(&soc->rx.defrag.waitlist);
3062 	soc->rx.defrag.timeout_ms =
3063 		wlan_cfg_get_rx_defrag_min_timeout(soc_cfg_ctx);
3064 	soc->rx.defrag.next_flush_ms = 0;
3065 	soc->rx.flags.defrag_timeout_check =
3066 		wlan_cfg_get_defrag_timeout_check(soc_cfg_ctx);
3067 	qdf_spinlock_create(&soc->rx.defrag.defrag_lock);
3068 
3069 	dp_create_ext_stats_event(soc);
3070 out:
3071 	/*
3072 	 * set the fragment destination ring
3073 	 */
3074 	dp_reo_frag_dst_set(soc, &reo_params.frag_dst_ring);
3075 
3076 	hal_reo_setup(soc->hal_soc, &reo_params);
3077 
3078 	qdf_atomic_set(&soc->cmn_init_done, 1);
3079 
3080 	dp_soc_wds_attach(soc);
3081 
3082 	qdf_nbuf_queue_init(&soc->htt_stats.msg);
3083 	return 0;
3084 fail2:
3085 	dp_tx_soc_detach(soc);
3086 fail1:
3087 	/*
3088 	 * Cleanup will be done as part of soc_detach, which will
3089 	 * be called on pdev attach failure
3090 	 */
3091 	return QDF_STATUS_E_FAILURE;
3092 }
3093 
3094 /*
 * dp_soc_cmn_cleanup() - Common SoC level de-initialization
 *
 * @soc: Datapath SOC handle
 *
 * This function is responsible for cleaning up the DP SOC resources
 * initialized in dp_pdev_attach_wifi3-->dp_soc_cmn_setup, since
 * dp_soc_detach_wifi3 cannot accurately determine which of them
 * have been initialized.
 *
3104  */
3105 static void dp_soc_cmn_cleanup(struct dp_soc *soc)
3106 {
3107 	if (!dp_is_soc_reinit(soc)) {
3108 		dp_tx_soc_detach(soc);
3109 	}
3110 
3111 	qdf_spinlock_destroy(&soc->rx.defrag.defrag_lock);
3112 
3113 	dp_reo_cmdlist_destroy(soc);
3114 	qdf_spinlock_destroy(&soc->rx.reo_cmd_lock);
3115 }
3116 
3117 static QDF_STATUS
3118 dp_pdev_detach_wifi3(struct cdp_soc_t *psoc, uint8_t pdev_id,
3119 		     int force);
3120 
3121 static QDF_STATUS dp_lro_hash_setup(struct dp_soc *soc, struct dp_pdev *pdev)
3122 {
3123 	struct cdp_lro_hash_config lro_hash;
3124 	QDF_STATUS status;
3125 
3126 	if (!wlan_cfg_is_lro_enabled(soc->wlan_cfg_ctx) &&
3127 	    !wlan_cfg_is_gro_enabled(soc->wlan_cfg_ctx) &&
3128 	    !wlan_cfg_is_rx_hash_enabled(soc->wlan_cfg_ctx)) {
3129 		dp_err("LRO, GRO and RX hash disabled");
3130 		return QDF_STATUS_E_FAILURE;
3131 	}
3132 
3133 	qdf_mem_zero(&lro_hash, sizeof(lro_hash));
3134 
3135 	if (wlan_cfg_is_lro_enabled(soc->wlan_cfg_ctx) ||
3136 	    wlan_cfg_is_gro_enabled(soc->wlan_cfg_ctx)) {
3137 		lro_hash.lro_enable = 1;
3138 		lro_hash.tcp_flag = QDF_TCPHDR_ACK;
3139 		lro_hash.tcp_flag_mask = QDF_TCPHDR_FIN | QDF_TCPHDR_SYN |
3140 			 QDF_TCPHDR_RST | QDF_TCPHDR_ACK | QDF_TCPHDR_URG |
3141 			 QDF_TCPHDR_ECE | QDF_TCPHDR_CWR;
3142 	}
3143 
3144 	qdf_get_random_bytes(lro_hash.toeplitz_hash_ipv4,
3145 		 (sizeof(lro_hash.toeplitz_hash_ipv4[0]) *
3146 		 LRO_IPV4_SEED_ARR_SZ));
3147 	qdf_get_random_bytes(lro_hash.toeplitz_hash_ipv6,
3148 		 (sizeof(lro_hash.toeplitz_hash_ipv6[0]) *
3149 		 LRO_IPV6_SEED_ARR_SZ));
3150 
3151 	qdf_assert(soc->cdp_soc.ol_ops->lro_hash_config);
3152 
3153 	if (!soc->cdp_soc.ol_ops->lro_hash_config) {
3154 		QDF_BUG(0);
3155 		dp_err("lro_hash_config not configured");
3156 		return QDF_STATUS_E_FAILURE;
3157 	}
3158 
3159 	status = soc->cdp_soc.ol_ops->lro_hash_config(soc->ctrl_psoc,
3160 						      pdev->pdev_id,
3161 						      &lro_hash);
3162 	if (!QDF_IS_STATUS_SUCCESS(status)) {
3163 		dp_err("failed to send lro_hash_config to FW %u", status);
3164 		return status;
3165 	}
3166 
3167 	dp_info("LRO CMD config: lro_enable: 0x%x tcp_flag 0x%x tcp_flag_mask 0x%x",
3168 		lro_hash.lro_enable, lro_hash.tcp_flag,
3169 		lro_hash.tcp_flag_mask);
3170 
3171 	dp_info("toeplitz_hash_ipv4:");
3172 	qdf_trace_hex_dump(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
3173 			   lro_hash.toeplitz_hash_ipv4,
3174 			   (sizeof(lro_hash.toeplitz_hash_ipv4[0]) *
3175 			   LRO_IPV4_SEED_ARR_SZ));
3176 
3177 	dp_info("toeplitz_hash_ipv6:");
3178 	qdf_trace_hex_dump(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
3179 			   lro_hash.toeplitz_hash_ipv6,
3180 			   (sizeof(lro_hash.toeplitz_hash_ipv6[0]) *
3181 			   LRO_IPV6_SEED_ARR_SZ));
3182 
3183 	return status;
3184 }
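/*
 * Flag illustration (editorial; assuming the conventional LRO eligibility
 * check in the target, i.e. (tcp_flags & tcp_flag_mask) == tcp_flag): the
 * values programmed above admit only segments with ACK set and
 * FIN/SYN/RST/URG/ECE/CWR clear -- plain in-order data segments. The
 * Toeplitz seeds are freshly randomized here, so flow-to-ring hashing
 * differs across driver loads.
 */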
3185 
/*
 * dp_rxdma_ring_setup() - configure the RX DMA rings
 * @soc: data path SoC handle
 * @pdev: Physical device handle
 *
 * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_FAILURE on failure
 */
3193 #ifdef QCA_HOST2FW_RXBUF_RING
3194 static int dp_rxdma_ring_setup(struct dp_soc *soc,
3195 	 struct dp_pdev *pdev)
3196 {
3197 	struct wlan_cfg_dp_pdev_ctxt *pdev_cfg_ctx;
3198 	int max_mac_rings;
3199 	int i;
3200 	int ring_size;
3201 
3202 	pdev_cfg_ctx = pdev->wlan_cfg_ctx;
3203 	max_mac_rings = wlan_cfg_get_num_mac_rings(pdev_cfg_ctx);
3204 	ring_size =  wlan_cfg_get_rx_dma_buf_ring_size(pdev_cfg_ctx);
3205 
3206 	for (i = 0; i < max_mac_rings; i++) {
3207 		dp_verbose_debug("pdev_id %d mac_id %d", pdev->pdev_id, i);
3208 		if (dp_srng_setup(soc, &pdev->rx_mac_buf_ring[i],
3209 				  RXDMA_BUF, 1, i, ring_size, 0)) {
3210 			QDF_TRACE(QDF_MODULE_ID_DP,
3211 				 QDF_TRACE_LEVEL_ERROR,
3212 				 FL("failed rx mac ring setup"));
3213 			return QDF_STATUS_E_FAILURE;
3214 		}
3215 	}
3216 	return QDF_STATUS_SUCCESS;
3217 }
3218 #else
3219 static int dp_rxdma_ring_setup(struct dp_soc *soc,
3220 	 struct dp_pdev *pdev)
3221 {
3222 	return QDF_STATUS_SUCCESS;
3223 }
3224 #endif
3225 
3226 /**
3227  * dp_dscp_tid_map_setup(): Initialize the dscp-tid maps
 * @pdev: DP_PDEV handle
3229  *
3230  * Return: void
3231  */
3232 static inline void
3233 dp_dscp_tid_map_setup(struct dp_pdev *pdev)
3234 {
3235 	uint8_t map_id;
3236 	struct dp_soc *soc = pdev->soc;
3237 
3238 	if (!soc)
3239 		return;
3240 
3241 	for (map_id = 0; map_id < DP_MAX_TID_MAPS; map_id++) {
3242 		qdf_mem_copy(pdev->dscp_tid_map[map_id],
3243 			     default_dscp_tid_map,
3244 			     sizeof(default_dscp_tid_map));
3245 	}
3246 
3247 	for (map_id = 0; map_id < soc->num_hw_dscp_tid_map; map_id++) {
3248 		hal_tx_set_dscp_tid_map(soc->hal_soc,
3249 					default_dscp_tid_map,
3250 					map_id);
3251 	}
3252 }
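/*
 * Mapping illustration (editorial; assuming the usual "dscp >> 3" style
 * default table): DSCP 46 (EF, voice) falls in the 40..47 block and would
 * map to TID 5, while best-effort DSCP 0 maps to TID 0. Every pdev map_id
 * starts from the same default table, and the same table is programmed
 * into each of the HW register maps above.
 */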
3253 
3254 /**
3255  * dp_pcp_tid_map_setup(): Initialize the pcp-tid maps
 * @pdev: DP_PDEV handle
3257  *
3258  * Return: void
3259  */
3260 static inline void
3261 dp_pcp_tid_map_setup(struct dp_pdev *pdev)
3262 {
3263 	struct dp_soc *soc = pdev->soc;
3264 
3265 	if (!soc)
3266 		return;
3267 
3268 	qdf_mem_copy(soc->pcp_tid_map, default_pcp_tid_map,
3269 		     sizeof(default_pcp_tid_map));
3270 	hal_tx_set_pcp_tid_map_default(soc->hal_soc, default_pcp_tid_map);
3271 }
3272 
3273 #ifdef IPA_OFFLOAD
3274 /**
3275  * dp_setup_ipa_rx_refill_buf_ring - Setup second Rx refill buffer ring
3276  * @soc: data path instance
3277  * @pdev: core txrx pdev context
3278  *
3279  * Return: QDF_STATUS_SUCCESS: success
3280  *         QDF_STATUS_E_RESOURCES: Error return
3281  */
3282 static int dp_setup_ipa_rx_refill_buf_ring(struct dp_soc *soc,
3283 					   struct dp_pdev *pdev)
3284 {
3285 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
3286 	int entries;
3287 
3288 	soc_cfg_ctx = soc->wlan_cfg_ctx;
3289 	entries = wlan_cfg_get_dp_soc_rxdma_refill_ring_size(soc_cfg_ctx);
3290 
3291 	/* Setup second Rx refill buffer ring */
3292 	if (dp_srng_setup(soc, &pdev->rx_refill_buf_ring2, RXDMA_BUF,
3293 			  IPA_RX_REFILL_BUF_RING_IDX, pdev->pdev_id, entries, 0)
3294 	   ) {
3295 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3296 			FL("dp_srng_setup failed second rx refill ring"));
3297 		return QDF_STATUS_E_FAILURE;
3298 	}
3299 	return QDF_STATUS_SUCCESS;
3300 }
3301 
3302 /**
3303  * dp_cleanup_ipa_rx_refill_buf_ring - Cleanup second Rx refill buffer ring
3304  * @soc: data path instance
3305  * @pdev: core txrx pdev context
3306  *
3307  * Return: void
3308  */
3309 static void dp_cleanup_ipa_rx_refill_buf_ring(struct dp_soc *soc,
3310 					      struct dp_pdev *pdev)
3311 {
3312 	dp_srng_cleanup(soc, &pdev->rx_refill_buf_ring2, RXDMA_BUF,
3313 			IPA_RX_REFILL_BUF_RING_IDX);
3314 }
3315 
3316 #else
3317 static int dp_setup_ipa_rx_refill_buf_ring(struct dp_soc *soc,
3318 					   struct dp_pdev *pdev)
3319 {
3320 	return QDF_STATUS_SUCCESS;
3321 }
3322 
3323 static void dp_cleanup_ipa_rx_refill_buf_ring(struct dp_soc *soc,
3324 					      struct dp_pdev *pdev)
3325 {
3326 }
3327 #endif
3328 
3329 #if !defined(DISABLE_MON_CONFIG)
3330 /**
3331  * dp_mon_rings_setup() - Initialize Monitor rings based on target
3332  * @soc: soc handle
3333  * @pdev: physical device handle
3334  *
3335  * Return: nonzero on failure and zero on success
3336  */
3337 static
3338 QDF_STATUS dp_mon_rings_setup(struct dp_soc *soc, struct dp_pdev *pdev)
3339 {
3340 	int mac_id = 0;
3341 	int pdev_id = pdev->pdev_id;
3342 	int entries;
3343 	struct wlan_cfg_dp_pdev_ctxt *pdev_cfg_ctx;
3344 
3345 	pdev_cfg_ctx = pdev->wlan_cfg_ctx;
3346 
3347 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
3348 		int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, pdev_id);
3349 
3350 		if (soc->wlan_cfg_ctx->rxdma1_enable) {
3351 			entries =
3352 			   wlan_cfg_get_dma_mon_buf_ring_size(pdev_cfg_ctx);
3353 			if (dp_srng_setup(soc,
3354 					  &pdev->rxdma_mon_buf_ring[mac_id],
3355 					  RXDMA_MONITOR_BUF, 0, mac_for_pdev,
3356 					  entries, 0)) {
3357 				QDF_TRACE(QDF_MODULE_ID_DP,
3358 					  QDF_TRACE_LEVEL_ERROR,
					  FL(RNG_ERR "rxdma_mon_buf_ring"));
3360 				return QDF_STATUS_E_NOMEM;
3361 			}
3362 
3363 			entries =
3364 			   wlan_cfg_get_dma_mon_dest_ring_size(pdev_cfg_ctx);
3365 			if (dp_srng_setup(soc,
3366 					  &pdev->rxdma_mon_dst_ring[mac_id],
3367 					  RXDMA_MONITOR_DST, 0, mac_for_pdev,
3368 					  entries, 0)) {
3369 				QDF_TRACE(QDF_MODULE_ID_DP,
3370 					  QDF_TRACE_LEVEL_ERROR,
3371 					  FL(RNG_ERR "rxdma_mon_dst_ring"));
3372 				return QDF_STATUS_E_NOMEM;
3373 			}
3374 
3375 			entries =
3376 			    wlan_cfg_get_dma_mon_stat_ring_size(pdev_cfg_ctx);
3377 			if (dp_srng_setup(soc,
3378 					  &pdev->rxdma_mon_status_ring[mac_id],
3379 					  RXDMA_MONITOR_STATUS, 0, mac_for_pdev,
3380 					  entries, 0)) {
3381 				QDF_TRACE(QDF_MODULE_ID_DP,
3382 					  QDF_TRACE_LEVEL_ERROR,
3383 					  FL(RNG_ERR "rxdma_mon_status_ring"));
3384 				return QDF_STATUS_E_NOMEM;
3385 			}
3386 
3387 			entries =
3388 			   wlan_cfg_get_dma_mon_desc_ring_size(pdev_cfg_ctx);
3389 			if (dp_srng_setup(soc,
3390 					  &pdev->rxdma_mon_desc_ring[mac_id],
3391 					  RXDMA_MONITOR_DESC, 0, mac_for_pdev,
3392 					  entries, 0)) {
3393 				QDF_TRACE(QDF_MODULE_ID_DP,
3394 					  QDF_TRACE_LEVEL_ERROR,
3395 					  FL(RNG_ERR "rxdma_mon_desc_ring"));
3396 				return QDF_STATUS_E_NOMEM;
3397 			}
3398 		} else {
3399 			entries =
3400 			   wlan_cfg_get_dma_mon_stat_ring_size(pdev_cfg_ctx);
3401 			if (dp_srng_setup(soc,
3402 					  &pdev->rxdma_mon_status_ring[mac_id],
3403 					  RXDMA_MONITOR_STATUS, 0, mac_for_pdev,
3404 					  entries, 0)) {
3405 				QDF_TRACE(QDF_MODULE_ID_DP,
3406 					  QDF_TRACE_LEVEL_ERROR,
3407 					  FL(RNG_ERR "rxdma_mon_status_ring"));
3408 				return QDF_STATUS_E_NOMEM;
3409 			}
3410 		}
3411 	}
3412 
3413 	return QDF_STATUS_SUCCESS;
3414 }
3415 #else
3416 static
3417 QDF_STATUS dp_mon_rings_setup(struct dp_soc *soc, struct dp_pdev *pdev)
3418 {
3419 	return QDF_STATUS_SUCCESS;
3420 }
3421 #endif
3422 
/*
 * dp_iterate_update_peer_list() - update peer stats on cal client timer
 * @pdev_hdl: pdev handle
3425  */
3426 #ifdef ATH_SUPPORT_EXT_STAT
3427 void  dp_iterate_update_peer_list(struct cdp_pdev *pdev_hdl)
3428 {
3429 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_hdl;
3430 	struct dp_soc *soc = pdev->soc;
3431 	struct dp_vdev *vdev = NULL;
3432 	struct dp_peer *peer = NULL;
3433 
3434 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
3435 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
3436 	DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
3437 		DP_VDEV_ITERATE_PEER_LIST(vdev, peer) {
3438 			dp_cal_client_update_peer_stats(&peer->stats);
3439 		}
3440 	}
3441 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
3442 	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
3443 }
3444 #else
3445 void  dp_iterate_update_peer_list(struct cdp_pdev *pdev_hdl)
3446 {
3447 }
3448 #endif
3449 
3450 /*
3451  * dp_htt_ppdu_stats_attach() - attach resources for HTT PPDU stats processing
3452  * @pdev: Datapath PDEV handle
3453  *
3454  * Return: QDF_STATUS_SUCCESS: Success
3455  *         QDF_STATUS_E_NOMEM: Error
3456  */
3457 static QDF_STATUS dp_htt_ppdu_stats_attach(struct dp_pdev *pdev)
3458 {
3459 	pdev->ppdu_tlv_buf = qdf_mem_malloc(HTT_T2H_MAX_MSG_SIZE);
3460 
3461 	if (!pdev->ppdu_tlv_buf) {
3462 		QDF_TRACE_ERROR(QDF_MODULE_ID_DP, "ppdu_tlv_buf alloc fail");
3463 		return QDF_STATUS_E_NOMEM;
3464 	}
3465 
3466 	return QDF_STATUS_SUCCESS;
3467 }
3468 
3469 /*
3470  * dp_pdev_attach_wifi3() - attach txrx pdev
3471  * @txrx_soc: Datapath SOC handle
3472  * @htc_handle: HTC handle for host-target interface
3473  * @qdf_osdev: QDF OS device
3474  * @pdev_id: PDEV ID
3475  *
3476  * Return: DP PDEV handle on success, NULL on failure
3477  */
3478 static struct cdp_pdev *dp_pdev_attach_wifi3(struct cdp_soc_t *txrx_soc,
3479 	HTC_HANDLE htc_handle, qdf_device_t qdf_osdev, uint8_t pdev_id)
3480 {
3481 	int ring_size;
3482 	int entries;
3483 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
3484 	int nss_cfg;
3485 	void *sojourn_buf;
3486 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
3487 	struct dp_pdev *pdev = NULL;
3488 
3489 	if (dp_is_soc_reinit(soc)) {
3490 		pdev = soc->pdev_list[pdev_id];
3491 	} else {
3492 		pdev = qdf_mem_malloc(sizeof(*pdev));
3493 		qdf_minidump_log(pdev, sizeof(*pdev), "dp_pdev");
3494 	}
3495 
3496 	if (!pdev) {
3497 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3498 			FL("DP PDEV memory allocation failed"));
3499 		goto fail0;
3500 	}
3501 
3502 	/*
3503 	 * Variable to prevent double pdev deinitialization during
3504 	 * radio detach execution, i.e. in the absence of any vdev.
3505 	 */
3506 	pdev->pdev_deinit = 0;
3507 	pdev->invalid_peer = qdf_mem_malloc(sizeof(struct dp_peer));
3508 
3509 	if (!pdev->invalid_peer) {
3510 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3511 			  FL("Invalid peer memory allocation failed"));
3512 		qdf_mem_free(pdev);
3513 		goto fail0;
3514 	}
3515 
3516 	soc_cfg_ctx = soc->wlan_cfg_ctx;
3517 	pdev->wlan_cfg_ctx = wlan_cfg_pdev_attach(soc->ctrl_psoc);
3518 
3519 	if (!pdev->wlan_cfg_ctx) {
3520 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3521 			FL("pdev cfg_attach failed"));
3522 
3523 		qdf_mem_free(pdev->invalid_peer);
3524 		qdf_mem_free(pdev);
3525 		goto fail0;
3526 	}
3527 
3528 	/*
3529 	 * set nss pdev config based on soc config
3530 	 */
3531 	nss_cfg = wlan_cfg_get_dp_soc_nss_cfg(soc_cfg_ctx);
3532 	wlan_cfg_set_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx,
3533 			(nss_cfg & (1 << pdev_id)));
3534 
3535 	pdev->soc = soc;
3536 	pdev->pdev_id = pdev_id;
3537 	soc->pdev_list[pdev_id] = pdev;
3538 
3539 	pdev->lmac_id = wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, pdev_id);
3540 	soc->pdev_count++;
3541 
3542 	TAILQ_INIT(&pdev->vdev_list);
3543 	qdf_spinlock_create(&pdev->vdev_list_lock);
3544 	pdev->vdev_count = 0;
3545 
3546 	qdf_spinlock_create(&pdev->tx_mutex);
3547 	qdf_spinlock_create(&pdev->neighbour_peer_mutex);
3548 	TAILQ_INIT(&pdev->neighbour_peers_list);
3549 	pdev->neighbour_peers_added = false;
3550 	pdev->monitor_configured = false;
3551 
3552 	if (dp_soc_cmn_setup(soc)) {
3553 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3554 			FL("dp_soc_cmn_setup failed"));
3555 		goto fail1;
3556 	}
3557 
3558 	/* Setup per PDEV TCL rings if configured */
3559 	if (wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
3560 		ring_size =
3561 			wlan_cfg_tx_ring_size(soc_cfg_ctx);
3562 
3563 		if (dp_srng_setup(soc, &soc->tcl_data_ring[pdev_id], TCL_DATA,
3564 				  pdev_id, pdev_id, ring_size, 0)) {
3565 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3566 				FL("dp_srng_setup failed for tcl_data_ring"));
3567 			goto fail1;
3568 		}
3569 
3570 		ring_size =
3571 			wlan_cfg_tx_comp_ring_size(soc_cfg_ctx);
3572 
3573 		if (dp_srng_setup(soc, &soc->tx_comp_ring[pdev_id],
3574 				  WBM2SW_RELEASE, pdev_id, pdev_id,
3575 				  ring_size, 0)) {
3576 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3577 				FL("dp_srng_setup failed for tx_comp_ring"));
3578 			goto fail1;
3579 		}
3580 		soc->num_tcl_data_rings++;
3581 	}
3582 
3583 	/* Tx specific init */
3584 	if (dp_tx_pdev_attach(pdev)) {
3585 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3586 			FL("dp_tx_pdev_attach failed"));
3587 		goto fail1;
3588 	}
3589 
3590 	ring_size = wlan_cfg_get_reo_dst_ring_size(soc->wlan_cfg_ctx);
3591 	/* Setup per PDEV REO rings if configured */
3592 	if (wlan_cfg_per_pdev_rx_ring(soc_cfg_ctx)) {
3593 		if (dp_srng_setup(soc, &soc->reo_dest_ring[pdev_id], REO_DST,
3594 				  pdev_id, pdev_id, ring_size, 0)) {
3595 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3596 				FL("dp_srng_setup failed for reo_dest_ring"));
3597 			goto fail1;
3598 		}
3599 		soc->num_reo_dest_rings++;
3600 	}
3601 
3602 	ring_size =
3603 		wlan_cfg_get_dp_soc_rxdma_refill_ring_size(soc->wlan_cfg_ctx);
3604 
3605 	if (dp_srng_setup(soc, &pdev->rx_refill_buf_ring, RXDMA_BUF, 0, pdev_id,
3606 			  ring_size, 0)) {
3607 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3608 			 FL("dp_srng_setup failed for rx refill ring"));
3609 		goto fail1;
3610 	}
3611 
3612 	if (dp_rxdma_ring_setup(soc, pdev)) {
3613 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3614 			 FL("RXDMA ring config failed"));
3615 		goto fail1;
3616 	}
3617 
3618 	if (dp_mon_rings_setup(soc, pdev)) {
3619 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3620 			  FL("MONITOR rings setup failed"));
3621 		goto fail1;
3622 	}
3623 
3624 	entries = wlan_cfg_get_dp_soc_rxdma_err_dst_ring_size(soc_cfg_ctx);
3625 	if (wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx)) {
3626 		if (dp_srng_setup(soc, &pdev->rxdma_err_dst_ring[0], RXDMA_DST,
3627 				  0, pdev_id, entries, 0)) {
3628 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3629 				  FL(RNG_ERR "rxdma_err_dst_ring"));
3630 			goto fail1;
3631 		}
3632 	}
3633 
3634 	if (dp_setup_ipa_rx_refill_buf_ring(soc, pdev))
3635 		goto fail1;
3636 
3637 	if (dp_ipa_ring_resource_setup(soc, pdev))
3638 		goto fail1;
3639 
3640 	if (dp_ipa_uc_attach(soc, pdev) != QDF_STATUS_SUCCESS) {
3641 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3642 			FL("dp_ipa_uc_attach failed"));
3643 		goto fail1;
3644 	}
3645 
3646 	/* Rx specific init */
3647 	if (dp_rx_pdev_attach(pdev)) {
3648 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3649 			  FL("dp_rx_pdev_attach failed"));
3650 		goto fail2;
3651 	}
3652 
3653 	DP_STATS_INIT(pdev);
3654 
3655 	/* Monitor filter init */
3656 	pdev->mon_filter_mode = MON_FILTER_ALL;
3657 	pdev->fp_mgmt_filter = FILTER_MGMT_ALL;
3658 	pdev->fp_ctrl_filter = FILTER_CTRL_ALL;
3659 	pdev->fp_data_filter = FILTER_DATA_ALL;
3660 	pdev->mo_mgmt_filter = FILTER_MGMT_ALL;
3661 	pdev->mo_ctrl_filter = FILTER_CTRL_ALL;
3662 	pdev->mo_data_filter = FILTER_DATA_ALL;
3663 
3664 	dp_local_peer_id_pool_init(pdev);
3665 
3666 	dp_dscp_tid_map_setup(pdev);
3667 	dp_pcp_tid_map_setup(pdev);
3668 
3669 	/* Rx monitor mode specific init */
3670 	if (dp_rx_pdev_mon_attach(pdev)) {
3671 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
3672 				"dp_rx_pdev_mon_attach failed");
3673 		goto fail2;
3674 	}
3675 
3676 	if (dp_wdi_event_attach(pdev)) {
3677 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
3678 				"dp_wdi_event_attach failed");
3679 		goto wdi_attach_fail;
3680 	}
3681 
3682 	/* set the reo destination during initialization */
3683 	pdev->reo_dest = pdev->pdev_id + 1;
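	/* REO destination ring indices are 1-based, hence pdev_id + 1 */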
3684 
3685 	/*
3686 	 * initialize ppdu tlv list
3687 	 */
3688 	TAILQ_INIT(&pdev->ppdu_info_list);
3689 	pdev->tlv_count = 0;
3690 	pdev->list_depth = 0;
3691 
3692 	qdf_mem_zero(&pdev->sojourn_stats, sizeof(struct cdp_tx_sojourn_stats));
3693 
3694 	pdev->sojourn_buf = qdf_nbuf_alloc(pdev->soc->osdev,
3695 			      sizeof(struct cdp_tx_sojourn_stats), 0, 4,
3696 			      TRUE);
3697 
3698 	if (pdev->sojourn_buf) {
3699 		sojourn_buf = qdf_nbuf_data(pdev->sojourn_buf);
3700 		qdf_mem_zero(sojourn_buf, sizeof(struct cdp_tx_sojourn_stats));
3701 	}
3702 	/* initialize cal client timer */
3703 	dp_cal_client_attach(&pdev->cal_client_ctx,
3704 			     dp_pdev_to_cdp_pdev(pdev),
3705 			     pdev->soc->osdev,
3706 			     &dp_iterate_update_peer_list);
3707 	qdf_event_create(&pdev->fw_peer_stats_event);
3708 
3709 	pdev->num_tx_allowed = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
3710 
3711 	dp_init_tso_stats(pdev);
3712 
3713 	if (dp_htt_ppdu_stats_attach(pdev) != QDF_STATUS_SUCCESS)
3714 		goto fail1;
3715 
3716 	dp_tx_ppdu_stats_attach(pdev);
3717 
3718 	return (struct cdp_pdev *)pdev;
3719 
3720 wdi_attach_fail:
3721 	/*
3722 	 * dp_mon_link_desc_pool_cleanup is done in dp_pdev_detach
3723 	 * and hence need not to be done here.
3724 	 */
3725 	dp_rx_pdev_mon_detach(pdev);
3726 
3727 fail2:
3728 	dp_rx_pdev_detach(pdev);
3729 
3730 fail1:
3731 	if (pdev->invalid_peer)
3732 		qdf_mem_free(pdev->invalid_peer);
3733 	dp_pdev_detach((struct cdp_pdev *)pdev, 0);
3734 
3735 fail0:
3736 	return NULL;
3737 }
3738 
3739 /*
3740  * dp_rxdma_ring_cleanup() - clean up the RX DMA rings
3741  * @soc: data path SoC handle
3742  * @pdev: Physical device handle
3743  *
3744  * Return: void
3745  */
3746 #ifdef QCA_HOST2FW_RXBUF_RING
3747 static void dp_rxdma_ring_cleanup(struct dp_soc *soc,
3748 	 struct dp_pdev *pdev)
3749 {
3750 	int i;
3751 
3752 	for (i = 0; i < MAX_RX_MAC_RINGS; i++)
3753 		dp_srng_cleanup(soc, &pdev->rx_mac_buf_ring[i],
3754 			 RXDMA_BUF, 1);
3755 
3756 	if (soc->reap_timer_init) {
3757 		qdf_timer_free(&soc->mon_reap_timer);
3758 		soc->reap_timer_init = 0;
3759 	}
3760 }
3761 #else
3762 static void dp_rxdma_ring_cleanup(struct dp_soc *soc,
3763 	 struct dp_pdev *pdev)
3764 {
3765 }
3766 #endif
3767 
3768 /*
3769  * dp_neighbour_peers_detach() - Detach neighbour peers (NAC clients)
3770  * @pdev: device object
3771  *
3772  * Return: void
3773  */
3774 static void dp_neighbour_peers_detach(struct dp_pdev *pdev)
3775 {
3776 	struct dp_neighbour_peer *peer = NULL;
3777 	struct dp_neighbour_peer *temp_peer = NULL;
3778 
3779 	TAILQ_FOREACH_SAFE(peer, &pdev->neighbour_peers_list,
3780 			neighbour_peer_list_elem, temp_peer) {
3781 		/* delete this peer from the list */
3782 		TAILQ_REMOVE(&pdev->neighbour_peers_list,
3783 				peer, neighbour_peer_list_elem);
3784 		qdf_mem_free(peer);
3785 	}
3786 
3787 	qdf_spinlock_destroy(&pdev->neighbour_peer_mutex);
3788 }
3789 
3790 /**
3791  * dp_htt_ppdu_stats_detach() - detach stats resources
3792  * @pdev: Datapath PDEV handle
3793  *
3794  * Return: void
3795  */
3796 static void dp_htt_ppdu_stats_detach(struct dp_pdev *pdev)
3797 {
3798 	struct ppdu_info *ppdu_info, *ppdu_info_next;
3799 
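	/* Free any ppdu_info nodes still pending on the pdev list */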
3800 	TAILQ_FOREACH_SAFE(ppdu_info, &pdev->ppdu_info_list,
3801 			ppdu_info_list_elem, ppdu_info_next) {
3802 		if (!ppdu_info)
3803 			break;
3804 		qdf_assert_always(ppdu_info->nbuf);
3805 		qdf_nbuf_free(ppdu_info->nbuf);
3806 		qdf_mem_free(ppdu_info);
3807 	}
3808 
3809 	if (pdev->ppdu_tlv_buf)
3810 		qdf_mem_free(pdev->ppdu_tlv_buf);
3812 }
3813 
3814 #if !defined(DISABLE_MON_CONFIG)
3815 
3816 static
3817 void dp_mon_ring_cleanup(struct dp_soc *soc, struct dp_pdev *pdev,
3818 			 int mac_id)
3819 {
3820 	if (soc->wlan_cfg_ctx->rxdma1_enable) {
3821 		dp_srng_cleanup(soc,
3822 				&pdev->rxdma_mon_buf_ring[mac_id],
3823 				RXDMA_MONITOR_BUF, 0);
3824 
3825 		dp_srng_cleanup(soc,
3826 				&pdev->rxdma_mon_dst_ring[mac_id],
3827 				RXDMA_MONITOR_DST, 0);
3828 
3829 		dp_srng_cleanup(soc,
3830 				&pdev->rxdma_mon_status_ring[mac_id],
3831 				RXDMA_MONITOR_STATUS, 0);
3832 
3833 		dp_srng_cleanup(soc,
3834 				&pdev->rxdma_mon_desc_ring[mac_id],
3835 				RXDMA_MONITOR_DESC, 0);
3836 
3837 		dp_srng_cleanup(soc,
3838 				&pdev->rxdma_err_dst_ring[mac_id],
3839 				RXDMA_DST, 0);
3840 	} else {
3841 		dp_srng_cleanup(soc,
3842 				&pdev->rxdma_mon_status_ring[mac_id],
3843 				RXDMA_MONITOR_STATUS, 0);
3844 
3845 		dp_srng_cleanup(soc,
3846 				&pdev->rxdma_err_dst_ring[mac_id],
3847 				RXDMA_DST, 0);
3848 	}
3850 }
3851 #else
3852 static void dp_mon_ring_cleanup(struct dp_soc *soc, struct dp_pdev *pdev,
3853 				int mac_id)
3854 {
3855 }
3856 #endif
3857 
3858 /**
3859  * dp_mon_ring_deinit() - Placeholder to deinitialize Monitor rings
3860  *
3861  * @soc: soc handle
3862  * @pdev: datapath physical dev handle
3863  * @mac_id: mac number
3864  *
3865  * Return: None
3866  */
3867 static void dp_mon_ring_deinit(struct dp_soc *soc, struct dp_pdev *pdev,
3868 			       int mac_id)
3869 {
3870 }
3871 
3872 /**
3873  * dp_pdev_mem_reset() - Reset txrx pdev memory
3874  * @pdev: dp pdev handle
3875  *
3876  * Return: None
3877  */
3878 static void dp_pdev_mem_reset(struct dp_pdev *pdev)
3879 {
3880 	uint16_t len = 0;
3881 	uint8_t *dp_pdev_offset = (uint8_t *)pdev;
3882 
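	/*
	 * Zero out everything that follows the pdev_deinit flag, so that
	 * pdev_deinit itself and the fields laid out before it survive
	 * across a SoC re-init.
	 */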
3883 	len = sizeof(struct dp_pdev) -
3884 		offsetof(struct dp_pdev, pdev_deinit) -
3885 		sizeof(pdev->pdev_deinit);
3886 	dp_pdev_offset = dp_pdev_offset +
3887 			 offsetof(struct dp_pdev, pdev_deinit) +
3888 			 sizeof(pdev->pdev_deinit);
3889 
3890 	qdf_mem_zero(dp_pdev_offset, len);
3891 }
3892 
3893 #ifdef WLAN_DP_PENDING_MEM_FLUSH
3894 /**
3895  * dp_pdev_flush_pending_vdevs() - Flush all delete pending vdevs in pdev
3896  * @pdev: Datapath PDEV handle
3897  *
3898  * This is the last chance to flush all pending DP vdevs/peers;
3899  * it covers peer/vdev leak cases such as Non-SSR combined with
3900  * missing peer unmap events from the target.
3901  *
3902  * Return: None
3903  */
3904 static void dp_pdev_flush_pending_vdevs(struct dp_pdev *pdev)
3905 {
3906 	struct dp_vdev *vdev = NULL;
3907 
3908 	while (true) {
3909 		qdf_spin_lock_bh(&pdev->vdev_list_lock);
3910 		TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
3911 			if (vdev->delete.pending)
3912 				break;
3913 		}
3914 		qdf_spin_unlock_bh(&pdev->vdev_list_lock);
3915 
3916 		/*
3917 		 * The vdev is freed once all of its peers are cleaned up;
3918 		 * dp_delete_pending_vdev() removes the vdev from the pdev's
3919 		 * vdev_list.
3920 		 */
3921 		if (vdev)
3922 			dp_vdev_flush_peers((struct cdp_vdev *)vdev, 0);
3923 		else
3924 			break;
3925 	}
3926 }
3927 #else
3928 static void dp_pdev_flush_pending_vdevs(struct dp_pdev *pdev)
3929 {
3930 }
3931 #endif
3932 
3933 /**
3934  * dp_pdev_deinit() - Deinit txrx pdev
3935  * @txrx_pdev: Datapath PDEV handle
3936  * @force: Force deinit
3937  *
3938  * Return: None
3939  */
3940 static void dp_pdev_deinit(struct cdp_pdev *txrx_pdev, int force)
3941 {
3942 	struct dp_pdev *pdev = (struct dp_pdev *)txrx_pdev;
3943 	struct dp_soc *soc = pdev->soc;
3944 	qdf_nbuf_t curr_nbuf, next_nbuf;
3945 	int mac_id;
3946 
3947 	/*
3948 	 * Prevent double pdev deinitialization during radio detach
3949 	 * execution, i.e. in the absence of any vdev.
3950 	 */
3951 	if (pdev->pdev_deinit)
3952 		return;
3953 
3954 	pdev->pdev_deinit = 1;
3955 
3956 	dp_wdi_event_detach(pdev);
3957 
3958 	dp_pdev_flush_pending_vdevs(pdev);
3959 
3960 	dp_tx_pdev_detach(pdev);
3961 
3962 	if (wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
3963 		dp_srng_deinit(soc, &soc->tcl_data_ring[pdev->pdev_id],
3964 			       TCL_DATA, pdev->pdev_id);
3965 		dp_srng_deinit(soc, &soc->tx_comp_ring[pdev->pdev_id],
3966 			       WBM2SW_RELEASE, pdev->pdev_id);
3967 	}
3968 
3969 	dp_pktlogmod_exit(pdev);
3970 
3971 	dp_rx_fst_detach(soc, pdev);
3972 	dp_rx_pdev_detach(pdev);
3973 	dp_rx_pdev_mon_detach(pdev);
3974 	dp_neighbour_peers_detach(pdev);
3975 	qdf_spinlock_destroy(&pdev->tx_mutex);
3976 	qdf_spinlock_destroy(&pdev->vdev_list_lock);
3977 
3978 	dp_ipa_uc_detach(soc, pdev);
3979 
3980 	dp_cleanup_ipa_rx_refill_buf_ring(soc, pdev);
3981 
3982 	/* Cleanup per PDEV REO rings if configured */
3983 	if (wlan_cfg_per_pdev_rx_ring(soc->wlan_cfg_ctx)) {
3984 		dp_srng_deinit(soc, &soc->reo_dest_ring[pdev->pdev_id],
3985 			       REO_DST, pdev->pdev_id);
3986 	}
3987 
3988 	dp_srng_deinit(soc, &pdev->rx_refill_buf_ring, RXDMA_BUF, 0);
3989 
3990 	dp_rxdma_ring_cleanup(soc, pdev);
3991 
3992 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
3993 		dp_mon_ring_deinit(soc, pdev, mac_id);
3994 		dp_srng_deinit(soc, &pdev->rxdma_err_dst_ring[mac_id],
3995 			       RXDMA_DST, 0);
3996 	}
3997 
3998 	curr_nbuf = pdev->invalid_peer_head_msdu;
3999 	while (curr_nbuf) {
4000 		next_nbuf = qdf_nbuf_next(curr_nbuf);
4001 		qdf_nbuf_free(curr_nbuf);
4002 		curr_nbuf = next_nbuf;
4003 	}
4004 	pdev->invalid_peer_head_msdu = NULL;
4005 	pdev->invalid_peer_tail_msdu = NULL;
4006 
4007 	dp_htt_ppdu_stats_detach(pdev);
4008 
4009 	dp_tx_ppdu_stats_detach(pdev);
4010 
4011 	qdf_nbuf_free(pdev->sojourn_buf);
4012 	qdf_nbuf_queue_free(&pdev->rx_ppdu_buf_q);
4013 
4014 	dp_cal_client_detach(&pdev->cal_client_ctx);
4015 
4016 	soc->pdev_count--;
4017 
4018 	/* only do soc common cleanup when last pdev do detach */
4019 	if (!(soc->pdev_count))
4020 		dp_soc_cmn_cleanup(soc);
4021 
4022 	wlan_cfg_pdev_detach(pdev->wlan_cfg_ctx);
4023 	if (pdev->invalid_peer)
4024 		qdf_mem_free(pdev->invalid_peer);
4025 	qdf_mem_free(pdev->dp_txrx_handle);
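	/* Zero out everything past pdev_deinit so the pdev can be reused */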
4026 	dp_pdev_mem_reset(pdev);
4027 }
4028 
4029 /**
4030  * dp_pdev_deinit_wifi3() - Deinit txrx pdev
4031  * @psoc: Datapath psoc handle
4032  * @pdev_id: Id of datapath PDEV handle
4033  * @force: Force deinit
4034  *
4035  * Return: QDF_STATUS
4036  */
4037 static QDF_STATUS
4038 dp_pdev_deinit_wifi3(struct cdp_soc_t *psoc, uint8_t pdev_id,
4039 		     int force)
4040 {
4041 	struct dp_soc *soc = (struct dp_soc *)psoc;
4042 	struct dp_pdev *txrx_pdev =
4043 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)psoc,
4044 						   pdev_id);
4045 
4046 	if (!txrx_pdev)
4047 		return QDF_STATUS_E_FAILURE;
4048 
4049 	soc->dp_soc_reinit = TRUE;
4050 
4051 	dp_pdev_deinit((struct cdp_pdev *)txrx_pdev, force);
4052 
4053 	return QDF_STATUS_SUCCESS;
4054 }
4055 
4056 /*
4057  * dp_pdev_detach() - Complete rest of pdev detach
4058  * @txrx_pdev: Datapath PDEV handle
4059  * @force: Force deinit
4060  *
4061  * Return: None
4062  */
4063 static void dp_pdev_detach(struct cdp_pdev *txrx_pdev, int force)
4064 {
4065 	struct dp_pdev *pdev = (struct dp_pdev *)txrx_pdev;
4066 	struct dp_soc *soc = pdev->soc;
4067 	struct rx_desc_pool *rx_desc_pool;
4068 	int mac_id, mac_for_pdev;
4069 
4070 	if (wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
4071 		dp_srng_cleanup(soc, &soc->tcl_data_ring[pdev->pdev_id],
4072 				TCL_DATA, pdev->pdev_id);
4073 		dp_srng_cleanup(soc, &soc->tx_comp_ring[pdev->pdev_id],
4074 				WBM2SW_RELEASE, pdev->pdev_id);
4075 	}
4076 
4077 	dp_mon_link_free(pdev);
4078 
4079 	/* Cleanup per PDEV REO rings if configured */
4080 	if (wlan_cfg_per_pdev_rx_ring(soc->wlan_cfg_ctx)) {
4081 		dp_srng_cleanup(soc, &soc->reo_dest_ring[pdev->pdev_id],
4082 				REO_DST, pdev->pdev_id);
4083 	}
4084 	dp_rxdma_ring_cleanup(soc, pdev);
4085 	wlan_cfg_pdev_detach(pdev->wlan_cfg_ctx);
4086 
4087 	dp_srng_cleanup(soc, &pdev->rx_refill_buf_ring, RXDMA_BUF, 0);
4088 	dp_cleanup_ipa_rx_refill_buf_ring(soc, pdev);
4089 
4090 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
4091 		dp_mon_ring_cleanup(soc, pdev, mac_id);
4092 		dp_srng_cleanup(soc, &pdev->rxdma_err_dst_ring[mac_id],
4093 				RXDMA_DST, 0);
4094 		if (dp_is_soc_reinit(soc)) {
4095 			mac_for_pdev = dp_get_mac_id_for_pdev(mac_id,
4096 							      pdev->pdev_id);
4097 			rx_desc_pool = &soc->rx_desc_status[mac_for_pdev];
4098 			dp_rx_desc_pool_free(soc, rx_desc_pool);
4099 			rx_desc_pool = &soc->rx_desc_mon[mac_for_pdev];
4100 			dp_rx_desc_pool_free(soc, rx_desc_pool);
4101 		}
4102 	}
4103 
4104 	if (dp_is_soc_reinit(soc)) {
4105 		rx_desc_pool = &soc->rx_desc_buf[pdev->pdev_id];
4106 		dp_rx_desc_pool_free(soc, rx_desc_pool);
4107 	}
4108 
4109 	soc->pdev_list[pdev->pdev_id] = NULL;
4110 	qdf_minidump_remove(pdev);
4111 	qdf_mem_free(pdev);
4112 }
4113 
4114 /*
4115  * dp_pdev_detach_wifi3() - detach txrx pdev
4116  * @psoc: Datapath soc handle
4117  * @pdev_id: pdev id of pdev
4118  * @force: Force detach
4119  *
4120  * Return: QDF_STATUS
4121  */
4122 static QDF_STATUS dp_pdev_detach_wifi3(struct cdp_soc_t *psoc, uint8_t pdev_id,
4123 				       int force)
4124 {
4125 	struct dp_soc *soc = (struct dp_soc *)psoc;
4126 	struct dp_pdev *txrx_pdev =
4127 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)psoc,
4128 						   pdev_id);
4129 
4130 	if (!txrx_pdev) {
4131 		dp_err("Couldn't find dp pdev");
4132 		return QDF_STATUS_E_FAILURE;
4133 	}
4134 
4135 	if (dp_is_soc_reinit(soc)) {
4136 		dp_pdev_detach((struct cdp_pdev *)txrx_pdev, force);
4137 	} else {
4138 		dp_pdev_deinit((struct cdp_pdev *)txrx_pdev, force);
4139 		dp_pdev_detach((struct cdp_pdev *)txrx_pdev, force);
4140 	}
4141 
4142 	return QDF_STATUS_SUCCESS;
4143 }
4144 
4145 /*
4146  * dp_reo_desc_freelist_destroy() - Flush REO descriptors from deferred freelist
4147  * @soc: DP SOC handle
4148  */
4149 static inline void dp_reo_desc_freelist_destroy(struct dp_soc *soc)
4150 {
4151 	struct reo_desc_list_node *desc;
4152 	struct dp_rx_tid *rx_tid;
4153 
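	/*
	 * Drain the deferred freelist: unmap each RX TID hardware queue
	 * descriptor from DMA before freeing the host allocation.
	 */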
4154 	qdf_spin_lock_bh(&soc->reo_desc_freelist_lock);
4155 	while (qdf_list_remove_front(&soc->reo_desc_freelist,
4156 		(qdf_list_node_t **)&desc) == QDF_STATUS_SUCCESS) {
4157 		rx_tid = &desc->rx_tid;
4158 		qdf_mem_unmap_nbytes_single(soc->osdev,
4159 			rx_tid->hw_qdesc_paddr,
4160 			QDF_DMA_BIDIRECTIONAL,
4161 			rx_tid->hw_qdesc_alloc_size);
4162 		qdf_mem_free(rx_tid->hw_qdesc_vaddr_unaligned);
4163 		qdf_mem_free(desc);
4164 	}
4165 	qdf_spin_unlock_bh(&soc->reo_desc_freelist_lock);
4166 	qdf_list_destroy(&soc->reo_desc_freelist);
4167 	qdf_spinlock_destroy(&soc->reo_desc_freelist_lock);
4168 }
4169 
4170 /**
4171  * dp_soc_mem_reset() - Reset DP SOC memory
4172  * @soc: DP handle
4173  *
4174  * Return: None
4175  */
4176 static void dp_soc_mem_reset(struct dp_soc *soc)
4177 {
4178 	uint16_t len = 0;
4179 	uint8_t *dp_soc_offset = (uint8_t *)soc;
4180 
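	/*
	 * Zero out everything that follows the dp_soc_reinit flag, so that
	 * dp_soc_reinit itself and the fields laid out before it survive
	 * across a re-init.
	 */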
4181 	len = sizeof(struct dp_soc) -
4182 		offsetof(struct dp_soc, dp_soc_reinit) -
4183 		sizeof(soc->dp_soc_reinit);
4184 	dp_soc_offset = dp_soc_offset +
4185 			offsetof(struct dp_soc, dp_soc_reinit) +
4186 			sizeof(soc->dp_soc_reinit);
4187 
4188 	qdf_mem_zero(dp_soc_offset, len);
4189 }
4190 
4191 /**
4192  * dp_soc_deinit() - Deinitialize txrx SOC
4193  * @txrx_soc: Opaque DP SOC handle
4194  *
4195  * Return: None
4196  */
4197 static void dp_soc_deinit(void *txrx_soc)
4198 {
4199 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
4200 	int i;
4201 
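	/* Mark common init as not done before starting the teardown */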
4202 	qdf_atomic_set(&soc->cmn_init_done, 0);
4203 
4204 	for (i = 0; i < MAX_PDEV_CNT; i++) {
4205 		if (soc->pdev_list[i])
4206 			dp_pdev_deinit((struct cdp_pdev *)
4207 					soc->pdev_list[i], 1);
4208 	}
4209 
4210 	qdf_flush_work(&soc->htt_stats.work);
4211 	qdf_disable_work(&soc->htt_stats.work);
4212 
4213 	/* Free pending htt stats messages */
4214 	qdf_nbuf_queue_free(&soc->htt_stats.msg);
4215 
4216 	dp_peer_find_detach(soc);
4217 
4218 	/* Free the ring memories */
4219 	/* Common rings */
4220 	dp_srng_deinit(soc, &soc->wbm_desc_rel_ring, SW2WBM_RELEASE, 0);
4221 
4222 	/* Tx data rings */
4223 	if (!wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
4224 		for (i = 0; i < soc->num_tcl_data_rings; i++) {
4225 			dp_srng_deinit(soc, &soc->tcl_data_ring[i],
4226 				       TCL_DATA, i);
4227 			dp_srng_deinit(soc, &soc->tx_comp_ring[i],
4228 				       WBM2SW_RELEASE, i);
4229 		}
4230 	}
4231 
4232 	/* TCL command and status rings */
4233 	dp_srng_deinit(soc, &soc->tcl_cmd_ring, TCL_CMD, 0);
4234 	dp_srng_deinit(soc, &soc->tcl_status_ring, TCL_STATUS, 0);
4235 
4236 	/* Rx data rings */
4237 	if (!wlan_cfg_per_pdev_rx_ring(soc->wlan_cfg_ctx)) {
4238 		soc->num_reo_dest_rings =
4239 			wlan_cfg_num_reo_dest_rings(soc->wlan_cfg_ctx);
4240 		for (i = 0; i < soc->num_reo_dest_rings; i++) {
4241 			/* TODO: Get number of rings and ring sizes
4242 			 * from wlan_cfg
4243 			 */
4244 			dp_srng_deinit(soc, &soc->reo_dest_ring[i],
4245 				       REO_DST, i);
4246 		}
4247 	}
4248 	/* REO reinjection ring */
4249 	dp_srng_deinit(soc, &soc->reo_reinject_ring, REO_REINJECT, 0);
4250 
4251 	/* Rx release ring */
4252 	dp_srng_deinit(soc, &soc->rx_rel_ring, WBM2SW_RELEASE, 0);
4253 
4254 	/* Rx exception ring */
4255 	/* TODO: Better to store ring_type and ring_num in
4256 	 * dp_srng during setup
4257 	 */
4258 	dp_srng_deinit(soc, &soc->reo_exception_ring, REO_EXCEPTION, 0);
4259 
4260 	/* REO command and status rings */
4261 	dp_srng_deinit(soc, &soc->reo_cmd_ring, REO_CMD, 0);
4262 	dp_srng_deinit(soc, &soc->reo_status_ring, REO_STATUS, 0);
4263 
4264 	dp_soc_wds_detach(soc);
4265 
4266 	qdf_spinlock_destroy(&soc->peer_ref_mutex);
4267 	qdf_spinlock_destroy(&soc->htt_stats.lock);
4268 
4269 	htt_soc_htc_dealloc(soc->htt_handle);
4270 
4271 	dp_reo_desc_freelist_destroy(soc);
4272 
4273 	qdf_spinlock_destroy(&soc->ast_lock);
4274 
4275 	dp_soc_mem_reset(soc);
4276 }
4277 
4278 /**
4279  * dp_soc_deinit_wifi3() - Deinitialize txrx SOC
4280  * @txrx_soc: Opaque DP SOC handle
4281  *
4282  * Return: None
4283  */
4284 static void dp_soc_deinit_wifi3(struct cdp_soc_t *txrx_soc)
4285 {
4286 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
4287 
4288 	soc->dp_soc_reinit = 1;
4289 	dp_soc_deinit(txrx_soc);
4290 }
4291 
4292 /*
4293  * dp_soc_detach() - Detach rest of txrx SOC
4294  * @txrx_soc: DP SOC handle, struct cdp_soc_t is first element of struct dp_soc.
4295  *
4296  * Return: None
4297  */
4298 static void dp_soc_detach(struct cdp_soc_t *txrx_soc)
4299 {
4300 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
4301 	int i;
4302 
4303 	qdf_atomic_set(&soc->cmn_init_done, 0);
4304 
4305 	/* TBD: Call Tx and Rx cleanup functions to free buffers and
4306 	 * SW descriptors
4307 	 */
4308 
4309 	for (i = 0; i < MAX_PDEV_CNT; i++) {
4310 		if (soc->pdev_list[i])
4311 			dp_pdev_detach((struct cdp_pdev *)
4312 					     soc->pdev_list[i], 1);
4313 	}
4314 
4315 	/* Free the ring memories */
4316 	/* Common rings */
4317 	qdf_minidump_remove(soc->wbm_desc_rel_ring.base_vaddr_unaligned);
4318 	dp_srng_cleanup(soc, &soc->wbm_desc_rel_ring, SW2WBM_RELEASE, 0);
4319 
4320 	if (dp_is_soc_reinit(soc)) {
4321 		dp_tx_soc_detach(soc);
4322 	}
4323 
4324 	/* Tx data rings */
4325 	if (!wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
4326 		for (i = 0; i < soc->num_tcl_data_rings; i++) {
4327 			dp_srng_cleanup(soc, &soc->tcl_data_ring[i],
4328 				TCL_DATA, i);
4329 			dp_srng_cleanup(soc, &soc->tx_comp_ring[i],
4330 				WBM2SW_RELEASE, i);
4331 		}
4332 	}
4333 
4334 	/* TCL command and status rings */
4335 	dp_srng_cleanup(soc, &soc->tcl_cmd_ring, TCL_CMD, 0);
4336 	dp_srng_cleanup(soc, &soc->tcl_status_ring, TCL_STATUS, 0);
4337 
4338 	/* Rx data rings */
4339 	if (!wlan_cfg_per_pdev_rx_ring(soc->wlan_cfg_ctx)) {
4340 		soc->num_reo_dest_rings =
4341 			wlan_cfg_num_reo_dest_rings(soc->wlan_cfg_ctx);
4342 		for (i = 0; i < soc->num_reo_dest_rings; i++) {
4343 			/* TODO: Get number of rings and ring sizes
4344 			 * from wlan_cfg
4345 			 */
4346 			dp_srng_cleanup(soc, &soc->reo_dest_ring[i],
4347 				REO_DST, i);
4348 		}
4349 	}
4350 	/* REO reinjection ring */
4351 	dp_srng_cleanup(soc, &soc->reo_reinject_ring, REO_REINJECT, 0);
4352 
4353 	/* Rx release ring */
4354 	dp_srng_cleanup(soc, &soc->rx_rel_ring, WBM2SW_RELEASE, 0);
4355 	dp_srng_cleanup(soc, &soc->rx_rel_ring, WBM2SW_RELEASE, 3);
4356 
4357 	/* Rx exception ring */
4358 	/* TODO: Better to store ring_type and ring_num in
4359 	 * dp_srng during setup
4360 	 */
4361 	dp_srng_cleanup(soc, &soc->reo_exception_ring, REO_EXCEPTION, 0);
4362 
4363 	/* REO command and status rings */
4364 	dp_srng_cleanup(soc, &soc->reo_cmd_ring, REO_CMD, 0);
4365 	dp_srng_cleanup(soc, &soc->reo_status_ring, REO_STATUS, 0);
4366 	dp_hw_link_desc_pool_cleanup(soc);
4367 
4368 	htt_soc_detach(soc->htt_handle);
4369 	soc->dp_soc_reinit = 0;
4370 
4371 	wlan_cfg_soc_detach(soc->wlan_cfg_ctx);
4372 
4373 	qdf_minidump_remove(soc);
4374 	qdf_mem_free(soc);
4375 }
4376 
4377 /*
4378  * dp_soc_detach_wifi3() - Detach txrx SOC
4379  * @txrx_soc: DP SOC handle, struct cdp_soc_t is first element of struct dp_soc.
4380  *
4381  * Return: None
4382  */
4383 static void dp_soc_detach_wifi3(struct cdp_soc_t *txrx_soc)
4384 {
4385 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
4386 
4387 	if (dp_is_soc_reinit(soc)) {
4388 		dp_soc_detach(txrx_soc);
4389 	} else {
4390 		dp_soc_deinit(txrx_soc);
4391 		dp_soc_detach(txrx_soc);
4392 	}
4393 }
4394 
4395 #if !defined(DISABLE_MON_CONFIG)
4396 /**
4397  * dp_mon_htt_srng_setup() - Prepare HTT messages for Monitor rings
4398  * @soc: soc handle
4399  * @pdev: physical device handle
4400  * @mac_id: MAC ring index within the pdev
4401  * @mac_for_pdev: MAC id mapped for this pdev
4402  *
4403  * Return: non-zero for failure, zero for success
4404  */
4405 static QDF_STATUS dp_mon_htt_srng_setup(struct dp_soc *soc,
4406 					struct dp_pdev *pdev,
4407 					int mac_id,
4408 					int mac_for_pdev)
4409 {
4410 	QDF_STATUS status = QDF_STATUS_SUCCESS;
4411 
4412 	if (soc->wlan_cfg_ctx->rxdma1_enable) {
4413 		status = htt_srng_setup(soc->htt_handle, mac_for_pdev,
4414 					pdev->rxdma_mon_buf_ring[mac_id]
4415 					.hal_srng,
4416 					RXDMA_MONITOR_BUF);
4417 
4418 		if (status != QDF_STATUS_SUCCESS) {
4419 			dp_err("Failed to send htt srng setup message for Rxdma mon buf ring");
4420 			return status;
4421 		}
4422 
4423 		status = htt_srng_setup(soc->htt_handle, mac_for_pdev,
4424 					pdev->rxdma_mon_dst_ring[mac_id]
4425 					.hal_srng,
4426 					RXDMA_MONITOR_DST);
4427 
4428 		if (status != QDF_STATUS_SUCCESS) {
4429 			dp_err("Failed to send htt srng setup message for Rxdma mon dst ring");
4430 			return status;
4431 		}
4432 
4433 		status = htt_srng_setup(soc->htt_handle, mac_for_pdev,
4434 					pdev->rxdma_mon_status_ring[mac_id]
4435 					.hal_srng,
4436 					RXDMA_MONITOR_STATUS);
4437 
4438 		if (status != QDF_STATUS_SUCCESS) {
4439 			dp_err("Failed to send htt srng setup message for Rxdma mon status ring");
4440 			return status;
4441 		}
4442 
4443 		status = htt_srng_setup(soc->htt_handle, mac_for_pdev,
4444 					pdev->rxdma_mon_desc_ring[mac_id]
4445 					.hal_srng,
4446 					RXDMA_MONITOR_DESC);
4447 
4448 		if (status != QDF_STATUS_SUCCESS) {
4449 			dp_err("Failed to send htt srng message for Rxdma mon desc ring");
4450 			return status;
4451 		}
4452 	} else {
4453 		status = htt_srng_setup(soc->htt_handle, mac_for_pdev,
4454 					pdev->rxdma_mon_status_ring[mac_id]
4455 					.hal_srng,
4456 					RXDMA_MONITOR_STATUS);
4457 
4458 		if (status != QDF_STATUS_SUCCESS) {
4459 			dp_err("Failed to send htt srng setup message for Rxdma mon status ring");
4460 			return status;
4461 		}
4462 	}
4463 
4464 	return status;
4465 
4466 }
4467 #else
4468 static QDF_STATUS dp_mon_htt_srng_setup(struct dp_soc *soc,
4469 					struct dp_pdev *pdev,
4470 					int mac_id,
4471 					int mac_for_pdev)
4472 {
4473 	return QDF_STATUS_SUCCESS;
4474 }
4475 #endif
4476 
4477 /*
4478  * dp_rxdma_ring_config() - configure the RX DMA rings
4479  *
4480  * This function is used to configure the MAC rings.
4481  * On MCL, the host provides buffers in the Host2FW ring;
4482  * FW refills (copies) buffers to the ring and updates
4483  * the ring index in a register.
4484  *
4485  * @soc: data path SoC handle
4486  *
4487  * Return: zero on success, non-zero on failure
4488  */
4489 #ifdef QCA_HOST2FW_RXBUF_RING
4490 static QDF_STATUS dp_rxdma_ring_config(struct dp_soc *soc)
4491 {
4492 	int i;
4493 	QDF_STATUS status = QDF_STATUS_SUCCESS;

4494 	for (i = 0; i < MAX_PDEV_CNT; i++) {
4495 		struct dp_pdev *pdev = soc->pdev_list[i];
4496 
4497 		if (pdev) {
4498 			int mac_id;
4499 			bool dbs_enable = false;
4500 			int max_mac_rings =
4501 				 wlan_cfg_get_num_mac_rings
4502 				(pdev->wlan_cfg_ctx);
4503 
4504 			htt_srng_setup(soc->htt_handle, 0,
4505 				 pdev->rx_refill_buf_ring.hal_srng,
4506 				 RXDMA_BUF);
4507 
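			/*
			 * The second refill ring is only initialized in some
			 * configurations; set it up when present.
			 */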
4508 			if (pdev->rx_refill_buf_ring2.hal_srng)
4509 				htt_srng_setup(soc->htt_handle, 0,
4510 					pdev->rx_refill_buf_ring2.hal_srng,
4511 					RXDMA_BUF);
4512 
4513 			if (soc->cdp_soc.ol_ops->
4514 				is_hw_dbs_2x2_capable) {
4515 				dbs_enable = soc->cdp_soc.ol_ops->
4516 					is_hw_dbs_2x2_capable(
4517 							(void *)soc->ctrl_psoc);
4518 			}
4519 
4520 			if (dbs_enable) {
4521 				QDF_TRACE(QDF_MODULE_ID_TXRX,
4522 				QDF_TRACE_LEVEL_ERROR,
4523 				FL("DBS enabled max_mac_rings %d"),
4524 					 max_mac_rings);
4525 			} else {
4526 				max_mac_rings = 1;
4527 				QDF_TRACE(QDF_MODULE_ID_TXRX,
4528 					 QDF_TRACE_LEVEL_ERROR,
4529 					 FL("DBS disabled, max_mac_rings %d"),
4530 					 max_mac_rings);
4531 			}
4532 
4533 			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
4534 					 FL("pdev_id %d max_mac_rings %d"),
4535 					 pdev->pdev_id, max_mac_rings);
4536 
4537 			for (mac_id = 0; mac_id < max_mac_rings; mac_id++) {
4538 				int mac_for_pdev = dp_get_mac_id_for_pdev(
4539 							mac_id, pdev->pdev_id);
4540 
4541 				QDF_TRACE(QDF_MODULE_ID_TXRX,
4542 					 QDF_TRACE_LEVEL_ERROR,
4543 					 FL("mac_id %d"), mac_for_pdev);
4544 
4545 				htt_srng_setup(soc->htt_handle, mac_for_pdev,
4546 					 pdev->rx_mac_buf_ring[mac_id]
4547 						.hal_srng,
4548 					 RXDMA_BUF);
4549 				htt_srng_setup(soc->htt_handle, mac_for_pdev,
4550 					pdev->rxdma_err_dst_ring[mac_id]
4551 						.hal_srng,
4552 					RXDMA_DST);
4553 
4554 				/* Configure monitor mode rings */
4555 				status = dp_mon_htt_srng_setup(soc, pdev,
4556 							       mac_id,
4557 							       mac_for_pdev);
4558 				if (status != QDF_STATUS_SUCCESS) {
4559 					dp_err("Failed to send htt monitor messages to target");
4560 					return status;
4561 				}
4562 
4563 			}
4564 		}
4565 	}
4566 
4567 	/*
4568 	 * Timer to reap rxdma status rings.
4569 	 * Needed until we enable ppdu end interrupts
4570 	 */
4571 	qdf_timer_init(soc->osdev, &soc->mon_reap_timer,
4572 			dp_service_mon_rings, (void *)soc,
4573 			QDF_TIMER_TYPE_WAKE_APPS);
4574 	soc->reap_timer_init = 1;
4575 	return status;
4576 }
4577 #else
4578 /* This is only for WIN */
4579 static QDF_STATUS dp_rxdma_ring_config(struct dp_soc *soc)
4580 {
4581 	int i;
4582 	int mac_id;
4583 	QDF_STATUS status = QDF_STATUS_SUCCESS;
4584 
4585 	for (i = 0; i < MAX_PDEV_CNT; i++) {
4586 		struct dp_pdev *pdev = soc->pdev_list[i];
4587 
4588 		if (!pdev)
4589 			continue;
4590 
4591 		for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
4592 			int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, i);
4593 
4594 			htt_srng_setup(soc->htt_handle, mac_for_pdev,
4595 				pdev->rx_refill_buf_ring.hal_srng, RXDMA_BUF);
4596 #ifndef DISABLE_MON_CONFIG
4597 			htt_srng_setup(soc->htt_handle, mac_for_pdev,
4598 				pdev->rxdma_mon_buf_ring[mac_id].hal_srng,
4599 				RXDMA_MONITOR_BUF);
4600 			htt_srng_setup(soc->htt_handle, mac_for_pdev,
4601 				pdev->rxdma_mon_dst_ring[mac_id].hal_srng,
4602 				RXDMA_MONITOR_DST);
4603 			htt_srng_setup(soc->htt_handle, mac_for_pdev,
4604 				pdev->rxdma_mon_status_ring[mac_id].hal_srng,
4605 				RXDMA_MONITOR_STATUS);
4606 			htt_srng_setup(soc->htt_handle, mac_for_pdev,
4607 				pdev->rxdma_mon_desc_ring[mac_id].hal_srng,
4608 				RXDMA_MONITOR_DESC);
4609 #endif
4610 			htt_srng_setup(soc->htt_handle, mac_for_pdev,
4611 				pdev->rxdma_err_dst_ring[mac_id].hal_srng,
4612 				RXDMA_DST);
4613 		}
4614 	}
4615 	return status;
4616 }
4617 #endif
4618 
4619 #ifdef NO_RX_PKT_HDR_TLV
4620 static QDF_STATUS
4621 dp_rxdma_ring_sel_cfg(struct dp_soc *soc)
4622 {
4623 	int i;
4624 	int mac_id;
4625 	struct htt_rx_ring_tlv_filter htt_tlv_filter = {0};
4626 	QDF_STATUS status = QDF_STATUS_SUCCESS;
4627 
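	/*
	 * Subscribe to the per-packet TLVs needed for RX processing
	 * (mpdu/msdu start/end, attention and the packet payload) while
	 * leaving the rx_pkt_header TLV unsubscribed, as this build
	 * defines NO_RX_PKT_HDR_TLV.
	 */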
4628 	htt_tlv_filter.mpdu_start = 1;
4629 	htt_tlv_filter.msdu_start = 1;
4630 	htt_tlv_filter.mpdu_end = 1;
4631 	htt_tlv_filter.msdu_end = 1;
4632 	htt_tlv_filter.attention = 1;
4633 	htt_tlv_filter.packet = 1;
4634 	htt_tlv_filter.packet_header = 0;
4635 
4636 	htt_tlv_filter.ppdu_start = 0;
4637 	htt_tlv_filter.ppdu_end = 0;
4638 	htt_tlv_filter.ppdu_end_user_stats = 0;
4639 	htt_tlv_filter.ppdu_end_user_stats_ext = 0;
4640 	htt_tlv_filter.ppdu_end_status_done = 0;
4641 	htt_tlv_filter.enable_fp = 1;
4642 	htt_tlv_filter.enable_md = 0;
4644 	htt_tlv_filter.enable_mo = 0;
4645 
4646 	htt_tlv_filter.fp_mgmt_filter = 0;
4647 	htt_tlv_filter.fp_ctrl_filter = FILTER_CTRL_BA_REQ;
4648 	htt_tlv_filter.fp_data_filter = (FILTER_DATA_UCAST |
4649 					 FILTER_DATA_MCAST |
4650 					 FILTER_DATA_DATA);
4651 	htt_tlv_filter.mo_mgmt_filter = 0;
4652 	htt_tlv_filter.mo_ctrl_filter = 0;
4653 	htt_tlv_filter.mo_data_filter = 0;
4654 	htt_tlv_filter.md_data_filter = 0;
4655 
4656 	htt_tlv_filter.offset_valid = true;
4657 
4658 	htt_tlv_filter.rx_packet_offset = RX_PKT_TLVS_LEN;
4659 	/* Not subscribing to the rx_pkt_header TLV */
4660 	htt_tlv_filter.rx_header_offset = 0;
4661 	htt_tlv_filter.rx_mpdu_start_offset =
4662 				HAL_RX_PKT_TLV_MPDU_START_OFFSET(soc->hal_soc);
4663 	htt_tlv_filter.rx_mpdu_end_offset =
4664 				HAL_RX_PKT_TLV_MPDU_END_OFFSET(soc->hal_soc);
4665 	htt_tlv_filter.rx_msdu_start_offset =
4666 				HAL_RX_PKT_TLV_MSDU_START_OFFSET(soc->hal_soc);
4667 	htt_tlv_filter.rx_msdu_end_offset =
4668 				HAL_RX_PKT_TLV_MSDU_END_OFFSET(soc->hal_soc);
4669 	htt_tlv_filter.rx_attn_offset =
4670 				HAL_RX_PKT_TLV_ATTN_OFFSET(soc->hal_soc);
4671 
4672 	for (i = 0; i < MAX_PDEV_CNT; i++) {
4673 		struct dp_pdev *pdev = soc->pdev_list[i];
4674 
4675 		if (!pdev)
4676 			continue;
4677 
4678 		for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
4679 			int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id,
4680 					pdev->pdev_id);
4681 
4682 			htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
4683 					    pdev->rx_refill_buf_ring.hal_srng,
4684 					    RXDMA_BUF, RX_BUFFER_SIZE,
4685 					    &htt_tlv_filter);
4686 		}
4687 	}
4688 	return status;
4689 }
4690 #else
4691 static QDF_STATUS
4692 dp_rxdma_ring_sel_cfg(struct dp_soc *soc)
4693 {
4694 	return QDF_STATUS_SUCCESS;
4695 }
4696 #endif
4697 
4698 /*
4699  * dp_rx_target_fst_config() - configure the RXOLE Flow Search Engine
4700  *
4701  * This function is used to configure the FSE HW block in RX OLE on a
4702  * per pdev basis. Here, we will be programming parameters related to
4703  * the Flow Search Table.
4704  *
4705  * @soc: data path SoC handle
4706  *
4707  * Return: zero on success, non-zero on failure
4708  */
4709 #ifdef WLAN_SUPPORT_RX_FLOW_TAG
4710 static QDF_STATUS
4711 dp_rx_target_fst_config(struct dp_soc *soc)
4712 {
4713 	int i;
4714 	QDF_STATUS status = QDF_STATUS_SUCCESS;
4715 
4716 	for (i = 0; i < MAX_PDEV_CNT; i++) {
4717 		struct dp_pdev *pdev = soc->pdev_list[i];
4718 
4719 		/* Flow search is not enabled if NSS offload is enabled */
4720 		if (pdev &&
4721 		    !wlan_cfg_get_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx)) {
4722 			status = dp_rx_flow_send_fst_fw_setup(pdev->soc, pdev);
4723 			if (status != QDF_STATUS_SUCCESS)
4724 				break;
4725 		}
4726 	}
4727 	return status;
4728 }
4729 #else
4730 /**
4731  * dp_rx_target_fst_config() - Configure RX OLE FSE engine in HW
4732  * @soc: SoC handle
4733  *
4734  * Return: Success
4735  */
4736 static inline QDF_STATUS
4737 dp_rx_target_fst_config(struct dp_soc *soc)
4738 {
4739 	return QDF_STATUS_SUCCESS;
4740 }
4741 
4742 #endif /* WLAN_SUPPORT_RX_FLOW_TAG */
4743 
4744 /*
4745  * dp_soc_attach_target_wifi3() - SOC initialization in the target
4746  * @cdp_soc: Opaque Datapath SOC handle
4747  *
4748  * Return: zero on success, non-zero on failure
4749  */
4750 static QDF_STATUS
4751 dp_soc_attach_target_wifi3(struct cdp_soc_t *cdp_soc)
4752 {
4753 	struct dp_soc *soc = (struct dp_soc *)cdp_soc;
4754 	QDF_STATUS status = QDF_STATUS_SUCCESS;
4755 
4756 	htt_soc_attach_target(soc->htt_handle);
4757 
4758 	status = dp_rxdma_ring_config(soc);
4759 	if (status != QDF_STATUS_SUCCESS) {
4760 		dp_err("Failed to send htt srng setup messages to target");
4761 		return status;
4762 	}
4763 
4764 	status = dp_rxdma_ring_sel_cfg(soc);
4765 	if (status != QDF_STATUS_SUCCESS) {
4766 		dp_err("Failed to send htt ring config message to target");
4767 		return status;
4768 	}
4769 
4770 	status = dp_rx_target_fst_config(soc);
4771 	if (status != QDF_STATUS_SUCCESS) {
4772 		dp_err("Failed to send htt fst setup config message to target");
4773 		return status;
4774 	}
4775 
4776 	DP_STATS_INIT(soc);
4777 
4778 	/* initialize work queue for stats processing */
4779 	qdf_create_work(0, &soc->htt_stats.work, htt_t2h_stats_handler, soc);
4780 
4781 	qdf_minidump_log(soc, sizeof(*soc), "dp_soc");
4782 
4783 	return QDF_STATUS_SUCCESS;
4784 }
4785 
4786 /*
4787  * dp_soc_get_nss_cfg_wifi3() - SOC get nss config
4788  * @cdp_soc: Datapath SOC handle
4789  */
4790 static int dp_soc_get_nss_cfg_wifi3(struct cdp_soc_t *cdp_soc)
4791 {
4792 	struct dp_soc *dsoc = (struct dp_soc *)cdp_soc;

4793 	return wlan_cfg_get_dp_soc_nss_cfg(dsoc->wlan_cfg_ctx);
4794 }
4795 
4796 /*
4797  * dp_soc_set_nss_cfg_wifi3() - SOC set nss config
4798  * @cdp_soc: Datapath SOC handle
4799  * @config: nss config value
4800  */
4801 static void dp_soc_set_nss_cfg_wifi3(struct cdp_soc_t *cdp_soc, int config)
4802 {
4803 	struct dp_soc *dsoc = (struct dp_soc *)cdp_soc;
4804 	struct wlan_cfg_dp_soc_ctxt *wlan_cfg_ctx = dsoc->wlan_cfg_ctx;
4805 
4806 	wlan_cfg_set_dp_soc_nss_cfg(wlan_cfg_ctx, config);
4807 
4808 	/*
4809 	 * TODO: masked out based on the per offloaded radio
4810 	 */
4811 	switch (config) {
4812 	case dp_nss_cfg_default:
4813 		break;
4814 	case dp_nss_cfg_first_radio:
4815 		/*
4816 		 * This configuration is valid for single band radio which
4817 		 * is also NSS offload.
4818 		 */
4819 	case dp_nss_cfg_dbdc:
4820 	case dp_nss_cfg_dbtc:
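		/*
		 * For these NSS offload configurations the host Tx
		 * descriptor pools are not used, so size them to zero.
		 */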
4821 		wlan_cfg_set_num_tx_desc_pool(wlan_cfg_ctx, 0);
4822 		wlan_cfg_set_num_tx_ext_desc_pool(wlan_cfg_ctx, 0);
4823 		wlan_cfg_set_num_tx_desc(wlan_cfg_ctx, 0);
4824 		wlan_cfg_set_num_tx_ext_desc(wlan_cfg_ctx, 0);
4825 		break;
4826 	default:
4827 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
4828 			  "Invalid offload config %d", config);
4829 	}
4830 
4831 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
4832 		  FL("nss-wifi<0> nss config is enabled"));
4833 }
4834 
4835 /*
4836  * dp_vdev_attach_wifi3() - attach txrx vdev
4837  * @cdp_soc: Datapath SOC handle
 * @pdev_id: PDEV ID
4838  * @vdev_mac_addr: MAC address of the virtual interface
4839  * @vdev_id: VDEV Id
4840  * @op_mode: VDEV operating mode
4841  * @subtype: VDEV operating subtype
4842  *
4843  * Return: DP VDEV handle on success, NULL on failure
4844  */
4845 static struct cdp_vdev *dp_vdev_attach_wifi3(struct cdp_soc_t *cdp_soc,
4846 					     uint8_t pdev_id,
4847 					     uint8_t *vdev_mac_addr,
4848 					     uint8_t vdev_id,
4849 					     enum wlan_op_mode op_mode,
4850 					     enum wlan_op_subtype subtype)
4851 {
4852 	struct dp_soc *soc = (struct dp_soc *)cdp_soc;
4853 	struct dp_pdev *pdev =
4854 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
4855 						   pdev_id);
4856 	struct dp_vdev *vdev = qdf_mem_malloc(sizeof(*vdev));
4857 
4858 	if (!pdev) {
4859 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
4860 			  FL("DP PDEV is Null for pdev id %d"), pdev_id);
4861 		qdf_mem_free(vdev);
4862 		goto fail0;
4863 	}
4864 
4865 	if (!vdev) {
4866 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
4867 			FL("DP VDEV memory allocation failed"));
4868 		goto fail0;
4869 	}
4870 
4871 	vdev->pdev = pdev;
4872 	vdev->vdev_id = vdev_id;
4873 	vdev->opmode = op_mode;
4874 	vdev->subtype = subtype;
4875 	vdev->osdev = soc->osdev;
4876 
4877 	vdev->osif_rx = NULL;
4878 	vdev->osif_rsim_rx_decap = NULL;
4879 	vdev->osif_get_key = NULL;
4880 	vdev->osif_rx_mon = NULL;
4881 	vdev->osif_tx_free_ext = NULL;
4882 	vdev->osif_vdev = NULL;
4883 
4884 	vdev->delete.pending = 0;
4885 	vdev->safemode = 0;
4886 	vdev->drop_unenc = 1;
4887 	vdev->sec_type = cdp_sec_type_none;
4888 #ifdef notyet
4889 	vdev->filters_num = 0;
4890 #endif
4891 
4892 	qdf_mem_copy(
4893 		&vdev->mac_addr.raw[0], vdev_mac_addr, QDF_MAC_ADDR_SIZE);
4894 
4895 	/* TODO: Initialize default HTT meta data that will be used in
4896 	 * TCL descriptors for packets transmitted from this VDEV
4897 	 */
4898 
4899 	TAILQ_INIT(&vdev->peer_list);
4900 	dp_peer_multipass_list_init(vdev);
4901 
4902 	if ((soc->intr_mode == DP_INTR_POLL) &&
4903 	    wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx) != 0) {
4904 		if ((pdev->vdev_count == 0) ||
4905 		    (wlan_op_mode_monitor == vdev->opmode))
4906 			qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
4907 	}
4908 
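	/* Publish the vdev in the soc-level map used for vdev_id lookups */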
4909 	soc->vdev_id_map[vdev_id] = vdev;
4910 
4911 	if (wlan_op_mode_monitor == vdev->opmode) {
4912 		pdev->monitor_vdev = vdev;
4913 		return (struct cdp_vdev *)vdev;
4914 	}
4915 
4916 	vdev->tx_encap_type = wlan_cfg_pkt_type(soc->wlan_cfg_ctx);
4917 	vdev->rx_decap_type = wlan_cfg_pkt_type(soc->wlan_cfg_ctx);
4918 	vdev->dscp_tid_map_id = 0;
4919 	vdev->mcast_enhancement_en = 0;
4920 	vdev->raw_mode_war = wlan_cfg_get_raw_mode_war(soc->wlan_cfg_ctx);
4921 	vdev->prev_tx_enq_tstamp = 0;
4922 	vdev->prev_rx_deliver_tstamp = 0;
4923 
4924 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
4925 	/* add this vdev into the pdev's list */
4926 	TAILQ_INSERT_TAIL(&pdev->vdev_list, vdev, vdev_list_elem);
4927 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
4928 	pdev->vdev_count++;
4929 
4930 	if (wlan_op_mode_sta != vdev->opmode)
4931 		vdev->ap_bridge_enabled = true;
4932 	else
4933 		vdev->ap_bridge_enabled = false;
4934 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
4935 		  "%s: wlan_cfg_ap_bridge_enabled %d",
4936 		  __func__, vdev->ap_bridge_enabled);
4937 
4938 	dp_tx_vdev_attach(vdev);
4939 
4940 	if (pdev->vdev_count == 1)
4941 		dp_lro_hash_setup(soc, pdev);
4942 
4943 	dp_info("Created vdev %pK (%pM)", vdev, vdev->mac_addr.raw);
4944 	DP_STATS_INIT(vdev);
4945 
4946 	if (wlan_op_mode_sta == vdev->opmode)
4947 		dp_peer_create_wifi3((struct cdp_soc_t *)soc, vdev_id,
4948 				     vdev->mac_addr.raw);
4949 
4950 	return (struct cdp_vdev *)vdev;
4951 
4952 fail0:
4953 	return NULL;
4954 }
4955 
4956 /**
4957  * dp_vdev_register_wifi3() - Register VDEV operations from osif layer
4958  * @soc: Datapath soc handle
4959  * @vdev_id: id of Datapath VDEV handle
4960  * @osif_vdev: OSIF vdev handle
4961  * @txrx_ops: Tx and Rx operations
4962  *
4963  * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_FAILURE on failure
4964  */
4965 static QDF_STATUS dp_vdev_register_wifi3(struct cdp_soc_t *soc,
4966 					 uint8_t vdev_id,
4967 					 ol_osif_vdev_handle osif_vdev,
4968 					 struct ol_txrx_ops *txrx_ops)
4969 {
4970 	struct dp_vdev *vdev =
4971 		dp_get_vdev_from_soc_vdev_id_wifi3((struct dp_soc *)soc,
4972 						   vdev_id);
4973 
4974 	if (!vdev)
4975 		return QDF_STATUS_E_FAILURE;
4976 
4977 	vdev->osif_vdev = osif_vdev;
4978 	vdev->osif_rx = txrx_ops->rx.rx;
4979 	vdev->osif_rx_stack = txrx_ops->rx.rx_stack;
4980 	vdev->osif_rx_flush = txrx_ops->rx.rx_flush;
4981 	vdev->osif_gro_flush = txrx_ops->rx.rx_gro_flush;
4982 	vdev->osif_rsim_rx_decap = txrx_ops->rx.rsim_rx_decap;
4983 	vdev->osif_get_key = txrx_ops->get_key;
4984 	vdev->osif_rx_mon = txrx_ops->rx.mon;
4985 	vdev->osif_tx_free_ext = txrx_ops->tx.tx_free_ext;
4986 	vdev->tx_comp = txrx_ops->tx.tx_comp;
4987 #ifdef notyet
4988 #if ATH_SUPPORT_WAPI
4989 	vdev->osif_check_wai = txrx_ops->rx.wai_check;
4990 #endif
4991 #endif
4992 #ifdef UMAC_SUPPORT_PROXY_ARP
4993 	vdev->osif_proxy_arp = txrx_ops->proxy_arp;
4994 #endif
4995 	vdev->me_convert = txrx_ops->me_convert;
4996 
4997 	/* TODO: Enable the following once Tx code is integrated */
4998 	if (vdev->mesh_vdev)
4999 		txrx_ops->tx.tx = dp_tx_send_mesh;
5000 	else
5001 		txrx_ops->tx.tx = dp_tx_send;
5002 
5003 	txrx_ops->tx.tx_exception = dp_tx_send_exception;
5004 
5005 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW,
5006 		"DP Vdev Register success");
5007 
5008 	return QDF_STATUS_SUCCESS;
5009 }
5010 
5011 /**
5012  * dp_peer_flush_ast_entry() - Forcibly flush all AST entries of a peer
5013  * @soc: Datapath soc handle
5014  * @peer: Datapath peer handle
5015  * @peer_id: Peer ID
5016  * @vdev_id: Vdev ID
5017  *
5018  * Return: void
5019  */
5020 static void dp_peer_flush_ast_entry(struct dp_soc *soc,
5021 				    struct dp_peer *peer,
5022 				    uint16_t peer_id,
5023 				    uint8_t vdev_id)
5024 {
5025 	struct dp_ast_entry *ase, *tmp_ase;
5026 
5027 	if (soc->is_peer_map_unmap_v2) {
5028 		DP_PEER_ITERATE_ASE_LIST(peer, ase, tmp_ase) {
5029 			dp_rx_peer_unmap_handler(soc, peer_id, vdev_id,
5030 						 ase->mac_addr.raw, 1);
5034 		}
5035 	}
5036 }
5037 
5038 /**
5039  * dp_vdev_flush_peers() - Forcibly flush peers of a vdev
5040  * @vdev: Datapath VDEV handle
5041  * @unmap_only: Flag to indicate "only unmap"
5042  *
5043  * Return: void
5044  */
5045 static void dp_vdev_flush_peers(struct cdp_vdev *vdev_handle, bool unmap_only)
5046 {
5047 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
5048 	struct dp_pdev *pdev = vdev->pdev;
5049 	struct dp_soc *soc = pdev->soc;
5050 	struct dp_peer *peer;
5051 	uint16_t *peer_ids;
5052 	struct dp_peer **peer_array = NULL;
5053 	uint8_t i = 0, j = 0;
5054 	uint8_t m = 0, n = 0;
5055 
5056 	peer_ids = qdf_mem_malloc(soc->max_peers * sizeof(peer_ids[0]));
5057 	if (!peer_ids) {
5058 		dp_err("DP alloc failure - unable to flush peers");
5059 		return;
5060 	}
5061 
5062 	if (!unmap_only) {
5063 		peer_array = qdf_mem_malloc(
5064 				soc->max_peers * sizeof(struct dp_peer *));
5065 		if (!peer_array) {
5066 			qdf_mem_free(peer_ids);
5067 			dp_err("DP alloc failure - unable to flush peers");
5068 			return;
5069 		}
5070 	}
5071 
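	/*
	 * Snapshot the peer handles and peer ids under peer_ref_mutex;
	 * the delete/unmap calls below run outside the lock.
	 */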
5072 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
5073 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
5074 		if (!unmap_only && n < soc->max_peers)
5075 			peer_array[n++] = peer;
5076 
5077 		for (i = 0; i < MAX_NUM_PEER_ID_PER_PEER; i++)
5078 			if (peer->peer_ids[i] != HTT_INVALID_PEER)
5079 				if (j < soc->max_peers)
5080 					peer_ids[j++] = peer->peer_ids[i];
5081 	}
5082 	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
5083 
5084 	/*
5085 	 * Even when the peer id is invalid, the peer must be flushed if
5086 	 * its valid flag is set; this is needed for the NAN + SSR case.
5087 	 */
5088 	if (!unmap_only) {
5089 		for (m = 0; m < n ; m++) {
5090 			peer = peer_array[m];
5091 
5092 			dp_info("peer: %pM is getting deleted",
5093 				peer->mac_addr.raw);
5094 			/* only if peer valid is true */
5095 			if (peer->valid)
5096 				dp_peer_delete_wifi3((struct cdp_soc_t *)soc,
5097 						     vdev->vdev_id,
5098 						     peer->mac_addr.raw, 0);
5099 		}
5100 		qdf_mem_free(peer_array);
5101 	}
5102 
5103 	for (i = 0; i < j ; i++) {
5104 		peer = __dp_peer_find_by_id(soc, peer_ids[i]);
5105 
5106 		if (!peer)
5107 			continue;
5108 
5109 		dp_info("peer: %pM is getting unmapped",
5110 			peer->mac_addr.raw);
5111 		/* free AST entries of peer */
5112 		dp_peer_flush_ast_entry(soc, peer,
5113 					peer_ids[i],
5114 					vdev->vdev_id);
5115 
5116 		dp_rx_peer_unmap_handler(soc, peer_ids[i],
5117 					 vdev->vdev_id,
5118 					 peer->mac_addr.raw, 0);
5119 	}
5120 
5121 	qdf_mem_free(peer_ids);
5122 	dp_info("Flushed peers for vdev object %pK", vdev);
5123 }
5124 
5125 /*
5126  * dp_vdev_detach_wifi3() - Detach txrx vdev
5127  * @cdp_soc: Datapath soc handle
5128  * @vdev_id: VDEV Id
5129  * @callback: Callback OL_IF on completion of detach
5130  * @cb_context:	Callback context
5131  *
 * Return: QDF_STATUS
5132  */
5133 static QDF_STATUS dp_vdev_detach_wifi3(struct cdp_soc_t *cdp_soc,
5134 				       uint8_t vdev_id,
5135 				       ol_txrx_vdev_delete_cb callback,
5136 				       void *cb_context)
5137 {
5138 	struct dp_soc *soc = (struct dp_soc *)cdp_soc;
5139 	struct dp_pdev *pdev;
5140 	struct dp_neighbour_peer *peer = NULL;
5141 	struct dp_neighbour_peer *temp_peer = NULL;
5142 	struct dp_vdev *vdev = dp_get_vdev_from_soc_vdev_id_wifi3(soc, vdev_id);
5143 
5144 	if (!vdev)
5145 		return QDF_STATUS_E_FAILURE;
5146 
5147 	pdev = vdev->pdev;
5148 
5149 	soc->vdev_id_map[vdev->vdev_id] = NULL;
5150 
5151 	if (wlan_op_mode_sta == vdev->opmode)
5152 		dp_peer_delete_wifi3((struct cdp_soc_t *)soc, vdev->vdev_id,
5153 				     vdev->vap_self_peer->mac_addr.raw, 0);
5154 
5155 	/*
5156 	 * If Target is hung, flush all peers before detaching vdev
5157 	 * this will free all references held due to missing
5158 	 * unmap commands from Target
5159 	 */
5160 	if (!hif_is_target_ready(HIF_GET_SOFTC(soc->hif_handle)))
5161 		dp_vdev_flush_peers((struct cdp_vdev *)vdev, false);
5162 
5163 	/*
5164 	 * Use peer_ref_mutex while accessing peer_list, in case
5165 	 * a peer is in the process of being removed from the list.
5166 	 */
5167 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
5168 	/* check that the vdev has no peers allocated */
5169 	if (!TAILQ_EMPTY(&vdev->peer_list)) {
5170 		/* debug print - will be removed later */
5171 		dp_warn("not deleting vdev object %pK (%pM) until deletion finishes for all its peers",
5172 			vdev, vdev->mac_addr.raw);
5173 		/* indicate that the vdev needs to be deleted */
5174 		vdev->delete.pending = 1;
5175 		vdev->delete.callback = callback;
5176 		vdev->delete.context = cb_context;
5177 		qdf_spin_unlock_bh(&soc->peer_ref_mutex);
5178 		return QDF_STATUS_E_FAILURE;
5179 	}
5180 	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
5181 
5182 	if (wlan_op_mode_monitor == vdev->opmode)
5183 		goto free_vdev;
5184 
5185 	qdf_spin_lock_bh(&pdev->neighbour_peer_mutex);
5186 	if (!soc->hw_nac_monitor_support) {
5187 		TAILQ_FOREACH(peer, &pdev->neighbour_peers_list,
5188 			      neighbour_peer_list_elem) {
5189 			QDF_ASSERT(peer->vdev != vdev);
5190 		}
5191 	} else {
5192 		TAILQ_FOREACH_SAFE(peer, &pdev->neighbour_peers_list,
5193 				   neighbour_peer_list_elem, temp_peer) {
5194 			if (peer->vdev == vdev) {
5195 				TAILQ_REMOVE(&pdev->neighbour_peers_list, peer,
5196 					     neighbour_peer_list_elem);
5197 				qdf_mem_free(peer);
5198 			}
5199 		}
5200 	}
5201 	qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex);
5202 
5203 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
5204 	dp_tx_vdev_detach(vdev);
5205 	dp_rx_vdev_detach(vdev);
5206 	/* remove the vdev from its parent pdev's list */
5207 	TAILQ_REMOVE(&pdev->vdev_list, vdev, vdev_list_elem);
5208 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
5209 
5210 free_vdev:
5211 	if (wlan_op_mode_monitor == vdev->opmode)
5212 		pdev->monitor_vdev = NULL;
5213 
5214 	dp_info("deleting vdev object %pK (%pM)", vdev, vdev->mac_addr.raw);
5215 	qdf_mem_free(vdev);
5216 
5217 	if (callback)
5218 		callback(cb_context);
5219 
5220 	return QDF_STATUS_SUCCESS;
5221 }
5222 
5223 #ifdef FEATURE_AST
5224 /*
5225  * dp_peer_delete_ast_entries(): Delete all AST entries for a peer
5226  * @soc - datapath soc handle
5227  * @peer - datapath peer handle
5228  *
5229  * Delete the AST entries belonging to a peer
5230  */
5231 static inline void dp_peer_delete_ast_entries(struct dp_soc *soc,
5232 					      struct dp_peer *peer)
5233 {
5234 	struct dp_ast_entry *ast_entry, *temp_ast_entry;
5235 
5236 	DP_PEER_ITERATE_ASE_LIST(peer, ast_entry, temp_ast_entry)
5237 		dp_peer_del_ast(soc, ast_entry);
5238 
5239 	peer->self_ast_entry = NULL;
5240 }
5241 #else
5242 static inline void dp_peer_delete_ast_entries(struct dp_soc *soc,
5243 					      struct dp_peer *peer)
5244 {
5245 }
5246 #endif
5247 #if ATH_SUPPORT_WRAP
5248 static inline struct dp_peer *dp_peer_can_reuse(struct dp_vdev *vdev,
5249 						uint8_t *peer_mac_addr)
5250 {
5251 	struct dp_peer *peer;
5252 
5253 	peer = dp_peer_find_hash_find(vdev->pdev->soc, peer_mac_addr,
5254 				      0, vdev->vdev_id);
5255 	if (!peer)
5256 		return NULL;
5257 
5258 	if (peer->bss_peer)
5259 		return peer;
5260 
5261 	dp_peer_unref_delete(peer);
5262 	return NULL;
5263 }
5264 #else
5265 static inline struct dp_peer *dp_peer_can_reuse(struct dp_vdev *vdev,
5266 						uint8_t *peer_mac_addr)
5267 {
5268 	struct dp_peer *peer;
5269 
5270 	peer = dp_peer_find_hash_find(vdev->pdev->soc, peer_mac_addr,
5271 				      0, vdev->vdev_id);
5272 	if (!peer)
5273 		return NULL;
5274 
5275 	if (peer->bss_peer && (peer->vdev->vdev_id == vdev->vdev_id))
5276 		return peer;
5277 
5278 	dp_peer_unref_delete(peer);
5279 	return NULL;
5280 }
5281 #endif
5282 
5283 #ifdef FEATURE_AST
5284 static inline void dp_peer_ast_handle_roam_del(struct dp_soc *soc,
5285 					       struct dp_pdev *pdev,
5286 					       uint8_t *peer_mac_addr)
5287 {
5288 	struct dp_ast_entry *ast_entry;
5289 
5290 	qdf_spin_lock_bh(&soc->ast_lock);
5291 	if (soc->ast_override_support)
5292 		ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, peer_mac_addr,
5293 							    pdev->pdev_id);
5294 	else
5295 		ast_entry = dp_peer_ast_hash_find_soc(soc, peer_mac_addr);
5296 
5297 	if (ast_entry && ast_entry->next_hop && !ast_entry->delete_in_progress)
5298 		dp_peer_del_ast(soc, ast_entry);
5299 
5300 	qdf_spin_unlock_bh(&soc->ast_lock);
5301 }
5302 #endif
5303 
5304 #ifdef PEER_CACHE_RX_PKTS
5305 static inline void dp_peer_rx_bufq_resources_init(struct dp_peer *peer)
5306 {
5307 	qdf_spinlock_create(&peer->bufq_info.bufq_lock);
5308 	peer->bufq_info.thresh = DP_RX_CACHED_BUFQ_THRESH;
5309 	qdf_list_create(&peer->bufq_info.cached_bufq, DP_RX_CACHED_BUFQ_THRESH);
5310 }
5311 #else
5312 static inline void dp_peer_rx_bufq_resources_init(struct dp_peer *peer)
5313 {
5314 }
5315 #endif
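
/*
 * Usage note (illustrative): the RX buffer-queue resources created here are
 * released by dp_peer_rx_bufq_resources_deinit() further down in this file,
 * so the two calls bracket the peer's lifetime:
 *
 *	dp_peer_rx_bufq_resources_init(peer);	// during peer create
 *	...
 *	dp_peer_rx_bufq_resources_deinit(peer);	// during peer delete
 *
 * The placement of the calls is taken from this file; the sketch only
 * highlights the pairing.
 */
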
5316 
5317 #ifdef WLAN_FEATURE_STATS_EXT
5318 /*
5319  * dp_set_ignore_reo_status_cb() - set ignore reo status cb flag
5320  * @soc: dp soc handle
5321  * @flag: flag to set or reset
5322  *
5323  * Return: None
5324  */
5325 static inline void dp_set_ignore_reo_status_cb(struct dp_soc *soc,
5326 					       bool flag)
5327 {
5328 	soc->ignore_reo_status_cb = flag;
5329 }
5330 #else
5331 static inline void dp_set_ignore_reo_status_cb(struct dp_soc *soc,
5332 					       bool flag)
5333 {
5334 }
5335 #endif
5336 
5337 /*
5338  * dp_peer_create_wifi3() - attach txrx peer
5339  * @soc_hdl: Datapath soc handle
5340  * @vdev_id: id of vdev
5341  * @peer_mac_addr: Peer MAC address
5342  *
 * Return: DP peer handle on success, NULL on failure
5344  */
5345 static void *dp_peer_create_wifi3(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
5346 				  uint8_t *peer_mac_addr)
5347 {
5348 	struct dp_peer *peer;
5349 	int i;
5350 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
5351 	struct dp_pdev *pdev;
5352 	struct cdp_peer_cookie peer_cookie;
5353 	enum cdp_txrx_ast_entry_type ast_type = CDP_TXRX_AST_TYPE_STATIC;
5354 	struct dp_vdev *vdev = dp_get_vdev_from_soc_vdev_id_wifi3(soc, vdev_id);
5355 
5356 	if (!vdev || !peer_mac_addr)
5357 		return NULL;
5358 
5359 	pdev = vdev->pdev;
5360 	soc = pdev->soc;
5361 
5362 	/*
	 * If a peer entry with the given MAC address already exists,
	 * reuse that peer and reset its state.
5365 	 */
5366 	peer = dp_peer_can_reuse(vdev, peer_mac_addr);
5367 
5368 	if (peer) {
5369 		qdf_atomic_init(&peer->is_default_route_set);
5370 		dp_peer_cleanup(vdev, peer, true);
5371 
5372 		qdf_spin_lock_bh(&soc->ast_lock);
5373 		dp_peer_delete_ast_entries(soc, peer);
5374 		peer->delete_in_progress = false;
5375 		qdf_spin_unlock_bh(&soc->ast_lock);
5376 
5377 		if ((vdev->opmode == wlan_op_mode_sta) &&
5378 		    !qdf_mem_cmp(peer_mac_addr, &vdev->mac_addr.raw[0],
5379 		     QDF_MAC_ADDR_SIZE)) {
5380 			ast_type = CDP_TXRX_AST_TYPE_SELF;
5381 		}
5382 		dp_peer_add_ast(soc, peer, peer_mac_addr, ast_type, 0);
		/*
		 * Control path maintains a node count which is incremented
		 * for every new peer create command. Since a new peer is not
		 * being created and the earlier reference is reused here,
		 * a peer_unref_delete event is sent to the control path to
		 * increment the count back.
		 */
5390 		if (soc->cdp_soc.ol_ops->peer_unref_delete) {
5391 			soc->cdp_soc.ol_ops->peer_unref_delete(
5392 				soc->ctrl_psoc,
5393 				pdev->pdev_id,
5394 				peer->mac_addr.raw, vdev->mac_addr.raw,
5395 				vdev->opmode);
5396 		}
5397 
5398 		dp_local_peer_id_alloc(pdev, peer);
5399 
5400 		qdf_spinlock_create(&peer->peer_info_lock);
5401 		dp_peer_rx_bufq_resources_init(peer);
5402 
5403 		DP_STATS_INIT(peer);
5404 		DP_STATS_UPD(peer, rx.avg_rssi, INVALID_RSSI);
5405 
5406 		return (void *)peer;
5407 	} else {
5408 		/*
5409 		 * When a STA roams from RPTR AP to ROOT AP and vice versa, we
5410 		 * need to remove the AST entry which was earlier added as a WDS
5411 		 * entry.
		 * If an AST entry exists, but no peer entry exists with the
		 * given MAC address, we can deduce that it is a WDS entry.
5414 		 */
5415 		dp_peer_ast_handle_roam_del(soc, pdev, peer_mac_addr);
5416 	}
5417 
5418 #ifdef notyet
5419 	peer = (struct dp_peer *)qdf_mempool_alloc(soc->osdev,
5420 		soc->mempool_ol_ath_peer);
5421 #else
5422 	peer = (struct dp_peer *)qdf_mem_malloc(sizeof(*peer));
5423 #endif
5424 
5425 	if (!peer)
5426 		return NULL; /* failure */
5427 
5428 	qdf_mem_zero(peer, sizeof(struct dp_peer));
5429 
5430 	TAILQ_INIT(&peer->ast_entry_list);
5431 
5432 	/* store provided params */
5433 	peer->vdev = vdev;
5434 
5435 	if ((vdev->opmode == wlan_op_mode_sta) &&
5436 	    !qdf_mem_cmp(peer_mac_addr, &vdev->mac_addr.raw[0],
5437 			 QDF_MAC_ADDR_SIZE)) {
5438 		ast_type = CDP_TXRX_AST_TYPE_SELF;
5439 	}
5440 	dp_peer_add_ast(soc, peer, peer_mac_addr, ast_type, 0);
5441 	qdf_spinlock_create(&peer->peer_info_lock);
5442 
5443 	dp_peer_rx_bufq_resources_init(peer);
5444 
5445 	qdf_mem_copy(
5446 		&peer->mac_addr.raw[0], peer_mac_addr, QDF_MAC_ADDR_SIZE);
5447 
5448 	/* initialize the peer_id */
5449 	for (i = 0; i < MAX_NUM_PEER_ID_PER_PEER; i++)
5450 		peer->peer_ids[i] = HTT_INVALID_PEER;
5451 
5452 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
5453 
5454 	qdf_atomic_init(&peer->ref_cnt);
5455 
5456 	/* keep one reference for attach */
5457 	qdf_atomic_inc(&peer->ref_cnt);
5458 
5459 	/* add this peer into the vdev's list */
5460 	if (wlan_op_mode_sta == vdev->opmode)
5461 		TAILQ_INSERT_HEAD(&vdev->peer_list, peer, peer_list_elem);
5462 	else
5463 		TAILQ_INSERT_TAIL(&vdev->peer_list, peer, peer_list_elem);
5464 
5465 	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
5466 
5467 	/* TODO: See if hash based search is required */
5468 	dp_peer_find_hash_add(soc, peer);
5469 
5470 	/* Initialize the peer state */
5471 	peer->state = OL_TXRX_PEER_STATE_DISC;
5472 
5473 	dp_info("vdev %pK created peer %pK (%pM) ref_cnt: %d",
5474 		vdev, peer, peer->mac_addr.raw,
5475 		qdf_atomic_read(&peer->ref_cnt));
5476 	/*
	 * For every peer MAP message, check and set the bss_peer flag
5478 	 */
5479 	if (qdf_mem_cmp(peer->mac_addr.raw, vdev->mac_addr.raw,
5480 			QDF_MAC_ADDR_SIZE) == 0 &&
5481 			(wlan_op_mode_sta != vdev->opmode)) {
5482 		dp_info("vdev bss_peer!!");
5483 		peer->bss_peer = 1;
5484 		vdev->vap_bss_peer = peer;
5485 	}
5486 
5487 	if (wlan_op_mode_sta == vdev->opmode &&
5488 	    qdf_mem_cmp(peer->mac_addr.raw, vdev->mac_addr.raw,
5489 			QDF_MAC_ADDR_SIZE) == 0) {
5490 		vdev->vap_self_peer = peer;
5491 	}
5492 
5493 	if (wlan_op_mode_sta == vdev->opmode &&
5494 	    qdf_mem_cmp(peer->mac_addr.raw, vdev->mac_addr.raw,
5495 			QDF_MAC_ADDR_SIZE) != 0) {
5496 		dp_set_ignore_reo_status_cb(soc, false);
5497 	}
5498 
5499 	for (i = 0; i < DP_MAX_TIDS; i++)
5500 		qdf_spinlock_create(&peer->rx_tid[i].tid_lock);
5501 
5502 	peer->valid = 1;
5503 	dp_local_peer_id_alloc(pdev, peer);
5504 	DP_STATS_INIT(peer);
5505 	DP_STATS_UPD(peer, rx.avg_rssi, INVALID_RSSI);
5506 
5507 	qdf_mem_copy(peer_cookie.mac_addr, peer->mac_addr.raw,
5508 		     QDF_MAC_ADDR_SIZE);
5509 	peer_cookie.ctx = NULL;
5510 	peer_cookie.pdev_id = pdev->pdev_id;
5511 	peer_cookie.cookie = pdev->next_peer_cookie++;
5512 #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
5513 	dp_wdi_event_handler(WDI_EVENT_PEER_CREATE, pdev->soc,
5514 			     (void *)&peer_cookie,
5515 			     peer->peer_ids[0], WDI_NO_VAL, pdev->pdev_id);
5516 #endif
5517 	if (soc->wlanstats_enabled) {
5518 		if (!peer_cookie.ctx) {
5519 			pdev->next_peer_cookie--;
5520 			qdf_err("Failed to initialize peer rate stats");
5521 		} else {
5522 			peer->wlanstats_ctx = (struct cdp_peer_rate_stats_ctx *)
5523 						peer_cookie.ctx;
5524 		}
5525 	}
5526 	return (void *)peer;
5527 }
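
/*
 * Usage sketch (illustrative only): a control-path caller typically creates
 * the peer and then completes its datapath setup, e.g.:
 *
 *	void *peer = dp_peer_create_wifi3(soc_hdl, vdev_id, sta_mac);
 *
 *	if (peer)
 *		dp_peer_setup_wifi3(soc_hdl, vdev_id, sta_mac);
 *
 * soc_hdl, vdev_id and sta_mac are assumed to come from the caller's attach
 * path; dp_peer_setup_wifi3() is defined later in this file.
 */
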
5528 
5529 /*
5530  * dp_vdev_get_default_reo_hash() - get reo dest ring and hash values for a vdev
5531  * @vdev: Datapath VDEV handle
5532  * @reo_dest: pointer to default reo_dest ring for vdev to be populated
5533  * @hash_based: pointer to hash value (enabled/disabled) to be populated
5534  *
5535  * Return: None
5536  */
5537 static
5538 void dp_vdev_get_default_reo_hash(struct dp_vdev *vdev,
5539 				  enum cdp_host_reo_dest_ring *reo_dest,
5540 				  bool *hash_based)
5541 {
5542 	struct dp_soc *soc;
5543 	struct dp_pdev *pdev;
5544 
5545 	pdev = vdev->pdev;
5546 	soc = pdev->soc;
5547 	/*
	 * Hash-based steering is disabled for radios which are offloaded
	 * to NSS
5550 	 */
5551 	if (!wlan_cfg_get_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx))
5552 		*hash_based = wlan_cfg_is_rx_hash_enabled(soc->wlan_cfg_ctx);
5553 
5554 	/*
	 * The line below ensures the proper reo_dest ring is chosen
	 * for cases where the Toeplitz hash cannot be generated
	 * (e.g. non-TCP/UDP packets)
5557 	 */
5558 	*reo_dest = pdev->reo_dest;
5559 }
5560 
5561 #ifdef IPA_OFFLOAD
5562 /**
5563  * dp_is_vdev_subtype_p2p() - Check if the subtype for vdev is P2P
5564  * @vdev: Virtual device
5565  *
5566  * Return: true if the vdev is of subtype P2P
5567  *	   false if the vdev is of any other subtype
5568  */
5569 static inline bool dp_is_vdev_subtype_p2p(struct dp_vdev *vdev)
5570 {
5571 	if (vdev->subtype == wlan_op_subtype_p2p_device ||
5572 	    vdev->subtype == wlan_op_subtype_p2p_cli ||
5573 	    vdev->subtype == wlan_op_subtype_p2p_go)
5574 		return true;
5575 
5576 	return false;
5577 }
5578 
5579 /*
5580  * dp_peer_setup_get_reo_hash() - get reo dest ring and hash values for a peer
5581  * @vdev: Datapath VDEV handle
5582  * @reo_dest: pointer to default reo_dest ring for vdev to be populated
5583  * @hash_based: pointer to hash value (enabled/disabled) to be populated
5584  *
 * If IPA is enabled in the ini, disable hash-based steering for SAP mode
 * and use the default reo_dest ring for RX. Use the config values for
 * other modes.
 *
 * Return: None
5588  */
5589 static void dp_peer_setup_get_reo_hash(struct dp_vdev *vdev,
5590 				       enum cdp_host_reo_dest_ring *reo_dest,
5591 				       bool *hash_based)
5592 {
5593 	struct dp_soc *soc;
5594 	struct dp_pdev *pdev;
5595 
5596 	pdev = vdev->pdev;
5597 	soc = pdev->soc;
5598 
5599 	dp_vdev_get_default_reo_hash(vdev, reo_dest, hash_based);
5600 
5601 	/* For P2P-GO interfaces we do not need to change the REO
5602 	 * configuration even if IPA config is enabled
5603 	 */
5604 	if (dp_is_vdev_subtype_p2p(vdev))
5605 		return;
5606 
5607 	/*
5608 	 * If IPA is enabled, disable hash-based flow steering and set
5609 	 * reo_dest_ring_4 as the REO ring to receive packets on.
5610 	 * IPA is configured to reap reo_dest_ring_4.
5611 	 *
	 * Note: REO DST indexes run from 0 to 3, while the
	 * cdp_host_reo_dest_ring enum values run from 1 to 4.
5614 	 * Hence, *reo_dest = IPA_REO_DEST_RING_IDX + 1
5615 	 */
5616 	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) {
5617 		if (vdev->opmode == wlan_op_mode_ap) {
5618 			*reo_dest = IPA_REO_DEST_RING_IDX + 1;
5619 			*hash_based = 0;
5620 		} else if (vdev->opmode == wlan_op_mode_sta &&
5621 			   dp_ipa_is_mdm_platform()) {
5622 			*reo_dest = IPA_REO_DEST_RING_IDX + 1;
5623 		}
5624 	}
5625 }
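
/*
 * Worked example (illustrative only): if IPA_REO_DEST_RING_IDX were 3, the
 * 0-based index of the fourth REO DST ring, then for an AP vdev with IPA
 * enabled the code above yields:
 *
 *	*reo_dest = IPA_REO_DEST_RING_IDX + 1;	// == 4 in the 1-based
 *						// cdp_host_reo_dest_ring enum
 *	*hash_based = 0;			// no Toeplitz steering
 *
 * The concrete value 3 is an assumption; only the +1 offset between the two
 * numbering schemes comes from the code above.
 */
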
5626 
5627 #else
5628 
5629 /*
5630  * dp_peer_setup_get_reo_hash() - get reo dest ring and hash values for a peer
5631  * @vdev: Datapath VDEV handle
5632  * @reo_dest: pointer to default reo_dest ring for vdev to be populated
5633  * @hash_based: pointer to hash value (enabled/disabled) to be populated
5634  *
5635  * Use system config values for hash based steering.
5636  * Return: None
5637  */
5638 
5639 static void dp_peer_setup_get_reo_hash(struct dp_vdev *vdev,
5640 				       enum cdp_host_reo_dest_ring *reo_dest,
5641 				       bool *hash_based)
5642 {
5643 	dp_vdev_get_default_reo_hash(vdev, reo_dest, hash_based);
5644 }
5645 #endif /* IPA_OFFLOAD */
5646 
5647 /*
5648  * dp_peer_setup_wifi3() - initialize the peer
5649  * @soc_hdl: soc handle object
 * @vdev_id: vdev_id of vdev object
5651  * @peer_mac: Peer's mac address
5652  *
5653  * Return: QDF_STATUS
5654  */
5655 static QDF_STATUS
5656 dp_peer_setup_wifi3(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
5657 		    uint8_t *peer_mac)
5658 {
5659 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
5660 	struct dp_pdev *pdev;
5661 	bool hash_based = 0;
5662 	enum cdp_host_reo_dest_ring reo_dest;
5663 	QDF_STATUS status = QDF_STATUS_SUCCESS;
5664 	struct dp_vdev *vdev =
5665 			dp_get_vdev_from_soc_vdev_id_wifi3(soc, vdev_id);
5666 	struct dp_peer *peer =
5667 			dp_peer_find_hash_find(soc, peer_mac, 0, vdev_id);
5668 
5669 	if (!vdev || !peer || peer->delete_in_progress) {
5670 		status = QDF_STATUS_E_FAILURE;
5671 		goto fail;
5672 	}
5673 
5674 	pdev = vdev->pdev;
5675 	dp_peer_setup_get_reo_hash(vdev, &reo_dest, &hash_based);
5676 
	dp_info("pdev: %d vdev: %d opmode:%u hash-based-steering:%d default-reo_dest:%u",
5678 		pdev->pdev_id, vdev->vdev_id,
5679 		vdev->opmode, hash_based, reo_dest);
5680 
5681 
5682 	/*
	 * There are corner cases where AD1 = AD2 = "VAP's address",
	 * i.e. both devices have the same MAC address. In these
	 * cases we want such pkts to be processed by the NULL Q handler,
	 * which is the REO2TCL ring. For this reason we should
	 * not set up reo_queues and the default route for the bss_peer.
5688 	 */
5689 	if (peer->bss_peer && vdev->opmode == wlan_op_mode_ap) {
5690 		status = QDF_STATUS_E_FAILURE;
5691 		goto fail;
5692 	}
5693 
5694 	if (soc->cdp_soc.ol_ops->peer_set_default_routing) {
5695 		/* TODO: Check the destination ring number to be passed to FW */
5696 		soc->cdp_soc.ol_ops->peer_set_default_routing(
5697 				soc->ctrl_psoc,
5698 				peer->vdev->pdev->pdev_id,
5699 				peer->mac_addr.raw,
5700 				peer->vdev->vdev_id, hash_based, reo_dest);
5701 	}
5702 
5703 	qdf_atomic_set(&peer->is_default_route_set, 1);
5704 
5705 	dp_peer_rx_init(pdev, peer);
5706 	dp_peer_tx_init(pdev, peer);
5707 
5708 	dp_peer_ppdu_delayed_ba_init(peer);
5709 
5710 fail:
5711 	if (peer)
5712 		dp_peer_unref_delete(peer);
5713 	return status;
5714 }
5715 
5716 /*
5717  * dp_cp_peer_del_resp_handler - Handle the peer delete response
5718  * @soc_hdl: Datapath SOC handle
5719  * @vdev_id: id of virtual device object
5720  * @mac_addr: Mac address of the peer
5721  *
5722  * Return: QDF_STATUS
5723  */
5724 static QDF_STATUS dp_cp_peer_del_resp_handler(struct cdp_soc_t *soc_hdl,
5725 					      uint8_t vdev_id,
5726 					      uint8_t *mac_addr)
5727 {
5728 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
5729 	struct dp_ast_entry  *ast_entry = NULL;
5730 	txrx_ast_free_cb cb = NULL;
5731 	void *cookie;
5732 	struct dp_vdev *vdev = dp_get_vdev_from_soc_vdev_id_wifi3(soc, vdev_id);
5733 
5734 	if (!vdev)
5735 		return QDF_STATUS_E_FAILURE;
5736 
5737 	qdf_spin_lock_bh(&soc->ast_lock);
5738 
5739 	if (soc->ast_override_support)
5740 		ast_entry =
5741 			dp_peer_ast_hash_find_by_pdevid(soc, mac_addr,
5742 							vdev->pdev->pdev_id);
5743 	else
5744 		ast_entry = dp_peer_ast_hash_find_soc(soc, mac_addr);
5745 
	/* In case of qwrap we have multiple BSS peers
	 * with the same mac address.
	 *
	 * An AST entry for this mac address will be created
	 * only for one peer, hence it will be NULL here
	 */
5752 	if (!ast_entry || ast_entry->peer || !ast_entry->delete_in_progress) {
5753 		qdf_spin_unlock_bh(&soc->ast_lock);
5754 		return QDF_STATUS_E_FAILURE;
5755 	}
5756 
5757 	if (ast_entry->is_mapped)
5758 		soc->ast_table[ast_entry->ast_idx] = NULL;
5759 
5760 	DP_STATS_INC(soc, ast.deleted, 1);
5761 	dp_peer_ast_hash_remove(soc, ast_entry);
5762 
5763 	cb = ast_entry->callback;
5764 	cookie = ast_entry->cookie;
5765 	ast_entry->callback = NULL;
5766 	ast_entry->cookie = NULL;
5767 
5768 	soc->num_ast_entries--;
5769 	qdf_spin_unlock_bh(&soc->ast_lock);
5770 
5771 	if (cb) {
5772 		cb(soc->ctrl_psoc,
5773 		   dp_soc_to_cdp_soc(soc),
5774 		   cookie,
5775 		   CDP_TXRX_AST_DELETED);
5776 	}
5777 	qdf_mem_free(ast_entry);
5778 
5779 	return QDF_STATUS_SUCCESS;
5780 }
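
/*
 * Callback-contract sketch (illustrative only): the txrx_ast_free_cb saved
 * in an AST entry is invoked after the entry has been unlinked, with the
 * cookie registered alongside it. A caller-side callback might look like:
 *
 *	static void my_ast_free_cb(struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
 *				   struct cdp_soc *cdp_soc, void *cookie,
 *				   enum cdp_ast_free_status status)
 *	{
 *		if (status == CDP_TXRX_AST_DELETED)
 *			qdf_mem_free(cookie);	// reclaim caller context
 *	}
 *
 * The parameter types are inferred from the invocation above and should be
 * treated as assumptions.
 */
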
5781 
5782 /*
5783  * dp_set_vdev_tx_encap_type() - set the encap type of the vdev
5784  * @vdev_handle: virtual device object
5785  * @htt_pkt_type: type of pkt
5786  *
5787  * Return: void
5788  */
5789 static void dp_set_vdev_tx_encap_type(struct cdp_vdev *vdev_handle,
5790 	 enum htt_cmn_pkt_type val)
5791 {
5792 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
5793 	vdev->tx_encap_type = val;
5794 }
5795 
5796 /*
5797  * dp_set_vdev_rx_decap_type() - set the decap type of the vdev
5798  * @vdev_handle: virtual device object
5799  * @htt_pkt_type: type of pkt
5800  *
5801  * Return: void
5802  */
5803 static void dp_set_vdev_rx_decap_type(struct cdp_vdev *vdev_handle,
5804 	 enum htt_cmn_pkt_type val)
5805 {
5806 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
5807 	vdev->rx_decap_type = val;
5808 }
5809 
5810 /*
5811  * dp_set_ba_aging_timeout() - set ba aging timeout per AC
5812  * @txrx_soc: cdp soc handle
5813  * @ac: Access category
5814  * @value: timeout value in millisec
5815  *
5816  * Return: void
5817  */
5818 static void dp_set_ba_aging_timeout(struct cdp_soc_t *txrx_soc,
5819 				    uint8_t ac, uint32_t value)
5820 {
5821 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
5822 
5823 	hal_set_ba_aging_timeout(soc->hal_soc, ac, value);
5824 }
5825 
5826 /*
5827  * dp_get_ba_aging_timeout() - get ba aging timeout per AC
5828  * @txrx_soc: cdp soc handle
5829  * @ac: access category
5830  * @value: timeout value in millisec
5831  *
5832  * Return: void
5833  */
5834 static void dp_get_ba_aging_timeout(struct cdp_soc_t *txrx_soc,
5835 				    uint8_t ac, uint32_t *value)
5836 {
5837 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
5838 
5839 	hal_get_ba_aging_timeout(soc->hal_soc, ac, value);
5840 }
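
/*
 * Usage sketch (illustrative only): the BA aging timeout is programmed and
 * read back per access category, e.g. assuming AC 0 is best effort:
 *
 *	uint32_t ba_timeout = 0;
 *
 *	dp_set_ba_aging_timeout(txrx_soc, 0, 1000);	// 1000 ms for AC 0
 *	dp_get_ba_aging_timeout(txrx_soc, 0, &ba_timeout);
 *
 * The AC index and the 1000 ms value are assumptions for illustration.
 */
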
5841 
5842 /*
5843  * dp_set_pdev_reo_dest() - set the reo destination ring for this pdev
5844  * @pdev_handle: physical device object
5845  * @val: reo destination ring index (1 - 4)
5846  *
5847  * Return: void
5848  */
5849 static void dp_set_pdev_reo_dest(struct cdp_pdev *pdev_handle,
5850 	 enum cdp_host_reo_dest_ring val)
5851 {
5852 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
5853 
5854 	if (pdev)
5855 		pdev->reo_dest = val;
5856 }
5857 
5858 /*
5859  * dp_get_pdev_reo_dest() - get the reo destination for this pdev
5860  * @pdev_handle: physical device object
5861  *
5862  * Return: reo destination ring index
5863  */
5864 static enum cdp_host_reo_dest_ring
5865 dp_get_pdev_reo_dest(struct cdp_pdev *pdev_handle)
5866 {
5867 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
5868 
5869 	if (pdev)
5870 		return pdev->reo_dest;
5871 	else
5872 		return cdp_host_reo_dest_ring_unknown;
5873 }
5874 
5875 /*
5876  * dp_set_filter_neighbour_peers() - set filter neighbour peers for smart mesh
5877  * @pdev_handle: device object
5878  * @val: value to be set
5879  *
 * Return: 0 on success
5881  */
5882 static int dp_set_filter_neighbour_peers(struct cdp_pdev *pdev_handle,
5883 	 uint32_t val)
5884 {
5885 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
5886 
	/* Enable/Disable smart mesh filtering. This flag will be checked
	 * during rx processing to determine if packets are from NAC clients.
5889 	 */
5890 	pdev->filter_neighbour_peers = val;
5891 	return 0;
5892 }
5893 
5894 /*
5895  * dp_update_filter_neighbour_peers() - set neighbour peers(nac clients)
5896  * address for smart mesh filtering
5897  * @vdev_handle: virtual device object
5898  * @cmd: Add/Del command
5899  * @macaddr: nac client mac address
5900  *
 * Return: 1 on success, 0 on failure
5902  */
5903 static int dp_update_filter_neighbour_peers(struct cdp_vdev *vdev_handle,
5904 					    uint32_t cmd, uint8_t *macaddr)
5905 {
5906 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
5907 	struct dp_pdev *pdev = vdev->pdev;
5908 	struct dp_neighbour_peer *peer = NULL;
5909 
5910 	if (!macaddr)
5911 		goto fail0;
5912 
5913 	/* Store address of NAC (neighbour peer) which will be checked
5914 	 * against TA of received packets.
5915 	 */
5916 	if (cmd == DP_NAC_PARAM_ADD) {
5917 		peer = (struct dp_neighbour_peer *) qdf_mem_malloc(
5918 				sizeof(*peer));
5919 
5920 		if (!peer) {
5921 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
5922 				FL("DP neighbour peer node memory allocation failed"));
5923 			goto fail0;
5924 		}
5925 
5926 		qdf_mem_copy(&peer->neighbour_peers_macaddr.raw[0],
5927 			macaddr, QDF_MAC_ADDR_SIZE);
5928 		peer->vdev = vdev;
5929 
5930 		qdf_spin_lock_bh(&pdev->neighbour_peer_mutex);
5931 
5932 		/* add this neighbour peer into the list */
5933 		TAILQ_INSERT_TAIL(&pdev->neighbour_peers_list, peer,
5934 				neighbour_peer_list_elem);
5935 		qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex);
5936 
5937 		/* first neighbour */
5938 		if (!pdev->neighbour_peers_added) {
5939 			pdev->neighbour_peers_added = true;
5940 			dp_ppdu_ring_cfg(pdev);
5941 		}
5942 		return 1;
5943 
5944 	} else if (cmd == DP_NAC_PARAM_DEL) {
5945 		qdf_spin_lock_bh(&pdev->neighbour_peer_mutex);
5946 		TAILQ_FOREACH(peer, &pdev->neighbour_peers_list,
5947 				neighbour_peer_list_elem) {
5948 			if (!qdf_mem_cmp(&peer->neighbour_peers_macaddr.raw[0],
5949 				macaddr, QDF_MAC_ADDR_SIZE)) {
5950 				/* delete this peer from the list */
5951 				TAILQ_REMOVE(&pdev->neighbour_peers_list,
5952 					peer, neighbour_peer_list_elem);
5953 				qdf_mem_free(peer);
5954 				break;
5955 			}
5956 		}
5957 		/* last neighbour deleted */
5958 		if (TAILQ_EMPTY(&pdev->neighbour_peers_list)) {
5959 			pdev->neighbour_peers_added = false;
5960 			dp_ppdu_ring_cfg(pdev);
5961 		}
5962 
5963 		qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex);
5964 
5965 		if (!pdev->mcopy_mode && !pdev->neighbour_peers_added &&
5966 		    !pdev->enhanced_stats_en)
5967 			dp_ppdu_ring_reset(pdev);
5968 		return 1;
5969 
5970 	}
5971 
5972 fail0:
5973 	return 0;
5974 }
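
/*
 * Usage sketch (illustrative only): a smart-mesh NAC client is tracked by
 * enabling pdev-level filtering and then registering the client's address:
 *
 *	uint8_t nac_mac[QDF_MAC_ADDR_SIZE] = {
 *		0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };	// arbitrary example
 *
 *	dp_set_filter_neighbour_peers(pdev_handle, 1);
 *	dp_update_filter_neighbour_peers(vdev_handle, DP_NAC_PARAM_ADD,
 *					 nac_mac);
 *	...
 *	dp_update_filter_neighbour_peers(vdev_handle, DP_NAC_PARAM_DEL,
 *					 nac_mac);
 *
 * The call ordering is an assumption; the code above only requires macaddr
 * to be non-NULL and cmd to be one of the DP_NAC_PARAM_* values.
 */
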
5975 
5976 /*
5977  * dp_get_sec_type() - Get the security type
 * @peer: Datapath peer handle
 * @sec_idx: Security id (mcast, ucast)
 *
 * Return: sec_type (security type of the peer)
5982  */
5983 static int dp_get_sec_type(struct cdp_peer *peer, uint8_t sec_idx)
5984 {
5985 	struct dp_peer *dpeer = (struct dp_peer *)peer;
5986 
5987 	return dpeer->security[sec_idx].sec_type;
5988 }
5989 
5990 /*
5991  * dp_peer_authorize() - authorize txrx peer
 * @peer_handle: Datapath peer handle
 * @authorize: set to 1 to authorize the peer, 0 to deauthorize
 *
 * Return: None
5995  */
5996 static void dp_peer_authorize(struct cdp_peer *peer_handle, uint32_t authorize)
5997 {
5998 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
5999 	struct dp_soc *soc;
6000 
6001 	if (peer) {
6002 		soc = peer->vdev->pdev->soc;
6003 		qdf_spin_lock_bh(&soc->peer_ref_mutex);
6004 		peer->authorize = authorize ? 1 : 0;
6005 		qdf_spin_unlock_bh(&soc->peer_ref_mutex);
6006 	}
6007 }
6008 
6009 /*
 * dp_vdev_reset_peer() - Update peer related members in vdev
 *			  as the peer is about to be freed
 * @vdev: datapath vdev handle
 * @peer: datapath peer handle
6014  *
6015  * Return: None
6016  */
6017 static void dp_vdev_reset_peer(struct dp_vdev *vdev,
6018 			       struct dp_peer *peer)
6019 {
6020 	struct dp_peer *bss_peer = NULL;
6021 
6022 	if (!vdev) {
6023 		dp_err("vdev is NULL");
6024 	} else {
		if (vdev->vap_bss_peer == peer)
			vdev->vap_bss_peer = NULL;

		if (vdev->vap_bss_peer) {
			bss_peer = vdev->vap_bss_peer;
			DP_UPDATE_STATS(vdev, peer);
		}
6032 	}
6033 }
6034 
6035 /*
6036  * dp_peer_release_mem() - free dp peer handle memory
 * @soc: datapath soc handle
6038  * @pdev: datapath pdev handle
6039  * @peer: datapath peer handle
6040  * @vdev_opmode: Vdev operation mode
6041  * @vdev_mac_addr: Vdev Mac address
6042  *
6043  * Return: None
6044  */
6045 static void dp_peer_release_mem(struct dp_soc *soc,
6046 				struct dp_pdev *pdev,
6047 				struct dp_peer *peer,
6048 				enum wlan_op_mode vdev_opmode,
6049 				uint8_t *vdev_mac_addr)
6050 {
6051 	if (soc->cdp_soc.ol_ops->peer_unref_delete)
6052 		soc->cdp_soc.ol_ops->peer_unref_delete(
6053 				soc->ctrl_psoc,
6054 				pdev->pdev_id,
6055 				peer->mac_addr.raw, vdev_mac_addr,
6056 				vdev_opmode);
6057 
6058 	/*
	 * Peer AST list has to be empty here
6060 	 */
6061 	DP_AST_ASSERT(TAILQ_EMPTY(&peer->ast_entry_list));
6062 
6063 	qdf_mem_free(peer);
6064 }
6065 
6066 /**
6067  * dp_delete_pending_vdev() - check and process vdev delete
6068  * @pdev: DP specific pdev pointer
6069  * @vdev: DP specific vdev pointer
6070  * @vdev_id: vdev id corresponding to vdev
6071  *
6072  * This API does following:
6073  * 1) It releases tx flow pools buffers as vdev is
6074  *    going down and no peers are associated.
6075  * 2) It also detaches vdev before cleaning vdev (struct dp_vdev) memory
6076  */
6077 static void dp_delete_pending_vdev(struct dp_pdev *pdev, struct dp_vdev *vdev,
6078 				   uint8_t vdev_id)
6079 {
6080 	ol_txrx_vdev_delete_cb vdev_delete_cb = NULL;
6081 	void *vdev_delete_context = NULL;
6082 
6083 	vdev_delete_cb = vdev->delete.callback;
6084 	vdev_delete_context = vdev->delete.context;
6085 
	dp_info("deleting vdev object %pK (%pM) - its last peer is done",
6087 		vdev, vdev->mac_addr.raw);
6088 	/* all peers are gone, go ahead and delete it */
6089 	dp_tx_flow_pool_unmap_handler(pdev, vdev_id,
6090 			FLOW_TYPE_VDEV, vdev_id);
6091 	dp_tx_vdev_detach(vdev);
6092 
6093 	pdev->soc->vdev_id_map[vdev_id] = NULL;
6094 
6095 	if (wlan_op_mode_monitor == vdev->opmode) {
6096 		pdev->monitor_vdev = NULL;
6097 	} else {
6098 		qdf_spin_lock_bh(&pdev->vdev_list_lock);
6099 		TAILQ_REMOVE(&pdev->vdev_list, vdev, vdev_list_elem);
6100 		qdf_spin_unlock_bh(&pdev->vdev_list_lock);
6101 	}
6102 
6103 	dp_info("deleting vdev object %pK (%pM)",
6104 		vdev, vdev->mac_addr.raw);
6105 	qdf_mem_free(vdev);
6106 	vdev = NULL;
6107 
6108 	if (vdev_delete_cb)
6109 		vdev_delete_cb(vdev_delete_context);
6110 }
6111 
6112 /*
6113  * dp_peer_unref_delete() - unref and delete peer
 * @peer: Datapath peer handle
 *
 * Return: None
6116  */
6117 void dp_peer_unref_delete(struct dp_peer *peer)
6118 {
6119 	struct dp_vdev *vdev = peer->vdev;
6120 	struct dp_pdev *pdev = vdev->pdev;
6121 	struct dp_soc *soc = pdev->soc;
6122 	struct dp_peer *tmppeer;
6123 	int found = 0;
6124 	uint16_t peer_id;
6125 	uint16_t vdev_id;
6126 	bool vdev_delete = false;
6127 	struct cdp_peer_cookie peer_cookie;
6128 	enum wlan_op_mode vdev_opmode;
6129 	uint8_t vdev_mac_addr[QDF_MAC_ADDR_SIZE];
6130 
6131 
6132 	/*
6133 	 * Hold the lock all the way from checking if the peer ref count
6134 	 * is zero until the peer references are removed from the hash
6135 	 * table and vdev list (if the peer ref count is zero).
6136 	 * This protects against a new HL tx operation starting to use the
6137 	 * peer object just after this function concludes it's done being used.
6138 	 * Furthermore, the lock needs to be held while checking whether the
6139 	 * vdev's list of peers is empty, to make sure that list is not modified
6140 	 * concurrently with the empty check.
6141 	 */
6142 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
6143 	if (qdf_atomic_dec_and_test(&peer->ref_cnt)) {
6144 		peer_id = peer->peer_ids[0];
6145 		vdev_id = vdev->vdev_id;
6146 
6147 		/*
6148 		 * Make sure that the reference to the peer in
6149 		 * peer object map is removed
6150 		 */
6151 		if (peer_id != HTT_INVALID_PEER)
6152 			soc->peer_id_to_obj_map[peer_id] = NULL;
6153 
6154 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
6155 			  "Deleting peer %pK (%pM)", peer, peer->mac_addr.raw);
6156 
6157 		/* remove the reference to the peer from the hash table */
6158 		dp_peer_find_hash_remove(soc, peer);
6159 
6160 		qdf_spin_lock_bh(&soc->ast_lock);
6161 		if (peer->self_ast_entry) {
6162 			dp_peer_del_ast(soc, peer->self_ast_entry);
6163 			peer->self_ast_entry = NULL;
6164 		}
6165 		qdf_spin_unlock_bh(&soc->ast_lock);
6166 
6167 		TAILQ_FOREACH(tmppeer, &peer->vdev->peer_list, peer_list_elem) {
6168 			if (tmppeer == peer) {
6169 				found = 1;
6170 				break;
6171 			}
6172 		}
6173 
6174 		if (found) {
6175 			TAILQ_REMOVE(&peer->vdev->peer_list, peer,
6176 				peer_list_elem);
6177 		} else {
			/* Ignoring the remove operation as peer not found */
6179 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
6180 				  "peer:%pK not found in vdev:%pK peerlist:%pK",
6181 				  peer, vdev, &peer->vdev->peer_list);
6182 		}
6183 
6184 		/* send peer destroy event to upper layer */
6185 		qdf_mem_copy(peer_cookie.mac_addr, peer->mac_addr.raw,
6186 			     QDF_MAC_ADDR_SIZE);
6187 		peer_cookie.ctx = NULL;
6188 		peer_cookie.ctx = (struct cdp_stats_cookie *)
6189 					peer->wlanstats_ctx;
6190 #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
6191 		dp_wdi_event_handler(WDI_EVENT_PEER_DESTROY,
6192 				     pdev->soc,
6193 				     (void *)&peer_cookie,
6194 				     peer->peer_ids[0],
6195 				     WDI_NO_VAL,
6196 				     pdev->pdev_id);
6197 #endif
6198 		peer->wlanstats_ctx = NULL;
6199 
6200 		/* cleanup the peer data */
6201 		dp_peer_cleanup(vdev, peer, false);
6202 		/* reset this peer related info in vdev */
6203 		dp_vdev_reset_peer(vdev, peer);
6204 		/* save vdev related member in case vdev freed */
6205 		vdev_opmode = vdev->opmode;
6206 		qdf_mem_copy(vdev_mac_addr, vdev->mac_addr.raw,
6207 			     QDF_MAC_ADDR_SIZE);
6208 		/*
		 * Check whether the parent vdev is pending deletion
		 * and has no peers left.
6211 		 */
6212 		if (vdev->delete.pending && TAILQ_EMPTY(&vdev->peer_list))
6213 			vdev_delete = true;
6214 		/*
6215 		 * Now that there are no references to the peer, we can
6216 		 * release the peer reference lock.
6217 		 */
6218 		qdf_spin_unlock_bh(&soc->peer_ref_mutex);
6219 
6220 		/*
		 * Invoke soc.ol_ops->peer_unref_delete outside of
		 * peer_ref_mutex to avoid a potential deadlock.
6223 		 */
6224 		dp_peer_release_mem(soc, pdev, peer,
6225 				    vdev_opmode,
6226 				    vdev_mac_addr);
6227 		/*
		 * Delete the vdev if it was waiting for all of its peers
		 * to be deleted and that is now the case.
6230 		 */
6231 		if (vdev_delete)
6232 			dp_delete_pending_vdev(pdev, vdev, vdev_id);
6233 
6234 	} else {
6235 		qdf_spin_unlock_bh(&soc->peer_ref_mutex);
6236 	}
6237 }
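
/*
 * Reference-counting sketch (illustrative only): every lookup that returns
 * a peer via dp_peer_find_hash_find() takes a reference which the caller
 * must drop with dp_peer_unref_delete() once done:
 *
 *	struct dp_peer *peer;
 *
 *	peer = dp_peer_find_hash_find(soc, mac, 0, vdev_id);
 *	if (peer) {
 *		// ... use peer ...
 *		dp_peer_unref_delete(peer);	// may free the peer and, if
 *						// it was the last one, kick
 *						// off pending vdev deletion
 *	}
 *
 * This is the pattern used by the callers elsewhere in this file.
 */
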
6238 
6239 #ifdef PEER_CACHE_RX_PKTS
6240 static inline void dp_peer_rx_bufq_resources_deinit(struct dp_peer *peer)
6241 {
6242 	dp_rx_flush_rx_cached(peer, true);
6243 	qdf_list_destroy(&peer->bufq_info.cached_bufq);
6244 	qdf_spinlock_destroy(&peer->bufq_info.bufq_lock);
6245 }
6246 #else
6247 static inline void dp_peer_rx_bufq_resources_deinit(struct dp_peer *peer)
6248 {
6249 }
6250 #endif
6251 
6252 /*
 * dp_peer_delete_wifi3() - Delete txrx peer
 * @soc: soc handle
 * @vdev_id: id of dp handle
 * @peer_mac: mac of datapath PEER handle
 * @bitmap: bitmap indicating special handling of request.
 *
 * Return: QDF_STATUS
6259  */
6260 static QDF_STATUS dp_peer_delete_wifi3(struct cdp_soc_t *soc, uint8_t vdev_id,
6261 				       uint8_t *peer_mac, uint32_t bitmap)
6262 {
6263 	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)soc,
6264 						      peer_mac, 0, vdev_id);
6265 
6266 	/* Peer can be null for monitor vap mac address */
6267 	if (!peer) {
6268 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
6269 			  "%s: Invalid peer\n", __func__);
6270 		return QDF_STATUS_E_FAILURE;
6271 	}
6272 
6273 	peer->valid = 0;
6274 
6275 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
6276 		FL("peer %pK (%pM)"),  peer, peer->mac_addr.raw);
6277 
6278 	dp_local_peer_id_free(peer->vdev->pdev, peer);
6279 
6280 	dp_peer_rx_bufq_resources_deinit(peer);
6281 
6282 	qdf_spinlock_destroy(&peer->peer_info_lock);
6283 	dp_peer_multipass_list_remove(peer);
6284 
6285 	if (wlan_op_mode_sta == peer->vdev->opmode &&
6286 	    qdf_mem_cmp(peer->mac_addr.raw, peer->vdev->mac_addr.raw,
6287 			QDF_MAC_ADDR_SIZE) != 0) {
6288 		dp_set_ignore_reo_status_cb(peer->vdev->pdev->soc, true);
6289 	}
6290 
6291 	/*
6292 	 * Remove the reference added during peer_attach.
6293 	 * The peer will still be left allocated until the
6294 	 * PEER_UNMAP message arrives to remove the other
6295 	 * reference, added by the PEER_MAP message.
6296 	 */
6297 	dp_peer_unref_delete(peer);
6298 	/*
6299 	 * Remove the reference taken above
6300 	 */
6301 	dp_peer_unref_delete(peer);
6302 
6303 	return QDF_STATUS_SUCCESS;
6304 }
6305 
6306 /*
 * dp_get_vdev_mac_addr_wifi3() - Get MAC address of the vdev
6308  * @soc_hdl: Datapath soc handle
6309  * @vdev_id: virtual interface id
6310  *
6311  * Return: MAC address on success, NULL on failure.
6312  *
6313  */
6314 static uint8 *dp_get_vdev_mac_addr_wifi3(struct cdp_soc_t *soc_hdl,
6315 					 uint8_t vdev_id)
6316 {
6317 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
6318 	struct dp_vdev *vdev = dp_get_vdev_from_soc_vdev_id_wifi3(soc, vdev_id);
6319 
6320 	if (!vdev)
6321 		return NULL;
6322 
6323 	return vdev->mac_addr.raw;
6324 }
6325 
6326 /*
 * dp_vdev_set_wds() - Enable/disable WDS on the vdev
 * @soc: DP soc handle
 * @vdev_id: id of DP VDEV handle
 * @val: value to enable (1) or disable (0) WDS
 *
 * Return: QDF_STATUS
6333  */
6334 static int dp_vdev_set_wds(struct cdp_soc_t *soc, uint8_t vdev_id, uint32_t val)
6335 {
6336 	struct dp_vdev *vdev =
6337 		dp_get_vdev_from_soc_vdev_id_wifi3((struct dp_soc *)soc,
6338 						   vdev_id);
6339 
6340 	if (!vdev)
6341 		return QDF_STATUS_E_FAILURE;
6342 
6343 	vdev->wds_enabled = val;
6344 	return QDF_STATUS_SUCCESS;
6345 }
6346 
6347 /*
6348  * dp_get_mon_vdev_from_pdev_wifi3() - Get vdev id of monitor mode
6349  * @soc_hdl: datapath soc handle
6350  * @pdev_id: physical device instance id
6351  *
6352  * Return: virtual interface id
6353  */
6354 static uint8_t dp_get_mon_vdev_from_pdev_wifi3(struct cdp_soc_t *soc_hdl,
6355 					       uint8_t pdev_id)
6356 {
6357 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
6358 	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
6359 
6360 	if (qdf_unlikely(!pdev))
6361 		return -EINVAL;
6362 
6363 	return pdev->monitor_vdev->vdev_id;
6364 }
6365 
6366 static int dp_get_opmode(struct cdp_soc_t *soc_hdl, uint8_t vdev_id)
6367 {
6368 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
6369 	struct dp_vdev *vdev = dp_get_vdev_from_soc_vdev_id_wifi3(soc, vdev_id);
6370 
6371 	if (!vdev) {
6372 		dp_err("vdev for id %d is NULL", vdev_id);
6373 		return -EINVAL;
6374 	}
6375 
6376 	return vdev->opmode;
6377 }
6378 
6379 /**
6380  * dp_get_os_rx_handles_from_vdev_wifi3() - Get os rx handles for a vdev
6381  * @soc_hdl: ol_txrx_soc_handle handle
6382  * @vdev_id: vdev id for which os rx handles are needed
6383  * @stack_fn_p: pointer to stack function pointer
 * @osif_vdev_p: pointer to ol_osif_vdev_handle
6385  *
6386  * Return: void
6387  */
6388 static
6389 void dp_get_os_rx_handles_from_vdev_wifi3(struct cdp_soc_t *soc_hdl,
6390 					  uint8_t vdev_id,
6391 					  ol_txrx_rx_fp *stack_fn_p,
6392 					  ol_osif_vdev_handle *osif_vdev_p)
6393 {
6394 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
6395 	struct dp_vdev *vdev = dp_get_vdev_from_soc_vdev_id_wifi3(soc, vdev_id);
6396 
6397 	if (!vdev)
6398 		return;
6399 
6400 	*stack_fn_p = vdev->osif_rx_stack;
6401 	*osif_vdev_p = vdev->osif_vdev;
6402 }
6403 
6404 /**
6405  * dp_get_ctrl_pdev_from_vdev() - Get control pdev of vdev
6406  * @soc_hdl: datapath soc handle
6407  * @vdev_id: virtual device/interface id
6408  *
6409  * Return: Handle to control pdev
6410  */
6411 static struct cdp_cfg *dp_get_ctrl_pdev_from_vdev_wifi3(
6412 						struct cdp_soc_t *soc_hdl,
6413 						uint8_t vdev_id)
6414 {
6415 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
6416 	struct dp_vdev *vdev = dp_get_vdev_from_soc_vdev_id_wifi3(soc, vdev_id);
6417 	struct dp_pdev *pdev;
6418 
6419 	if (!vdev || !vdev->pdev)
6420 		return NULL;
6421 
6422 	pdev = vdev->pdev;
6423 	return (struct cdp_cfg *)pdev->wlan_cfg_ctx;
6424 }
6425 
6426 /**
6427  * dp_monitor_mode_ring_config() - Send the tlv config to fw for monitor buffer
6428  *                                 ring based on target
6429  * @soc: soc handle
6430  * @mac_for_pdev: pdev_id
6431  * @pdev: physical device handle
6432  * @ring_num: mac id
6433  * @htt_tlv_filter: tlv filter
6434  *
6435  * Return: zero on success, non-zero on failure
6436  */
6437 static inline
6438 QDF_STATUS dp_monitor_mode_ring_config(struct dp_soc *soc, uint8_t mac_for_pdev,
6439 				       struct dp_pdev *pdev, uint8_t ring_num,
6440 				       struct htt_rx_ring_tlv_filter htt_tlv_filter)
6441 {
6442 	QDF_STATUS status;
6443 
6444 	if (soc->wlan_cfg_ctx->rxdma1_enable)
6445 		status = htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
6446 					     pdev->rxdma_mon_buf_ring[ring_num]
6447 					     .hal_srng,
6448 					     RXDMA_MONITOR_BUF, RX_BUFFER_SIZE,
6449 					     &htt_tlv_filter);
6450 	else
6451 		status = htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
6452 					     pdev->rx_mac_buf_ring[ring_num]
6453 					     .hal_srng,
6454 					     RXDMA_BUF, RX_BUFFER_SIZE,
6455 					     &htt_tlv_filter);
6456 
6457 	return status;
6458 }
6459 
6460 static inline void
6461 dp_pdev_disable_mcopy_code(struct dp_pdev *pdev)
6462 {
6463 	pdev->mcopy_mode = 0;
6464 	qdf_nbuf_queue_free(&pdev->rx_ppdu_buf_q);
6465 }
6466 
6467 /**
6468  * dp_reset_monitor_mode() - Disable monitor mode
6469  * @pdev_handle: Datapath PDEV handle
6470  *
6471  * Return: QDF_STATUS
6472  */
6473 QDF_STATUS dp_reset_monitor_mode(struct cdp_pdev *pdev_handle)
6474 {
6475 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
6476 	struct htt_rx_ring_tlv_filter htt_tlv_filter;
6477 	struct dp_soc *soc = pdev->soc;
6478 	uint8_t pdev_id;
6479 	int mac_id;
6480 	QDF_STATUS status = QDF_STATUS_SUCCESS;
6481 
6482 	pdev_id = pdev->pdev_id;
6483 	soc = pdev->soc;
6484 
6485 	qdf_spin_lock_bh(&pdev->mon_lock);
6486 
6487 	qdf_mem_zero(&(htt_tlv_filter), sizeof(htt_tlv_filter));
6488 
6489 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
6490 		int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, pdev_id);
6491 
6492 		status = dp_monitor_mode_ring_config(soc, mac_for_pdev,
6493 						     pdev, mac_id,
6494 						     htt_tlv_filter);
6495 
6496 		if (status != QDF_STATUS_SUCCESS) {
6497 			dp_err("Failed to send tlv filter for monitor mode rings");
6498 			qdf_spin_unlock_bh(&pdev->mon_lock);
6499 			return status;
6500 		}
6501 
6502 		htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
6503 			    pdev->rxdma_mon_status_ring[mac_id].hal_srng,
6504 			    RXDMA_MONITOR_STATUS, RX_BUFFER_SIZE,
6505 			    &htt_tlv_filter);
6506 	}
6507 
6508 	pdev->monitor_vdev = NULL;
6509 	if (pdev->mcopy_mode)
6510 		dp_pdev_disable_mcopy_code(pdev);
6511 	pdev->monitor_configured = false;
6512 
6513 	qdf_spin_unlock_bh(&pdev->mon_lock);
6514 
6515 	return QDF_STATUS_SUCCESS;
6516 }
6517 
6518 /**
6519  * dp_set_nac() - set peer_nac
6520  * @soc: soc handle
6521  * @vdev_id: id of dp handle
6522  * @peer_mac: mac of datapath PEER handle
6523  *
6524  * Return: void
6525  */
6526 static void dp_set_nac(struct cdp_soc_t *soc, uint8_t vdev_id,
6527 		       uint8_t *peer_mac)
6528 {
6529 	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)soc,
6530 						       peer_mac, 0, vdev_id);
6531 
6532 	if (!peer || peer->delete_in_progress)
6533 		goto fail;
6534 
6535 	peer->nac = 1;
6536 
6537 fail:
6538 	if (peer)
6539 		dp_peer_unref_delete(peer);
6540 
6541 	return;
6542 }
6543 
6544 /**
6545  * dp_get_tx_pending() - read pending tx
6546  * @pdev_handle: Datapath PDEV handle
6547  *
6548  * Return: outstanding tx
6549  */
6550 static int dp_get_tx_pending(struct cdp_pdev *pdev_handle)
6551 {
6552 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
6553 
6554 	return qdf_atomic_read(&pdev->num_tx_outstanding);
6555 }
6556 
6557 /**
6558  * dp_get_peer_mac_from_peer_id() - get peer mac
 * @soc: CDP SoC handle
6560  * @peer_id: Peer ID
6561  * @peer_mac: MAC addr of PEER
6562  *
6563  * Return: QDF_STATUS
6564  */
6565 static QDF_STATUS dp_get_peer_mac_from_peer_id(struct cdp_soc_t *soc,
6566 					       uint32_t peer_id,
6567 					       uint8_t *peer_mac)
6568 {
6569 	struct dp_peer *peer;
6570 
6571 	if (soc && peer_mac) {
6572 		peer = dp_peer_find_by_id((struct dp_soc *)soc,
6573 					  (uint16_t)peer_id);
6574 		if (peer) {
6575 			qdf_mem_copy(peer_mac, peer->mac_addr.raw,
6576 				     QDF_MAC_ADDR_SIZE);
6577 			dp_peer_unref_del_find_by_id(peer);
6578 			return QDF_STATUS_SUCCESS;
6579 		}
6580 	}
6581 
6582 	return QDF_STATUS_E_FAILURE;
6583 }
6584 
6585 /**
6586  * dp_pdev_configure_monitor_rings() - configure monitor rings
 * @pdev: Datapath PDEV handle
6588  *
6589  * Return: QDF_STATUS
6590  */
6591 QDF_STATUS dp_pdev_configure_monitor_rings(struct dp_pdev *pdev)
6592 {
6593 	struct htt_rx_ring_tlv_filter htt_tlv_filter;
6594 	struct dp_soc *soc;
6595 	uint8_t pdev_id;
6596 	int mac_id;
6597 	QDF_STATUS status = QDF_STATUS_SUCCESS;
6598 
6599 	pdev_id = pdev->pdev_id;
6600 	soc = pdev->soc;
6601 
6602 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
6603 		"MODE[%x] FP[%02x|%02x|%02x] MO[%02x|%02x|%02x]",
6604 		pdev->mon_filter_mode, pdev->fp_mgmt_filter,
6605 		pdev->fp_ctrl_filter, pdev->fp_data_filter,
6606 		pdev->mo_mgmt_filter, pdev->mo_ctrl_filter,
6607 		pdev->mo_data_filter);
6608 
6609 	qdf_mem_zero(&(htt_tlv_filter), sizeof(htt_tlv_filter));
6610 
6611 	htt_tlv_filter.mpdu_start = 1;
6612 	htt_tlv_filter.msdu_start = 1;
6613 	htt_tlv_filter.packet = 1;
6614 	htt_tlv_filter.msdu_end = 1;
6615 	htt_tlv_filter.mpdu_end = 1;
6616 	htt_tlv_filter.packet_header = 1;
6617 	htt_tlv_filter.attention = 1;
6618 	htt_tlv_filter.ppdu_start = 0;
6619 	htt_tlv_filter.ppdu_end = 0;
6620 	htt_tlv_filter.ppdu_end_user_stats = 0;
6621 	htt_tlv_filter.ppdu_end_user_stats_ext = 0;
6622 	htt_tlv_filter.ppdu_end_status_done = 0;
6623 	htt_tlv_filter.header_per_msdu = 1;
6624 	htt_tlv_filter.enable_fp =
6625 		(pdev->mon_filter_mode & MON_FILTER_PASS) ? 1 : 0;
6626 	htt_tlv_filter.enable_md = 0;
6627 	htt_tlv_filter.enable_mo =
6628 		(pdev->mon_filter_mode & MON_FILTER_OTHER) ? 1 : 0;
6629 	htt_tlv_filter.fp_mgmt_filter = pdev->fp_mgmt_filter;
6630 	htt_tlv_filter.fp_ctrl_filter = pdev->fp_ctrl_filter;
6631 
6632 	if (pdev->mcopy_mode) {
6633 		htt_tlv_filter.fp_data_filter = 0;
6634 		htt_tlv_filter.mo_data_filter = 0;
6635 	} else {
6636 		htt_tlv_filter.fp_data_filter = pdev->fp_data_filter;
6637 		htt_tlv_filter.mo_data_filter = pdev->mo_data_filter;
6638 	}
6639 	htt_tlv_filter.mo_mgmt_filter = pdev->mo_mgmt_filter;
6640 	htt_tlv_filter.mo_ctrl_filter = pdev->mo_ctrl_filter;
6641 	htt_tlv_filter.offset_valid = false;
6642 
6643 	if ((pdev->rx_enh_capture_mode == CDP_RX_ENH_CAPTURE_MPDU) ||
6644 	    (pdev->rx_enh_capture_mode == CDP_RX_ENH_CAPTURE_MPDU_MSDU)) {
6645 		htt_tlv_filter.fp_mgmt_filter = 0;
6646 		htt_tlv_filter.fp_ctrl_filter = 0;
6647 		htt_tlv_filter.fp_data_filter = 0;
6648 		htt_tlv_filter.mo_mgmt_filter = 0;
6649 		htt_tlv_filter.mo_ctrl_filter = 0;
6650 		htt_tlv_filter.mo_data_filter = 0;
6651 	}
6652 
6653 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
6654 		int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, pdev_id);
6655 
6656 		status = dp_monitor_mode_ring_config(soc, mac_for_pdev,
6657 						     pdev, mac_id,
6658 						     htt_tlv_filter);
6659 
6660 		if (status != QDF_STATUS_SUCCESS) {
6661 			dp_err("Failed to send tlv filter for monitor mode rings");
6662 			return status;
6663 		}
6664 	}
6665 
6666 	qdf_mem_zero(&(htt_tlv_filter), sizeof(htt_tlv_filter));
6667 
6668 	htt_tlv_filter.mpdu_start = 1;
6669 	htt_tlv_filter.msdu_start = 0;
6670 	htt_tlv_filter.packet = 0;
6671 	htt_tlv_filter.msdu_end = 0;
6672 	htt_tlv_filter.mpdu_end = 0;
6673 	if ((pdev->rx_enh_capture_mode == CDP_RX_ENH_CAPTURE_MPDU) ||
6674 	    (pdev->rx_enh_capture_mode == CDP_RX_ENH_CAPTURE_MPDU_MSDU)) {
6675 		htt_tlv_filter.mpdu_end = 1;
6676 	}
6677 	htt_tlv_filter.attention = 0;
6678 	htt_tlv_filter.ppdu_start = 1;
6679 	htt_tlv_filter.ppdu_end = 1;
6680 	htt_tlv_filter.ppdu_end_user_stats = 1;
6681 	htt_tlv_filter.ppdu_end_user_stats_ext = 1;
6682 	htt_tlv_filter.ppdu_end_status_done = 1;
6683 	htt_tlv_filter.enable_fp = 1;
6684 	htt_tlv_filter.enable_md = 0;
6685 	htt_tlv_filter.enable_mo = 1;
6686 	if (pdev->mcopy_mode ||
6687 	    (pdev->rx_enh_capture_mode != CDP_RX_ENH_CAPTURE_DISABLED)) {
6688 		htt_tlv_filter.packet_header = 1;
6689 		if (pdev->rx_enh_capture_mode == CDP_RX_ENH_CAPTURE_MPDU) {
6690 			htt_tlv_filter.header_per_msdu = 0;
6691 			htt_tlv_filter.enable_mo = 0;
6692 		} else if (pdev->rx_enh_capture_mode ==
6693 			   CDP_RX_ENH_CAPTURE_MPDU_MSDU) {
6694 			bool is_rx_mon_proto_flow_tag_enabled =
6695 			    wlan_cfg_is_rx_mon_protocol_flow_tag_enabled(
6696 						    soc->wlan_cfg_ctx);
6697 			htt_tlv_filter.header_per_msdu = 1;
6698 			htt_tlv_filter.enable_mo = 0;
6699 			if (pdev->is_rx_enh_capture_trailer_enabled ||
6700 			    is_rx_mon_proto_flow_tag_enabled)
6701 				htt_tlv_filter.msdu_end = 1;
6702 		}
6703 	}
6704 
6705 	htt_tlv_filter.fp_mgmt_filter = FILTER_MGMT_ALL;
6706 	htt_tlv_filter.fp_ctrl_filter = FILTER_CTRL_ALL;
6707 	htt_tlv_filter.fp_data_filter = FILTER_DATA_ALL;
6708 	htt_tlv_filter.mo_mgmt_filter = FILTER_MGMT_ALL;
6709 	htt_tlv_filter.mo_ctrl_filter = FILTER_CTRL_ALL;
6710 	htt_tlv_filter.mo_data_filter = FILTER_DATA_ALL;
6711 	htt_tlv_filter.offset_valid = false;
6712 
6713 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
6714 		int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id,
6715 						pdev->pdev_id);
6716 		/*
		 * If two back-to-back HTT messages are sent within a short
		 * time, the second HTT message's source SRNG HP write has a
		 * chance to fail; this has been confirmed by HST HW.
		 * For monitor mode, this is the last HTT message to be sent.
		 * If the 2nd HTT message for the monitor status ring fails,
		 * HW won't provide anything into the 2nd monitor status ring.
		 * As a WAR, add some delay before sending the 2nd HTT message;
		 * > 2us is required per HST HW, so delay 100us to be safe.
6726 		 */
6727 		if (mac_id)
6728 			qdf_udelay(100);
6729 
6730 		htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
6731 			pdev->rxdma_mon_status_ring[mac_id].hal_srng,
6732 			RXDMA_MONITOR_STATUS, RX_BUFFER_SIZE, &htt_tlv_filter);
6733 	}
6734 
6735 	return status;
6736 }
6737 
6738 /**
 * dp_vdev_set_monitor_mode() - Set DP VDEV to monitor mode
 * @soc: Datapath soc handle
 * @vdev_id: id of the Datapath VDEV handle
 * @special_monitor: Flag to denote if it is a smart/lite monitor mode
6742  *
6743  * Return: 0 on success, not 0 on failure
6744  */
6745 static QDF_STATUS dp_vdev_set_monitor_mode(struct cdp_soc_t *soc,
6746 					   uint8_t vdev_id,
6747 					   uint8_t special_monitor)
6748 {
6749 	struct dp_pdev *pdev;
6750 	struct dp_vdev *vdev =
6751 		dp_get_vdev_from_soc_vdev_id_wifi3((struct dp_soc *)soc,
6752 						   vdev_id);
6753 
6754 	if (!vdev)
6755 		return QDF_STATUS_E_FAILURE;
6756 
6757 	pdev = vdev->pdev;
6758 	pdev->monitor_vdev = vdev;
6759 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_WARN,
6760 		  "pdev=%pK, pdev_id=%d, soc=%pK vdev=%pK\n",
6761 		  pdev, pdev->pdev_id, pdev->soc, vdev);
6762 
6763 	/*
	 * Do not configure the monitor buffer ring and filters for smart
	 * and lite monitor modes.
	 * For smart monitor, filters are added along with the first NAC;
	 * for lite monitor, the required configuration is done through
	 * dp_set_pdev_param.
6769 	 */
6770 	if (special_monitor)
6771 		return QDF_STATUS_SUCCESS;
6772 
	/* Check if current pdev's monitor_vdev exists */
6774 	if (pdev->monitor_configured) {
6775 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
6776 			  "monitor vap already created vdev=%pK\n", vdev);
6777 		return QDF_STATUS_E_RESOURCES;
6778 	}
6779 
6780 	pdev->monitor_configured = true;
6781 
6782 	dp_mon_buf_delayed_replenish(pdev);
6783 
6784 	return dp_pdev_configure_monitor_rings(pdev);
6785 }
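
/*
 * Usage sketch (illustrative only): regular monitor mode configures the
 * rings and filters immediately, while smart/lite monitor defers them:
 *
 *	// full monitor VAP: rings and filters are configured here
 *	dp_vdev_set_monitor_mode(soc, vdev_id, 0);
 *
 *	// smart/lite monitor: returns early; filters are added later via
 *	// NAC registration or dp_set_pdev_param()
 *	dp_vdev_set_monitor_mode(soc, vdev_id, 1);
 *
 * The 0/1 values for special_monitor are assumptions for illustration.
 */
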
6786 
6787 /**
6788  * dp_pdev_set_advance_monitor_filter() - Set DP PDEV monitor filter
6789  * @pdev_handle: Datapath PDEV handle
 * @filter_val: Flag to select Filter for monitor mode
 *
 * Return: 0 on success, not 0 on failure
6792  */
6793 static QDF_STATUS
6794 dp_pdev_set_advance_monitor_filter(struct cdp_pdev *pdev_handle,
6795 				   struct cdp_monitor_filter *filter_val)
6796 {
	/* Many monitor VAPs can exist in a system but only one can be up
	 * at any time
	 */
6800 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
6801 	struct dp_vdev *vdev = pdev->monitor_vdev;
6802 	struct htt_rx_ring_tlv_filter htt_tlv_filter;
6803 	struct dp_soc *soc;
6804 	uint8_t pdev_id;
6805 	int mac_id;
6806 	QDF_STATUS status = QDF_STATUS_SUCCESS;
6807 
6808 	pdev_id = pdev->pdev_id;
6809 	soc = pdev->soc;
6810 
6811 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_WARN,
6812 		"pdev=%pK, pdev_id=%d, soc=%pK vdev=%pK",
6813 		pdev, pdev_id, soc, vdev);
6814 
	/* Check if current pdev's monitor_vdev exists */
6816 	if (!pdev->monitor_vdev) {
6817 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
6818 			"vdev=%pK", vdev);
6819 		qdf_assert(vdev);
6820 	}
6821 
6822 	/* update filter mode, type in pdev structure */
6823 	pdev->mon_filter_mode = filter_val->mode;
6824 	pdev->fp_mgmt_filter = filter_val->fp_mgmt;
6825 	pdev->fp_ctrl_filter = filter_val->fp_ctrl;
6826 	pdev->fp_data_filter = filter_val->fp_data;
6827 	pdev->mo_mgmt_filter = filter_val->mo_mgmt;
6828 	pdev->mo_ctrl_filter = filter_val->mo_ctrl;
6829 	pdev->mo_data_filter = filter_val->mo_data;
6830 
6831 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
6832 		"MODE[%x] FP[%02x|%02x|%02x] MO[%02x|%02x|%02x]",
6833 		pdev->mon_filter_mode, pdev->fp_mgmt_filter,
6834 		pdev->fp_ctrl_filter, pdev->fp_data_filter,
6835 		pdev->mo_mgmt_filter, pdev->mo_ctrl_filter,
6836 		pdev->mo_data_filter);
6837 
6838 	qdf_mem_zero(&(htt_tlv_filter), sizeof(htt_tlv_filter));
6839 
6840 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
6841 		int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, pdev_id);
6842 
6843 		status = dp_monitor_mode_ring_config(soc, mac_for_pdev,
6844 						     pdev, mac_id,
6845 						     htt_tlv_filter);
6846 
6847 		if (status != QDF_STATUS_SUCCESS) {
6848 			dp_err("Failed to send tlv filter for monitor mode rings");
6849 			return status;
6850 		}
6851 
6852 		htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
6853 			pdev->rxdma_mon_status_ring[mac_id].hal_srng,
6854 			RXDMA_MONITOR_STATUS, RX_BUFFER_SIZE, &htt_tlv_filter);
6855 	}
6856 
6857 	htt_tlv_filter.mpdu_start = 1;
6858 	htt_tlv_filter.msdu_start = 1;
6859 	htt_tlv_filter.packet = 1;
6860 	htt_tlv_filter.msdu_end = 1;
6861 	htt_tlv_filter.mpdu_end = 1;
6862 	htt_tlv_filter.packet_header = 1;
6863 	htt_tlv_filter.attention = 1;
6864 	htt_tlv_filter.ppdu_start = 0;
6865 	htt_tlv_filter.ppdu_end = 0;
6866 	htt_tlv_filter.ppdu_end_user_stats = 0;
6867 	htt_tlv_filter.ppdu_end_user_stats_ext = 0;
6868 	htt_tlv_filter.ppdu_end_status_done = 0;
6869 	htt_tlv_filter.header_per_msdu = 1;
6870 	htt_tlv_filter.enable_fp =
6871 		(pdev->mon_filter_mode & MON_FILTER_PASS) ? 1 : 0;
6872 	htt_tlv_filter.enable_md = 0;
6873 	htt_tlv_filter.enable_mo =
6874 		(pdev->mon_filter_mode & MON_FILTER_OTHER) ? 1 : 0;
6875 	htt_tlv_filter.fp_mgmt_filter = pdev->fp_mgmt_filter;
6876 	htt_tlv_filter.fp_ctrl_filter = pdev->fp_ctrl_filter;
6877 	if (pdev->mcopy_mode)
6878 		htt_tlv_filter.fp_data_filter = 0;
6879 	else
6880 		htt_tlv_filter.fp_data_filter = pdev->fp_data_filter;
6881 	htt_tlv_filter.mo_mgmt_filter = pdev->mo_mgmt_filter;
6882 	htt_tlv_filter.mo_ctrl_filter = pdev->mo_ctrl_filter;
6883 	htt_tlv_filter.mo_data_filter = pdev->mo_data_filter;
6884 	htt_tlv_filter.offset_valid = false;
6885 
6886 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
6887 		int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, pdev_id);
6888 
6889 		status = dp_monitor_mode_ring_config(soc, mac_for_pdev,
6890 						     pdev, mac_id,
6891 						     htt_tlv_filter);
6892 
6893 		if (status != QDF_STATUS_SUCCESS) {
6894 			dp_err("Failed to send tlv filter for monitor mode rings");
6895 			return status;
6896 		}
6897 	}
6898 
6899 	qdf_mem_zero(&(htt_tlv_filter), sizeof(htt_tlv_filter));
6900 
6901 	htt_tlv_filter.mpdu_start = 1;
6902 	htt_tlv_filter.msdu_start = 0;
6903 	htt_tlv_filter.packet = 0;
6904 	htt_tlv_filter.msdu_end = 0;
6905 	htt_tlv_filter.mpdu_end = 0;
6906 	htt_tlv_filter.attention = 0;
6907 	htt_tlv_filter.ppdu_start = 1;
6908 	htt_tlv_filter.ppdu_end = 1;
6909 	htt_tlv_filter.ppdu_end_user_stats = 1;
6910 	htt_tlv_filter.ppdu_end_user_stats_ext = 1;
6911 	htt_tlv_filter.ppdu_end_status_done = 1;
6912 	htt_tlv_filter.enable_fp = 1;
6913 	htt_tlv_filter.enable_md = 0;
6914 	htt_tlv_filter.enable_mo = 1;
6915 	if (pdev->mcopy_mode) {
6916 		htt_tlv_filter.packet_header = 1;
6917 	}
6918 	htt_tlv_filter.fp_mgmt_filter = FILTER_MGMT_ALL;
6919 	htt_tlv_filter.fp_ctrl_filter = FILTER_CTRL_ALL;
6920 	htt_tlv_filter.fp_data_filter = FILTER_DATA_ALL;
6921 	htt_tlv_filter.mo_mgmt_filter = FILTER_MGMT_ALL;
6922 	htt_tlv_filter.mo_ctrl_filter = FILTER_CTRL_ALL;
6923 	htt_tlv_filter.mo_data_filter = FILTER_DATA_ALL;
6924 	htt_tlv_filter.offset_valid = false;
6925 
6926 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
6927 		int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id,
6928 						pdev->pdev_id);
6929 
6930 		htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
6931 			pdev->rxdma_mon_status_ring[mac_id].hal_srng,
6932 			RXDMA_MONITOR_STATUS, RX_BUFFER_SIZE, &htt_tlv_filter);
6933 	}
6934 
6935 	return QDF_STATUS_SUCCESS;
6936 }
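
/*
 * Usage sketch (illustrative only): a cdp_monitor_filter that passes all
 * filter-pass (FP) frame classes and nothing in monitor-other (MO) mode,
 * assuming the MON_FILTER_PASS and FILTER_*_ALL values used above:
 *
 *	struct cdp_monitor_filter filter_val = {
 *		.mode = MON_FILTER_PASS,
 *		.fp_mgmt = FILTER_MGMT_ALL,
 *		.fp_ctrl = FILTER_CTRL_ALL,
 *		.fp_data = FILTER_DATA_ALL,
 *		.mo_mgmt = 0,
 *		.mo_ctrl = 0,
 *		.mo_data = 0,
 *	};
 *
 *	dp_pdev_set_advance_monitor_filter(pdev_handle, &filter_val);
 *
 * The field names are taken from the accesses above (filter_val->mode,
 * ->fp_mgmt, ...); the designated-initializer form is an assumption.
 */
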
6937 
6938 /**
6939  * dp_pdev_set_monitor_channel() - set monitor channel num in pdev
 * @pdev_handle: Datapath PDEV handle
 * @chan_num: channel number
 *
6942  * Return: None
6943  */
6944 static
6945 void dp_pdev_set_monitor_channel(struct cdp_pdev *pdev_handle, int chan_num)
6946 {
6947 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
6948 
6949 	pdev->mon_chan_num = chan_num;
6950 }
6951 
6952 /**
6953  * dp_deliver_tx_mgmt() - Deliver mgmt frame for tx capture
6954  * @pdev_handle: Datapath PDEV handle
6955  * @nbuf: Management frame buffer
6956  */
6957 static void
6958 dp_deliver_tx_mgmt(struct cdp_pdev *pdev_handle, qdf_nbuf_t nbuf)
6959 {
6960 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
6961 
6962 	dp_deliver_mgmt_frm(pdev, nbuf);
6963 }
6964 
6965 /**
 * dp_mon_set_bsscolor() - sets bsscolor for tx capture
6967  * @pdev_handle: Datapath PDEV handle
6968  * @bsscolor: new bsscolor
6969  */
6970 static void
6971 dp_mon_set_bsscolor(struct cdp_pdev *pdev_handle, uint8_t bsscolor)
6972 {
6973 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
6974 
6975 	pdev->rx_mon_recv_status.bsscolor = bsscolor;
6976 }
6977 
6978 /**
6979  * dp_get_pdev_id_frm_pdev() - get pdev_id
6980  * @pdev_handle: Datapath PDEV handle
6981  *
6982  * Return: pdev_id
6983  */
6984 static
6985 uint8_t dp_get_pdev_id_frm_pdev(struct cdp_pdev *pdev_handle)
6986 {
6987 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
6988 
6989 	return pdev->pdev_id;
6990 }
6991 
6992 /**
6993  * dp_get_delay_stats_flag() - get delay stats flag
6994  * @pdev_handle: Datapath PDEV handle
6995  *
6996  * Return: 0 if flag is disabled else 1
6997  */
6998 static
6999 bool dp_get_delay_stats_flag(struct cdp_pdev *pdev_handle)
7000 {
7001 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
7002 
7003 	return pdev->delay_stats_flag;
7004 }
7005 
7006 /**
7007  * dp_pdev_set_chan_noise_floor() - set channel noise floor
7008  * @pdev_handle: Datapath PDEV handle
7009  * @chan_noise_floor: Channel Noise Floor
7010  *
7011  * Return: void
7012  */
7013 static
7014 void dp_pdev_set_chan_noise_floor(struct cdp_pdev *pdev_handle,
7015 				  int16_t chan_noise_floor)
7016 {
7017 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
7018 
7019 	pdev->chan_noise_floor = chan_noise_floor;
7020 }
7021 
7022 /**
7023  * dp_vdev_get_filter_ucast_data() - get DP VDEV monitor ucast filter
7024  * @vdev_handle: Datapath VDEV handle
7025  * Return: true on ucast filter flag set
7026  */
7027 static bool dp_vdev_get_filter_ucast_data(struct cdp_vdev *vdev_handle)
7028 {
7029 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
7030 	struct dp_pdev *pdev;
7031 
7032 	pdev = vdev->pdev;
7033 
7034 	if ((pdev->fp_data_filter & FILTER_DATA_UCAST) ||
7035 	    (pdev->mo_data_filter & FILTER_DATA_UCAST))
7036 		return true;
7037 
7038 	return false;
7039 }
7040 
7041 /**
7042  * dp_vdev_get_filter_mcast_data() - get DP VDEV monitor mcast filter
7043  * @vdev_handle: Datapath VDEV handle
7044  * Return: true on mcast filter flag set
7045  */
7046 static bool dp_vdev_get_filter_mcast_data(struct cdp_vdev *vdev_handle)
7047 {
7048 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
7049 	struct dp_pdev *pdev;
7050 
7051 	pdev = vdev->pdev;
7052 
7053 	if ((pdev->fp_data_filter & FILTER_DATA_MCAST) ||
7054 	    (pdev->mo_data_filter & FILTER_DATA_MCAST))
7055 		return true;
7056 
7057 	return false;
7058 }
7059 
7060 /**
7061  * dp_vdev_get_filter_non_data() - get DP VDEV monitor non_data filter
7062  * @vdev_handle: Datapath VDEV handle
7063  * Return: true on non data filter flag set
7064  */
7065 static bool dp_vdev_get_filter_non_data(struct cdp_vdev *vdev_handle)
7066 {
7067 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
7068 	struct dp_pdev *pdev;
7069 
7070 	pdev = vdev->pdev;
7071 
7072 	if ((pdev->fp_mgmt_filter & FILTER_MGMT_ALL) ||
7073 	    (pdev->mo_mgmt_filter & FILTER_MGMT_ALL)) {
7074 		if ((pdev->fp_ctrl_filter & FILTER_CTRL_ALL) ||
7075 		    (pdev->mo_ctrl_filter & FILTER_CTRL_ALL)) {
7076 			return true;
7077 		}
7078 	}
7079 
7080 	return false;
7081 }
7082 
7083 #ifdef MESH_MODE_SUPPORT
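/*
 * dp_peer_set_mesh_mode() - to set mesh mode on a vdev
 * @vdev_hdl: virtual device object
 * @val: value to be set
 *
 * Return: void
 */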
7084 void dp_peer_set_mesh_mode(struct cdp_vdev *vdev_hdl, uint32_t val)
7085 {
7086 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_hdl;
7087 
7088 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
7089 		FL("val %d"), val);
7090 	vdev->mesh_vdev = val;
7091 }
7092 
7093 /*
7094  * dp_peer_set_mesh_rx_filter() - to set the mesh rx filter
7095  * @vdev_hdl: virtual device object
7096  * @val: value to be set
7097  *
7098  * Return: void
7099  */
7100 void dp_peer_set_mesh_rx_filter(struct cdp_vdev *vdev_hdl, uint32_t val)
7101 {
7102 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_hdl;
7103 
7104 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
7105 		FL("val %d"), val);
7106 	vdev->mesh_rx_filter = val;
7107 }
7108 #endif
7109 
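/**
 * dp_check_pdev_exists() - Validate pdev before use
 * @soc: dp soc handle
 * @data: pdev handle to validate against the soc's pdev list
 *
 * Return: true if the pdev is attached to @soc, false otherwise
 */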
7110 bool dp_check_pdev_exists(struct dp_soc *soc, struct dp_pdev *data)
7111 {
7112 	uint8_t pdev_count;
7113 
7114 	for (pdev_count = 0; pdev_count < MAX_PDEV_CNT; pdev_count++) {
7115 		if (soc->pdev_list[pdev_count] &&
7116 		    soc->pdev_list[pdev_count] == data)
7117 			return true;
7118 	}
7119 	return false;
7120 }
7121 
7122 /**
7123  * dp_rx_bar_stats_cb(): BAR received stats callback
7124  * @soc: SOC handle
7125  * @cb_ctxt: Call back context
7126  * @reo_status: Reo status
7127  *
7128  * return: void
7129  */
7130 void dp_rx_bar_stats_cb(struct dp_soc *soc, void *cb_ctxt,
7131 	union hal_reo_status *reo_status)
7132 {
7133 	struct dp_pdev *pdev = (struct dp_pdev *)cb_ctxt;
7134 	struct hal_reo_queue_status *queue_status = &(reo_status->queue_status);
7135 
7136 	if (!dp_check_pdev_exists(soc, pdev)) {
7137 		dp_err_rl("pdev doesn't exist");
7138 		return;
7139 	}
7140 
7141 	if (!qdf_atomic_read(&soc->cmn_init_done))
7142 		return;
7143 
7144 	if (queue_status->header.status != HAL_REO_CMD_SUCCESS) {
7145 		DP_PRINT_STATS("REO stats failure %d",
7146 			       queue_status->header.status);
7147 		qdf_atomic_set(&(pdev->stats_cmd_complete), 1);
7148 		return;
7149 	}
7150 
7151 	pdev->stats.rx.bar_recv_cnt += queue_status->bar_rcvd_cnt;
7152 	qdf_atomic_set(&(pdev->stats_cmd_complete), 1);
7153 
7154 }
7155 
7156 /**
7157  * dp_aggregate_vdev_stats(): Consolidate stats at VDEV level
7158  * @vdev: DP VDEV handle
 * @vdev_stats: buffer to hold the consolidated vdev stats
7159  *
7160  * return: void
7161  */
7162 void dp_aggregate_vdev_stats(struct dp_vdev *vdev,
7163 			     struct cdp_vdev_stats *vdev_stats)
7164 {
7165 	struct dp_peer *peer = NULL;
7166 	struct dp_soc *soc = NULL;
7167 
7168 	if (!vdev || !vdev->pdev)
7169 		return;
7170 
7171 	soc = vdev->pdev->soc;
7172 
7173 	qdf_mem_copy(vdev_stats, &vdev->stats, sizeof(vdev->stats));
7174 
7175 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem)
7176 		dp_update_vdev_stats(vdev_stats, peer);
7177 
7178 #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
7179 	dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, vdev->pdev->soc,
7180 			     vdev_stats, vdev->vdev_id,
7181 			     UPDATE_VDEV_STATS, vdev->pdev->pdev_id);
7182 #endif
7183 }
7184 
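/**
 * dp_aggregate_pdev_stats(): Consolidate stats at PDEV level
 * @pdev: DP PDEV handle
 *
 * return: void
 */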
7185 void dp_aggregate_pdev_stats(struct dp_pdev *pdev)
7186 {
7187 	struct dp_vdev *vdev = NULL;
7188 	struct dp_soc *soc;
7189 	struct cdp_vdev_stats *vdev_stats =
7190 			qdf_mem_malloc(sizeof(struct cdp_vdev_stats));
7191 
7192 	if (!vdev_stats) {
7193 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
7194 			  "DP alloc failure - unable to allocate vdev stats");
7195 		return;
7196 	}
7197 
7198 	qdf_mem_zero(&pdev->stats.tx, sizeof(pdev->stats.tx));
7199 	qdf_mem_zero(&pdev->stats.rx, sizeof(pdev->stats.rx));
7200 	qdf_mem_zero(&pdev->stats.tx_i, sizeof(pdev->stats.tx_i));
7201 
7202 	if (pdev->mcopy_mode)
7203 		DP_UPDATE_STATS(pdev, pdev->invalid_peer);
7204 
7205 	soc = pdev->soc;
7206 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
7207 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
7208 	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
7209 
7210 		dp_aggregate_vdev_stats(vdev, vdev_stats);
7211 		dp_update_pdev_stats(pdev, vdev_stats);
7212 		dp_update_pdev_ingress_stats(pdev, vdev);
7213 	}
7214 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
7215 	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
7216 	qdf_mem_free(vdev_stats);
7217 
7218 #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
7219 	dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, pdev->soc, &pdev->stats,
7220 			     pdev->pdev_id, UPDATE_PDEV_STATS, pdev->pdev_id);
7221 #endif
7222 }
7223 
7224 /**
7225  * dp_vdev_getstats() - get vdev packet level stats
7226  * @vdev_handle: Datapath VDEV handle
7227  * @stats: cdp network device stats structure
7228  *
7229  * Return: QDF_STATUS
7230  */
7231 static QDF_STATUS dp_vdev_getstats(struct cdp_vdev *vdev_handle,
7232 				   struct cdp_dev_stats *stats)
7233 {
7234 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
7235 	struct dp_pdev *pdev;
7236 	struct dp_soc *soc;
7237 	struct cdp_vdev_stats *vdev_stats;
7238 
7239 	if (!vdev)
7240 		return QDF_STATUS_E_FAILURE;
7241 
7242 	pdev = vdev->pdev;
7243 	if (!pdev)
7244 		return QDF_STATUS_E_FAILURE;
7245 
7246 	soc = pdev->soc;
7247 
7248 	vdev_stats = qdf_mem_malloc(sizeof(struct cdp_vdev_stats));
7249 
7250 	if (!vdev_stats) {
7251 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
7252 			  "DP alloc failure - unable to allocate vdev stats");
7253 		return QDF_STATUS_E_FAILURE;
7254 	}
7255 
7256 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
7257 	dp_aggregate_vdev_stats(vdev, vdev_stats);
7258 	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
7259 
7260 	stats->tx_packets = vdev_stats->tx_i.rcvd.num;
7261 	stats->tx_bytes = vdev_stats->tx_i.rcvd.bytes;
7262 
7263 	stats->tx_errors = vdev_stats->tx.tx_failed +
7264 		vdev_stats->tx_i.dropped.dropped_pkt.num;
7265 	stats->tx_dropped = stats->tx_errors;
7266 
7267 	stats->rx_packets = vdev_stats->rx.unicast.num +
7268 		vdev_stats->rx.multicast.num +
7269 		vdev_stats->rx.bcast.num;
7270 	stats->rx_bytes = vdev_stats->rx.unicast.bytes +
7271 		vdev_stats->rx.multicast.bytes +
7272 		vdev_stats->rx.bcast.bytes;
7273 
7274 	qdf_mem_free(vdev_stats);
7275 
7276 	return QDF_STATUS_SUCCESS;
7277 }
7278 
7279 
7280 /**
7281  * dp_pdev_getstats() - get pdev packet level stats
7282  * @pdev_handle: Datapath PDEV handle
7283  * @stats: cdp network device stats structure
7284  *
7285  * Return: void
7286  */
7287 static void dp_pdev_getstats(struct cdp_pdev *pdev_handle,
7288 			     struct cdp_dev_stats *stats)
7289 {
7290 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
7291 
7292 	dp_aggregate_pdev_stats(pdev);
7293 
7294 	stats->tx_packets = pdev->stats.tx_i.rcvd.num;
7295 	stats->tx_bytes = pdev->stats.tx_i.rcvd.bytes;
7296 
7297 	stats->tx_errors = pdev->stats.tx.tx_failed +
7298 		pdev->stats.tx_i.dropped.dropped_pkt.num;
7299 	stats->tx_dropped = stats->tx_errors;
7300 
7301 	stats->rx_packets = pdev->stats.rx.unicast.num +
7302 		pdev->stats.rx.multicast.num +
7303 		pdev->stats.rx.bcast.num;
7304 	stats->rx_bytes = pdev->stats.rx.unicast.bytes +
7305 		pdev->stats.rx.multicast.bytes +
7306 		pdev->stats.rx.bcast.bytes;
7307 	stats->rx_errors = pdev->stats.err.desc_alloc_fail +
7308 		pdev->stats.err.ip_csum_err +
7309 		pdev->stats.err.tcp_udp_csum_err +
7310 		pdev->stats.rx.err.mic_err +
7311 		pdev->stats.rx.err.decrypt_err +
7312 		pdev->stats.err.rxdma_error +
7313 		pdev->stats.err.reo_error;
7314 	stats->rx_dropped = pdev->stats.dropped.msdu_not_done +
7315 		pdev->stats.dropped.mec +
7316 		pdev->stats.dropped.mesh_filter +
7317 		pdev->stats.dropped.wifi_parse +
7318 		pdev->stats.dropped.mon_rx_drop +
7319 		pdev->stats.dropped.mon_radiotap_update_err;
7320 }
7321 
7322 /**
7323  * dp_get_device_stats() - get interface level packet stats
7324  * @soc: soc handle
7325  * @id: vdev_id or pdev_id based on type
7326  * @stats: cdp network device stats structure
7327  * @type: device type pdev/vdev
7328  *
7329  * Return: QDF_STATUS
7330  */
7331 static QDF_STATUS dp_get_device_stats(struct cdp_soc_t *soc, uint8_t id,
7332 				      struct cdp_dev_stats *stats,
7333 				      uint8_t type)
7334 {
7335 	switch (type) {
7336 	case UPDATE_VDEV_STATS:
7337 		return dp_vdev_getstats(
7338 			(struct cdp_vdev *)dp_get_vdev_from_soc_vdev_id_wifi3(
7339 			 (struct dp_soc *)soc, id), stats);
7340 	case UPDATE_PDEV_STATS:
7341 		{
7342 			struct dp_pdev *pdev =
7343 				dp_get_pdev_from_soc_pdev_id_wifi3(
7344 						(struct dp_soc *)soc,
7345 						 id);
7346 			if (pdev) {
7347 				dp_pdev_getstats((struct cdp_pdev *)pdev,
7348 						 stats);
7349 				return QDF_STATUS_SUCCESS;
7350 			}
7351 		}
7352 		break;
7353 	default:
7354 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
7355 			"apstats cannot be updated for this input type %d",
7356 			type);
7357 		break;
7358 	}
7359 
7360 	return QDF_STATUS_E_FAILURE;
7361 }
7362 
7363 const
7364 char *dp_srng_get_str_from_hal_ring_type(enum hal_ring_type ring_type)
7365 {
7366 	switch (ring_type) {
7367 	case REO_DST:
7368 		return "Reo_dst";
7369 	case REO_EXCEPTION:
7370 		return "Reo_exception";
7371 	case REO_CMD:
7372 		return "Reo_cmd";
7373 	case REO_REINJECT:
7374 		return "Reo_reinject";
7375 	case REO_STATUS:
7376 		return "Reo_status";
7377 	case WBM2SW_RELEASE:
7378 		return "wbm2sw_release";
7379 	case TCL_DATA:
7380 		return "tcl_data";
7381 	case TCL_CMD:
7382 		return "tcl_cmd";
7383 	case TCL_STATUS:
7384 		return "tcl_status";
7385 	case SW2WBM_RELEASE:
7386 		return "sw2wbm_release";
7387 	case RXDMA_BUF:
7388 		return "Rxdma_buf";
7389 	case RXDMA_DST:
7390 		return "Rxdma_dst";
7391 	case RXDMA_MONITOR_BUF:
7392 		return "Rxdma_monitor_buf";
7393 	case RXDMA_MONITOR_DESC:
7394 		return "Rxdma_monitor_desc";
7395 	case RXDMA_MONITOR_STATUS:
7396 		return "Rxdma_monitor_status";
7397 	default:
7398 		dp_err("Invalid ring type");
7399 		break;
7400 	}
7401 	return "Invalid";
7402 }
7403 
7404 /*
7405  * dp_print_napi_stats(): print NAPI stats
7406  * @soc: soc handle
7407  */
7408 void dp_print_napi_stats(struct dp_soc *soc)
7409 {
7410 	hif_print_napi_stats(soc->hif_handle);
7411 }
7412 
7413 /**
7414  * dp_txrx_host_stats_clr(): Reinitialize the txrx stats
7415  * @vdev: DP_VDEV handle
7416  *
7417  * Return: QDF_STATUS
7418  */
7419 static inline QDF_STATUS
7420 dp_txrx_host_stats_clr(struct dp_vdev *vdev)
7421 {
7422 	struct dp_peer *peer = NULL;
7423 
7424 	if (!vdev || !vdev->pdev)
7425 		return QDF_STATUS_E_FAILURE;
7426 
7427 	DP_STATS_CLR(vdev->pdev);
7428 	DP_STATS_CLR(vdev->pdev->soc);
7429 	DP_STATS_CLR(vdev);
7430 
7431 	hif_clear_napi_stats(vdev->pdev->soc->hif_handle);
7432 
7433 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
7434 		if (!peer)
7435 			return QDF_STATUS_E_FAILURE;
7436 		DP_STATS_CLR(peer);
7437 
7438 #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
7439 		dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, vdev->pdev->soc,
7440 				     &peer->stats,  peer->peer_ids[0],
7441 				     UPDATE_PEER_STATS, vdev->pdev->pdev_id);
7442 #endif
7443 	}
7444 
7445 #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
7446 	dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, vdev->pdev->soc,
7447 			     &vdev->stats,  vdev->vdev_id,
7448 			     UPDATE_VDEV_STATS, vdev->pdev->pdev_id);
7449 #endif
7450 	return QDF_STATUS_SUCCESS;
7451 }
7452 
7453 /*
7454  * dp_get_host_peer_stats() - function to print peer stats
7455  * @soc: dp_soc handle
7456  * @mac_addr: mac address of the peer
7457  *
7458  * Return: QDF_STATUS
7459  */
7460 static QDF_STATUS
7461 dp_get_host_peer_stats(struct cdp_soc_t *soc, uint8_t *mac_addr)
7462 {
7463 	QDF_STATUS status = QDF_STATUS_SUCCESS;
7464 	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)soc,
7465 						      mac_addr, 0,
7466 						      DP_VDEV_ALL);
7467 	if (!peer || peer->delete_in_progress) {
7468 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
7469 			  "%s: Invalid peer\n", __func__);
7470 		status = QDF_STATUS_E_FAILURE;
7471 		goto fail;
7472 	}
7473 
7474 	dp_print_peer_stats(peer);
7475 	dp_peer_rxtid_stats(peer, dp_rx_tid_stats_cb, NULL);
7476 fail:
7477 	if (peer)
7478 		dp_peer_unref_delete(peer);
7479 
7480 	return status;
7481 }
7482 
7483 /**
7484  * dp_txrx_stats_help() - Helper function for Txrx_Stats
7485  *
7486  * Return: None
7487  */
7488 static void dp_txrx_stats_help(void)
7489 {
7490 	dp_info("Command: iwpriv wlan0 txrx_stats <stats_option> <mac_id>");
7491 	dp_info("stats_option:");
7492 	dp_info("  1 -- HTT Tx Statistics");
7493 	dp_info("  2 -- HTT Rx Statistics");
7494 	dp_info("  3 -- HTT Tx HW Queue Statistics");
7495 	dp_info("  4 -- HTT Tx HW Sched Statistics");
7496 	dp_info("  5 -- HTT Error Statistics");
7497 	dp_info("  6 -- HTT TQM Statistics");
7498 	dp_info("  7 -- HTT TQM CMDQ Statistics");
7499 	dp_info("  8 -- HTT TX_DE_CMN Statistics");
7500 	dp_info("  9 -- HTT Tx Rate Statistics");
7501 	dp_info(" 10 -- HTT Rx Rate Statistics");
7502 	dp_info(" 11 -- HTT Peer Statistics");
7503 	dp_info(" 12 -- HTT Tx SelfGen Statistics");
7504 	dp_info(" 13 -- HTT Tx MU HWQ Statistics");
7505 	dp_info(" 14 -- HTT RING_IF_INFO Statistics");
7506 	dp_info(" 15 -- HTT SRNG Statistics");
7507 	dp_info(" 16 -- HTT SFM Info Statistics");
7508 	dp_info(" 17 -- HTT PDEV_TX_MU_MIMO_SCHED INFO Statistics");
7509 	dp_info(" 18 -- HTT Peer List Details");
7510 	dp_info(" 20 -- Clear Host Statistics");
7511 	dp_info(" 21 -- Host Rx Rate Statistics");
7512 	dp_info(" 22 -- Host Tx Rate Statistics");
7513 	dp_info(" 23 -- Host Tx Statistics");
7514 	dp_info(" 24 -- Host Rx Statistics");
7515 	dp_info(" 25 -- Host AST Statistics");
7516 	dp_info(" 26 -- Host SRNG PTR Statistics");
7517 	dp_info(" 27 -- Host Mon Statistics");
7518 	dp_info(" 28 -- Host REO Queue Statistics");
7519 	dp_info(" 29 -- Host Soc cfg param Statistics");
7520 	dp_info(" 30 -- Host pdev cfg param Statistics");
7521 }
7522 
7523 /**
7524  * dp_print_host_stats() - Function to print the stats aggregated at host
7525  * @vdev: DP_VDEV handle
7526  * @req: host stats request parameters
7527  *
7528  * Return: 0 on success, print error message in case of failure
7529  */
7530 static int
7531 dp_print_host_stats(struct dp_vdev *vdev,
7532 		    struct cdp_txrx_stats_req *req)
7533 {
7534 	struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;
7535 	enum cdp_host_txrx_stats type =
7536 			dp_stats_mapping_table[req->stats][STATS_HOST];
7537 
7538 	dp_aggregate_pdev_stats(pdev);
7539 
7540 	switch (type) {
7541 	case TXRX_CLEAR_STATS:
7542 		dp_txrx_host_stats_clr(vdev);
7543 		break;
7544 	case TXRX_RX_RATE_STATS:
7545 		dp_print_rx_rates(vdev);
7546 		break;
7547 	case TXRX_TX_RATE_STATS:
7548 		dp_print_tx_rates(vdev);
7549 		break;
7550 	case TXRX_TX_HOST_STATS:
7551 		dp_print_pdev_tx_stats(pdev);
7552 		dp_print_soc_tx_stats(pdev->soc);
7553 		break;
7554 	case TXRX_RX_HOST_STATS:
7555 		dp_print_pdev_rx_stats(pdev);
7556 		dp_print_soc_rx_stats(pdev->soc);
7557 		break;
7558 	case TXRX_AST_STATS:
7559 		dp_print_ast_stats(pdev->soc);
7560 		dp_print_peer_table(vdev);
7561 		break;
7562 	case TXRX_SRNG_PTR_STATS:
7563 		dp_print_ring_stats(pdev);
7564 		break;
7565 	case TXRX_RX_MON_STATS:
7566 		dp_print_pdev_rx_mon_stats(pdev);
7567 		break;
7568 	case TXRX_REO_QUEUE_STATS:
7569 		dp_get_host_peer_stats((struct cdp_soc_t *)pdev->soc,
7570 				       req->peer_addr);
7571 		break;
7572 	case TXRX_SOC_CFG_PARAMS:
7573 		dp_print_soc_cfg_params(pdev->soc);
7574 		break;
7575 	case TXRX_PDEV_CFG_PARAMS:
7576 		dp_print_pdev_cfg_params(pdev);
7577 		break;
7578 	case TXRX_NAPI_STATS:
7579 		dp_print_napi_stats(pdev->soc);
		break;
7580 	case TXRX_SOC_INTERRUPT_STATS:
7581 		dp_print_soc_interrupt_stats(pdev->soc);
7582 		break;
7583 	default:
7584 		dp_info("Wrong Input For TxRx Host Stats");
7585 		dp_txrx_stats_help();
7586 		break;
7587 	}
7588 	return 0;
7589 }
7590 
7591 /*
7592  * dp_ppdu_ring_reset()- Reset PPDU Stats ring
7593  * @pdev: DP_PDEV handle
7594  *
7595  * Return: void
7596  */
7597 static void
7598 dp_ppdu_ring_reset(struct dp_pdev *pdev)
7599 {
7600 	struct htt_rx_ring_tlv_filter htt_tlv_filter;
7601 	int mac_id;
7602 
7603 	qdf_mem_zero(&(htt_tlv_filter), sizeof(htt_tlv_filter));
7604 
7605 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
7606 		int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id,
7607 							pdev->pdev_id);
7608 
7609 		htt_h2t_rx_ring_cfg(pdev->soc->htt_handle, mac_for_pdev,
7610 			pdev->rxdma_mon_status_ring[mac_id].hal_srng,
7611 			RXDMA_MONITOR_STATUS, RX_BUFFER_SIZE, &htt_tlv_filter);
7612 	}
7613 }
7614 
7615 /*
7616  * dp_ppdu_ring_cfg()- Configure PPDU Stats ring
7617  * @pdev: DP_PDEV handle
7618  *
7619  * Return: void
7620  */
7621 static void
7622 dp_ppdu_ring_cfg(struct dp_pdev *pdev)
7623 {
7624 	struct htt_rx_ring_tlv_filter htt_tlv_filter = {0};
7625 	int mac_id;
7626 
7627 	htt_tlv_filter.mpdu_start = 1;
7628 	htt_tlv_filter.msdu_start = 0;
7629 	htt_tlv_filter.packet = 0;
7630 	htt_tlv_filter.msdu_end = 0;
7631 	htt_tlv_filter.mpdu_end = 0;
7632 	htt_tlv_filter.attention = 0;
7633 	htt_tlv_filter.ppdu_start = 1;
7634 	htt_tlv_filter.ppdu_end = 1;
7635 	htt_tlv_filter.ppdu_end_user_stats = 1;
7636 	htt_tlv_filter.ppdu_end_user_stats_ext = 1;
7637 	htt_tlv_filter.ppdu_end_status_done = 1;
7638 	htt_tlv_filter.enable_fp = 1;
7639 	htt_tlv_filter.enable_md = 0;
7640 	if (pdev->neighbour_peers_added &&
7641 	    pdev->soc->hw_nac_monitor_support) {
7642 		htt_tlv_filter.enable_md = 1;
7643 		htt_tlv_filter.packet_header = 1;
7644 	}
7645 	if (pdev->mcopy_mode) {
7646 		htt_tlv_filter.packet_header = 1;
7647 		htt_tlv_filter.enable_mo = 1;
7648 	}
7649 	htt_tlv_filter.fp_mgmt_filter = FILTER_MGMT_ALL;
7650 	htt_tlv_filter.fp_ctrl_filter = FILTER_CTRL_ALL;
7651 	htt_tlv_filter.fp_data_filter = FILTER_DATA_ALL;
7652 	htt_tlv_filter.mo_mgmt_filter = FILTER_MGMT_ALL;
7653 	htt_tlv_filter.mo_ctrl_filter = FILTER_CTRL_ALL;
7654 	htt_tlv_filter.mo_data_filter = FILTER_DATA_ALL;
7655 	if (pdev->neighbour_peers_added &&
7656 	    pdev->soc->hw_nac_monitor_support)
7657 		htt_tlv_filter.md_data_filter = FILTER_DATA_ALL;
7658 
7659 	htt_tlv_filter.offset_valid = false;
7660 
7661 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
7662 		int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id,
7663 						pdev->pdev_id);
7664 
7665 		htt_h2t_rx_ring_cfg(pdev->soc->htt_handle, mac_for_pdev,
7666 			pdev->rxdma_mon_status_ring[mac_id].hal_srng,
7667 			RXDMA_MONITOR_STATUS, RX_BUFFER_SIZE, &htt_tlv_filter);
7668 	}
7669 }
7670 
7671 /*
7672  * is_ppdu_txrx_capture_enabled() - API to check whether any of the
7673  *                              pktlog ppdu stats, tx sniffer or mcopy
 *                              capture modes is enabled.
7674  * @pdev: dp pdev handle.
7675  *
7676  * Return: true if none of the capture modes is enabled, false otherwise
7677  */
7678 static inline bool is_ppdu_txrx_capture_enabled(struct dp_pdev *pdev)
7679 {
7680 	return !pdev->pktlog_ppdu_stats && !pdev->tx_sniffer_enable &&
7681 	       !pdev->mcopy_mode;
7685 }
7686 
7687 /*
7688  * dp_set_bpr_enable() - API to enable/disable bpr feature
7689  * @pdev: DP_PDEV handle.
7690  * @val: Provided value.
7691  *
7692  * Return: QDF_STATUS
7693  */
7694 static QDF_STATUS
7695 dp_set_bpr_enable(struct dp_pdev *pdev, int val)
7696 {
7697 	switch (val) {
7698 	case CDP_BPR_DISABLE:
7699 		pdev->bpr_enable = CDP_BPR_DISABLE;
7700 		if (!pdev->pktlog_ppdu_stats && !pdev->enhanced_stats_en &&
7701 		    !pdev->tx_sniffer_enable && !pdev->mcopy_mode) {
7702 			dp_h2t_cfg_stats_msg_send(pdev, 0, pdev->pdev_id);
7703 		} else if (pdev->enhanced_stats_en &&
7704 			   !pdev->tx_sniffer_enable && !pdev->mcopy_mode &&
7705 			   !pdev->pktlog_ppdu_stats) {
7706 			dp_h2t_cfg_stats_msg_send(pdev,
7707 						  DP_PPDU_STATS_CFG_ENH_STATS,
7708 						  pdev->pdev_id);
7709 		}
7710 		break;
7711 	case CDP_BPR_ENABLE:
7712 		pdev->bpr_enable = CDP_BPR_ENABLE;
7713 		if (!pdev->enhanced_stats_en && !pdev->tx_sniffer_enable &&
7714 		    !pdev->mcopy_mode && !pdev->pktlog_ppdu_stats) {
7715 			dp_h2t_cfg_stats_msg_send(pdev,
7716 						  DP_PPDU_STATS_CFG_BPR,
7717 						  pdev->pdev_id);
7718 		} else if (pdev->enhanced_stats_en &&
7719 			   !pdev->tx_sniffer_enable && !pdev->mcopy_mode &&
7720 			   !pdev->pktlog_ppdu_stats) {
7721 			dp_h2t_cfg_stats_msg_send(pdev,
7722 						  DP_PPDU_STATS_CFG_BPR_ENH,
7723 						  pdev->pdev_id);
7724 		} else if (pdev->pktlog_ppdu_stats) {
7725 			dp_h2t_cfg_stats_msg_send(pdev,
7726 						  DP_PPDU_STATS_CFG_BPR_PKTLOG,
7727 						  pdev->pdev_id);
7728 		}
7729 		break;
7730 	default:
7731 		break;
7732 	}
7733 
7734 	return QDF_STATUS_SUCCESS;
7735 }
7736 
7737 /*
7738  * dp_pdev_tid_stats_ingress_inc
7739  * @pdev: pdev handle
7740  * @val: increase in value
7741  *
7742  * Return: void
7743  */
7744 static void
7745 dp_pdev_tid_stats_ingress_inc(struct dp_pdev *pdev, uint32_t val)
7746 {
7747 	pdev->stats.tid_stats.ingress_stack += val;
7748 }
7749 
7750 /*
7751  * dp_pdev_tid_stats_osif_drop
7752  * @pdev: pdev handle
7753  * @val: increase in value
7754  *
7755  * Return: void
7756  */
7757 static void
7758 dp_pdev_tid_stats_osif_drop(struct dp_pdev *pdev, uint32_t val)
7759 {
7760 	pdev->stats.tid_stats.osif_drop += val;
7761 }
7762 
7763 
7764 /*
7765  * dp_config_debug_sniffer() - API to enable/disable debug sniffer
7766  * @pdev_handle: DP_PDEV handle
7767  * @val: user provided value
7768  *
7769  * Return: QDF_STATUS
7770  */
7771 static QDF_STATUS
7772 dp_config_debug_sniffer(struct cdp_pdev *pdev_handle, int val)
7773 {
7774 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
7775 	QDF_STATUS status = QDF_STATUS_SUCCESS;
7776 
7777 	if (pdev->mcopy_mode)
7778 		dp_reset_monitor_mode(pdev_handle);
7779 
7780 	switch (val) {
7781 	case 0:
7782 		pdev->tx_sniffer_enable = 0;
7783 
7784 		pdev->monitor_configured = false;
7785 
7786 		if (!pdev->pktlog_ppdu_stats && !pdev->enhanced_stats_en &&
7787 		    !pdev->bpr_enable) {
7788 			dp_h2t_cfg_stats_msg_send(pdev, 0, pdev->pdev_id);
7789 			dp_ppdu_ring_reset(pdev);
7790 		} else if (pdev->enhanced_stats_en && !pdev->bpr_enable) {
7791 			dp_h2t_cfg_stats_msg_send(pdev,
7792 				DP_PPDU_STATS_CFG_ENH_STATS, pdev->pdev_id);
7793 		} else if (!pdev->enhanced_stats_en && pdev->bpr_enable) {
7794 			dp_h2t_cfg_stats_msg_send(pdev,
7795 						  DP_PPDU_STATS_CFG_BPR_ENH,
7796 						  pdev->pdev_id);
7797 		} else {
7798 			dp_h2t_cfg_stats_msg_send(pdev,
7799 						  DP_PPDU_STATS_CFG_BPR,
7800 						  pdev->pdev_id);
7801 		}
7802 		break;
7803 
7804 	case 1:
7805 		pdev->tx_sniffer_enable = 1;
7806 		pdev->monitor_configured = false;
7807 
7808 		if (!pdev->pktlog_ppdu_stats)
7809 			dp_h2t_cfg_stats_msg_send(pdev,
7810 				DP_PPDU_STATS_CFG_SNIFFER, pdev->pdev_id);
7811 		break;
7812 	case 2:
7813 		if (pdev->monitor_vdev) {
7814 			status = QDF_STATUS_E_RESOURCES;
7815 			break;
7816 		}
7817 
7818 		pdev->mcopy_mode = 1;
7819 		dp_pdev_configure_monitor_rings(pdev);
7820 		pdev->monitor_configured = true;
7821 		pdev->tx_sniffer_enable = 0;
7822 
7823 		if (!pdev->pktlog_ppdu_stats)
7824 			dp_h2t_cfg_stats_msg_send(pdev,
7825 				DP_PPDU_STATS_CFG_SNIFFER, pdev->pdev_id);
7826 		break;
7827 
7828 	default:
7829 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
7830 			"Invalid value");
7831 		break;
7832 	}
7833 	return status;
7834 }
7835 
7836 /*
7837  * dp_enable_enhanced_stats() - API to enable enhanced statistics
7838  * @soc_handle: DP_SOC handle
7839  * @pdev_id: id of DP_PDEV handle
7840  *
7841  * Return: QDF_STATUS
7842  */
7843 static QDF_STATUS
7844 dp_enable_enhanced_stats(struct cdp_soc_t *soc, uint8_t pdev_id)
7845 {
7846 	struct dp_pdev *pdev =
7847 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
7848 						   pdev_id);
7849 
7850 	if (!pdev)
7851 		return QDF_STATUS_E_FAILURE;
7852 
7853 	if (pdev->enhanced_stats_en == 0)
7854 		dp_cal_client_timer_start(pdev->cal_client_ctx);
7855 
7856 	pdev->enhanced_stats_en = 1;
7857 
7858 	if (!pdev->mcopy_mode && !pdev->neighbour_peers_added &&
7859 	    !pdev->monitor_vdev)
7860 		dp_ppdu_ring_cfg(pdev);
7861 
7862 	if (is_ppdu_txrx_capture_enabled(pdev) && !pdev->bpr_enable) {
7863 		dp_h2t_cfg_stats_msg_send(pdev, DP_PPDU_STATS_CFG_ENH_STATS,
					  pdev->pdev_id);
7864 	} else if (is_ppdu_txrx_capture_enabled(pdev) && pdev->bpr_enable) {
7865 		dp_h2t_cfg_stats_msg_send(pdev,
7866 					  DP_PPDU_STATS_CFG_BPR_ENH,
7867 					  pdev->pdev_id);
7868 	}
7869 
7870 	return QDF_STATUS_SUCCESS;
7871 }
7872 
7873 /*
7874  * dp_disable_enhanced_stats() - API to disable enhanced statistics
7875  * @soc: the soc handle
7876  * @pdev_id: pdev_id of pdev
7877  *
7878  * Return: QDF_STATUS
7879  */
7880 static QDF_STATUS
7881 dp_disable_enhanced_stats(struct cdp_soc_t *soc, uint8_t pdev_id)
7882 {
7883 	struct dp_pdev *pdev =
7884 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
7885 						   pdev_id);
7886 
7887 	if (!pdev)
7888 		return QDF_STATUS_E_FAILURE;
7889 
7890 	if (pdev->enhanced_stats_en == 1)
7891 		dp_cal_client_timer_stop(pdev->cal_client_ctx);
7892 
7893 	pdev->enhanced_stats_en = 0;
7894 
7895 	if (is_ppdu_txrx_capture_enabled(pdev) && !pdev->bpr_enable) {
7896 		dp_h2t_cfg_stats_msg_send(pdev, 0, pdev->pdev_id);
7897 	} else if (is_ppdu_txrx_capture_enabled(pdev) && pdev->bpr_enable) {
7898 		dp_h2t_cfg_stats_msg_send(pdev,
7899 					  DP_PPDU_STATS_CFG_BPR,
7900 					  pdev->pdev_id);
7901 	}
7902 
7903 	if (!pdev->mcopy_mode && !pdev->neighbour_peers_added &&
7904 	    !pdev->monitor_vdev)
7905 		dp_ppdu_ring_reset(pdev);
7906 
7907 	return QDF_STATUS_SUCCESS;
7908 }
7909 
7910 /*
7911  * dp_get_fw_peer_stats() - function to request peer stats from firmware
7912  * @soc: soc handle
7913  * @pdev_id: id of the pdev handle
7914  * @mac_addr: mac address of the peer
7915  * @cap: Type of htt stats requested
7916  * @is_wait: if set, wait on completion from firmware response
7917  *
7918  * Currently supporting only MAC-ID based requests:
7919  *	1: HTT_PEER_STATS_REQ_MODE_NO_QUERY
7920  *	2: HTT_PEER_STATS_REQ_MODE_QUERY_TQM
7921  *	3: HTT_PEER_STATS_REQ_MODE_FLUSH_TQM
7922  *
7923  * Return: QDF_STATUS
7924  */
7925 static QDF_STATUS
7926 dp_get_fw_peer_stats(struct cdp_soc_t *soc, uint8_t pdev_id,
7927 		     uint8_t *mac_addr,
7928 		     uint32_t cap, uint32_t is_wait)
7929 {
7930 	int i;
7931 	uint32_t config_param0 = 0;
7932 	uint32_t config_param1 = 0;
7933 	uint32_t config_param2 = 0;
7934 	uint32_t config_param3 = 0;
7935 	struct dp_pdev *pdev =
7936 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
7937 						   pdev_id);
7938 
7939 	if (!pdev)
7940 		return QDF_STATUS_E_FAILURE;
7941 
7942 	HTT_DBG_EXT_STATS_PEER_INFO_IS_MAC_ADDR_SET(config_param0, 1);
7943 	config_param0 |= (1 << (cap + 1));
7944 
7945 	for (i = 0; i < HTT_PEER_STATS_MAX_TLV; i++)
7946 		config_param1 |= (1 << i);
7948 
7949 	config_param2 |= (mac_addr[0] & 0x000000ff);
7950 	config_param2 |= ((mac_addr[1] << 8) & 0x0000ff00);
7951 	config_param2 |= ((mac_addr[2] << 16) & 0x00ff0000);
7952 	config_param2 |= ((mac_addr[3] << 24) & 0xff000000);
7953 
7954 	config_param3 |= (mac_addr[4] & 0x000000ff);
7955 	config_param3 |= ((mac_addr[5] << 8) & 0x0000ff00);
7956 
7957 	if (is_wait) {
7958 		qdf_event_reset(&pdev->fw_peer_stats_event);
7959 		dp_h2t_ext_stats_msg_send(pdev, HTT_DBG_EXT_STATS_PEER_INFO,
7960 					  config_param0, config_param1,
7961 					  config_param2, config_param3,
7962 					  0, 1, 0);
7963 		qdf_wait_single_event(&pdev->fw_peer_stats_event,
7964 				      DP_FW_PEER_STATS_CMP_TIMEOUT_MSEC);
7965 	} else {
7966 		dp_h2t_ext_stats_msg_send(pdev, HTT_DBG_EXT_STATS_PEER_INFO,
7967 					  config_param0, config_param1,
7968 					  config_param2, config_param3,
7969 					  0, 0, 0);
7970 	}
7971 
7972 	return QDF_STATUS_SUCCESS;
7973 
7974 }
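
/*
 * Worked example of the MAC packing above (the address is hypothetical):
 * for peer MAC 00:11:22:33:44:55, mac_addr[0..5] = {0x00, 0x11, 0x22,
 * 0x33, 0x44, 0x55}, so the shifts above yield config_param2 = 0x33221100
 * and config_param3 = 0x00005544 before the words are handed to
 * dp_h2t_ext_stats_msg_send().
 */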
7975 
7976 /* This struct definition will be removed from here
7977  * once it gets added to FW headers
 */
7978 struct httstats_cmd_req {
7979 	uint32_t config_param0;
7980 	uint32_t config_param1;
7981 	uint32_t config_param2;
7982 	uint32_t config_param3;
7983 	int cookie;
7984 	u_int8_t stats_id;
7985 };
7986 
7987 /*
7988  * dp_get_htt_stats: function to process the httstats request
7989  * @soc: DP soc handle
7990  * @pdev_id: id of pdev handle
7991  * @data: pointer to request data
7992  * @data_len: length for request data
7993  *
7994  * return: QDF_STATUS
7995  */
7996 static QDF_STATUS
7997 dp_get_htt_stats(struct cdp_soc_t *soc, uint8_t pdev_id, void *data,
7998 		 uint32_t data_len)
7999 {
8000 	struct httstats_cmd_req *req = (struct httstats_cmd_req *)data;
8001 	struct dp_pdev *pdev =
8002 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
8003 						   pdev_id);
8004 
8005 	if (!pdev)
8006 		return QDF_STATUS_E_FAILURE;
8007 
8008 	QDF_ASSERT(data_len == sizeof(struct httstats_cmd_req));
8009 	dp_h2t_ext_stats_msg_send(pdev, req->stats_id,
8010 				req->config_param0, req->config_param1,
8011 				req->config_param2, req->config_param3,
8012 				req->cookie, 0, 0);
8013 
8014 	return QDF_STATUS_SUCCESS;
8015 }
8016 
8017 /*
8018  * dp_set_pdev_param: function to set parameters in pdev
8019  * @pdev_handle: DP pdev handle
8020  * @param: parameter type to be set
8021  * @val: value of parameter to be set
8022  *
8023  * Return: QDF_STATUS
8024  */
8025 static QDF_STATUS dp_set_pdev_param(struct cdp_pdev *pdev_handle,
8026 				    enum cdp_pdev_param_type param,
8027 				    uint32_t val)
8028 {
8029 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
8030 	switch (param) {
8031 	case CDP_CONFIG_DEBUG_SNIFFER:
8032 		return dp_config_debug_sniffer(pdev_handle, val);
8033 	case CDP_CONFIG_BPR_ENABLE:
8034 		return dp_set_bpr_enable(pdev, val);
8035 	case CDP_CONFIG_PRIMARY_RADIO:
8036 		pdev->is_primary = val;
8037 		break;
8038 	case CDP_CONFIG_CAPTURE_LATENCY:
8039 		if (val == 1)
8040 			pdev->latency_capture_enable = true;
8041 		else
8042 			pdev->latency_capture_enable = false;
8043 		break;
8044 	case CDP_INGRESS_STATS:
8045 		dp_pdev_tid_stats_ingress_inc(pdev, val);
8046 		break;
8047 	case CDP_OSIF_DROP:
8048 		dp_pdev_tid_stats_osif_drop(pdev, val);
8049 		break;
8050 	case CDP_CONFIG_ENH_RX_CAPTURE:
8051 		return dp_config_enh_rx_capture(pdev_handle, val);
8052 	case CDP_CONFIG_TX_CAPTURE:
8053 		return dp_config_enh_tx_capture(pdev_handle, val);
8054 	default:
8055 		return QDF_STATUS_E_INVAL;
8056 	}
8057 	return QDF_STATUS_SUCCESS;
8058 }
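
/*
 * Usage sketch for dp_set_pdev_param() (illustrative only; the pdev
 * handle is assumed to come from the caller):
 *
 *	struct cdp_pdev *pdev_handle = ...;
 *
 *	if (dp_set_pdev_param(pdev_handle, CDP_CONFIG_CAPTURE_LATENCY, 1) !=
 *	    QDF_STATUS_SUCCESS)
 *		dp_err("failed to enable latency capture");
 */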
8059 
8060 /*
8061  * dp_calculate_delay_stats: function to get rx delay stats
8062  * @vdev_handle: DP vdev handle
8063  * @nbuf: skb
8064  *
8065  * Return: void
8066  */
8067 static void dp_calculate_delay_stats(struct cdp_vdev *vdev_handle,
8068 				     qdf_nbuf_t nbuf)
8069 {
8070 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
8071 
8072 	dp_rx_compute_delay(vdev, nbuf);
8073 }
8074 
8075 /*
8076  * dp_get_vdev_param: function to get parameters from vdev
8077  * @param: parameter type to get value
8078  *
8079  * return: void
8080  */
8081 static uint32_t dp_get_vdev_param(struct cdp_vdev *vdev_handle,
8082 				  enum cdp_vdev_param_type param)
8083 {
8084 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
8085 	uint32_t val;
8086 
8087 	switch (param) {
8088 	case CDP_ENABLE_WDS:
8089 		val = vdev->wds_enabled;
8090 		break;
8091 	case CDP_ENABLE_MEC:
8092 		val = vdev->mec_enabled;
8093 		break;
8094 	case CDP_ENABLE_DA_WAR:
8095 		val = vdev->pdev->soc->da_war_enabled;
8096 		break;
8097 	default:
8098 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
8099 			  "param value %d is wrong\n",
8100 			  param);
8101 		val = -1;
8102 		break;
8103 	}
8104 
8105 	return val;
8106 }
8107 
8108 /*
8109  * dp_set_vdev_param: function to set parameters in vdev
8110  * @param: parameter type to be set
8111  * @val: value of parameter to be set
8112  *
8113  * return: void
8114  */
8115 static void dp_set_vdev_param(struct cdp_vdev *vdev_handle,
8116 		enum cdp_vdev_param_type param, uint32_t val)
8117 {
8118 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
8119 	switch (param) {
8120 	case CDP_ENABLE_WDS:
8121 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
8122 			  "wds_enable %d for vdev(%pK) id(%d)\n",
8123 			  val, vdev, vdev->vdev_id);
8124 		vdev->wds_enabled = val;
8125 		break;
8126 	case CDP_ENABLE_MEC:
8127 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
8128 			  "mec_enable %d for vdev(%pK) id(%d)\n",
8129 			  val, vdev, vdev->vdev_id);
8130 		vdev->mec_enabled = val;
8131 		break;
8132 	case CDP_ENABLE_DA_WAR:
8133 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
8134 			  "da_war_enable %d for vdev(%pK) id(%d)\n",
8135 			  val, vdev, vdev->vdev_id);
8136 		vdev->pdev->soc->da_war_enabled = val;
8137 		dp_wds_flush_ast_table_wifi3(((struct cdp_soc_t *)
8138 					     vdev->pdev->soc));
8139 		break;
8140 	case CDP_ENABLE_NAWDS:
8141 		vdev->nawds_enabled = val;
8142 		break;
8143 	case CDP_ENABLE_MCAST_EN:
8144 		vdev->mcast_enhancement_en = val;
8145 		break;
8146 	case CDP_ENABLE_PROXYSTA:
8147 		vdev->proxysta_vdev = val;
8148 		break;
8149 	case CDP_UPDATE_TDLS_FLAGS:
8150 		vdev->tdls_link_connected = val;
8151 		break;
8152 	case CDP_CFG_WDS_AGING_TIMER:
8153 		if (val == 0)
8154 			qdf_timer_stop(&vdev->pdev->soc->ast_aging_timer);
8155 		else if (val != vdev->wds_aging_timer_val)
8156 			qdf_timer_mod(&vdev->pdev->soc->ast_aging_timer, val);
8157 
8158 		vdev->wds_aging_timer_val = val;
8159 		break;
8160 	case CDP_ENABLE_AP_BRIDGE:
8161 		if (wlan_op_mode_sta != vdev->opmode)
8162 			vdev->ap_bridge_enabled = val;
8163 		else
8164 			vdev->ap_bridge_enabled = false;
8165 		break;
8166 	case CDP_ENABLE_CIPHER:
8167 		vdev->sec_type = val;
8168 		break;
8169 	case CDP_ENABLE_QWRAP_ISOLATION:
8170 		vdev->isolation_vdev = val;
8171 		break;
8172 	case CDP_UPDATE_MULTIPASS:
8173 		vdev->multipass_en = val;
8174 		break;
8175 	default:
8176 		break;
8177 	}
8178 
8179 	dp_tx_vdev_update_search_flags(vdev);
8180 }
8181 
8182 /**
8183  * dp_peer_set_nawds: set nawds bit in peer
8184  * @peer_handle: pointer to peer
8185  * @value: enable/disable nawds
8186  *
8187  * return: void
8188  */
8189 static void dp_peer_set_nawds(struct cdp_peer *peer_handle, uint8_t value)
8190 {
8191 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
8192 	peer->nawds_enabled = value;
8193 }
8194 
8195 /**
8196  * dp_peer_set_tx_capture_enabled: Set tx_cap_enabled bit in peer
8197  * @peer_handle: Peer handle
8198  * @value: Enable/disable setting for tx_cap_enabled
8199  *
8200  * Return: None
8201  */
8202 static void
8203 dp_peer_set_tx_capture_enabled(struct cdp_peer *peer_handle, bool value)
8204 {
8205 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
8206 
8207 	peer->tx_cap_enabled = value;
8208 }
8209 
8210 /**
8211  * dp_peer_set_rx_capture_enabled: Set rx_cap_enabled bit in peer
8212  * @peer_handle: Peer handle
8213  * @value: Enable/disable setting for rx_cap_enabled
8214  *
8215  * Return: None
8216  */
8217 static void
8218 dp_peer_set_rx_capture_enabled(struct cdp_peer *peer_handle, bool value)
8219 {
8220 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
8221 
8222 	peer->rx_cap_enabled = value;
8223 }
8224 
8225 /**
8226  * dp_peer_update_pkt_capture_params: Set Rx & Tx Capture flags for a peer
 * @pdev: DP PDEV handle
8227  * @is_rx_pkt_cap_enable: enable/disable Rx packet capture in monitor mode
8228  * @is_tx_pkt_cap_enable: enable/disable Tx packet capture in monitor mode
8229  * @peer_mac: MAC address for which the above need to be enabled/disabled
8230  *
8231  * Return: QDF_STATUS_SUCCESS if the capture flags are set for the peer,
 *	   QDF_STATUS_E_FAILURE otherwise
8232  */
8233 QDF_STATUS
8234 dp_peer_update_pkt_capture_params(struct cdp_pdev *pdev,
8235 				  bool is_rx_pkt_cap_enable,
8236 				  bool is_tx_pkt_cap_enable,
8237 				  uint8_t *peer_mac)
8239 {
8240 	struct dp_peer *peer;
8241 
8242 	peer = (struct dp_peer *)dp_find_peer_by_addr(pdev,
8243 			peer_mac);
8244 
8245 	if (!peer) {
8246 		dp_err("Invalid Peer");
8247 		return QDF_STATUS_E_FAILURE;
8248 	}
8249 
8250 	dp_peer_set_rx_capture_enabled((struct cdp_peer *)peer,
8251 				       is_rx_pkt_cap_enable);
8252 	dp_peer_set_tx_capture_enabled((struct cdp_peer *)peer,
8253 				       is_tx_pkt_cap_enable);
8254 	return QDF_STATUS_SUCCESS;
8255 }
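
/*
 * Usage sketch (illustrative; the MAC address is hypothetical): enable Rx
 * capture and disable Tx capture for a single peer in monitor mode.
 *
 *	uint8_t peer_mac[QDF_MAC_ADDR_SIZE] = {0x00, 0x11, 0x22,
 *					       0x33, 0x44, 0x55};
 *
 *	dp_peer_update_pkt_capture_params(pdev, true, false, peer_mac);
 */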
8256 
8257 /*
8258  * dp_set_vdev_dscp_tid_map_wifi3(): Update Map ID selected for particular vdev
8259  * @soc: DP_SOC handle
8260  * @vdev_id: id of DP_VDEV handle
8261  * @map_id:ID of map that needs to be updated
8262  *
8263  * Return: QDF_STATUS
8264  */
8265 static QDF_STATUS dp_set_vdev_dscp_tid_map_wifi3(ol_txrx_soc_handle soc,
8266 						 uint8_t vdev_id,
8267 						 uint8_t map_id)
8268 {
8269 	struct dp_vdev *vdev =
8270 		dp_get_vdev_from_soc_vdev_id_wifi3((struct dp_soc *)soc,
8271 						   vdev_id);
8272 	if (vdev) {
8273 		vdev->dscp_tid_map_id = map_id;
8274 		return QDF_STATUS_SUCCESS;
8275 	}
8276 
8277 	return QDF_STATUS_E_FAILURE;
8278 }
8279 
8280 #ifdef DP_RATETABLE_SUPPORT
8281 static int dp_txrx_get_ratekbps(int preamb, int mcs,
8282 				int htflag, int gintval)
8283 {
8284 	uint32_t rix;
8285 	uint16_t ratecode;
8286 
8287 	return dp_getrateindex((uint32_t)gintval, (uint16_t)mcs, 1,
8288 			       (uint8_t)preamb, 1, &rix, &ratecode);
8289 }
8290 #else
8291 static int dp_txrx_get_ratekbps(int preamb, int mcs,
8292 				int htflag, int gintval)
8293 {
8294 	return 0;
8295 }
8296 #endif
8297 
8298 /* dp_txrx_get_pdev_stats - Returns cdp_pdev_stats
8299  * @soc: DP soc handle
8300  * @pdev_id: id of DP pdev handle
8301  * @pdev_stats: buffer to copy to
8302  *
8303  * return : status success/failure
8304  */
8305 static QDF_STATUS
8306 dp_txrx_get_pdev_stats(struct cdp_soc_t *soc, uint8_t pdev_id,
8307 		       struct cdp_pdev_stats *pdev_stats)
8308 {
8309 	struct dp_pdev *pdev =
8310 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
8311 						   pdev_id);
8312 	if (!pdev)
8313 		return QDF_STATUS_E_FAILURE;
8314 
8315 	dp_aggregate_pdev_stats(pdev);
8316 
8317 	qdf_mem_copy(pdev_stats, &pdev->stats, sizeof(struct cdp_pdev_stats));
8318 	return QDF_STATUS_SUCCESS;
8319 }
8320 
8321 /* dp_txrx_update_vdev_me_stats(): Update vdev ME stats sent from CDP
8322  * @vdev_handle: DP vdev handle
8323  * @buf: buffer containing specific stats structure
8324  *
8325  * Returns: void
8326  */
8327 static void dp_txrx_update_vdev_me_stats(struct dp_vdev *vdev,
8328 					 void *buf)
8329 {
8330 	struct cdp_tx_ingress_stats *host_stats = NULL;
8331 
8332 	if (!buf) {
8333 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
8334 			  "Invalid host stats buf");
8335 		return;
8336 	}
8337 	host_stats = (struct cdp_tx_ingress_stats *)buf;
8338 
8339 	DP_STATS_INC_PKT(vdev, tx_i.mcast_en.mcast_pkt,
8340 			 host_stats->mcast_en.mcast_pkt.num,
8341 			 host_stats->mcast_en.mcast_pkt.bytes);
8342 	DP_STATS_INC(vdev, tx_i.mcast_en.dropped_map_error,
8343 		     host_stats->mcast_en.dropped_map_error);
8344 	DP_STATS_INC(vdev, tx_i.mcast_en.dropped_self_mac,
8345 		     host_stats->mcast_en.dropped_self_mac);
8346 	DP_STATS_INC(vdev, tx_i.mcast_en.dropped_send_fail,
8347 		     host_stats->mcast_en.dropped_send_fail);
8348 	DP_STATS_INC(vdev, tx_i.mcast_en.ucast,
8349 		     host_stats->mcast_en.ucast);
8350 	DP_STATS_INC(vdev, tx_i.mcast_en.fail_seg_alloc,
8351 		     host_stats->mcast_en.fail_seg_alloc);
8352 	DP_STATS_INC(vdev, tx_i.mcast_en.clone_fail,
8353 		     host_stats->mcast_en.clone_fail);
8354 }
8355 
8356 /* dp_txrx_update_vdev_host_stats(): Update stats sent through CDP
8357  * @soc: DP soc handle
8358  * @vdev_id: id of DP vdev handle
8359  * @buf: buffer containing specific stats structure
8360  * @stats_id: stats type
8361  *
8362  * Returns: QDF_STATUS
8363  */
8364 static QDF_STATUS dp_txrx_update_vdev_host_stats(struct cdp_soc_t *soc,
8365 						 uint8_t vdev_id,
8366 						 void *buf,
8367 						 uint16_t stats_id)
8368 {
8369 	struct dp_vdev *vdev =
8370 		dp_get_vdev_from_soc_vdev_id_wifi3((struct dp_soc *)soc,
8371 						   vdev_id);
8372 	if (!vdev) {
8373 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
8374 			  "Invalid vdev handle");
8375 		return QDF_STATUS_E_FAILURE;
8376 	}
8377 	switch (stats_id) {
8378 	case DP_VDEV_STATS_PKT_CNT_ONLY:
8379 		break;
8380 	case DP_VDEV_STATS_TX_ME:
8381 		dp_txrx_update_vdev_me_stats(vdev, buf);
8382 		break;
8383 	default:
8384 		qdf_info("Invalid stats_id %d", stats_id);
8385 		break;
8386 	}
8387 
8388 	return QDF_STATUS_SUCCESS;
8389 }
8390 
8391 /* dp_txrx_get_peer_stats - will return cdp_peer_stats
8392  * @soc: soc handle
8393  * @vdev_id: id of vdev handle
8394  * @peer_mac: mac of DP_PEER handle
8395  * @peer_stats: buffer to copy to
8396  * return : status success/failure
8397  */
8398 static QDF_STATUS
8399 dp_txrx_get_peer_stats(struct cdp_soc_t *soc, uint8_t vdev_id,
8400 		       uint8_t *peer_mac, struct cdp_peer_stats *peer_stats)
8401 {
8402 	QDF_STATUS status = QDF_STATUS_SUCCESS;
8403 	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)soc,
8404 						       peer_mac, 0, vdev_id);
8405 
8406 	if (!peer || peer->delete_in_progress) {
8407 		status = QDF_STATUS_E_FAILURE;
8408 		goto fail;
8409 	}
8410 	qdf_mem_copy(peer_stats, &peer->stats,
8411 		     sizeof(struct cdp_peer_stats));
8412 
8413 fail:
8414 	if (peer)
8415 		dp_peer_unref_delete(peer);
8416 
8417 	return status;
8418 }
8419 
8420 /* dp_txrx_reset_peer_stats - reset cdp_peer_stats for particular peer
8421  * @soc: soc handle
8422  * @vdev_id: id of vdev handle
8423  * @peer_mac: mac of DP_PEER handle
8424  *
8425  * return : QDF_STATUS
8426  */
8427 static QDF_STATUS
8428 dp_txrx_reset_peer_stats(struct cdp_soc_t *soc, uint8_t vdev_id,
8429 			 uint8_t *peer_mac)
8430 {
8431 	QDF_STATUS status = QDF_STATUS_SUCCESS;
8432 	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)soc,
8433 						       peer_mac, 0, vdev_id);
8434 
8435 	if (!peer || peer->delete_in_progress) {
8436 		status = QDF_STATUS_E_FAILURE;
8437 		goto fail;
8438 	}
8439 
8440 	qdf_mem_zero(&peer->stats, sizeof(peer->stats));
8441 
8442 fail:
8443 	if (peer)
8444 		dp_peer_unref_delete(peer);
8445 
8446 	return status;
8447 }
8448 
8449 /* dp_txrx_get_vdev_stats - Update buffer with cdp_vdev_stats
8450  * @soc: DP soc handle
 * @vdev_id: id of DP vdev handle
8451  * @buf: buffer for vdev stats
 * @is_aggregate: if set, aggregate the stats of all peers on the vdev
8452  *
8453  * return: int, 0 on success and 1 on failure
8454  */
8455 static int dp_txrx_get_vdev_stats(struct cdp_soc_t *soc, uint8_t vdev_id,
8456 				   void *buf, bool is_aggregate)
8457 {
8458 	struct cdp_vdev_stats *vdev_stats;
8459 	struct dp_pdev *pdev;
8460 	struct dp_vdev *vdev =
8461 		dp_get_vdev_from_soc_vdev_id_wifi3((struct dp_soc *)soc,
8462 						   vdev_id);
8463 
8464 	if (!vdev)
8465 		return 1;
8466 
8467 	pdev = vdev->pdev;
8468 	if (!pdev)
8469 		return 1;
8470 
8471 	vdev_stats = (struct cdp_vdev_stats *)buf;
8472 
8473 	if (is_aggregate) {
8474 		qdf_spin_lock_bh(&((struct dp_soc *)soc)->peer_ref_mutex);
8475 		dp_aggregate_vdev_stats(vdev, buf);
8476 		qdf_spin_unlock_bh(&((struct dp_soc *)soc)->peer_ref_mutex);
8477 	} else {
8478 		qdf_mem_copy(vdev_stats, &vdev->stats, sizeof(vdev->stats));
8479 	}
8480 
8481 	return 0;
8482 }
8483 
8484 /*
8485  * dp_get_total_per(): get total packet error rate (PER)
8486  * @soc: DP soc handle
8487  * @pdev_id: id of DP_PDEV handle
8488  *
8489  * Return: % error rate using retries per packet and success packets
8490  */
8491 static int dp_get_total_per(struct cdp_soc_t *soc, uint8_t pdev_id)
8492 {
8493 	struct dp_pdev *pdev =
8494 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
8495 						   pdev_id);
8496 
8497 	if (!pdev)
8498 		return 0;
8499 
8500 	dp_aggregate_pdev_stats(pdev);
8501 	if ((pdev->stats.tx.tx_success.num + pdev->stats.tx.retries) == 0)
8502 		return 0;
8503 	return ((pdev->stats.tx.retries * 100) /
8504 		((pdev->stats.tx.tx_success.num) + (pdev->stats.tx.retries)));
8505 }
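
/*
 * Worked example of the PER formula above: with pdev->stats.tx.retries = 25
 * and pdev->stats.tx.tx_success.num = 75, the function returns
 * (25 * 100) / (75 + 25) = 25, i.e. a 25% error rate.
 */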
8506 
8507 /*
8508  * dp_txrx_stats_publish(): publish pdev stats into a buffer
8509  * @soc: DP soc handle
8510  * @pdev_id: id of DP_PDEV handle
8511  * @buf: to hold pdev_stats
8512  *
8513  * Return: int
8514  */
8515 static int
8516 dp_txrx_stats_publish(struct cdp_soc_t *soc, uint8_t pdev_id,
8517 		      struct cdp_stats_extd *buf)
8518 {
8519 	struct cdp_txrx_stats_req req = {0,};
8520 	struct dp_pdev *pdev =
8521 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
8522 						   pdev_id);
8523 
8524 	if (!pdev)
8525 		return TXRX_STATS_LEVEL_OFF;
8526 
8527 	dp_aggregate_pdev_stats(pdev);
8528 	req.stats = (enum cdp_stats)HTT_DBG_EXT_STATS_PDEV_TX;
8529 	req.cookie_val = 1;
8530 	dp_h2t_ext_stats_msg_send(pdev, req.stats, req.param0,
8531 				req.param1, req.param2, req.param3, 0,
8532 				req.cookie_val, 0);
8533 
8534 	msleep(DP_MAX_SLEEP_TIME);
8535 
8536 	req.stats = (enum cdp_stats)HTT_DBG_EXT_STATS_PDEV_RX;
8537 	req.cookie_val = 1;
8538 	dp_h2t_ext_stats_msg_send(pdev, req.stats, req.param0,
8539 				req.param1, req.param2, req.param3, 0,
8540 				req.cookie_val, 0);
8541 
8542 	msleep(DP_MAX_SLEEP_TIME);
8543 	qdf_mem_copy(buf, &pdev->stats, sizeof(struct cdp_stats_extd));
8544 
8545 	return TXRX_STATS_LEVEL;
8546 }
8547 
8548 /**
8549  * dp_set_pdev_dscp_tid_map_wifi3(): update dscp tid map in pdev
8550  * @soc: soc handle
8551  * @pdev_id: id of DP_PDEV handle
8552  * @map_id: ID of map that needs to be updated
8553  * @tos: index value in map
8554  * @tid: tid value passed by the user
8555  *
8556  * Return: QDF_STATUS
8557  */
8558 static QDF_STATUS
8559 dp_set_pdev_dscp_tid_map_wifi3(struct cdp_soc_t *soc_handle,
8560 			       uint8_t pdev_id,
8561 			       uint8_t map_id,
8562 			       uint8_t tos, uint8_t tid)
8563 {
8564 	uint8_t dscp;
8565 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
8566 	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
8567 
8568 	if (!pdev)
8569 		return QDF_STATUS_E_FAILURE;
8570 
8571 	dscp = (tos >> DP_IP_DSCP_SHIFT) & DP_IP_DSCP_MASK;
8572 	pdev->dscp_tid_map[map_id][dscp] = tid;
8573 
8574 	if (map_id < soc->num_hw_dscp_tid_map)
8575 		hal_tx_update_dscp_tid(soc->hal_soc, tid,
8576 				       map_id, dscp);
8577 	else
8578 		return QDF_STATUS_E_FAILURE;
8579 
8580 	return QDF_STATUS_SUCCESS;
8581 }
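
/*
 * Worked example (assuming DP_IP_DSCP_SHIFT is 2 and DP_IP_DSCP_MASK is
 * 0x3f): for tos = 0xb8, the IPv4 TOS byte carrying the EF PHB, dscp is
 * (0xb8 >> 2) & 0x3f = 46, so the call below maps DSCP 46 to TID 6 in
 * map 0 of pdev 0:
 *
 *	dp_set_pdev_dscp_tid_map_wifi3(soc_handle, 0, 0, 0xb8, 6);
 */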
8582 
8583 /**
8584  * dp_hmmc_tid_override_en_wifi3(): Function to enable hmmc tid override.
8585  * @pdev_handle: pdev handle
8586  * @val: hmmc-dscp flag value
8587  *
8588  * Return: void
8589  */
8590 static void dp_hmmc_tid_override_en_wifi3(struct cdp_pdev *pdev_handle,
8591 					  bool val)
8592 {
8593 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
8594 
8595 	pdev->hmmc_tid_override_en = val;
8596 }
8597 
8598 /**
8599  * dp_set_hmmc_tid_val_wifi3(): Function to set hmmc tid value.
8600  * @pdev_handle: pdev handle
8601  * @tid: tid value
8602  *
8603  * Return: void
8604  */
8605 static void dp_set_hmmc_tid_val_wifi3(struct cdp_pdev *pdev_handle,
8606 				      uint8_t tid)
8607 {
8608 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
8609 
8610 	pdev->hmmc_tid = tid;
8611 }
8612 
8613 /**
8614  * dp_fw_stats_process(): Process TxRX FW stats request
8615  * @vdev: DP VDEV handle
8616  * @req: stats request
8617  *
8618  * return: int
8619  */
8620 static int dp_fw_stats_process(struct dp_vdev *vdev,
8621 			       struct cdp_txrx_stats_req *req)
8622 {
8623 	struct dp_pdev *pdev = NULL;
8624 	uint32_t stats = req->stats;
8625 	uint8_t mac_id = req->mac_id;
8626 
8627 	if (!vdev) {
8628 		DP_TRACE(NONE, "VDEV not found");
8629 		return 1;
8630 	}
8631 	pdev = vdev->pdev;
8632 
8633 	/*
8634 	 * For the HTT_DBG_EXT_STATS_RESET command, the FW expects param0
8635 	 * to param3 to be configured according to the rule below:
8636 	 *
8637 	 * PARAM:
8638 	 *   - config_param0 : start_offset (stats type)
8639 	 *   - config_param1 : stats bmask from start offset
8640 	 *   - config_param2 : stats bmask from start offset + 32
8641 	 *   - config_param3 : stats bmask from start offset + 64
8642 	 */
8643 	if (req->stats == CDP_TXRX_STATS_0) {
8644 		req->param0 = HTT_DBG_EXT_STATS_PDEV_TX;
8645 		req->param1 = 0xFFFFFFFF;
8646 		req->param2 = 0xFFFFFFFF;
8647 		req->param3 = 0xFFFFFFFF;
8648 	} else if (req->stats == (uint8_t)HTT_DBG_EXT_STATS_PDEV_TX_MU) {
8649 		req->param0 = HTT_DBG_EXT_STATS_SET_VDEV_MASK(vdev->vdev_id);
8650 	}
8651 
8652 	return dp_h2t_ext_stats_msg_send(pdev, stats, req->param0,
8653 				req->param1, req->param2, req->param3,
8654 				0, 0, mac_id);
8655 }
8656 
8657 /**
8658  * dp_txrx_stats_request - function to map to firmware and host stats
8659  * @soc: soc handle
8660  * @vdev_id: virtual device ID
8661  * @req: stats request
8662  *
8663  * Return: QDF_STATUS
8664  */
8665 static
8666 QDF_STATUS dp_txrx_stats_request(struct cdp_soc_t *soc_handle,
8667 				 uint8_t vdev_id,
8668 				 struct cdp_txrx_stats_req *req)
8669 {
8670 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_handle);
8671 	int host_stats;
8672 	int fw_stats;
8673 	enum cdp_stats stats;
8674 	int num_stats;
8675 	struct dp_vdev *vdev = dp_get_vdev_from_soc_vdev_id_wifi3(soc,
8676 								  vdev_id);
8677 
8678 	if (!vdev || !req) {
8679 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
8680 				"Invalid vdev/req instance");
8681 		return QDF_STATUS_E_INVAL;
8682 	}
8683 
8684 	if (req->mac_id >= WLAN_CFG_MAC_PER_TARGET) {
8685 		dp_err("Invalid mac id request");
8686 		return QDF_STATUS_E_INVAL;
8687 	}
8688 
8689 	stats = req->stats;
8690 	if (stats >= CDP_TXRX_MAX_STATS)
8691 		return QDF_STATUS_E_INVAL;
8692 
8693 	/*
8694 	 * DP_CURR_FW_STATS_AVAIL: number of FW stats currently available;
8695 	 *			it has to be updated if new FW HTT stats are added
8696 	 */
8697 	if (stats > CDP_TXRX_STATS_HTT_MAX)
8698 		stats = stats + DP_CURR_FW_STATS_AVAIL - DP_HTT_DBG_EXT_STATS_MAX;
8699 
8700 	num_stats = QDF_ARRAY_SIZE(dp_stats_mapping_table);
8701 
8702 	if (stats >= num_stats) {
8703 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
8704 			  "%s: Invalid stats option: %d", __func__, stats);
8705 		return QDF_STATUS_E_INVAL;
8706 	}
8707 
8708 	req->stats = stats;
8709 	fw_stats = dp_stats_mapping_table[stats][STATS_FW];
8710 	host_stats = dp_stats_mapping_table[stats][STATS_HOST];
8711 
8712 	dp_info("stats: %u fw_stats_type: %d host_stats: %d",
8713 		stats, fw_stats, host_stats);
8714 
8715 	if (fw_stats != TXRX_FW_STATS_INVALID) {
8716 		/* update request with FW stats type */
8717 		req->stats = fw_stats;
8718 		return dp_fw_stats_process(vdev, req);
8719 	}
8720 
8721 	if ((host_stats != TXRX_HOST_STATS_INVALID) &&
8722 			(host_stats <= TXRX_HOST_STATS_MAX))
8723 		return dp_print_host_stats(vdev, req);
8724 	else
8725 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
8726 				"Wrong Input for TxRx Stats");
8727 
8728 	return QDF_STATUS_SUCCESS;
8729 }
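
/*
 * Example request flow (illustrative sketch; it assumes CDP_TXRX_STATS_23
 * maps to "Host Tx Statistics" as listed in dp_txrx_stats_help()): the
 * stats enum is looked up in dp_stats_mapping_table and, being a host
 * stats type, is routed to dp_print_host_stats() rather than to firmware.
 *
 *	struct cdp_txrx_stats_req req = {0};
 *
 *	req.stats = CDP_TXRX_STATS_23;
 *	req.mac_id = 0;
 *	dp_txrx_stats_request(soc_handle, vdev_id, &req);
 */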
8730 
8731 /*
8732  * dp_txrx_dump_stats() - Dump statistics
8733  * @psoc: soc handle
 * @value: Statistics option
 * @level: verbosity level
8734  */
8735 static QDF_STATUS dp_txrx_dump_stats(struct cdp_soc_t *psoc, uint16_t value,
8736 				     enum qdf_stats_verbosity_level level)
8737 {
8738 	struct dp_soc *soc = (struct dp_soc *)psoc;
8740 	QDF_STATUS status = QDF_STATUS_SUCCESS;
8741 
8742 	if (!soc) {
8743 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
8744 			"%s: soc is NULL", __func__);
8745 		return QDF_STATUS_E_INVAL;
8746 	}
8747 
8748 	switch (value) {
8749 	case CDP_TXRX_PATH_STATS:
8750 		dp_txrx_path_stats(soc);
8751 		dp_print_soc_interrupt_stats(soc);
8752 		break;
8753 
8754 	case CDP_RX_RING_STATS:
8755 		dp_print_per_ring_stats(soc);
8756 		break;
8757 
8758 	case CDP_TXRX_TSO_STATS:
8759 		dp_print_tso_stats(soc, level);
8760 		break;
8761 
8762 	case CDP_DUMP_TX_FLOW_POOL_INFO:
8763 		if (level == QDF_STATS_VERBOSITY_LEVEL_HIGH)
8764 			cdp_dump_flow_pool_info((struct cdp_soc_t *)soc);
8765 		break;
8766 
8767 	case CDP_DP_NAPI_STATS:
8768 		dp_print_napi_stats(soc);
8769 		break;
8770 
8771 	case CDP_TXRX_DESC_STATS:
8772 		/* TODO: NOT IMPLEMENTED */
8773 		break;
8774 
8775 	default:
8776 		status = QDF_STATUS_E_INVAL;
8777 		break;
8778 	}
8779 
8780 	return status;
8782 }
8783 
8784 /**
8785  * dp_txrx_clear_dump_stats() - clear dumpStats
8786  * @soc- soc handle
8787  * @value - stats option
8788  *
8789  * Return: 0 - Success, non-zero - failure
8790  */
8791 static
8792 QDF_STATUS dp_txrx_clear_dump_stats(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
8793 				    uint8_t value)
8794 {
8795 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
8796 	QDF_STATUS status = QDF_STATUS_SUCCESS;
8797 
8798 	if (!soc) {
8799 		dp_err("%s: soc is NULL", __func__);
8800 		return QDF_STATUS_E_INVAL;
8801 	}
8802 
8803 	switch (value) {
8804 	case CDP_TXRX_TSO_STATS:
8805 		dp_txrx_clear_tso_stats(soc);
8806 		break;
8807 
8808 	default:
8809 		status = QDF_STATUS_E_INVAL;
8810 		break;
8811 	}
8812 
8813 	return status;
8814 }
8815 
8816 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
8817 /**
8818  * dp_update_flow_control_parameters() - API to store datapath
8819  *                            config parameters
8820  * @soc: soc handle
8821  * @params: ini parameter handle
8822  *
8823  * Return: void
8824  */
8825 static inline
8826 void dp_update_flow_control_parameters(struct dp_soc *soc,
8827 				struct cdp_config_params *params)
8828 {
8829 	soc->wlan_cfg_ctx->tx_flow_stop_queue_threshold =
8830 					params->tx_flow_stop_queue_threshold;
8831 	soc->wlan_cfg_ctx->tx_flow_start_queue_offset =
8832 					params->tx_flow_start_queue_offset;
8833 }
8834 #else
8835 static inline
8836 void dp_update_flow_control_parameters(struct dp_soc *soc,
8837 				struct cdp_config_params *params)
8838 {
8839 }
8840 #endif
8841 
8842 #ifdef WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT
8843 /* Max packet limit for TX Comp packet loop (dp_tx_comp_handler) */
8844 #define DP_TX_COMP_LOOP_PKT_LIMIT_MAX 1024
8845 
8846 /* Max packet limit for RX REAP Loop (dp_rx_process) */
8847 #define DP_RX_REAP_LOOP_PKT_LIMIT_MAX 1024
8848 
8849 static
8850 void dp_update_rx_soft_irq_limit_params(struct dp_soc *soc,
8851 					struct cdp_config_params *params)
8852 {
8853 	soc->wlan_cfg_ctx->tx_comp_loop_pkt_limit =
8854 				params->tx_comp_loop_pkt_limit;
8855 
8856 	if (params->tx_comp_loop_pkt_limit < DP_TX_COMP_LOOP_PKT_LIMIT_MAX)
8857 		soc->wlan_cfg_ctx->tx_comp_enable_eol_data_check = true;
8858 	else
8859 		soc->wlan_cfg_ctx->tx_comp_enable_eol_data_check = false;
8860 
8861 	soc->wlan_cfg_ctx->rx_reap_loop_pkt_limit =
8862 				params->rx_reap_loop_pkt_limit;
8863 
8864 	if (params->rx_reap_loop_pkt_limit < DP_RX_REAP_LOOP_PKT_LIMIT_MAX)
8865 		soc->wlan_cfg_ctx->rx_enable_eol_data_check = true;
8866 	else
8867 		soc->wlan_cfg_ctx->rx_enable_eol_data_check = false;
8868 
8869 	soc->wlan_cfg_ctx->rx_hp_oos_update_limit =
8870 				params->rx_hp_oos_update_limit;
8871 
8872 	dp_info("tx_comp_loop_pkt_limit %u tx_comp_enable_eol_data_check %u rx_reap_loop_pkt_limit %u rx_enable_eol_data_check %u rx_hp_oos_update_limit %u",
8873 		soc->wlan_cfg_ctx->tx_comp_loop_pkt_limit,
8874 		soc->wlan_cfg_ctx->tx_comp_enable_eol_data_check,
8875 		soc->wlan_cfg_ctx->rx_reap_loop_pkt_limit,
8876 		soc->wlan_cfg_ctx->rx_enable_eol_data_check,
8877 		soc->wlan_cfg_ctx->rx_hp_oos_update_limit);
8878 }
8879 #else
8880 static inline
8881 void dp_update_rx_soft_irq_limit_params(struct dp_soc *soc,
8882 					struct cdp_config_params *params)
8883 { }
8884 #endif /* WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT */
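
/*
 * Illustration of the limit handling above (the numbers are hypothetical):
 * a tx_comp_loop_pkt_limit of 64 is below DP_TX_COMP_LOOP_PKT_LIMIT_MAX
 * (1024), so tx_comp_enable_eol_data_check is set and the Tx completion
 * loop may re-check the ring for late-arriving entries before exiting;
 * a limit of 1024 or more turns that re-check off. The Rx reap limit is
 * handled the same way against DP_RX_REAP_LOOP_PKT_LIMIT_MAX.
 */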
8885 
8886 /**
8887  * dp_update_config_parameters() - API to store datapath
8888  *                            config parameters
8889  * @psoc: soc handle
8890  * @params: ini parameter handle
8891  *
8892  * Return: status
8893  */
8894 static
8895 QDF_STATUS dp_update_config_parameters(struct cdp_soc *psoc,
8896 				struct cdp_config_params *params)
8897 {
8898 	struct dp_soc *soc = (struct dp_soc *)psoc;
8899 
8900 	if (!soc) {
8901 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
8902 				"%s: Invalid handle", __func__);
8903 		return QDF_STATUS_E_INVAL;
8904 	}
8905 
8906 	soc->wlan_cfg_ctx->tso_enabled = params->tso_enable;
8907 	soc->wlan_cfg_ctx->lro_enabled = params->lro_enable;
8908 	soc->wlan_cfg_ctx->rx_hash = params->flow_steering_enable;
8909 	soc->wlan_cfg_ctx->tcp_udp_checksumoffload =
8910 				params->tcp_udp_checksumoffload;
8911 	soc->wlan_cfg_ctx->napi_enabled = params->napi_enable;
8912 	soc->wlan_cfg_ctx->ipa_enabled = params->ipa_enable;
8913 	soc->wlan_cfg_ctx->gro_enabled = params->gro_enable;
8914 
8915 	dp_update_rx_soft_irq_limit_params(soc, params);
8916 	dp_update_flow_control_parameters(soc, params);
8917 
8918 	return QDF_STATUS_SUCCESS;
8919 }
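
/*
 * Usage sketch (illustrative; the field values are hypothetical and would
 * normally be derived from INI configuration):
 *
 *	struct cdp_config_params params = {0};
 *
 *	params.tso_enable = 1;
 *	params.napi_enable = 1;
 *	params.tcp_udp_checksumoffload = 1;
 *	dp_update_config_parameters(psoc, &params);
 */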
8920 
8921 static struct cdp_wds_ops dp_ops_wds = {
8922 	.vdev_set_wds = dp_vdev_set_wds,
8923 #ifdef WDS_VENDOR_EXTENSION
8924 	.txrx_set_wds_rx_policy = dp_txrx_set_wds_rx_policy,
8925 	.txrx_wds_peer_tx_policy_update = dp_txrx_peer_wds_tx_policy_update,
8926 #endif
8927 };
8928 
/**
 * dp_txrx_data_tx_cb_set() - set the callback for non-standard tx
 * @soc_hdl: datapath soc handle
 * @vdev_id: virtual interface id
 * @callback: callback function
 * @ctxt: callback context
 *
 * Return: void
 */
8937 static void
8938 dp_txrx_data_tx_cb_set(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
8939 		       ol_txrx_data_tx_cb callback, void *ctxt)
8940 {
8941 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
8942 	struct dp_vdev *vdev = dp_get_vdev_from_soc_vdev_id_wifi3(soc, vdev_id);
8943 
8944 	if (!vdev)
8945 		return;
8946 
8947 	vdev->tx_non_std_data_callback.func = callback;
8948 	vdev->tx_non_std_data_callback.ctxt = ctxt;
8949 }
8950 
8951 /**
8952  * dp_pdev_get_dp_txrx_handle() - get dp handle from pdev
8953  * @soc: datapath soc handle
8954  * @pdev_id: id of datapath pdev handle
8955  *
8956  * Return: opaque pointer to dp txrx handle
8957  */
8958 static void *dp_pdev_get_dp_txrx_handle(struct cdp_soc_t *soc, uint8_t pdev_id)
8959 {
8960 	struct dp_pdev *pdev =
8961 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
8962 						   pdev_id);
8963 	if (qdf_unlikely(!pdev))
8964 		return NULL;
8965 
8966 	return pdev->dp_txrx_handle;
8967 }
8968 
8969 /**
8970  * dp_pdev_set_dp_txrx_handle() - set dp handle in pdev
8971  * @soc: datapath soc handle
8972  * @pdev_id: id of datapath pdev handle
8973  * @dp_txrx_hdl: opaque pointer for dp_txrx_handle
8974  *
8975  * Return: void
8976  */
8977 static void
8978 dp_pdev_set_dp_txrx_handle(struct cdp_soc_t *soc, uint8_t pdev_id,
8979 			   void *dp_txrx_hdl)
8980 {
8981 	struct dp_pdev *pdev =
8982 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
8983 						   pdev_id);
8984 
8985 	if (!pdev)
8986 		return;
8987 
8988 	pdev->dp_txrx_handle = dp_txrx_hdl;
8989 }
8990 
8991 /**
8992  * dp_soc_get_dp_txrx_handle() - get context for external-dp from dp soc
8993  * @soc_handle: datapath soc handle
8994  *
8995  * Return: opaque pointer to external dp (non-core DP)
8996  */
8997 static void *dp_soc_get_dp_txrx_handle(struct cdp_soc *soc_handle)
8998 {
8999 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
9000 
9001 	return soc->external_txrx_handle;
9002 }
9003 
9004 /**
9005  * dp_soc_set_dp_txrx_handle() - set external dp handle in soc
9006  * @soc_handle: datapath soc handle
9007  * @txrx_handle: opaque pointer to external dp (non-core DP)
9008  *
9009  * Return: void
9010  */
9011 static void
9012 dp_soc_set_dp_txrx_handle(struct cdp_soc *soc_handle, void *txrx_handle)
9013 {
9014 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
9015 
9016 	soc->external_txrx_handle = txrx_handle;
9017 }
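
/*
 * Illustrative round trip (not part of the driver): an external module can
 * stash a private context against a pdev and retrieve it later. my_ctx,
 * soc_hdl and pdev_id are hypothetical caller-owned values.
 *
 *	dp_pdev_set_dp_txrx_handle(soc_hdl, pdev_id, my_ctx);
 *	void *ctx = dp_pdev_get_dp_txrx_handle(soc_hdl, pdev_id);
 *
 * ctx equals my_ctx on success, or NULL if pdev_id did not resolve to a
 * pdev. The soc-level pair above behaves the same way for the external
 * (non-core DP) context.
 */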
9018 
9019 /**
9020  * dp_soc_map_pdev_to_lmac() - Save pdev_id to lmac_id mapping
9021  * @soc_hdl: datapath soc handle
9022  * @pdev_id: id of the datapath pdev handle
9023  * @lmac_id: lmac id
9024  *
9025  * Return: QDF_STATUS
9026  */
9027 static QDF_STATUS
9028 dp_soc_map_pdev_to_lmac(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
9029 			uint32_t lmac_id)
9030 {
9031 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
9032 	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc,
9033 								  pdev_id);
9034 
9035 	if (qdf_unlikely(!pdev))
9036 		return QDF_STATUS_E_FAILURE;
9037 
9038 	pdev->lmac_id = lmac_id;
9039 	wlan_cfg_set_hw_macid(soc->wlan_cfg_ctx,
9040 			      pdev_id,
9041 			      (lmac_id + 1));
9042 
9043 	return QDF_STATUS_SUCCESS;
9044 }
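
/*
 * Note the off-by-one convention above: the cfg layer records hw_macid as
 * lmac_id + 1. An illustrative call (soc_hdl is hypothetical):
 *
 *	dp_soc_map_pdev_to_lmac(soc_hdl, 0, 0);
 *
 * leaves pdev 0 with lmac_id 0 while wlan_cfg reports hw_macid 1.
 */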
9045 
9046 /**
9047  * dp_soc_set_pdev_status_down() - set pdev down/up status
9048  * @soc: datapath soc handle
9049  * @pdev_id: id of datapath pdev handle
9050  * @is_pdev_down: pdev down/up status
9051  *
9052  * Return: QDF_STATUS
9053  */
9054 static QDF_STATUS
9055 dp_soc_set_pdev_status_down(struct cdp_soc_t *soc, uint8_t pdev_id,
9056 			    bool is_pdev_down)
9057 {
9058 	struct dp_pdev *pdev =
9059 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
9060 						   pdev_id);
9061 	if (!pdev)
9062 		return QDF_STATUS_E_FAILURE;
9063 
9064 	pdev->is_pdev_down = is_pdev_down;
9065 	return QDF_STATUS_SUCCESS;
9066 }
9067 
9068 /**
9069  * dp_get_cfg_capabilities() - get dp capabilities
9070  * @soc_handle: datapath soc handle
9071  * @dp_caps: enum for dp capabilities
9072  *
 * Return: true if the queried dp capability is enabled
9074  */
9075 static bool
9076 dp_get_cfg_capabilities(struct cdp_soc_t *soc_handle,
9077 			enum cdp_capabilities dp_caps)
9078 {
9079 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
9080 
9081 	return wlan_cfg_get_dp_caps(soc->wlan_cfg_ctx, dp_caps);
9082 }
9083 
9084 #ifdef FEATURE_AST
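/**
 * dp_peer_teardown_wifi3() - mark a peer for deletion and flush its AST
 * entries
 * @soc_hdl: soc handle
 * @vdev_id: id of the vdev the peer belongs to
 * @peer_mac: mac address of the peer
 *
 * Return: QDF_STATUS
 */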
9085 static QDF_STATUS
9086 dp_peer_teardown_wifi3(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
9087 		       uint8_t *peer_mac)
9088 {
9089 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
9090 	QDF_STATUS status = QDF_STATUS_SUCCESS;
9091 	struct dp_peer *peer =
9092 			dp_peer_find_hash_find(soc, peer_mac, 0, vdev_id);
9093 
9094 	/* Peer can be null for monitor vap mac address */
9095 	if (!peer || peer->delete_in_progress) {
9096 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
9097 			  "%s: Invalid peer\n", __func__);
9098 		status = QDF_STATUS_E_FAILURE;
9099 		goto fail;
9100 	}
	/*
	 * For a BSS peer, a new peer is not created on alloc_node if a
	 * peer with the same address already exists; instead, the refcnt
	 * of the existing peer is increased. Correspondingly, in the
	 * delete path only the refcnt is decreased, and the peer is freed
	 * only when all references are dropped. So delete_in_progress
	 * should not be set for a bss_peer unless only 3 references remain
	 * (the peer map reference, the peer hash table reference and the
	 * local reference taken above).
	 */
9110 	if (peer->bss_peer && (qdf_atomic_read(&peer->ref_cnt) > 3)) {
		status = QDF_STATUS_E_FAILURE;
9112 		goto fail;
9113 	}
9114 
9115 	qdf_spin_lock_bh(&soc->ast_lock);
9116 	peer->delete_in_progress = true;
9117 	dp_peer_delete_ast_entries(soc, peer);
9118 	qdf_spin_unlock_bh(&soc->ast_lock);
9119 
9120 fail:
9121 	if (peer)
9122 		dp_peer_unref_delete(peer);
9123 	return status;
9124 }
9125 #endif
9126 
9127 #ifdef ATH_SUPPORT_NAC_RSSI
/**
 * dp_vdev_get_neighbour_rssi() - fetch the stored RSSI for a configured NAC
 * @vdev_hdl: DP vdev handle
 * @mac_addr: neighbour peer mac address
 * @rssi: pointer to hold the rssi value
 *
 * Return: QDF_STATUS_SUCCESS if the neighbour is found, error otherwise
 */
static QDF_STATUS dp_vdev_get_neighbour_rssi(struct cdp_vdev *vdev_hdl,
					     char *mac_addr, uint8_t *rssi)
{
9137 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_hdl;
9138 	struct dp_pdev *pdev = vdev->pdev;
9139 	struct dp_neighbour_peer *peer = NULL;
9140 	QDF_STATUS status = QDF_STATUS_E_FAILURE;
9141 
9142 	*rssi = 0;
9143 	qdf_spin_lock_bh(&pdev->neighbour_peer_mutex);
9144 	TAILQ_FOREACH(peer, &pdev->neighbour_peers_list,
9145 		      neighbour_peer_list_elem) {
9146 		if (qdf_mem_cmp(&peer->neighbour_peers_macaddr.raw[0],
9147 				mac_addr, QDF_MAC_ADDR_SIZE) == 0) {
9148 			*rssi = peer->rssi;
9149 			status = QDF_STATUS_SUCCESS;
9150 			break;
9151 		}
9152 	}
9153 	qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex);
9154 	return status;
9155 }
9156 
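/**
 * dp_config_for_nac_rssi() - configure NAC RSSI measurement
 * @vdev_handle: DP vdev handle
 * @cmd: add/del command for the neighbour peer entry
 * @bssid: bssid configured in FW for NAC RSSI measurement
 * @client_macaddr: mac address of the neighbour client
 * @chan_num: channel number
 *
 * Return: QDF_STATUS_SUCCESS
 */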
9157 static QDF_STATUS dp_config_for_nac_rssi(struct cdp_vdev *vdev_handle,
9158 		enum cdp_nac_param_cmd cmd, char *bssid, char *client_macaddr,
9159 		uint8_t chan_num)
{
	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
	struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;
	struct dp_soc *soc = (struct dp_soc *)vdev->pdev->soc;
9165 
9166 	pdev->nac_rssi_filtering = 1;
9167 	/* Store address of NAC (neighbour peer) which will be checked
9168 	 * against TA of received packets.
9169 	 */
9170 
9171 	if (cmd == CDP_NAC_PARAM_ADD) {
9172 		dp_update_filter_neighbour_peers(vdev_handle, DP_NAC_PARAM_ADD,
9173 						 client_macaddr);
9174 	} else if (cmd == CDP_NAC_PARAM_DEL) {
9175 		dp_update_filter_neighbour_peers(vdev_handle,
9176 						 DP_NAC_PARAM_DEL,
9177 						 client_macaddr);
9178 	}
9179 
9180 	if (soc->cdp_soc.ol_ops->config_bssid_in_fw_for_nac_rssi)
9181 		soc->cdp_soc.ol_ops->config_bssid_in_fw_for_nac_rssi
9182 			(soc->ctrl_psoc, pdev->pdev_id,
9183 			 vdev->vdev_id, cmd, bssid, client_macaddr);
9184 
9185 	return QDF_STATUS_SUCCESS;
9186 }
9187 #endif
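
/*
 * Illustrative flow (not part of the driver): add a neighbour client for
 * RSSI tracking, then read the stored value back once the monitor path has
 * seen its frames. vdev_hdl, bssid and nac_mac are hypothetical.
 *
 *	uint8_t rssi;
 *
 *	dp_config_for_nac_rssi(vdev_hdl, CDP_NAC_PARAM_ADD, bssid, nac_mac, 6);
 *	if (dp_vdev_get_neighbour_rssi(vdev_hdl, nac_mac, &rssi) ==
 *	    QDF_STATUS_SUCCESS)
 *		qdf_print("NAC rssi: %u", rssi);
 */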
9188 
9189 /**
9190  * dp_enable_peer_based_pktlog() - Set Flag for peer based filtering
9191  * for pktlog
 * @txrx_pdev_handle: cdp_pdev handle
 * @mac_addr: mac address of the peer whose packets are to be filtered
 * @enb_dsb: enable or disable peer based filtering
9194  *
9195  * Return: QDF_STATUS
9196  */
9197 static int
9198 dp_enable_peer_based_pktlog(
9199 	struct cdp_pdev *txrx_pdev_handle,
9200 	char *mac_addr, uint8_t enb_dsb)
9201 {
9202 	struct dp_peer *peer;
9203 	struct dp_pdev *pdev = (struct dp_pdev *)txrx_pdev_handle;
9204 
9205 	peer = (struct dp_peer *)dp_find_peer_by_addr(txrx_pdev_handle,
9206 			mac_addr);
9207 
9208 	if (!peer) {
9209 		dp_err("Invalid Peer");
9210 		return QDF_STATUS_E_FAILURE;
9211 	}
9212 
9213 	peer->peer_based_pktlog_filter = enb_dsb;
9214 	pdev->dp_peer_based_pktlog = enb_dsb;
9215 
9216 	return QDF_STATUS_SUCCESS;
9217 }
9218 
9219 #ifndef WLAN_SUPPORT_RX_TAG_STATISTICS
9220 /**
9221  * dp_dump_pdev_rx_protocol_tag_stats - dump the number of packets tagged for
 * a given protocol type (RX_PROTOCOL_TAG_ALL indicates all protocols)
9223  * @pdev_handle: cdp_pdev handle
9224  * @protocol_type: protocol type for which stats should be displayed
9225  *
9226  * Return: none
9227  */
9228 static inline void
9229 dp_dump_pdev_rx_protocol_tag_stats(struct cdp_pdev *pdev_handle,
9230 				   uint16_t protocol_type)
9231 {
9232 }
9233 #endif /* WLAN_SUPPORT_RX_TAG_STATISTICS */
9234 
9235 #ifndef WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG
9236 /**
9237  * dp_update_pdev_rx_protocol_tag - Add/remove a protocol tag that should be
9238  * applied to the desired protocol type packets
 * @pdev_handle: cdp_pdev handle
 * @enable_rx_protocol_tag: bitmask that indicates which protocol types
 * are enabled for tagging; zero disables the feature, non-zero enables it
9243  * @protocol_type: new protocol type for which the tag is being added
9244  * @tag: user configured tag for the new protocol
9245  *
9246  * Return: Success
9247  */
9248 static inline QDF_STATUS
9249 dp_update_pdev_rx_protocol_tag(struct cdp_pdev *pdev_handle,
9250 			       uint32_t enable_rx_protocol_tag,
9251 			       uint16_t protocol_type,
9252 			       uint16_t tag)
9253 {
9254 	return QDF_STATUS_SUCCESS;
9255 }
9256 #endif /* WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG */
9257 
9258 #ifndef WLAN_SUPPORT_RX_FLOW_TAG
9259 /**
9260  * dp_set_rx_flow_tag - add/delete a flow
9261  * @pdev_handle: cdp_pdev handle
9262  * @flow_info: flow tuple that is to be added to/deleted from flow search table
9263  *
9264  * Return: Success
9265  */
9266 static inline QDF_STATUS
9267 dp_set_rx_flow_tag(struct cdp_pdev *pdev_handle,
9268 		   struct cdp_rx_flow_info *flow_info)
9269 {
9270 	return QDF_STATUS_SUCCESS;
9271 }
9272 /**
9273  * dp_dump_rx_flow_tag_stats - dump the number of packets tagged for
9274  * given flow 5-tuple
9275  * @pdev_handle: cdp_pdev handle
9276  * @flow_info: flow 5-tuple for which stats should be displayed
9277  *
9278  * Return: Success
9279  */
9280 static inline QDF_STATUS
9281 dp_dump_rx_flow_tag_stats(struct cdp_pdev *pdev_handle,
9282 			  struct cdp_rx_flow_info *flow_info)
9283 {
9284 	return QDF_STATUS_SUCCESS;
9285 }
9286 #endif /* WLAN_SUPPORT_RX_FLOW_TAG */
9287 
9288 static QDF_STATUS dp_peer_map_attach_wifi3(struct cdp_soc_t  *soc_hdl,
9289 					   uint32_t max_peers,
9290 					   uint32_t max_ast_index,
9291 					   bool peer_map_unmap_v2)
9292 {
9293 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
9294 
9295 	soc->max_peers = max_peers;
9296 
	qdf_print("%s max_peers %u, max_ast_index: %u\n",
		  __func__, max_peers, max_ast_index);
9299 	wlan_cfg_set_max_ast_idx(soc->wlan_cfg_ctx, max_ast_index);
9300 
9301 	if (dp_peer_find_attach(soc))
9302 		return QDF_STATUS_E_FAILURE;
9303 
9304 	soc->is_peer_map_unmap_v2 = peer_map_unmap_v2;
9305 
9306 	return QDF_STATUS_SUCCESS;
9307 }
9308 
9309 static QDF_STATUS dp_set_rate_stats_cap(struct cdp_soc_t *soc_hdl,
9310 					uint8_t val)
9311 {
9312 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
9313 
9314 	soc->wlanstats_enabled = val;
9315 
9316 	return QDF_STATUS_SUCCESS;
9317 }
9318 
9319 static void dp_soc_set_rate_stats_ctx(struct cdp_soc_t *soc_handle,
9320 				      void *stats_ctx)
9321 {
9322 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
9323 
9324 	soc->rate_stats_ctx = (struct cdp_soc_rate_stats_ctx *)stats_ctx;
9325 }
9326 
9327 #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
9328 static QDF_STATUS dp_flush_rate_stats_req(struct cdp_soc_t *soc_hdl,
9329 					  uint8_t pdev_id)
9330 {
9331 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
9332 	struct dp_vdev *vdev = NULL;
9333 	struct dp_peer *peer = NULL;
9334 	struct dp_pdev *pdev =
9335 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
9336 						   pdev_id);
9337 
9338 	if (!pdev)
9339 		return QDF_STATUS_E_FAILURE;
9340 
9341 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
9342 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
9343 	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
9344 		TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
9345 			if (peer && !peer->bss_peer)
9346 				dp_wdi_event_handler(
9347 					WDI_EVENT_FLUSH_RATE_STATS_REQ,
9348 					soc, peer->wlanstats_ctx,
9349 					peer->peer_ids[0],
9350 					WDI_NO_VAL, pdev_id);
9351 		}
9352 	}
9353 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
9354 	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
9355 
9356 	return QDF_STATUS_SUCCESS;
9357 }
9358 #else
9359 static inline QDF_STATUS
9360 dp_flush_rate_stats_req(struct cdp_soc_t *soc_hdl,
9361 			uint8_t pdev_id)
9362 {
9363 	return QDF_STATUS_SUCCESS;
9364 }
9365 #endif
9366 
9367 #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
9368 static QDF_STATUS dp_peer_flush_rate_stats(struct cdp_soc_t *soc,
9369 					   uint8_t pdev_id,
9370 					   void *buf)
9371 {
	dp_wdi_event_handler(WDI_EVENT_PEER_FLUSH_RATE_STATS,
9373 			      (struct dp_soc *)soc, buf, HTT_INVALID_PEER,
9374 			      WDI_NO_VAL, pdev_id);
9375 	return QDF_STATUS_SUCCESS;
9376 }
9377 #else
9378 static inline QDF_STATUS
9379 dp_peer_flush_rate_stats(struct cdp_soc_t *soc,
9380 			 uint8_t pdev_id,
9381 			 void *buf)
9382 {
9383 	return QDF_STATUS_SUCCESS;
9384 }
9385 #endif
9386 
9387 static void *dp_soc_get_rate_stats_ctx(struct cdp_soc_t *soc_handle)
9388 {
9389 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
9390 
9391 	return soc->rate_stats_ctx;
9392 }
9393 
9394 /*
9395  * dp_get_cfg() - get dp cfg
9396  * @soc: cdp soc handle
9397  * @cfg: cfg enum
9398  *
9399  * Return: cfg value
9400  */
9401 static uint32_t dp_get_cfg(struct cdp_soc_t *soc, enum cdp_dp_cfg cfg)
9402 {
9403 	struct dp_soc *dpsoc = (struct dp_soc *)soc;
9404 	uint32_t value = 0;
9405 
9406 	switch (cfg) {
9407 	case cfg_dp_enable_data_stall:
9408 		value = dpsoc->wlan_cfg_ctx->enable_data_stall_detection;
9409 		break;
9410 	case cfg_dp_enable_ip_tcp_udp_checksum_offload:
9411 		value = dpsoc->wlan_cfg_ctx->tcp_udp_checksumoffload;
9412 		break;
9413 	case cfg_dp_tso_enable:
9414 		value = dpsoc->wlan_cfg_ctx->tso_enabled;
9415 		break;
9416 	case cfg_dp_lro_enable:
9417 		value = dpsoc->wlan_cfg_ctx->lro_enabled;
9418 		break;
9419 	case cfg_dp_gro_enable:
9420 		value = dpsoc->wlan_cfg_ctx->gro_enabled;
9421 		break;
9422 	case cfg_dp_tx_flow_start_queue_offset:
9423 		value = dpsoc->wlan_cfg_ctx->tx_flow_start_queue_offset;
9424 		break;
9425 	case cfg_dp_tx_flow_stop_queue_threshold:
9426 		value = dpsoc->wlan_cfg_ctx->tx_flow_stop_queue_threshold;
9427 		break;
9428 	case cfg_dp_disable_intra_bss_fwd:
9429 		value = dpsoc->wlan_cfg_ctx->disable_intra_bss_fwd;
9430 		break;
9431 	default:
		value = 0;
9433 	}
9434 
9435 	return value;
9436 }
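
/*
 * Illustrative query (not part of the driver): callers read back the values
 * stored by dp_update_config_parameters() through the same cfg context;
 * soc_hdl is hypothetical. Unknown enum values fall through to 0.
 *
 *	if (dp_get_cfg(soc_hdl, cfg_dp_tso_enable))
 *		qdf_print("TSO is enabled");
 */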
9437 
9438 #ifdef PEER_FLOW_CONTROL
9439 /**
9440  * dp_tx_flow_ctrl_configure_pdev() - Configure flow control params
9441  * @soc_handle: datapath soc handle
9442  * @pdev_id: id of datapath pdev handle
9443  * @param: ol ath params
9444  * @value: value of the flag
9445  * @buff: Buffer to be passed
9446  *
 * This function mirrors the legacy implementation, where a single
 * function both displays stats and updates pdev params.
9449  *
9450  * Return: 0 for success. nonzero for failure.
9451  */
9452 static uint32_t dp_tx_flow_ctrl_configure_pdev(struct cdp_soc_t *soc_handle,
9453 					       uint8_t pdev_id,
9454 					       enum _ol_ath_param_t param,
9455 					       uint32_t value, void *buff)
9456 {
9457 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
9458 	struct dp_pdev *pdev =
9459 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
9460 						   pdev_id);
9461 
9462 	if (qdf_unlikely(!pdev))
9463 		return 1;
9464 
9465 	soc = pdev->soc;
9466 	if (!soc)
9467 		return 1;
9468 
9469 	switch (param) {
9470 #ifdef QCA_ENH_V3_STATS_SUPPORT
9471 	case OL_ATH_PARAM_VIDEO_DELAY_STATS_FC:
9472 		if (value)
9473 			pdev->delay_stats_flag = true;
9474 		else
9475 			pdev->delay_stats_flag = false;
9476 		break;
9477 	case OL_ATH_PARAM_VIDEO_STATS_FC:
9478 		qdf_print("------- TID Stats ------\n");
9479 		dp_pdev_print_tid_stats(pdev);
9480 		qdf_print("------ Delay Stats ------\n");
9481 		dp_pdev_print_delay_stats(pdev);
9482 		break;
9483 #endif
9484 	case OL_ATH_PARAM_TOTAL_Q_SIZE:
9485 		{
9486 			uint32_t tx_min, tx_max;
9487 
9488 			tx_min = wlan_cfg_get_min_tx_desc(soc->wlan_cfg_ctx);
9489 			tx_max = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
9490 
9491 			if (!buff) {
9492 				if ((value >= tx_min) && (value <= tx_max)) {
9493 					pdev->num_tx_allowed = value;
9494 				} else {
9495 					QDF_TRACE(QDF_MODULE_ID_DP,
9496 						  QDF_TRACE_LEVEL_INFO,
9497 						  "Failed to update num_tx_allowed, Q_min = %d Q_max = %d",
9498 						  tx_min, tx_max);
9499 					break;
9500 				}
9501 			} else {
9502 				*(int *)buff = pdev->num_tx_allowed;
9503 			}
9504 		}
9505 		break;
9506 	default:
9507 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
			  "%s: unhandled param %d", __func__, param);
9509 		break;
9510 	}
9511 
9512 	return 0;
9513 }
9514 #endif
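
/*
 * The buff argument above selects between set and get: a NULL buff applies
 * value to the pdev, a non-NULL buff reads the current setting back. An
 * illustrative pair of calls (soc_hdl is hypothetical):
 *
 *	int q_size;
 *
 *	dp_tx_flow_ctrl_configure_pdev(soc_hdl, 0, OL_ATH_PARAM_TOTAL_Q_SIZE,
 *				       1024, NULL);
 *	dp_tx_flow_ctrl_configure_pdev(soc_hdl, 0, OL_ATH_PARAM_TOTAL_Q_SIZE,
 *				       0, &q_size);
 */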
9515 
9516 /**
9517  * dp_set_pdev_pcp_tid_map_wifi3(): update pcp tid map in pdev
9518  * @psoc: dp soc handle
9519  * @pdev_id: id of DP_PDEV handle
9520  * @pcp: pcp value
9521  * @tid: tid value passed by the user
9522  *
9523  * Return: QDF_STATUS_SUCCESS on success
9524  */
9525 static QDF_STATUS dp_set_pdev_pcp_tid_map_wifi3(ol_txrx_soc_handle psoc,
9526 						uint8_t pdev_id,
9527 						uint8_t pcp, uint8_t tid)
9528 {
9529 	struct dp_soc *soc = (struct dp_soc *)psoc;
9530 
9531 	soc->pcp_tid_map[pcp] = tid;
9532 
9533 	hal_tx_update_pcp_tid_map(soc->hal_soc, pcp, tid);
9534 	return QDF_STATUS_SUCCESS;
9535 }
9536 
9537 /**
9538  * dp_set_pdev_tidmap_prty_wifi3(): update tidmap priority in pdev
 * @pdev_handle: DP_PDEV handle
9540  * @prio: tidmap priority value passed by the user
9541  *
9542  * Return: QDF_STATUS_SUCCESS on success
9543  */
9544 static QDF_STATUS dp_set_pdev_tidmap_prty_wifi3(struct cdp_pdev *pdev_handle,
9545 						uint8_t prio)
9546 {
9547 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
9548 	struct dp_soc *soc = pdev->soc;
9549 
9550 	soc->tidmap_prty = prio;
9551 
9552 	hal_tx_set_tidmap_prty(soc->hal_soc, prio);
9553 	return QDF_STATUS_SUCCESS;
9554 }
9555 
9556 /**
9557  * dp_set_vdev_pcp_tid_map_wifi3(): update pcp tid map in vdev
9558  * @soc: DP soc handle
9559  * @vdev_id: id of DP_VDEV handle
9560  * @pcp: pcp value
9561  * @tid: tid value passed by the user
9562  *
9563  * Return: QDF_STATUS_SUCCESS on success
9564  */
9565 static QDF_STATUS dp_set_vdev_pcp_tid_map_wifi3(struct cdp_soc_t *soc,
9566 						uint8_t vdev_id,
9567 						uint8_t pcp, uint8_t tid)
9568 {
9569 	struct dp_vdev *vdev =
9570 		dp_get_vdev_from_soc_vdev_id_wifi3((struct dp_soc *)soc,
9571 						   vdev_id);
9572 
9573 	if (!vdev)
9574 		return QDF_STATUS_E_FAILURE;
9575 
9576 	vdev->pcp_tid_map[pcp] = tid;
9577 
9578 	return QDF_STATUS_SUCCESS;
9579 }
9580 
9581 /**
 * dp_set_vdev_tidmap_tbl_id_wifi3(): update tidmap table id in vdev
 * @vdev_handle: DP_VDEV handle
9584  * @mapid: map_id value passed by the user
9585  *
9586  * Return: QDF_STATUS_SUCCESS on success
9587  */
9588 static QDF_STATUS dp_set_vdev_tidmap_tbl_id_wifi3(struct cdp_vdev *vdev_handle,
9589 						  uint8_t mapid)
9590 {
9591 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
9592 
9593 	vdev->tidmap_tbl_id = mapid;
9594 
9595 	return QDF_STATUS_SUCCESS;
9596 }
9597 
9598 /**
9599  * dp_set_vdev_tidmap_prty_wifi3(): update tidmap priority in vdev
 * @vdev_handle: DP_VDEV handle
9601  * @prio: tidmap priority value passed by the user
9602  *
9603  * Return: QDF_STATUS_SUCCESS on success
9604  */
9605 static QDF_STATUS dp_set_vdev_tidmap_prty_wifi3(struct cdp_vdev *vdev_handle,
9606 						uint8_t prio)
9607 {
9608 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
9609 
9610 	vdev->tidmap_prty = prio;
9611 
9612 	return QDF_STATUS_SUCCESS;
9613 }
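
/*
 * Illustrative mapping (not part of the driver): steer PCP 6 traffic of a
 * vdev to TID 6 and raise the vdev map priority. soc_hdl, vdev_id and
 * vdev_hdl are hypothetical caller-owned handles.
 *
 *	dp_set_vdev_pcp_tid_map_wifi3(soc_hdl, vdev_id, 6, 6);
 *	dp_set_vdev_tidmap_prty_wifi3(vdev_hdl, 1);
 */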
9614 
9615 static struct cdp_cmn_ops dp_ops_cmn = {
9616 	.txrx_soc_attach_target = dp_soc_attach_target_wifi3,
9617 	.txrx_vdev_attach = dp_vdev_attach_wifi3,
9618 	.txrx_vdev_detach = dp_vdev_detach_wifi3,
9619 	.txrx_pdev_attach = dp_pdev_attach_wifi3,
9620 	.txrx_pdev_detach = dp_pdev_detach_wifi3,
9621 	.txrx_pdev_deinit = dp_pdev_deinit_wifi3,
9622 	.txrx_peer_create = dp_peer_create_wifi3,
9623 	.txrx_peer_setup = dp_peer_setup_wifi3,
9624 #ifdef FEATURE_AST
9625 	.txrx_peer_teardown = dp_peer_teardown_wifi3,
9626 #else
9627 	.txrx_peer_teardown = NULL,
9628 #endif
9629 	.txrx_peer_add_ast = dp_peer_add_ast_wifi3,
9630 	.txrx_peer_update_ast = dp_peer_update_ast_wifi3,
9631 	.txrx_peer_get_ast_info_by_soc = dp_peer_get_ast_info_by_soc_wifi3,
9632 	.txrx_peer_get_ast_info_by_pdev =
9633 		dp_peer_get_ast_info_by_pdevid_wifi3,
9634 	.txrx_peer_ast_delete_by_soc =
9635 		dp_peer_ast_entry_del_by_soc,
9636 	.txrx_peer_ast_delete_by_pdev =
9637 		dp_peer_ast_entry_del_by_pdev,
9638 	.txrx_peer_delete = dp_peer_delete_wifi3,
9639 	.txrx_vdev_register = dp_vdev_register_wifi3,
9640 	.txrx_soc_detach = dp_soc_detach_wifi3,
9641 	.txrx_soc_deinit = dp_soc_deinit_wifi3,
9642 	.txrx_soc_init = dp_soc_init_wifi3,
9643 	.txrx_tso_soc_attach = dp_tso_soc_attach,
9644 	.txrx_tso_soc_detach = dp_tso_soc_detach,
9645 	.txrx_get_vdev_mac_addr = dp_get_vdev_mac_addr_wifi3,
9646 	.txrx_get_mon_vdev_from_pdev = dp_get_mon_vdev_from_pdev_wifi3,
9647 	.txrx_get_ctrl_pdev_from_vdev = dp_get_ctrl_pdev_from_vdev_wifi3,
9648 	.txrx_ath_getstats = dp_get_device_stats,
9649 	.addba_requestprocess = dp_addba_requestprocess_wifi3,
9650 	.addba_responsesetup = dp_addba_responsesetup_wifi3,
9651 	.addba_resp_tx_completion = dp_addba_resp_tx_completion_wifi3,
9652 	.delba_process = dp_delba_process_wifi3,
9653 	.set_addba_response = dp_set_addba_response,
9654 	.flush_cache_rx_queue = NULL,
9655 	/* TODO: get API's for dscp-tid need to be added*/
9656 	.set_vdev_dscp_tid_map = dp_set_vdev_dscp_tid_map_wifi3,
9657 	.set_pdev_dscp_tid_map = dp_set_pdev_dscp_tid_map_wifi3,
9658 	.hmmc_tid_override_en = dp_hmmc_tid_override_en_wifi3,
9659 	.set_hmmc_tid_val = dp_set_hmmc_tid_val_wifi3,
9660 	.txrx_get_total_per = dp_get_total_per,
9661 	.txrx_stats_request = dp_txrx_stats_request,
9662 	.txrx_set_monitor_mode = dp_vdev_set_monitor_mode,
9663 	.txrx_get_pdev_id_frm_pdev = dp_get_pdev_id_frm_pdev,
9664 	.txrx_get_vow_config_frm_pdev = dp_get_delay_stats_flag,
9665 	.txrx_pdev_set_chan_noise_floor = dp_pdev_set_chan_noise_floor,
9666 	.txrx_set_nac = dp_set_nac,
9667 	.txrx_get_tx_pending = dp_get_tx_pending,
9668 	.txrx_set_pdev_tx_capture = dp_config_debug_sniffer,
9669 	.txrx_get_peer_mac_from_peer_id = dp_get_peer_mac_from_peer_id,
9670 	.display_stats = dp_txrx_dump_stats,
9671 	.txrx_soc_set_nss_cfg = dp_soc_set_nss_cfg_wifi3,
9672 	.txrx_soc_get_nss_cfg = dp_soc_get_nss_cfg_wifi3,
9673 	.txrx_intr_attach = dp_soc_interrupt_attach_wrapper,
9674 	.txrx_intr_detach = dp_soc_interrupt_detach,
9675 	.set_pn_check = dp_set_pn_check_wifi3,
9676 	.update_config_parameters = dp_update_config_parameters,
9677 	/* TODO: Add other functions */
9678 	.txrx_data_tx_cb_set = dp_txrx_data_tx_cb_set,
9679 	.get_dp_txrx_handle = dp_pdev_get_dp_txrx_handle,
9680 	.set_dp_txrx_handle = dp_pdev_set_dp_txrx_handle,
9681 	.get_soc_dp_txrx_handle = dp_soc_get_dp_txrx_handle,
9682 	.set_soc_dp_txrx_handle = dp_soc_set_dp_txrx_handle,
9683 	.map_pdev_to_lmac = dp_soc_map_pdev_to_lmac,
9684 	.set_pdev_status_down = dp_soc_set_pdev_status_down,
9685 	.txrx_set_ba_aging_timeout = dp_set_ba_aging_timeout,
9686 	.txrx_get_ba_aging_timeout = dp_get_ba_aging_timeout,
9687 	.tx_send = dp_tx_send,
9688 	.txrx_peer_reset_ast = dp_wds_reset_ast_wifi3,
9689 	.txrx_peer_reset_ast_table = dp_wds_reset_ast_table_wifi3,
9690 	.txrx_peer_flush_ast_table = dp_wds_flush_ast_table_wifi3,
9691 	.txrx_peer_map_attach = dp_peer_map_attach_wifi3,
9692 	.txrx_get_os_rx_handles_from_vdev =
9693 					dp_get_os_rx_handles_from_vdev_wifi3,
9694 	.delba_tx_completion = dp_delba_tx_completion_wifi3,
9695 	.get_dp_capabilities = dp_get_cfg_capabilities,
9696 	.txrx_get_cfg = dp_get_cfg,
9697 	.set_rate_stats_ctx = dp_soc_set_rate_stats_ctx,
9698 	.get_rate_stats_ctx = dp_soc_get_rate_stats_ctx,
9699 	.txrx_peer_flush_rate_stats = dp_peer_flush_rate_stats,
9700 	.txrx_flush_rate_stats_request = dp_flush_rate_stats_req,
9701 
9702 	.set_pdev_pcp_tid_map = dp_set_pdev_pcp_tid_map_wifi3,
9703 	.set_pdev_tidmap_prty = dp_set_pdev_tidmap_prty_wifi3,
9704 	.set_vdev_pcp_tid_map = dp_set_vdev_pcp_tid_map_wifi3,
9705 	.set_vdev_tidmap_prty = dp_set_vdev_tidmap_prty_wifi3,
9706 	.set_vdev_tidmap_tbl_id = dp_set_vdev_tidmap_tbl_id_wifi3,
9707 
9708 	.txrx_cp_peer_del_response = dp_cp_peer_del_resp_handler,
9709 #ifdef QCA_MULTIPASS_SUPPORT
9710 	.set_vlan_groupkey = dp_set_vlan_groupkey,
9711 #endif
9712 };
9713 
9714 static struct cdp_ctrl_ops dp_ops_ctrl = {
9715 	.txrx_peer_authorize = dp_peer_authorize,
9716 	.txrx_set_vdev_rx_decap_type = dp_set_vdev_rx_decap_type,
9717 	.txrx_set_tx_encap_type = dp_set_vdev_tx_encap_type,
9718 #ifdef MESH_MODE_SUPPORT
9719 	.txrx_set_mesh_mode  = dp_peer_set_mesh_mode,
9720 	.txrx_set_mesh_rx_filter = dp_peer_set_mesh_rx_filter,
9721 #endif
9722 	.txrx_set_vdev_param = dp_set_vdev_param,
9723 	.txrx_peer_set_nawds = dp_peer_set_nawds,
9724 	.txrx_set_pdev_reo_dest = dp_set_pdev_reo_dest,
9725 	.txrx_get_pdev_reo_dest = dp_get_pdev_reo_dest,
9726 	.txrx_set_filter_neighbour_peers = dp_set_filter_neighbour_peers,
9727 	.txrx_update_filter_neighbour_peers =
9728 		dp_update_filter_neighbour_peers,
9729 	.txrx_get_sec_type = dp_get_sec_type,
9730 	/* TODO: Add other functions */
9731 	.txrx_wdi_event_sub = dp_wdi_event_sub,
9732 	.txrx_wdi_event_unsub = dp_wdi_event_unsub,
9733 #ifdef WDI_EVENT_ENABLE
9734 	.txrx_get_pldev = dp_get_pldev,
9735 #endif
9736 	.txrx_set_pdev_param = dp_set_pdev_param,
9737 #ifdef ATH_SUPPORT_NAC_RSSI
9738 	.txrx_vdev_config_for_nac_rssi = dp_config_for_nac_rssi,
9739 	.txrx_vdev_get_neighbour_rssi = dp_vdev_get_neighbour_rssi,
9740 #endif
9741 	.set_key = dp_set_michael_key,
9742 	.txrx_get_vdev_param = dp_get_vdev_param,
9743 	.enable_peer_based_pktlog = dp_enable_peer_based_pktlog,
9744 	.calculate_delay_stats = dp_calculate_delay_stats,
9745 #ifdef WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG
9746 	.txrx_update_pdev_rx_protocol_tag = dp_update_pdev_rx_protocol_tag,
9747 #ifdef WLAN_SUPPORT_RX_TAG_STATISTICS
9748 	.txrx_dump_pdev_rx_protocol_tag_stats =
9749 				dp_dump_pdev_rx_protocol_tag_stats,
9750 #endif /* WLAN_SUPPORT_RX_TAG_STATISTICS */
9751 #endif /* WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG */
9752 #ifdef WLAN_SUPPORT_RX_FLOW_TAG
9753 	.txrx_set_rx_flow_tag = dp_set_rx_flow_tag,
9754 	.txrx_dump_rx_flow_tag_stats = dp_dump_rx_flow_tag_stats,
9755 #endif /* WLAN_SUPPORT_RX_FLOW_TAG */
9756 #ifdef QCA_MULTIPASS_SUPPORT
9757 	.txrx_peer_set_vlan_id = dp_peer_set_vlan_id,
#endif /* QCA_MULTIPASS_SUPPORT */
9759 #if defined(WLAN_TX_PKT_CAPTURE_ENH) || defined(WLAN_RX_PKT_CAPTURE_ENH)
9760 	.txrx_update_peer_pkt_capture_params =
9761 		 dp_peer_update_pkt_capture_params,
9762 #endif /* WLAN_TX_PKT_CAPTURE_ENH || WLAN_RX_PKT_CAPTURE_ENH */
9763 };
9764 
9765 static struct cdp_me_ops dp_ops_me = {
9766 #ifdef ATH_SUPPORT_IQUE
9767 	.tx_me_alloc_descriptor = dp_tx_me_alloc_descriptor,
9768 	.tx_me_free_descriptor = dp_tx_me_free_descriptor,
9769 	.tx_me_convert_ucast = dp_tx_me_send_convert_ucast,
9770 #endif
9771 };
9772 
9773 static struct cdp_mon_ops dp_ops_mon = {
9774 	.txrx_monitor_set_filter_ucast_data = NULL,
9775 	.txrx_monitor_set_filter_mcast_data = NULL,
9776 	.txrx_monitor_set_filter_non_data = NULL,
9777 	.txrx_monitor_get_filter_ucast_data = dp_vdev_get_filter_ucast_data,
9778 	.txrx_monitor_get_filter_mcast_data = dp_vdev_get_filter_mcast_data,
9779 	.txrx_monitor_get_filter_non_data = dp_vdev_get_filter_non_data,
9780 	.txrx_reset_monitor_mode = dp_reset_monitor_mode,
9781 	/* Added support for HK advance filter */
9782 	.txrx_set_advance_monitor_filter = dp_pdev_set_advance_monitor_filter,
9783 	.txrx_monitor_record_channel = dp_pdev_set_monitor_channel,
9784 	.txrx_deliver_tx_mgmt = dp_deliver_tx_mgmt,
9785 	.txrx_set_bsscolor = dp_mon_set_bsscolor,
9786 };
9787 
9788 static struct cdp_host_stats_ops dp_ops_host_stats = {
9789 	.txrx_per_peer_stats = dp_get_host_peer_stats,
9790 	.get_fw_peer_stats = dp_get_fw_peer_stats,
9791 	.get_htt_stats = dp_get_htt_stats,
9792 	.txrx_enable_enhanced_stats = dp_enable_enhanced_stats,
9793 	.txrx_disable_enhanced_stats = dp_disable_enhanced_stats,
9794 	.txrx_stats_publish = dp_txrx_stats_publish,
9795 	.txrx_get_vdev_stats  = dp_txrx_get_vdev_stats,
9796 	.txrx_get_peer_stats = dp_txrx_get_peer_stats,
9797 	.txrx_reset_peer_stats = dp_txrx_reset_peer_stats,
9798 	.txrx_get_pdev_stats = dp_txrx_get_pdev_stats,
9799 	.txrx_get_ratekbps = dp_txrx_get_ratekbps,
9800 	.configure_rate_stats = dp_set_rate_stats_cap,
9801 	.txrx_update_vdev_stats = dp_txrx_update_vdev_host_stats,
9802 	/* TODO */
9803 };
9804 
9805 static struct cdp_raw_ops dp_ops_raw = {
9806 	/* TODO */
9807 };
9808 
9809 #ifdef PEER_FLOW_CONTROL
9810 static struct cdp_pflow_ops dp_ops_pflow = {
9811 	dp_tx_flow_ctrl_configure_pdev,
9812 };
#endif /* PEER_FLOW_CONTROL */
9814 
9815 #if defined(WLAN_CFR_ENABLE) && defined(WLAN_ENH_CFR_ENABLE)
9816 static struct cdp_cfr_ops dp_ops_cfr = {
9817 	.txrx_cfr_filter = dp_cfr_filter,
9818 };
9819 #endif
9820 
9821 #ifdef FEATURE_RUNTIME_PM
9822 /**
9823  * dp_runtime_suspend() - ensure DP is ready to runtime suspend
9824  * @soc_hdl: Datapath soc handle
9825  * @pdev_id: id of data path pdev handle
9826  *
9827  * DP is ready to runtime suspend if there are no pending TX packets.
9828  *
9829  * Return: QDF_STATUS
9830  */
9831 static QDF_STATUS dp_runtime_suspend(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
9832 {
9833 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
9834 	struct dp_pdev *pdev;
9835 
9836 	pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
9837 	if (!pdev) {
9838 		dp_err("pdev is NULL");
9839 		return QDF_STATUS_E_INVAL;
9840 	}
9841 
9842 	/* Abort if there are any pending TX packets */
9843 	if (dp_get_tx_pending(dp_pdev_to_cdp_pdev(pdev)) > 0) {
9844 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
9845 			  FL("Abort suspend due to pending TX packets"));
9846 		return QDF_STATUS_E_AGAIN;
9847 	}
9848 
9849 	if (soc->intr_mode == DP_INTR_POLL)
9850 		qdf_timer_stop(&soc->int_timer);
9851 
9852 	return QDF_STATUS_SUCCESS;
9853 }
9854 
/**
 * dp_flush_ring_hptp() - Update ring shadow register HP/TP
 *			  address on runtime resume
 * @soc: DP soc context
 * @hal_srng: srng whose pending shadow register update is flushed
 *
 * Return: None
 */
9863 static
9864 void dp_flush_ring_hptp(struct dp_soc *soc, hal_ring_handle_t hal_srng)
9865 {
9866 	if (hal_srng && hal_srng_get_clear_event(hal_srng,
9867 						 HAL_SRNG_FLUSH_EVENT)) {
9868 		/* Acquire the lock */
9869 		hal_srng_access_start(soc->hal_soc, hal_srng);
9870 
9871 		hal_srng_access_end(soc->hal_soc, hal_srng);
9872 
9873 		hal_srng_set_flush_last_ts(hal_srng);
9874 	}
9875 }
9876 
9877 /**
9878  * dp_runtime_resume() - ensure DP is ready to runtime resume
9879  * @soc_hdl: Datapath soc handle
9880  * @pdev_id: id of data path pdev handle
9881  *
9882  * Resume DP for runtime PM.
9883  *
9884  * Return: QDF_STATUS
9885  */
9886 static QDF_STATUS dp_runtime_resume(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
9887 {
9888 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
9889 	int i;
9890 
9891 	if (soc->intr_mode == DP_INTR_POLL)
9892 		qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
9893 
9894 	for (i = 0; i < MAX_TCL_DATA_RINGS; i++) {
9895 		dp_flush_ring_hptp(soc, soc->tcl_data_ring[i].hal_srng);
9896 	}
9897 
9898 	dp_flush_ring_hptp(soc, soc->reo_cmd_ring.hal_srng);
9899 
9900 	return QDF_STATUS_SUCCESS;
9901 }
9902 #endif /* FEATURE_RUNTIME_PM */
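
/*
 * Illustrative runtime PM flow (not part of the driver): the bus layer is
 * expected to retry on QDF_STATUS_E_AGAIN, since pending TX completions
 * must drain before suspend. soc_hdl and pdev_id are hypothetical.
 *
 *	if (dp_runtime_suspend(soc_hdl, pdev_id) != QDF_STATUS_SUCCESS)
 *		return -EBUSY;
 *	...
 *	dp_runtime_resume(soc_hdl, pdev_id);
 */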
9903 
9904 /**
9905  * dp_tx_get_success_ack_stats() - get tx success completion count
9906  * @soc_hdl: Datapath soc handle
 * @vdev_id: vdev identifier
9908  *
9909  * Return: tx success ack count
9910  */
9911 static uint32_t dp_tx_get_success_ack_stats(struct cdp_soc_t *soc_hdl,
9912 					    uint8_t vdev_id)
9913 {
9914 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
9915 	struct cdp_vdev_stats *vdev_stats = NULL;
9916 	uint32_t tx_success;
9917 	struct dp_vdev *vdev =
9918 		(struct dp_vdev *)dp_get_vdev_from_soc_vdev_id_wifi3(soc,
9919 								     vdev_id);
9920 
9921 	if (!vdev) {
9922 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
9923 			  FL("Invalid vdev id %d"), vdev_id);
9924 		return 0;
9925 	}
9926 
9927 	vdev_stats = qdf_mem_malloc_atomic(sizeof(struct cdp_vdev_stats));
9928 	if (!vdev_stats) {
9929 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "DP alloc failure - unable to allocate vdev stats");
9931 		return 0;
9932 	}
9933 
9934 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
9935 	dp_aggregate_vdev_stats(vdev, vdev_stats);
9936 	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
9937 
9938 	tx_success = vdev_stats->tx.tx_success.num;
9939 	qdf_mem_free(vdev_stats);
9940 
9941 	return tx_success;
9942 }
9943 
9944 #ifdef WLAN_SUPPORT_DATA_STALL
9945 /**
9946  * dp_register_data_stall_detect_cb() - register data stall callback
9947  * @soc_hdl: Datapath soc handle
9948  * @pdev_id: id of data path pdev handle
9949  * @data_stall_detect_callback: data stall callback function
9950  *
9951  * Return: QDF_STATUS Enumeration
9952  */
9953 static
9954 QDF_STATUS dp_register_data_stall_detect_cb(
9955 			struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
9956 			data_stall_detect_cb data_stall_detect_callback)
9957 {
9958 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
9959 	struct dp_pdev *pdev;
9960 
9961 	pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
9962 	if (!pdev) {
9963 		dp_err("pdev NULL!");
9964 		return QDF_STATUS_E_INVAL;
9965 	}
9966 
9967 	pdev->data_stall_detect_callback = data_stall_detect_callback;
9968 	return QDF_STATUS_SUCCESS;
9969 }
9970 
9971 /**
9972  * dp_deregister_data_stall_detect_cb() - de-register data stall callback
9973  * @soc_hdl: Datapath soc handle
9974  * @pdev_id: id of data path pdev handle
9975  * @data_stall_detect_callback: data stall callback function
9976  *
9977  * Return: QDF_STATUS Enumeration
9978  */
9979 static
9980 QDF_STATUS dp_deregister_data_stall_detect_cb(
9981 			struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
9982 			data_stall_detect_cb data_stall_detect_callback)
9983 {
9984 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
9985 	struct dp_pdev *pdev;
9986 
9987 	pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
9988 	if (!pdev) {
9989 		dp_err("pdev NULL!");
9990 		return QDF_STATUS_E_INVAL;
9991 	}
9992 
9993 	pdev->data_stall_detect_callback = NULL;
9994 	return QDF_STATUS_SUCCESS;
9995 }
9996 
9997 /**
9998  * dp_txrx_post_data_stall_event() - post data stall event
9999  * @soc_hdl: Datapath soc handle
10000  * @indicator: Module triggering data stall
10001  * @data_stall_type: data stall event type
10002  * @pdev_id: pdev id
10003  * @vdev_id_bitmap: vdev id bitmap
10004  * @recovery_type: data stall recovery type
10005  *
10006  * Return: None
10007  */
10008 static void
10009 dp_txrx_post_data_stall_event(struct cdp_soc_t *soc_hdl,
10010 			      enum data_stall_log_event_indicator indicator,
10011 			      enum data_stall_log_event_type data_stall_type,
10012 			      uint32_t pdev_id, uint32_t vdev_id_bitmap,
10013 			      enum data_stall_log_recovery_type recovery_type)
10014 {
10015 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
10016 	struct data_stall_event_info data_stall_info;
10017 	struct dp_pdev *pdev;
10018 
10019 	pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
10020 	if (!pdev) {
10021 		dp_err("pdev NULL!");
10022 		return;
10023 	}
10024 
10025 	if (!pdev->data_stall_detect_callback) {
10026 		dp_err("data stall cb not registered!");
10027 		return;
10028 	}
10029 
10030 	dp_info("data_stall_type: %x pdev_id: %d",
10031 		data_stall_type, pdev_id);
10032 
10033 	data_stall_info.indicator = indicator;
10034 	data_stall_info.data_stall_type = data_stall_type;
10035 	data_stall_info.vdev_id_bitmap = vdev_id_bitmap;
10036 	data_stall_info.pdev_id = pdev_id;
10037 	data_stall_info.recovery_type = recovery_type;
10038 
10039 	pdev->data_stall_detect_callback(&data_stall_info);
10040 }
10041 #endif /* WLAN_SUPPORT_DATA_STALL */
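
/*
 * Illustrative data stall flow (not part of the driver): a detection module
 * registers its callback once per pdev, and producers then post events
 * through the same pdev; my_stall_cb and the event arguments shown are
 * placeholders.
 *
 *	dp_register_data_stall_detect_cb(soc_hdl, pdev_id, my_stall_cb);
 *	dp_txrx_post_data_stall_event(soc_hdl, indicator, stall_type,
 *				      pdev_id, vdev_id_bitmap, recovery_type);
 *
 * my_stall_cb() then runs synchronously with the populated info struct.
 */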
10042 
10043 #ifdef WLAN_FEATURE_STATS_EXT
10044 /* rx hw stats event wait timeout in ms */
10045 #define DP_REO_STATUS_STATS_TIMEOUT 1000
10046 /**
 * dp_txrx_ext_stats_request - request dp txrx extended stats
10048  * @soc_hdl: soc handle
10049  * @pdev_id: pdev id
10050  * @req: stats request
10051  *
10052  * Return: QDF_STATUS
10053  */
10054 static QDF_STATUS
10055 dp_txrx_ext_stats_request(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
10056 			  struct cdp_txrx_ext_stats *req)
10057 {
10058 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
10059 	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
10060 
10061 	if (!pdev) {
10062 		dp_err("pdev is null");
10063 		return QDF_STATUS_E_INVAL;
10064 	}
10065 
10066 	dp_aggregate_pdev_stats(pdev);
10067 
10068 	req->tx_msdu_enqueue = pdev->stats.tx_i.processed.num;
10069 	req->tx_msdu_overflow = pdev->stats.tx_i.dropped.ring_full;
10070 	req->rx_mpdu_received = soc->ext_stats.rx_mpdu_received;
10071 	req->rx_mpdu_delivered = soc->ext_stats.rx_mpdu_received;
10072 	req->rx_mpdu_missed = soc->ext_stats.rx_mpdu_missed;
10073 	req->rx_mpdu_error = soc->stats.rx.err_ring_pkts -
10074 				soc->stats.rx.rx_frags;
10075 
10076 	return QDF_STATUS_SUCCESS;
10077 }
10078 
10079 /**
10080  * dp_rx_hw_stats_cb - request rx hw stats response callback
10081  * @soc: soc handle
10082  * @cb_ctxt: callback context
10083  * @reo_status: reo command response status
10084  *
10085  * Return: None
10086  */
10087 static void dp_rx_hw_stats_cb(struct dp_soc *soc, void *cb_ctxt,
10088 			      union hal_reo_status *reo_status)
10089 {
10090 	struct dp_rx_tid *rx_tid = (struct dp_rx_tid *)cb_ctxt;
10091 	struct hal_reo_queue_status *queue_status = &reo_status->queue_status;
10092 
10093 	if (soc->ignore_reo_status_cb) {
10094 		qdf_event_set(&soc->rx_hw_stats_event);
10095 		return;
10096 	}
10097 
10098 	if (queue_status->header.status != HAL_REO_CMD_SUCCESS) {
10099 		dp_info("REO stats failure %d for TID %d",
10100 			queue_status->header.status, rx_tid->tid);
10101 		return;
10102 	}
10103 
10104 	soc->ext_stats.rx_mpdu_received += queue_status->mpdu_frms_cnt;
10105 	soc->ext_stats.rx_mpdu_missed += queue_status->late_recv_mpdu_cnt;
10106 
10107 	if (rx_tid->tid == (DP_MAX_TIDS - 1))
10108 		qdf_event_set(&soc->rx_hw_stats_event);
10109 }
10110 
10111 /**
10112  * dp_request_rx_hw_stats - request rx hardware stats
10113  * @soc_hdl: soc handle
10114  * @vdev_id: vdev id
10115  *
10116  * Return: None
10117  */
10118 static void
10119 dp_request_rx_hw_stats(struct cdp_soc_t *soc_hdl, uint8_t vdev_id)
10120 {
10121 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
10122 	struct dp_vdev *vdev = dp_get_vdev_from_soc_vdev_id_wifi3(soc, vdev_id);
10123 	struct dp_peer *peer;
10124 
10125 	if (!vdev) {
10126 		dp_err("vdev is null");
10127 		qdf_event_set(&soc->rx_hw_stats_event);
10128 		return;
10129 	}
10130 
10131 	peer = vdev->vap_bss_peer;
10132 
10133 	if (!peer || peer->delete_in_progress) {
10134 		dp_err("Peer deletion in progress");
10135 		qdf_event_set(&soc->rx_hw_stats_event);
10136 		return;
10137 	}
10138 
10139 	qdf_event_reset(&soc->rx_hw_stats_event);
10140 	dp_peer_rxtid_stats(peer, dp_rx_hw_stats_cb, NULL);
10141 }
10142 
10143 /**
10144  * dp_wait_for_ext_rx_stats - wait for rx reo status for rx stats
10145  * @soc_hdl: cdp opaque soc handle
10146  *
10147  * Return: status
10148  */
10149 static QDF_STATUS
10150 dp_wait_for_ext_rx_stats(struct cdp_soc_t *soc_hdl)
10151 {
10152 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
10153 	QDF_STATUS status;
10154 
10155 	status = qdf_wait_single_event(&soc->rx_hw_stats_event,
10156 				       DP_REO_STATUS_STATS_TIMEOUT);
10157 
10158 	return status;
10159 }
10160 #endif /* WLAN_FEATURE_STATS_EXT */
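
/*
 * Illustrative sequence (not part of the driver): kick the per-TID REO
 * stats query, block until dp_rx_hw_stats_cb() signals the last TID, then
 * collect the aggregated counters. All handles are hypothetical.
 *
 *	struct cdp_txrx_ext_stats req = {0};
 *
 *	dp_request_rx_hw_stats(soc_hdl, vdev_id);
 *	if (dp_wait_for_ext_rx_stats(soc_hdl) == QDF_STATUS_SUCCESS)
 *		dp_txrx_ext_stats_request(soc_hdl, pdev_id, &req);
 */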
10161 
10162 #ifdef DP_PEER_EXTENDED_API
10163 static struct cdp_misc_ops dp_ops_misc = {
10164 #ifdef FEATURE_WLAN_TDLS
10165 	.tx_non_std = dp_tx_non_std,
10166 #endif /* FEATURE_WLAN_TDLS */
10167 	.get_opmode = dp_get_opmode,
10168 #ifdef FEATURE_RUNTIME_PM
10169 	.runtime_suspend = dp_runtime_suspend,
10170 	.runtime_resume = dp_runtime_resume,
10171 #endif /* FEATURE_RUNTIME_PM */
10172 	.pkt_log_init = dp_pkt_log_init,
10173 	.pkt_log_con_service = dp_pkt_log_con_service,
10174 	.get_num_rx_contexts = dp_get_num_rx_contexts,
10175 	.get_tx_ack_stats = dp_tx_get_success_ack_stats,
10176 #ifdef WLAN_SUPPORT_DATA_STALL
10177 	.txrx_data_stall_cb_register = dp_register_data_stall_detect_cb,
10178 	.txrx_data_stall_cb_deregister = dp_deregister_data_stall_detect_cb,
10179 	.txrx_post_data_stall_event = dp_txrx_post_data_stall_event,
10180 #endif
10181 
10182 #ifdef WLAN_FEATURE_STATS_EXT
10183 	.txrx_ext_stats_request = dp_txrx_ext_stats_request,
10184 	.request_rx_hw_stats = dp_request_rx_hw_stats,
10185 	.wait_for_ext_rx_stats = dp_wait_for_ext_rx_stats,
10186 #endif
10187 };
10188 #endif
10189 
10190 #ifdef DP_FLOW_CTL
10191 static struct cdp_flowctl_ops dp_ops_flowctl = {
10192 	/* WIFI 3.0 DP implement as required. */
10193 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
10194 	.flow_pool_map_handler = dp_tx_flow_pool_map,
10195 	.flow_pool_unmap_handler = dp_tx_flow_pool_unmap,
10196 	.register_pause_cb = dp_txrx_register_pause_cb,
10197 	.dump_flow_pool_info = dp_tx_dump_flow_pool_info,
10198 	.tx_desc_thresh_reached = dp_tx_desc_thresh_reached,
10199 #endif /* QCA_LL_TX_FLOW_CONTROL_V2 */
10200 };
10201 
10202 static struct cdp_lflowctl_ops dp_ops_l_flowctl = {
10203 	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
10204 };
10205 #endif
10206 
10207 #ifdef IPA_OFFLOAD
10208 static struct cdp_ipa_ops dp_ops_ipa = {
10209 	.ipa_get_resource = dp_ipa_get_resource,
10210 	.ipa_set_doorbell_paddr = dp_ipa_set_doorbell_paddr,
10211 	.ipa_op_response = dp_ipa_op_response,
10212 	.ipa_register_op_cb = dp_ipa_register_op_cb,
10213 	.ipa_get_stat = dp_ipa_get_stat,
10214 	.ipa_tx_data_frame = dp_tx_send_ipa_data_frame,
10215 	.ipa_enable_autonomy = dp_ipa_enable_autonomy,
10216 	.ipa_disable_autonomy = dp_ipa_disable_autonomy,
10217 	.ipa_setup = dp_ipa_setup,
10218 	.ipa_cleanup = dp_ipa_cleanup,
10219 	.ipa_setup_iface = dp_ipa_setup_iface,
10220 	.ipa_cleanup_iface = dp_ipa_cleanup_iface,
10221 	.ipa_enable_pipes = dp_ipa_enable_pipes,
10222 	.ipa_disable_pipes = dp_ipa_disable_pipes,
10223 	.ipa_set_perf_level = dp_ipa_set_perf_level,
10224 	.ipa_rx_intrabss_fwd = dp_ipa_rx_intrabss_fwd
10225 };
10226 #endif
10227 
10228 #ifdef DP_POWER_SAVE
10229 static QDF_STATUS dp_bus_suspend(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
10230 {
10231 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
10232 	struct cdp_pdev *pdev = (struct cdp_pdev *)
10233 		dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
10234 	int timeout = SUSPEND_DRAIN_WAIT;
10235 	int drain_wait_delay = 50; /* 50 ms */
10236 
10237 	if (qdf_unlikely(!pdev)) {
10238 		dp_err("pdev is NULL");
10239 		return QDF_STATUS_E_INVAL;
10240 	}
10241 
10242 	/* Abort if there are any pending TX packets */
10243 	while (dp_get_tx_pending(pdev) > 0) {
10244 		qdf_sleep(drain_wait_delay);
10245 		if (timeout <= 0) {
10246 			dp_err("TX frames are pending, abort suspend");
10247 			return QDF_STATUS_E_TIMEOUT;
10248 		}
10249 		timeout = timeout - drain_wait_delay;
10250 	}
10251 
10252 	if (soc->intr_mode == DP_INTR_POLL)
10253 		qdf_timer_stop(&soc->int_timer);
10254 
10255 	return QDF_STATUS_SUCCESS;
10256 }
10257 
10258 static QDF_STATUS dp_bus_resume(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
10259 {
10260 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
10261 
10262 	if (soc->intr_mode == DP_INTR_POLL)
10263 		qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
10264 
10265 	return QDF_STATUS_SUCCESS;
10266 }
10267 
10268 static struct cdp_bus_ops dp_ops_bus = {
10269 	.bus_suspend = dp_bus_suspend,
10270 	.bus_resume = dp_bus_resume
10271 };
10272 #endif
10273 
10274 #ifdef DP_FLOW_CTL
10275 static struct cdp_throttle_ops dp_ops_throttle = {
10276 	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
10277 };
10278 
10279 static struct cdp_cfg_ops dp_ops_cfg = {
10280 	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
10281 };
10282 #endif
10283 
10284 #ifdef DP_PEER_EXTENDED_API
10285 static struct cdp_ocb_ops dp_ops_ocb = {
10286 	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
10287 };
10288 
10289 static struct cdp_mob_stats_ops dp_ops_mob_stats = {
10290 	.clear_stats = dp_txrx_clear_dump_stats,
10291 };
10292 
10293 /*
 * dp_peer_get_ref_find_by_addr - find peer by mac address, taking a reference
10295  * @dev: physical device instance
10296  * @peer_mac_addr: peer mac address
10297  * @debug_id: to track enum peer access
10298  *
10299  * Return: peer instance pointer
10300  */
10301 static inline void *
10302 dp_peer_get_ref_find_by_addr(struct cdp_pdev *dev, uint8_t *peer_mac_addr,
10303 			     enum peer_debug_id_type debug_id)
10304 {
10305 	struct dp_pdev *pdev = (struct dp_pdev *)dev;
10306 	struct dp_peer *peer;
10307 
10308 	peer = dp_peer_find_hash_find(pdev->soc, peer_mac_addr, 0, DP_VDEV_ALL);
10309 
10310 	if (!peer)
10311 		return NULL;
10312 
10313 	dp_info_rl("peer %pK mac: %pM", peer, peer->mac_addr.raw);
10314 
10315 	return peer;
10316 }
10317 
10318 /*
10319  * dp_peer_release_ref - release peer ref count
10320  * @peer: peer handle
10321  * @debug_id: to track enum peer access
10322  *
10323  * Return: None
10324  */
10325 static inline
10326 void dp_peer_release_ref(void *peer, enum peer_debug_id_type debug_id)
10327 {
10328 	dp_peer_unref_delete(peer);
10329 }
10330 
10331 static struct cdp_peer_ops dp_ops_peer = {
10332 	.register_peer = dp_register_peer,
10333 	.clear_peer = dp_clear_peer,
10334 	.find_peer_by_addr = dp_find_peer_by_addr,
10335 	.find_peer_by_addr_and_vdev = dp_find_peer_by_addr_and_vdev,
10336 	.peer_get_ref_by_addr = dp_peer_get_ref_find_by_addr,
10337 	.peer_release_ref = dp_peer_release_ref,
10338 	.peer_state_update = dp_peer_state_update,
10339 	.get_vdevid = dp_get_vdevid,
10340 	.get_vdev_by_peer_addr = dp_get_vdev_by_peer_addr,
10341 	.peer_get_peer_mac_addr = dp_peer_get_peer_mac_addr,
10342 	.get_vdev_for_peer = dp_get_vdev_for_peer,
10343 	.get_peer_state = dp_get_peer_state,
10344 };
10345 #endif
10346 
10347 static struct cdp_ops dp_txrx_ops = {
10348 	.cmn_drv_ops = &dp_ops_cmn,
10349 	.ctrl_ops = &dp_ops_ctrl,
10350 	.me_ops = &dp_ops_me,
10351 	.mon_ops = &dp_ops_mon,
10352 	.host_stats_ops = &dp_ops_host_stats,
10353 	.wds_ops = &dp_ops_wds,
10354 	.raw_ops = &dp_ops_raw,
10355 #ifdef PEER_FLOW_CONTROL
10356 	.pflow_ops = &dp_ops_pflow,
10357 #endif /* PEER_FLOW_CONTROL */
10358 #ifdef DP_PEER_EXTENDED_API
10359 	.misc_ops = &dp_ops_misc,
10360 	.ocb_ops = &dp_ops_ocb,
10361 	.peer_ops = &dp_ops_peer,
10362 	.mob_stats_ops = &dp_ops_mob_stats,
10363 #endif
10364 #ifdef DP_FLOW_CTL
10365 	.cfg_ops = &dp_ops_cfg,
10366 	.flowctl_ops = &dp_ops_flowctl,
10367 	.l_flowctl_ops = &dp_ops_l_flowctl,
10368 	.throttle_ops = &dp_ops_throttle,
10369 #endif
10370 #ifdef IPA_OFFLOAD
10371 	.ipa_ops = &dp_ops_ipa,
10372 #endif
10373 #ifdef DP_POWER_SAVE
10374 	.bus_ops = &dp_ops_bus,
10375 #endif
10376 #if defined(WLAN_CFR_ENABLE) && defined(WLAN_ENH_CFR_ENABLE)
10377 	.cfr_ops = &dp_ops_cfr,
10378 #endif
10379 };
10380 
10381 /*
 * dp_soc_set_txrx_ring_map() - set the default TCL ring map per interrupt context
 * @soc: DP handle for soc
10384  *
10385  * Return: Void
10386  */
10387 void dp_soc_set_txrx_ring_map(struct dp_soc *soc)
10388 {
	uint32_t i;

	for (i = 0; i < WLAN_CFG_INT_NUM_CONTEXTS; i++) {
10391 		soc->tx_ring_map[i] = dp_cpu_ring_map[DP_NSS_DEFAULT_MAP][i];
10392 	}
10393 }
10394 
10395 #if defined(QCA_WIFI_QCA8074) || defined(QCA_WIFI_QCA6018)
10396 
10397 #ifndef QCA_MEM_ATTACH_ON_WIFI3
10398 
10399 /**
10400  * dp_soc_attach_wifi3() - Attach txrx SOC
10401  * @ctrl_psoc: Opaque SOC handle from control plane
10402  * @htc_handle: Opaque HTC handle
10403  * @hif_handle: Opaque HIF handle
10404  * @qdf_osdev: QDF device
10405  * @ol_ops: Offload Operations
10406  * @device_id: Device ID
10407  *
10408  * Return: DP SOC handle on success, NULL on failure
10409  */
10410 struct cdp_soc_t *
10411 dp_soc_attach_wifi3(struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
10412 		    struct hif_opaque_softc *hif_handle,
10413 		    HTC_HANDLE htc_handle, qdf_device_t qdf_osdev,
10414 		    struct ol_if_ops *ol_ops, uint16_t device_id)
10415 {
	struct dp_soc *dp_soc = NULL;
10417 
10418 	dp_soc = dp_soc_attach(ctrl_psoc, htc_handle, qdf_osdev,
10419 			       ol_ops, device_id);
10420 	if (!dp_soc)
10421 		return NULL;
10422 
10423 	if (!dp_soc_init(dp_soc, htc_handle, hif_handle))
10424 		return NULL;
10425 
10426 	return dp_soc_to_cdp_soc_t(dp_soc);
10427 }
10428 #else
10429 
10430 /**
10431  * dp_soc_attach_wifi3() - Attach txrx SOC
10432  * @ctrl_psoc: Opaque SOC handle from control plane
10433  * @htc_handle: Opaque HTC handle
10434  * @hif_handle: Opaque HIF handle
10435  * @qdf_osdev: QDF device
10436  * @ol_ops: Offload Operations
10437  * @device_id: Device ID
10438  *
10439  * Return: DP SOC handle on success, NULL on failure
10440  */
10441 struct cdp_soc_t *
10442 dp_soc_attach_wifi3(struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
10443 		    struct hif_opaque_softc *hif_handle,
10444 		    HTC_HANDLE htc_handle, qdf_device_t qdf_osdev,
10445 		    struct ol_if_ops *ol_ops, uint16_t device_id)
10446 {
10447 	struct dp_soc *dp_soc = NULL;
10448 
10449 	dp_soc = dp_soc_attach(ctrl_psoc, htc_handle, qdf_osdev,
10450 			       ol_ops, device_id);
10451 	return dp_soc_to_cdp_soc_t(dp_soc);
10452 }
10453 
10454 #endif
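
/*
 * Illustrative attach sequencing (not part of the driver): without
 * QCA_MEM_ATTACH_ON_WIFI3 the variant above performs both attach and init;
 * with it, the caller allocates first and initializes later via
 * dp_soc_init_wifi3(). All handles are hypothetical.
 *
 *	struct cdp_soc_t *soc = dp_soc_attach_wifi3(ctrl_psoc, hif_hdl,
 *						    htc_hdl, osdev, ol_ops,
 *						    device_id);
 *	if (!soc)
 *		return QDF_STATUS_E_FAILURE;
 */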
10455 
10456 /**
10457  * dp_soc_attach() - Attach txrx SOC
10458  * @ctrl_psoc: Opaque SOC handle from control plane
10459  * @htc_handle: Opaque HTC handle
10460  * @qdf_osdev: QDF device
10461  * @ol_ops: Offload Operations
10462  * @device_id: Device ID
10463  *
10464  * Return: DP SOC handle on success, NULL on failure
10465  */
10466 static struct dp_soc *
10467 dp_soc_attach(struct cdp_ctrl_objmgr_psoc *ctrl_psoc, HTC_HANDLE htc_handle,
10468 	      qdf_device_t qdf_osdev,
10469 	      struct ol_if_ops *ol_ops, uint16_t device_id)
10470 {
10471 	int int_ctx;
	struct dp_soc *soc = NULL;
10473 	struct htt_soc *htt_soc;
10474 
10475 	soc = qdf_mem_malloc(sizeof(*soc));
10476 
10477 	if (!soc) {
10478 		dp_err("DP SOC memory allocation failed");
10479 		goto fail0;
10480 	}
10481 
10482 	int_ctx = 0;
10483 	soc->device_id = device_id;
10484 	soc->cdp_soc.ops = &dp_txrx_ops;
10485 	soc->cdp_soc.ol_ops = ol_ops;
10486 	soc->ctrl_psoc = ctrl_psoc;
10487 	soc->osdev = qdf_osdev;
10488 	soc->num_hw_dscp_tid_map = HAL_MAX_HW_DSCP_TID_MAPS;
10489 
10490 	wlan_set_srng_cfg(&soc->wlan_srng_cfg);
10491 	qdf_mem_zero(&soc->vdev_id_map, sizeof(soc->vdev_id_map));
10492 
10493 	soc->wlan_cfg_ctx = wlan_cfg_soc_attach(soc->ctrl_psoc);
10494 	if (!soc->wlan_cfg_ctx) {
10495 		dp_err("wlan_cfg_ctx failed\n");
10496 		goto fail1;
10497 	}
10498 
10499 	dp_soc_set_interrupt_mode(soc);
10500 	htt_soc = htt_soc_attach(soc, htc_handle);
10501 
10502 	if (!htt_soc)
10503 		goto fail1;
10504 
10505 	soc->htt_handle = htt_soc;
10506 
10507 	if (htt_soc_htc_prealloc(htt_soc) != QDF_STATUS_SUCCESS)
10508 		goto fail2;
10509 
10510 	return soc;
10511 fail2:
10512 	htt_soc_detach(htt_soc);
10513 fail1:
10514 	qdf_mem_free(soc);
10515 fail0:
10516 	return NULL;
10517 }
10518 
10519 /**
10520  * dp_soc_init() - Initialize txrx SOC
 * @dpsoc: Opaque DP SOC handle
10522  * @htc_handle: Opaque HTC handle
10523  * @hif_handle: Opaque HIF handle
10524  *
10525  * Return: DP SOC handle on success, NULL on failure
10526  */
10527 void *dp_soc_init(struct dp_soc *dpsoc, HTC_HANDLE htc_handle,
10528 		  struct hif_opaque_softc *hif_handle)
10529 {
10530 	int target_type;
10531 	struct dp_soc *soc = (struct dp_soc *)dpsoc;
10532 	struct htt_soc *htt_soc = soc->htt_handle;
10533 
10534 	htt_set_htc_handle(htt_soc, htc_handle);
10535 	soc->hif_handle = hif_handle;
10536 
10537 	soc->hal_soc = hif_get_hal_handle(soc->hif_handle);
10538 	if (!soc->hal_soc)
10539 		return NULL;
10540 
10541 	htt_soc_initialize(soc->htt_handle, soc->ctrl_psoc,
10542 			   htt_get_htc_handle(htt_soc),
10543 			   soc->hal_soc, soc->osdev);
10544 	target_type = hal_get_target_type(soc->hal_soc);
10545 	switch (target_type) {
10546 	case TARGET_TYPE_QCA6290:
10547 		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
10548 					       REO_DST_RING_SIZE_QCA6290);
10549 		soc->ast_override_support = 1;
10550 		soc->da_war_enabled = false;
10551 		break;
10552 #if defined(QCA_WIFI_QCA6390) || defined(QCA_WIFI_QCA6490)
10553 	case TARGET_TYPE_QCA6390:
10554 	case TARGET_TYPE_QCA6490:
10555 		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
10556 					       REO_DST_RING_SIZE_QCA6290);
10557 		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, true);
10558 		soc->ast_override_support = 1;
10559 		if (soc->cdp_soc.ol_ops->get_con_mode &&
10560 		    soc->cdp_soc.ol_ops->get_con_mode() ==
10561 		    QDF_GLOBAL_MONITOR_MODE) {
10562 			int int_ctx;
10563 
10564 			for (int_ctx = 0; int_ctx < WLAN_CFG_INT_NUM_CONTEXTS; int_ctx++) {
10565 				soc->wlan_cfg_ctx->int_rx_ring_mask[int_ctx] = 0;
10566 				soc->wlan_cfg_ctx->int_rxdma2host_ring_mask[int_ctx] = 0;
10567 			}
10568 		}
10569 		soc->wlan_cfg_ctx->rxdma1_enable = 0;
10570 		break;
10571 #endif /* QCA_WIFI_QCA6390 || QCA_WIFI_QCA6490 */
10572 
10573 	case TARGET_TYPE_QCA8074:
10574 		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
10575 					       REO_DST_RING_SIZE_QCA8074);
10576 		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, true);
10577 		soc->da_war_enabled = true;
10578 		soc->is_rx_fse_full_cache_invalidate_war_enabled = true;
10579 		break;
10580 	case TARGET_TYPE_QCA8074V2:
10581 	case TARGET_TYPE_QCA6018:
10582 		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
10583 					       REO_DST_RING_SIZE_QCA8074);
10584 		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, false);
10585 		soc->hw_nac_monitor_support = 1;
10586 		soc->ast_override_support = 1;
10587 		soc->per_tid_basize_max_tid = 8;
10588 		soc->num_hw_dscp_tid_map = HAL_MAX_HW_DSCP_TID_V2_MAPS;
10589 		soc->da_war_enabled = false;
10590 		soc->is_rx_fse_full_cache_invalidate_war_enabled = true;
10591 		break;
10592 	case TARGET_TYPE_QCN9000:
10593 		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
10594 					       REO_DST_RING_SIZE_QCN9000);
10595 		soc->ast_override_support = 1;
10596 		soc->da_war_enabled = false;
10597 		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, false);
10598 		soc->hw_nac_monitor_support = 1;
10599 		soc->per_tid_basize_max_tid = 8;
10600 		soc->num_hw_dscp_tid_map = HAL_MAX_HW_DSCP_TID_V2_MAPS;
10601 		break;
10602 	default:
10603 		qdf_print("%s: Unknown tgt type %d\n", __func__, target_type);
10604 		qdf_assert_always(0);
10605 		break;
10606 	}
10607 
10608 	wlan_cfg_set_rx_hash(soc->wlan_cfg_ctx,
10609 			     cfg_get(soc->ctrl_psoc, CFG_DP_RX_HASH));
10610 	soc->cce_disable = false;
10611 
10612 	qdf_atomic_init(&soc->num_tx_outstanding);
10613 	soc->num_tx_allowed =
10614 		wlan_cfg_get_dp_soc_tx_device_limit(soc->wlan_cfg_ctx);
10615 
10616 	if (soc->cdp_soc.ol_ops->get_dp_cfg_param) {
10617 		int ret = soc->cdp_soc.ol_ops->get_dp_cfg_param(soc->ctrl_psoc,
10618 				CDP_CFG_MAX_PEER_ID);
10619 
10620 		if (ret != -EINVAL)
10621 			wlan_cfg_set_max_peer_id(soc->wlan_cfg_ctx, ret);
10623 
10624 		ret = soc->cdp_soc.ol_ops->get_dp_cfg_param(soc->ctrl_psoc,
10625 				CDP_CFG_CCE_DISABLE);
10626 		if (ret == 1)
10627 			soc->cce_disable = true;
10628 	}
10629 
10630 	qdf_spinlock_create(&soc->peer_ref_mutex);
10631 	qdf_spinlock_create(&soc->ast_lock);
10632 
10633 	qdf_spinlock_create(&soc->reo_desc_freelist_lock);
10634 	qdf_list_create(&soc->reo_desc_freelist, REO_DESC_FREELIST_SIZE);
10635 
10636 	/* Fill the Tx/Rx CPU ring map */
10637 	dp_soc_set_txrx_ring_map(soc);
10638 
10639 	qdf_spinlock_create(&soc->htt_stats.lock);
10640 	/* initialize work queue for stats processing */
10641 	qdf_create_work(0, &soc->htt_stats.work, htt_t2h_stats_handler, soc);
10642 
10643 	return soc;
10644 }
10646 
10647 /**
10648  * dp_soc_init_wifi3() - Initialize txrx SOC
10649  * @soc: Opaque DP SOC handle
10650  * @ctrl_psoc: Opaque SOC handle from control plane (Unused)
10651  * @hif_handle: Opaque HIF handle
10652  * @htc_handle: Opaque HTC handle
10653  * @qdf_osdev: QDF device (Unused)
10654  * @ol_ops: Offload Operations (Unused)
10655  * @device_id: Device ID (Unused)
10656  *
10657  * Return: DP SOC handle on success, NULL on failure
10658  */
10659 void *dp_soc_init_wifi3(struct cdp_soc_t *soc,
10660 			struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
10661 			struct hif_opaque_softc *hif_handle,
10662 			HTC_HANDLE htc_handle, qdf_device_t qdf_osdev,
10663 			struct ol_if_ops *ol_ops, uint16_t device_id)
10664 {
10665 	return dp_soc_init((struct dp_soc *)soc, htc_handle, hif_handle);
10666 }
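
/*
 * Illustrative usage sketch (not part of the driver source): an attach
 * path would typically pair a SOC attach with this init call; the handle
 * names below are hypothetical.
 *
 *	void *dp_hdl = dp_soc_init_wifi3(cdp_soc, ctrl_psoc, hif_hdl,
 *					 htc_hdl, qdf_dev, ol_ops, dev_id);
 *	if (!dp_hdl)
 *		return QDF_STATUS_E_FAILURE;	// init failed, tear down
 */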
10667 
10668 #endif
10669 
10670 /*
10671  * dp_get_pdev_for_mac_id() - Return pdev for mac_id
10672  *
10673  * @soc: handle to DP soc
10674  * @mac_id: MAC id
10675  *
10676  * Return: pdev corresponding to the MAC id
10677  */
10678 void *dp_get_pdev_for_mac_id(struct dp_soc *soc, uint32_t mac_id)
10679 {
10680 	if (wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx))
10681 		return soc->pdev_list[mac_id];
10682 
10683 	/* Typically for MCL, as there is only one PDEV */
10684 	return soc->pdev_list[0];
10685 }
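
/*
 * Example (illustrative): on MCL targets without per-pdev LMAC rings,
 * every mac_id maps to pdev_list[0], so a caller can iterate MAC ids
 * uniformly; the helper named below is hypothetical.
 *
 *	struct dp_pdev *pdev = dp_get_pdev_for_mac_id(soc, mac_id);
 *
 *	if (pdev)
 *		dp_ring_config_for_mac(pdev);	// hypothetical helper
 */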
10686 
10687 /*
10688  * dp_is_hw_dbs_enable() - Check DBS 2x2 capability and clamp MAC ring count
10689  * @soc:		DP SoC context
10690  * @max_mac_rings:	No of MAC rings; clamped to 1 when not DBS capable
10691  *
10692  * Return: None
10693  */
10694 static
10695 void dp_is_hw_dbs_enable(struct dp_soc *soc,
10696 			 int *max_mac_rings)
10697 {
10698 	bool dbs_enable = false;
10699 	if (soc->cdp_soc.ol_ops->is_hw_dbs_2x2_capable)
10700 		dbs_enable = soc->cdp_soc.ol_ops->
10701 		is_hw_dbs_2x2_capable((void *)soc->ctrl_psoc);
10702 
10703 	*max_mac_rings = dbs_enable ? *max_mac_rings : 1;
10704 }
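
/*
 * Usage sketch (illustrative): callers start from the configured ring
 * count and let this helper clamp it to 1 when the HW is not 2x2 DBS
 * capable, as dp_cfr_filter() and dp_set_pktlog_wifi3() below both do:
 *
 *	int max_mac_rings = wlan_cfg_get_num_mac_rings(pdev->wlan_cfg_ctx);
 *
 *	dp_is_hw_dbs_enable(soc, &max_mac_rings);
 *	for (mac_id = 0; mac_id < max_mac_rings; mac_id++)
 *		...configure the ring for this MAC...
 */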
10705 
10706 #if defined(WLAN_CFR_ENABLE) && defined(WLAN_ENH_CFR_ENABLE)
10707 /*
10708  * dp_cfr_filter() -  Configure HOST RX monitor status ring for CFR
10709  * @soc_hdl: Datapath soc handle
10710  * @pdev_id: id of data path pdev handle
10711  * @enable: Enable/Disable CFR
10712  * @filter_val: Flag to select the filter for monitor mode
10713  */
10714 static void dp_cfr_filter(struct cdp_soc_t *soc_hdl,
10715 			  uint8_t pdev_id,
10716 			  bool enable,
10717 			  struct cdp_monitor_filter *filter_val)
10718 {
10719 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
10720 	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
10721 	struct htt_rx_ring_tlv_filter htt_tlv_filter = {0};
10722 	int max_mac_rings;
10723 	uint8_t mac_id = 0;
10724 
10725 	if (!pdev || pdev->monitor_vdev) {
10726 		dp_info("No action needed: invalid pdev or monitor mode enabled");
10727 		return;
10728 	}
10729 	max_mac_rings = wlan_cfg_get_num_mac_rings(pdev->wlan_cfg_ctx);
10730 	pdev->cfr_rcc_mode = false;
10731 	dp_is_hw_dbs_enable(soc, &max_mac_rings);
10732 
10733 	dp_debug("Max_mac_rings %d", max_mac_rings);
10734 	dp_info("enable : %d, mode: 0x%x", enable, filter_val->mode);
10735 
10736 	if (enable) {
10737 		pdev->cfr_rcc_mode = true;
10738 
10739 		htt_tlv_filter.ppdu_start = 1;
10740 		htt_tlv_filter.ppdu_end = 1;
10741 		htt_tlv_filter.ppdu_end_user_stats = 1;
10742 		htt_tlv_filter.ppdu_end_user_stats_ext = 1;
10743 		htt_tlv_filter.ppdu_end_status_done = 1;
10744 		htt_tlv_filter.mpdu_start = 1;
10745 		htt_tlv_filter.offset_valid = false;
10746 
10747 		htt_tlv_filter.enable_fp =
10748 			(filter_val->mode & MON_FILTER_PASS) ? 1 : 0;
10749 		htt_tlv_filter.enable_md = 0;
10750 		htt_tlv_filter.enable_mo =
10751 			(filter_val->mode & MON_FILTER_OTHER) ? 1 : 0;
10752 		htt_tlv_filter.fp_mgmt_filter = filter_val->fp_mgmt;
10753 		htt_tlv_filter.fp_ctrl_filter = filter_val->fp_ctrl;
10754 		htt_tlv_filter.fp_data_filter = filter_val->fp_data;
10755 		htt_tlv_filter.mo_mgmt_filter = filter_val->mo_mgmt;
10756 		htt_tlv_filter.mo_ctrl_filter = filter_val->mo_ctrl;
10757 		htt_tlv_filter.mo_data_filter = filter_val->mo_data;
10758 	}
10759 
10760 	for (mac_id = 0; mac_id < max_mac_rings; mac_id++) {
10761 		int mac_for_pdev =
10762 			dp_get_mac_id_for_pdev(mac_id,
10763 					       pdev->pdev_id);
10764 
10765 		htt_h2t_rx_ring_cfg(soc->htt_handle,
10766 				    mac_for_pdev,
10767 				    pdev->rxdma_mon_status_ring[mac_id]
10768 				    .hal_srng,
10769 				    RXDMA_MONITOR_STATUS,
10770 				    RX_BUFFER_SIZE,
10771 				    &htt_tlv_filter);
10772 	}
10773 }
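
/*
 * Example (illustrative) caller setup for dp_cfr_filter(): enable RCC
 * with the filter-pass path for management frames only. The field values
 * are a sketch; real masks come from the MON_FILTER_* and FILTER_MGMT_*
 * definitions.
 *
 *	struct cdp_monitor_filter filter = {0};
 *
 *	filter.mode = MON_FILTER_PASS;
 *	filter.fp_mgmt = FILTER_MGMT_ALL;
 *	dp_cfr_filter(soc_hdl, pdev_id, true, &filter);
 */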
10774 #endif
10775 
10776 /*
10777  * dp_is_soc_reinit() - Check if SoC re-initialization is in progress
10778  * @soc: DP SoC context
10779  *
10780  * Return: true if the SoC is being re-initialized, false otherwise
10781  */
10782 bool dp_is_soc_reinit(struct dp_soc *soc)
10783 {
10784 	return soc->dp_soc_reinit;
10785 }
10786 
10787 /*
10788  * dp_set_pktlog_wifi3() - Configure pktlog filters for the given WDI event
10789  * @pdev: Datapath PDEV handle
10790  * @event: which event's notifications are being subscribed to
10791  * @enable: WDI event subscribe or not (true or false)
10792  *
10793  * Return: 0 on success
10794  */
10795 #ifdef WDI_EVENT_ENABLE
10796 int dp_set_pktlog_wifi3(struct dp_pdev *pdev, uint32_t event,
10797 		bool enable)
10798 {
10799 	struct dp_soc *soc = NULL;
10800 	struct htt_rx_ring_tlv_filter htt_tlv_filter = {0};
10801 	int max_mac_rings = wlan_cfg_get_num_mac_rings
10802 					(pdev->wlan_cfg_ctx);
10803 	uint8_t mac_id = 0;
10804 
10805 	soc = pdev->soc;
10806 	dp_is_hw_dbs_enable(soc, &max_mac_rings);
10807 
10808 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
10809 			FL("Max_mac_rings %d"),
10810 			max_mac_rings);
10811 
10812 	if (enable) {
10813 		switch (event) {
10814 		case WDI_EVENT_RX_DESC:
10815 			if (pdev->monitor_vdev) {
10816 				/* Nothing needs to be done if monitor mode is
10817 				 * enabled
10818 				 */
10819 				return 0;
10820 			}
10821 			if (pdev->rx_pktlog_mode != DP_RX_PKTLOG_FULL) {
10822 				pdev->rx_pktlog_mode = DP_RX_PKTLOG_FULL;
10823 				htt_tlv_filter.mpdu_start = 1;
10824 				htt_tlv_filter.msdu_start = 1;
10825 				htt_tlv_filter.msdu_end = 1;
10826 				htt_tlv_filter.mpdu_end = 1;
10827 				htt_tlv_filter.packet_header = 1;
10828 				htt_tlv_filter.attention = 1;
10829 				htt_tlv_filter.ppdu_start = 1;
10830 				htt_tlv_filter.ppdu_end = 1;
10831 				htt_tlv_filter.ppdu_end_user_stats = 1;
10832 				htt_tlv_filter.ppdu_end_user_stats_ext = 1;
10833 				htt_tlv_filter.ppdu_end_status_done = 1;
10834 				htt_tlv_filter.enable_fp = 1;
10835 				htt_tlv_filter.fp_mgmt_filter = FILTER_MGMT_ALL;
10836 				htt_tlv_filter.fp_ctrl_filter = FILTER_CTRL_ALL;
10837 				htt_tlv_filter.fp_data_filter = FILTER_DATA_ALL;
10838 				htt_tlv_filter.mo_mgmt_filter = FILTER_MGMT_ALL;
10839 				htt_tlv_filter.mo_ctrl_filter = FILTER_CTRL_ALL;
10840 				htt_tlv_filter.mo_data_filter = FILTER_DATA_ALL;
10841 				htt_tlv_filter.offset_valid = false;
10842 
10843 				for (mac_id = 0; mac_id < max_mac_rings;
10844 								mac_id++) {
10845 					int mac_for_pdev =
10846 						dp_get_mac_id_for_pdev(mac_id,
10847 								pdev->pdev_id);
10848 
10849 					htt_h2t_rx_ring_cfg(soc->htt_handle,
10850 					 mac_for_pdev,
10851 					 pdev->rxdma_mon_status_ring[mac_id]
10852 					 .hal_srng,
10853 					 RXDMA_MONITOR_STATUS,
10854 					 RX_BUFFER_SIZE,
10855 					 &htt_tlv_filter);
10856 
10857 				}
10858 
10859 				if (soc->reap_timer_init)
10860 					qdf_timer_mod(&soc->mon_reap_timer,
10861 					DP_INTR_POLL_TIMER_MS);
10862 			}
10863 			break;
10864 
10865 		case WDI_EVENT_LITE_RX:
10866 			if (pdev->monitor_vdev) {
10867 				/* Nothing needs to be done if monitor mode is
10868 				 * enabled
10869 				 */
10870 				return 0;
10871 			}
10872 
10873 			if (pdev->rx_pktlog_mode != DP_RX_PKTLOG_LITE) {
10874 				pdev->rx_pktlog_mode = DP_RX_PKTLOG_LITE;
10875 
10876 				htt_tlv_filter.ppdu_start = 1;
10877 				htt_tlv_filter.ppdu_end = 1;
10878 				htt_tlv_filter.ppdu_end_user_stats = 1;
10879 				htt_tlv_filter.ppdu_end_user_stats_ext = 1;
10880 				htt_tlv_filter.ppdu_end_status_done = 1;
10881 				htt_tlv_filter.mpdu_start = 1;
10882 				htt_tlv_filter.enable_fp = 1;
10883 				htt_tlv_filter.fp_mgmt_filter = FILTER_MGMT_ALL;
10884 				htt_tlv_filter.fp_ctrl_filter = FILTER_CTRL_ALL;
10885 				htt_tlv_filter.fp_data_filter = FILTER_DATA_ALL;
10886 				htt_tlv_filter.mo_mgmt_filter = FILTER_MGMT_ALL;
10887 				htt_tlv_filter.mo_ctrl_filter = FILTER_CTRL_ALL;
10888 				htt_tlv_filter.mo_data_filter = FILTER_DATA_ALL;
10889 				htt_tlv_filter.offset_valid = false;
10890 
10891 				for (mac_id = 0; mac_id < max_mac_rings;
10892 								mac_id++) {
10893 					int mac_for_pdev =
10894 						dp_get_mac_id_for_pdev(mac_id,
10895 								pdev->pdev_id);
10896 
10897 					htt_h2t_rx_ring_cfg(soc->htt_handle,
10898 					mac_for_pdev,
10899 					pdev->rxdma_mon_status_ring[mac_id]
10900 					.hal_srng,
10901 					RXDMA_MONITOR_STATUS,
10902 					RX_BUFFER_SIZE_PKTLOG_LITE,
10903 					&htt_tlv_filter);
10904 				}
10905 
10906 				if (soc->reap_timer_init)
10907 					qdf_timer_mod(&soc->mon_reap_timer,
10908 					DP_INTR_POLL_TIMER_MS);
10909 			}
10910 			break;
10911 
10912 		case WDI_EVENT_LITE_T2H:
10913 			if (pdev->monitor_vdev) {
10914 				/* Nothing needs to be done if monitor mode is
10915 				 * enabled
10916 				 */
10917 				return 0;
10918 			}
10919 
10920 			for (mac_id = 0; mac_id < max_mac_rings; mac_id++) {
10921 				int mac_for_pdev = dp_get_mac_id_for_pdev(
10922 							mac_id,	pdev->pdev_id);
10923 
10924 				pdev->pktlog_ppdu_stats = true;
10925 				dp_h2t_cfg_stats_msg_send(pdev,
10926 					DP_PPDU_TXLITE_STATS_BITMASK_CFG,
10927 					mac_for_pdev);
10928 			}
10929 			break;
10930 
10931 		default:
10932 			/* Nothing needs to be done for other pktlog types */
10933 			break;
10934 		}
10935 	} else {
10936 		switch (event) {
10937 		case WDI_EVENT_RX_DESC:
10938 		case WDI_EVENT_LITE_RX:
10939 			if (pdev->monitor_vdev) {
10940 				/* Nothing needs to be done if monitor mode is
10941 				 * enabled
10942 				 */
10943 				return 0;
10944 			}
10945 			if (pdev->rx_pktlog_mode != DP_RX_PKTLOG_DISABLED) {
10946 				pdev->rx_pktlog_mode = DP_RX_PKTLOG_DISABLED;
10947 
10948 				for (mac_id = 0; mac_id < max_mac_rings;
10949 								mac_id++) {
10950 					int mac_for_pdev =
10951 						dp_get_mac_id_for_pdev(mac_id,
10952 								pdev->pdev_id);
10953 
10954 					htt_h2t_rx_ring_cfg(soc->htt_handle,
10955 					  mac_for_pdev,
10956 					  pdev->rxdma_mon_status_ring[mac_id]
10957 					  .hal_srng,
10958 					  RXDMA_MONITOR_STATUS,
10959 					  RX_BUFFER_SIZE,
10960 					  &htt_tlv_filter);
10961 				}
10962 
10963 				if (soc->reap_timer_init)
10964 					qdf_timer_stop(&soc->mon_reap_timer);
10965 			}
10966 			break;
10967 		case WDI_EVENT_LITE_T2H:
10968 			if (pdev->monitor_vdev) {
10969 				/* Nothing needs to be done if monitor mode is
10970 				 * enabled
10971 				 */
10972 				return 0;
10973 			}
10974 			/* Pass value 0 to disable HTT_H2T_MSG_TYPE_PPDU_STATS_CFG
10975 			 * in the FW. Once the proper macros are defined in the
10976 			 * htt header file, use them here instead.
10977 			 */
10978 			for (mac_id = 0; mac_id < max_mac_rings; mac_id++) {
10979 				int mac_for_pdev =
10980 						dp_get_mac_id_for_pdev(mac_id,
10981 								pdev->pdev_id);
10982 
10983 				pdev->pktlog_ppdu_stats = false;
10984 				if (!pdev->enhanced_stats_en && !pdev->tx_sniffer_enable && !pdev->mcopy_mode) {
10985 					dp_h2t_cfg_stats_msg_send(pdev, 0,
10986 								mac_for_pdev);
10987 				} else if (pdev->tx_sniffer_enable || pdev->mcopy_mode) {
10988 					dp_h2t_cfg_stats_msg_send(pdev, DP_PPDU_STATS_CFG_SNIFFER,
10989 								mac_for_pdev);
10990 				} else if (pdev->enhanced_stats_en) {
10991 					dp_h2t_cfg_stats_msg_send(pdev, DP_PPDU_STATS_CFG_ENH_STATS,
10992 								mac_for_pdev);
10993 				}
10994 			}
10995 
10996 			break;
10997 		default:
10998 			/* Nothing needs to be done for other pktlog types */
10999 			break;
11000 		}
11001 	}
11002 	return 0;
11003 }
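
/*
 * Usage sketch (illustrative): a pktlog WDI subscriber could toggle full
 * RX descriptor logging as below; the surrounding flow is hypothetical.
 *
 *	dp_set_pktlog_wifi3(pdev, WDI_EVENT_RX_DESC, true);	// start
 *	...capture...
 *	dp_set_pktlog_wifi3(pdev, WDI_EVENT_RX_DESC, false);	// stop
 */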
11004 #endif
11005 
11006 /**
11007  * dp_bucket_index() - Return delay bucket index for a measured delay
11008  *
11009  * @delay: delay measured, in ms
11010  * @array: array of bucket boundaries used to map the delay to an index
11011  *
11012  * Return: index of the matching delay bucket
11013  */
11014 static uint8_t dp_bucket_index(uint32_t delay, uint16_t *array)
11015 {
11016 	uint8_t i = CDP_DELAY_BUCKET_0;
11017 
11018 	for (; i < CDP_DELAY_BUCKET_MAX - 1; i++) {
11019 		if (delay >= array[i] && delay <= array[i + 1])
11020 			return i;
11021 	}
11022 
11023 	return (CDP_DELAY_BUCKET_MAX - 1);
11024 }
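
/*
 * Worked example (illustrative): with the software-enqueue boundaries
 * {0, 1, 2, ..., 12}, a 5 ms delay first matches array[4] <= 5 <= array[5]
 * and is counted in bucket 4; any delay beyond the last boundary falls
 * through the loop and lands in bucket CDP_DELAY_BUCKET_MAX - 1. The loop
 * stops one entry early so that array[i + 1] never reads out of bounds.
 */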
11025 
11026 /**
11027  * dp_fill_delay_buckets() - Fill delay statistics bucket for each
11028  *				type of delay
11029  *
11030  * @pdev: pdev handle
11031  * @delay: delay in ms
11032  * @tid: tid value
11033  * @mode: type of tx delay mode
11034  * @ring_id: ring number
11035  * Return: pointer to cdp_delay_stats structure
11036  */
11037 static struct cdp_delay_stats *
11038 dp_fill_delay_buckets(struct dp_pdev *pdev, uint32_t delay,
11039 		      uint8_t tid, uint8_t mode, uint8_t ring_id)
11040 {
11041 	uint8_t delay_index = 0;
11042 	struct cdp_tid_tx_stats *tstats =
11043 		&pdev->stats.tid_stats.tid_tx_stats[ring_id][tid];
11044 	struct cdp_tid_rx_stats *rstats =
11045 		&pdev->stats.tid_stats.tid_rx_stats[ring_id][tid];
11046 	/*
11047 	 * cdp_fw_to_hw_delay_range
11048 	 * Fw to hw delay ranges in milliseconds
11049 	 */
11050 	uint16_t cdp_fw_to_hw_delay[CDP_DELAY_BUCKET_MAX] = {
11051 		0, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 250, 500};
11052 
11053 	/*
11054 	 * cdp_sw_enq_delay_range
11055 	 * Software enqueue delay ranges in milliseconds
11056 	 */
11057 	uint16_t cdp_sw_enq_delay[CDP_DELAY_BUCKET_MAX] = {
11058 		0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12};
11059 
11060 	/*
11061 	 * cdp_intfrm_delay_range
11062 	 * Interframe delay ranges in milliseconds
11063 	 */
11064 	uint16_t cdp_intfrm_delay[CDP_DELAY_BUCKET_MAX] = {
11065 		0, 5, 10, 15, 20, 25, 30, 35, 40, 45, 50, 55, 60};
11066 
11067 	/*
11068 	 * Update delay stats in proper bucket
11069 	 */
11070 	switch (mode) {
11071 	/* Software Enqueue delay ranges */
11072 	case CDP_DELAY_STATS_SW_ENQ:
11073 
11074 		delay_index = dp_bucket_index(delay, cdp_sw_enq_delay);
11075 		tstats->swq_delay.delay_bucket[delay_index]++;
11076 		return &tstats->swq_delay;
11077 
11078 	/* Tx Completion delay ranges */
11079 	case CDP_DELAY_STATS_FW_HW_TRANSMIT:
11080 
11081 		delay_index = dp_bucket_index(delay, cdp_fw_to_hw_delay);
11082 		tstats->hwtx_delay.delay_bucket[delay_index]++;
11083 		return &tstats->hwtx_delay;
11084 
11085 	/* Interframe tx delay ranges */
11086 	case CDP_DELAY_STATS_TX_INTERFRAME:
11087 
11088 		delay_index = dp_bucket_index(delay, cdp_intfrm_delay);
11089 		tstats->intfrm_delay.delay_bucket[delay_index]++;
11090 		return &tstats->intfrm_delay;
11091 
11092 	/* Interframe rx delay ranges */
11093 	case CDP_DELAY_STATS_RX_INTERFRAME:
11094 
11095 		delay_index = dp_bucket_index(delay, cdp_intfrm_delay);
11096 		rstats->intfrm_delay.delay_bucket[delay_index]++;
11097 		return &rstats->intfrm_delay;
11098 
11099 	/* Delay from ring reap to indication to the network stack */
11100 	case CDP_DELAY_STATS_REAP_STACK:
11101 
11102 		delay_index = dp_bucket_index(delay, cdp_intfrm_delay);
11103 		rstats->to_stack_delay.delay_bucket[delay_index]++;
11104 		return &rstats->to_stack_delay;
11105 	default:
11106 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
11107 			  "%s Incorrect delay mode: %d", __func__, mode);
11108 	}
11109 
11110 	return NULL;
11111 }
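
/*
 * Note (illustrative): the boundary tables above are rebuilt on the stack
 * for every call; declaring them static const would avoid that. As an
 * example of the routing, mode CDP_DELAY_STATS_SW_ENQ with tid 0 updates
 * pdev->stats.tid_stats.tid_tx_stats[ring_id][0].swq_delay.
 */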
11112 
11113 /**
11114  * dp_update_delay_stats() - Update delay statistics in structure
11115  *				and fill min, max and avg delay
11116  *
11117  * @pdev: pdev handle
11118  * @delay: delay in ms
11119  * @tid: tid value
11120  * @mode: type of tx delay mode
11121  * @ring_id: ring number
11122  * Return: none
11123  */
11124 void dp_update_delay_stats(struct dp_pdev *pdev, uint32_t delay,
11125 			   uint8_t tid, uint8_t mode, uint8_t ring_id)
11126 {
11127 	struct cdp_delay_stats *dstats = NULL;
11128 
11129 	/*
11130 	 * Delay ranges are different for different delay modes
11131 	 * Get the correct index to update delay bucket
11132 	 */
11133 	dstats = dp_fill_delay_buckets(pdev, delay, tid, mode, ring_id);
11134 	if (qdf_unlikely(!dstats))
11135 		return;
11136 
11137 	if (delay != 0) {
11138 		/*
11139 		 * Compute minimum,average and maximum
11140 		 * delay
11141 		 */
11142 		if (delay < dstats->min_delay)
11143 			dstats->min_delay = delay;
11144 
11145 		if (delay > dstats->max_delay)
11146 			dstats->max_delay = delay;
11147 
11148 		/*
11149 		 * Running average: fold each new sample into the previous mean
11150 		 */
11151 		if (!dstats->avg_delay)
11152 			dstats->avg_delay = delay;
11153 		else
11154 			dstats->avg_delay = ((delay + dstats->avg_delay) / 2);
11155 	}
11156 }
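
/*
 * Usage sketch (illustrative): a TX-completion path might compute the
 * FW-to-HW delay in ms and fold it into the per-TID histogram; the
 * timestamp variables below are hypothetical.
 *
 *	uint32_t delay = curr_ts_ms - ppdu_start_ts_ms;
 *
 *	dp_update_delay_stats(pdev, delay, tid,
 *			      CDP_DELAY_STATS_FW_HW_TRANSMIT, ring_id);
 *
 * With the averaging above, avg_delay after samples of 10, 20 and 30 ms
 * is ((10 + 20) / 2 + 30) / 2 = 22 ms (integer division), so recent
 * samples are weighted more heavily than older ones.
 */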
11157