xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/dp_main.c (revision 302a1d9701784af5f4797b1a9fe07ae820b51907)
1 /*
2  * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include <qdf_types.h>
20 #include <qdf_lock.h>
21 #include <qdf_net_types.h>
22 #include <qdf_lro.h>
23 #include <qdf_module.h>
24 #include <hal_hw_headers.h>
25 #include <hal_api.h>
26 #include <hif.h>
27 #include <htt.h>
28 #include <wdi_event.h>
29 #include <queue.h>
30 #include "dp_htt.h"
31 #include "dp_types.h"
32 #include "dp_internal.h"
33 #include "dp_tx.h"
34 #include "dp_tx_desc.h"
35 #include "dp_rx.h"
36 #include <cdp_txrx_handle.h>
37 #include <wlan_cfg.h>
38 #include "cdp_txrx_cmn_struct.h"
39 #include "cdp_txrx_stats_struct.h"
40 #include "cdp_txrx_cmn_reg.h"
41 #include <qdf_util.h>
42 #include "dp_peer.h"
43 #include "dp_rx_mon.h"
44 #include "htt_stats.h"
45 #include "qdf_mem.h"   /* qdf_mem_malloc,free */
46 #include "cfg_ucfg_api.h"
47 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
48 #include "cdp_txrx_flow_ctrl_v2.h"
49 #else
50 static inline void
51 cdp_dump_flow_pool_info(struct cdp_soc_t *soc)
52 {
53 	return;
54 }
55 #endif
56 #include "dp_ipa.h"
57 
58 #include "dp_cal_client_api.h"
59 
60 #ifdef CONFIG_MCL
61 #ifndef REMOVE_PKT_LOG
62 #include <pktlog_ac_api.h>
63 #include <pktlog_ac.h>
64 #endif
65 #endif
66 static void dp_pktlogmod_exit(struct dp_pdev *handle);
67 static void *dp_peer_create_wifi3(struct cdp_vdev *vdev_handle,
68 				uint8_t *peer_mac_addr,
69 				struct cdp_ctrl_objmgr_peer *ctrl_peer);
70 static void dp_peer_delete_wifi3(void *peer_handle, uint32_t bitmap);
71 static void dp_ppdu_ring_reset(struct dp_pdev *pdev);
72 static void dp_ppdu_ring_cfg(struct dp_pdev *pdev);
73 
74 #define DP_INTR_POLL_TIMER_MS	10
75 #define DP_WDS_AGING_TIMER_DEFAULT_MS	120000
76 #define DP_MCS_LENGTH (6*MAX_MCS)
77 #define DP_NSS_LENGTH (6*SS_COUNT)
78 #define DP_RXDMA_ERR_LENGTH (6*HAL_RXDMA_ERR_MAX)
79 #define DP_REO_ERR_LENGTH (6*HAL_REO_ERR_MAX)
80 #define DP_MAX_MCS_STRING_LEN 30
81 #define DP_CURR_FW_STATS_AVAIL 19
82 #define DP_HTT_DBG_EXT_STATS_MAX 256
83 #define DP_MAX_SLEEP_TIME 100
84 
85 #ifdef IPA_OFFLOAD
86 /* Exclude IPA rings from the interrupt context */
87 #define TX_RING_MASK_VAL	0xb
88 #define RX_RING_MASK_VAL	0x7
89 #else
90 #define TX_RING_MASK_VAL	0xF
91 #define RX_RING_MASK_VAL	0xF
92 #endif
93 
94 #define STR_MAXLEN	64
95 
96 #define DP_PPDU_STATS_CFG_ALL 0xFFFF
97 
98 /* PPDU stats mask sent to FW to enable enhanced stats */
99 #define DP_PPDU_STATS_CFG_ENH_STATS 0xE67
100 /* PPDU stats mask sent to FW to support debug sniffer feature */
101 #define DP_PPDU_STATS_CFG_SNIFFER 0x2FFF
102 /* PPDU stats mask sent to FW to support BPR feature*/
103 #define DP_PPDU_STATS_CFG_BPR 0x2000
104 /* PPDU stats mask sent to FW to support BPR and enhanced stats feature */
105 #define DP_PPDU_STATS_CFG_BPR_ENH (DP_PPDU_STATS_CFG_BPR | \
106 				   DP_PPDU_STATS_CFG_ENH_STATS)
107 /* PPDU stats mask sent to FW to support BPR and pcktlog stats feature */
108 #define DP_PPDU_STATS_CFG_BPR_PKTLOG (DP_PPDU_STATS_CFG_BPR | \
109 				      DP_PPDU_TXLITE_STATS_BITMASK_CFG)
110 
111 #define RNG_ERR		"SRNG setup failed for"
112 /**
113  * default_dscp_tid_map - Default DSCP-TID mapping
114  *
115  * DSCP        TID
116  * 000000      0
117  * 001000      1
118  * 010000      2
119  * 011000      3
120  * 100000      4
121  * 101000      5
122  * 110000      6
123  * 111000      7
124  */
125 static uint8_t default_dscp_tid_map[DSCP_TID_MAP_MAX] = {
126 	0, 0, 0, 0, 0, 0, 0, 0,
127 	1, 1, 1, 1, 1, 1, 1, 1,
128 	2, 2, 2, 2, 2, 2, 2, 2,
129 	3, 3, 3, 3, 3, 3, 3, 3,
130 	4, 4, 4, 4, 4, 4, 4, 4,
131 	5, 5, 5, 5, 5, 5, 5, 5,
132 	6, 6, 6, 6, 6, 6, 6, 6,
133 	7, 7, 7, 7, 7, 7, 7, 7,
134 };
135 
136 /*
137  * struct dp_rate_debug
138  *
139  * @mcs_type: print string for a given mcs
140  * @valid: valid mcs rate?
141  */
142 struct dp_rate_debug {
143 	char mcs_type[DP_MAX_MCS_STRING_LEN];
144 	uint8_t valid;
145 };
146 
147 #define MCS_VALID 1
148 #define MCS_INVALID 0
149 
150 static const struct dp_rate_debug dp_rate_string[DOT11_MAX][MAX_MCS] = {
151 
152 	{
153 		{"OFDM 48 Mbps", MCS_VALID},
154 		{"OFDM 24 Mbps", MCS_VALID},
155 		{"OFDM 12 Mbps", MCS_VALID},
156 		{"OFDM 6 Mbps ", MCS_VALID},
157 		{"OFDM 54 Mbps", MCS_VALID},
158 		{"OFDM 36 Mbps", MCS_VALID},
159 		{"OFDM 18 Mbps", MCS_VALID},
160 		{"OFDM 9 Mbps ", MCS_VALID},
161 		{"INVALID ", MCS_INVALID},
162 		{"INVALID ", MCS_INVALID},
163 		{"INVALID ", MCS_INVALID},
164 		{"INVALID ", MCS_INVALID},
165 		{"INVALID ", MCS_VALID},
166 	},
167 	{
168 		{"CCK 11 Mbps Long  ", MCS_VALID},
169 		{"CCK 5.5 Mbps Long ", MCS_VALID},
170 		{"CCK 2 Mbps Long   ", MCS_VALID},
171 		{"CCK 1 Mbps Long   ", MCS_VALID},
172 		{"CCK 11 Mbps Short ", MCS_VALID},
173 		{"CCK 5.5 Mbps Short", MCS_VALID},
174 		{"CCK 2 Mbps Short  ", MCS_VALID},
175 		{"INVALID ", MCS_INVALID},
176 		{"INVALID ", MCS_INVALID},
177 		{"INVALID ", MCS_INVALID},
178 		{"INVALID ", MCS_INVALID},
179 		{"INVALID ", MCS_INVALID},
180 		{"INVALID ", MCS_VALID},
181 	},
182 	{
183 		{"HT MCS 0 (BPSK 1/2)  ", MCS_VALID},
184 		{"HT MCS 1 (QPSK 1/2)  ", MCS_VALID},
185 		{"HT MCS 2 (QPSK 3/4)  ", MCS_VALID},
186 		{"HT MCS 3 (16-QAM 1/2)", MCS_VALID},
187 		{"HT MCS 4 (16-QAM 3/4)", MCS_VALID},
188 		{"HT MCS 5 (64-QAM 2/3)", MCS_VALID},
189 		{"HT MCS 6 (64-QAM 3/4)", MCS_VALID},
190 		{"HT MCS 7 (64-QAM 5/6)", MCS_VALID},
191 		{"INVALID ", MCS_INVALID},
192 		{"INVALID ", MCS_INVALID},
193 		{"INVALID ", MCS_INVALID},
194 		{"INVALID ", MCS_INVALID},
195 		{"INVALID ", MCS_VALID},
196 	},
197 	{
198 		{"VHT MCS 0 (BPSK 1/2)     ", MCS_VALID},
199 		{"VHT MCS 1 (QPSK 1/2)     ", MCS_VALID},
200 		{"VHT MCS 2 (QPSK 3/4)     ", MCS_VALID},
201 		{"VHT MCS 3 (16-QAM 1/2)   ", MCS_VALID},
202 		{"VHT MCS 4 (16-QAM 3/4)   ", MCS_VALID},
203 		{"VHT MCS 5 (64-QAM 2/3)   ", MCS_VALID},
204 		{"VHT MCS 6 (64-QAM 3/4)   ", MCS_VALID},
205 		{"VHT MCS 7 (64-QAM 5/6)   ", MCS_VALID},
206 		{"VHT MCS 8 (256-QAM 3/4)  ", MCS_VALID},
207 		{"VHT MCS 9 (256-QAM 5/6)  ", MCS_VALID},
208 		{"VHT MCS 10 (1024-QAM 3/4)", MCS_VALID},
209 		{"VHT MCS 11 (1024-QAM 5/6)", MCS_VALID},
210 		{"INVALID ", MCS_VALID},
211 	},
212 	{
213 		{"HE MCS 0 (BPSK 1/2)     ", MCS_VALID},
214 		{"HE MCS 1 (QPSK 1/2)     ", MCS_VALID},
215 		{"HE MCS 2 (QPSK 3/4)     ", MCS_VALID},
216 		{"HE MCS 3 (16-QAM 1/2)   ", MCS_VALID},
217 		{"HE MCS 4 (16-QAM 3/4)   ", MCS_VALID},
218 		{"HE MCS 5 (64-QAM 2/3)   ", MCS_VALID},
219 		{"HE MCS 6 (64-QAM 3/4)   ", MCS_VALID},
220 		{"HE MCS 7 (64-QAM 5/6)   ", MCS_VALID},
221 		{"HE MCS 8 (256-QAM 3/4)  ", MCS_VALID},
222 		{"HE MCS 9 (256-QAM 5/6)  ", MCS_VALID},
223 		{"HE MCS 10 (1024-QAM 3/4)", MCS_VALID},
224 		{"HE MCS 11 (1024-QAM 5/6)", MCS_VALID},
225 		{"INVALID ", MCS_VALID},
226 	}
227 };
228 
229 /**
230  * @brief Cpu ring map types
231  */
232 enum dp_cpu_ring_map_types {
233 	DP_DEFAULT_MAP,
234 	DP_NSS_FIRST_RADIO_OFFLOADED_MAP,
235 	DP_NSS_SECOND_RADIO_OFFLOADED_MAP,
236 	DP_NSS_ALL_RADIO_OFFLOADED_MAP,
237 	DP_CPU_RING_MAP_MAX
238 };
239 
240 /**
241  * @brief Cpu to tx ring map
242  */
243 static uint8_t dp_cpu_ring_map[DP_CPU_RING_MAP_MAX][WLAN_CFG_INT_NUM_CONTEXTS] = {
244 	{0x0, 0x1, 0x2, 0x0},
245 	{0x1, 0x2, 0x1, 0x2},
246 	{0x0, 0x2, 0x0, 0x2},
247 	{0x2, 0x2, 0x2, 0x2}
248 };
249 
250 /**
251  * @brief Select the type of statistics
252  */
253 enum dp_stats_type {
254 	STATS_FW = 0,
255 	STATS_HOST = 1,
256 	STATS_TYPE_MAX = 2,
257 };
258 
259 /**
260  * @brief General Firmware statistics options
261  *
262  */
263 enum dp_fw_stats {
264 	TXRX_FW_STATS_INVALID	= -1,
265 };
266 
267 /**
268  * dp_stats_mapping_table - Firmware and Host statistics
269  * currently supported
270  */
271 const int dp_stats_mapping_table[][STATS_TYPE_MAX] = {
272 	{HTT_DBG_EXT_STATS_RESET, TXRX_HOST_STATS_INVALID},
273 	{HTT_DBG_EXT_STATS_PDEV_TX, TXRX_HOST_STATS_INVALID},
274 	{HTT_DBG_EXT_STATS_PDEV_RX, TXRX_HOST_STATS_INVALID},
275 	{HTT_DBG_EXT_STATS_PDEV_TX_HWQ, TXRX_HOST_STATS_INVALID},
276 	{HTT_DBG_EXT_STATS_PDEV_TX_SCHED, TXRX_HOST_STATS_INVALID},
277 	{HTT_DBG_EXT_STATS_PDEV_ERROR, TXRX_HOST_STATS_INVALID},
278 	{HTT_DBG_EXT_STATS_PDEV_TQM, TXRX_HOST_STATS_INVALID},
279 	{HTT_DBG_EXT_STATS_TQM_CMDQ, TXRX_HOST_STATS_INVALID},
280 	{HTT_DBG_EXT_STATS_TX_DE_INFO, TXRX_HOST_STATS_INVALID},
281 	{HTT_DBG_EXT_STATS_PDEV_TX_RATE, TXRX_HOST_STATS_INVALID},
282 	{HTT_DBG_EXT_STATS_PDEV_RX_RATE, TXRX_HOST_STATS_INVALID},
283 	{TXRX_FW_STATS_INVALID, TXRX_HOST_STATS_INVALID},
284 	{HTT_DBG_EXT_STATS_TX_SELFGEN_INFO, TXRX_HOST_STATS_INVALID},
285 	{HTT_DBG_EXT_STATS_TX_MU_HWQ, TXRX_HOST_STATS_INVALID},
286 	{HTT_DBG_EXT_STATS_RING_IF_INFO, TXRX_HOST_STATS_INVALID},
287 	{HTT_DBG_EXT_STATS_SRNG_INFO, TXRX_HOST_STATS_INVALID},
288 	{HTT_DBG_EXT_STATS_SFM_INFO, TXRX_HOST_STATS_INVALID},
289 	{HTT_DBG_EXT_STATS_PDEV_TX_MU, TXRX_HOST_STATS_INVALID},
290 	{HTT_DBG_EXT_STATS_ACTIVE_PEERS_LIST, TXRX_HOST_STATS_INVALID},
291 	/* Last ENUM for HTT FW STATS */
292 	{DP_HTT_DBG_EXT_STATS_MAX, TXRX_HOST_STATS_INVALID},
293 	{TXRX_FW_STATS_INVALID, TXRX_CLEAR_STATS},
294 	{TXRX_FW_STATS_INVALID, TXRX_RX_RATE_STATS},
295 	{TXRX_FW_STATS_INVALID, TXRX_TX_RATE_STATS},
296 	{TXRX_FW_STATS_INVALID, TXRX_TX_HOST_STATS},
297 	{TXRX_FW_STATS_INVALID, TXRX_RX_HOST_STATS},
298 	{TXRX_FW_STATS_INVALID, TXRX_AST_STATS},
299 	{TXRX_FW_STATS_INVALID, TXRX_SRNG_PTR_STATS},
300 	{TXRX_FW_STATS_INVALID, TXRX_RX_MON_STATS},
301 	{TXRX_FW_STATS_INVALID, TXRX_REO_QUEUE_STATS},
302 };
303 
304 /* MCL specific functions */
305 #ifdef CONFIG_MCL
306 /**
307  * dp_soc_get_mon_mask_for_interrupt_mode() - get mon mode mask for intr mode
308  * @soc: pointer to dp_soc handle
309  * @intr_ctx_num: interrupt context number for which mon mask is needed
310  *
311  * For MCL, monitor mode rings are being processed in timer contexts (polled).
312  * This function is returning 0, since in interrupt mode(softirq based RX),
313  * we donot want to process monitor mode rings in a softirq.
314  *
315  * So, in case packet log is enabled for SAP/STA/P2P modes,
316  * regular interrupt processing will not process monitor mode rings. It would be
317  * done in a separate timer context.
318  *
319  * Return: 0
320  */
321 static inline
322 uint32_t dp_soc_get_mon_mask_for_interrupt_mode(struct dp_soc *soc, int intr_ctx_num)
323 {
324 	return 0;
325 }
326 
327 /*
328  * dp_service_mon_rings()- timer to reap monitor rings
329  * reqd as we are not getting ppdu end interrupts
330  * @arg: SoC Handle
331  *
332  * Return:
333  *
334  */
335 static void dp_service_mon_rings(void *arg)
336 {
337 	struct dp_soc *soc = (struct dp_soc *)arg;
338 	int ring = 0, work_done, mac_id;
339 	struct dp_pdev *pdev = NULL;
340 
341 	for  (ring = 0 ; ring < MAX_PDEV_CNT; ring++) {
342 		pdev = soc->pdev_list[ring];
343 		if (!pdev)
344 			continue;
345 		for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
346 			int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id,
347 								pdev->pdev_id);
348 			work_done = dp_mon_process(soc, mac_for_pdev,
349 						   QCA_NAPI_BUDGET);
350 
351 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
352 				  FL("Reaped %d descs from Monitor rings"),
353 				  work_done);
354 		}
355 	}
356 
357 	qdf_timer_mod(&soc->mon_reap_timer, DP_INTR_POLL_TIMER_MS);
358 }
359 
360 #ifndef REMOVE_PKT_LOG
361 /**
362  * dp_pkt_log_init() - API to initialize packet log
363  * @ppdev: physical device handle
364  * @scn: HIF context
365  *
366  * Return: none
367  */
368 void dp_pkt_log_init(struct cdp_pdev *ppdev, void *scn)
369 {
370 	struct dp_pdev *handle = (struct dp_pdev *)ppdev;
371 
372 	if (handle->pkt_log_init) {
373 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
374 			  "%s: Packet log not initialized", __func__);
375 		return;
376 	}
377 
378 	pktlog_sethandle(&handle->pl_dev, scn);
379 	pktlog_set_callback_regtype(PKTLOG_LITE_CALLBACK_REGISTRATION);
380 
381 	if (pktlogmod_init(scn)) {
382 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
383 			  "%s: pktlogmod_init failed", __func__);
384 		handle->pkt_log_init = false;
385 	} else {
386 		handle->pkt_log_init = true;
387 	}
388 }
389 
390 /**
391  * dp_pkt_log_con_service() - connect packet log service
392  * @ppdev: physical device handle
393  * @scn: device context
394  *
395  * Return: none
396  */
397 static void dp_pkt_log_con_service(struct cdp_pdev *ppdev, void *scn)
398 {
399 	struct dp_pdev *pdev = (struct dp_pdev *)ppdev;
400 
401 	dp_pkt_log_init((struct cdp_pdev *)pdev, scn);
402 	pktlog_htc_attach();
403 }
404 
405 /**
406  * dp_pktlogmod_exit() - API to cleanup pktlog info
407  * @handle: Pdev handle
408  *
409  * Return: none
410  */
411 static void dp_pktlogmod_exit(struct dp_pdev *handle)
412 {
413 	void *scn = (void *)handle->soc->hif_handle;
414 
415 	if (!scn) {
416 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
417 			  "%s: Invalid hif(scn) handle", __func__);
418 		return;
419 	}
420 
421 	pktlogmod_exit(scn);
422 	handle->pkt_log_init = false;
423 }
424 #endif
425 #else
426 static void dp_pktlogmod_exit(struct dp_pdev *handle) { }
427 
428 /**
429  * dp_soc_get_mon_mask_for_interrupt_mode() - get mon mode mask for intr mode
430  * @soc: pointer to dp_soc handle
431  * @intr_ctx_num: interrupt context number for which mon mask is needed
432  *
433  * Return: mon mask value
434  */
435 static inline
436 uint32_t dp_soc_get_mon_mask_for_interrupt_mode(struct dp_soc *soc, int intr_ctx_num)
437 {
438 	return wlan_cfg_get_rx_mon_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
439 }
440 #endif
441 
442 /**
443  * dp_get_dp_vdev_from_cdp_vdev() - get dp_vdev from cdp_vdev by type-casting
444  * @cdp_opaque_vdev: pointer to cdp_vdev
445  *
446  * Return: pointer to dp_vdev
447  */
448 static
449 struct dp_vdev * dp_get_dp_vdev_from_cdp_vdev(struct cdp_vdev *cdp_opaque_vdev)
450 {
451 	return (struct dp_vdev *)cdp_opaque_vdev;
452 }
453 
454 
455 static int dp_peer_add_ast_wifi3(struct cdp_soc_t *soc_hdl,
456 					struct cdp_peer *peer_hdl,
457 					uint8_t *mac_addr,
458 					enum cdp_txrx_ast_entry_type type,
459 					uint32_t flags)
460 {
461 
462 	return dp_peer_add_ast((struct dp_soc *)soc_hdl,
463 				(struct dp_peer *)peer_hdl,
464 				mac_addr,
465 				type,
466 				flags);
467 }
468 
469 static void dp_peer_del_ast_wifi3(struct cdp_soc_t *soc_hdl,
470 					 void *ast_entry_hdl)
471 {
472 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
473 	qdf_spin_lock_bh(&soc->ast_lock);
474 	dp_peer_del_ast((struct dp_soc *)soc_hdl,
475 			(struct dp_ast_entry *)ast_entry_hdl);
476 	qdf_spin_unlock_bh(&soc->ast_lock);
477 }
478 
479 
480 static int dp_peer_update_ast_wifi3(struct cdp_soc_t *soc_hdl,
481 						struct cdp_peer *peer_hdl,
482 						uint8_t *wds_macaddr,
483 						uint32_t flags)
484 {
485 	int status = -1;
486 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
487 	struct dp_ast_entry  *ast_entry = NULL;
488 
489 	qdf_spin_lock_bh(&soc->ast_lock);
490 	ast_entry = dp_peer_ast_hash_find(soc, wds_macaddr);
491 
492 	if (ast_entry) {
493 		status = dp_peer_update_ast(soc,
494 					    (struct dp_peer *)peer_hdl,
495 					   ast_entry, flags);
496 	}
497 
498 	qdf_spin_unlock_bh(&soc->ast_lock);
499 
500 	return status;
501 }
502 
503 /*
504  * dp_wds_reset_ast_wifi3() - Reset the is_active param for ast entry
505  * @soc_handle:		Datapath SOC handle
506  * @wds_macaddr:	WDS entry MAC Address
507  * Return: None
508  */
509 static void dp_wds_reset_ast_wifi3(struct cdp_soc_t *soc_hdl,
510 				   uint8_t *wds_macaddr, void *vdev_handle)
511 {
512 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
513 	struct dp_ast_entry *ast_entry = NULL;
514 
515 	qdf_spin_lock_bh(&soc->ast_lock);
516 	ast_entry = dp_peer_ast_hash_find(soc, wds_macaddr);
517 
518 	if (ast_entry) {
519 		if ((ast_entry->type != CDP_TXRX_AST_TYPE_STATIC) &&
520 		    (ast_entry->type != CDP_TXRX_AST_TYPE_SELF)) {
521 			ast_entry->is_active = TRUE;
522 		}
523 	}
524 
525 	qdf_spin_unlock_bh(&soc->ast_lock);
526 }
527 
528 /*
529  * dp_wds_reset_ast_table_wifi3() - Reset the is_active param for all ast entry
530  * @soc:		Datapath SOC handle
531  *
532  * Return: None
533  */
534 static void dp_wds_reset_ast_table_wifi3(struct cdp_soc_t  *soc_hdl,
535 					 void *vdev_hdl)
536 {
537 	struct dp_soc *soc = (struct dp_soc *) soc_hdl;
538 	struct dp_pdev *pdev;
539 	struct dp_vdev *vdev;
540 	struct dp_peer *peer;
541 	struct dp_ast_entry *ase, *temp_ase;
542 	int i;
543 
544 	qdf_spin_lock_bh(&soc->ast_lock);
545 
546 	for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) {
547 		pdev = soc->pdev_list[i];
548 		qdf_spin_lock_bh(&pdev->vdev_list_lock);
549 		DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
550 			DP_VDEV_ITERATE_PEER_LIST(vdev, peer) {
551 				DP_PEER_ITERATE_ASE_LIST(peer, ase, temp_ase) {
552 					if ((ase->type ==
553 					     CDP_TXRX_AST_TYPE_STATIC) ||
554 					    (ase->type ==
555 					     CDP_TXRX_AST_TYPE_SELF))
556 						continue;
557 					ase->is_active = TRUE;
558 				}
559 			}
560 		}
561 		qdf_spin_unlock_bh(&pdev->vdev_list_lock);
562 	}
563 
564 	qdf_spin_unlock_bh(&soc->ast_lock);
565 }
566 
567 /*
568  * dp_wds_flush_ast_table_wifi3() - Delete all wds and hmwds ast entry
569  * @soc:		Datapath SOC handle
570  *
571  * Return: None
572  */
573 static void dp_wds_flush_ast_table_wifi3(struct cdp_soc_t  *soc_hdl)
574 {
575 	struct dp_soc *soc = (struct dp_soc *) soc_hdl;
576 	struct dp_pdev *pdev;
577 	struct dp_vdev *vdev;
578 	struct dp_peer *peer;
579 	struct dp_ast_entry *ase, *temp_ase;
580 	int i;
581 
582 	qdf_spin_lock_bh(&soc->ast_lock);
583 
584 	for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) {
585 		pdev = soc->pdev_list[i];
586 		qdf_spin_lock_bh(&pdev->vdev_list_lock);
587 		DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
588 			DP_VDEV_ITERATE_PEER_LIST(vdev, peer) {
589 				DP_PEER_ITERATE_ASE_LIST(peer, ase, temp_ase) {
590 					if ((ase->type ==
591 					     CDP_TXRX_AST_TYPE_STATIC) ||
592 					    (ase->type ==
593 					     CDP_TXRX_AST_TYPE_SELF))
594 						continue;
595 					dp_peer_del_ast(soc, ase);
596 				}
597 			}
598 		}
599 		qdf_spin_unlock_bh(&pdev->vdev_list_lock);
600 	}
601 
602 	qdf_spin_unlock_bh(&soc->ast_lock);
603 }
604 
605 static void *dp_peer_ast_hash_find_wifi3(struct cdp_soc_t *soc_hdl,
606 						uint8_t *ast_mac_addr)
607 {
608 	struct dp_ast_entry *ast_entry;
609 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
610 	qdf_spin_lock_bh(&soc->ast_lock);
611 	ast_entry = dp_peer_ast_hash_find(soc, ast_mac_addr);
612 	qdf_spin_unlock_bh(&soc->ast_lock);
613 	return (void *)ast_entry;
614 }
615 
616 static uint8_t dp_peer_ast_get_pdev_id_wifi3(struct cdp_soc_t *soc_hdl,
617 							void *ast_entry_hdl)
618 {
619 	return dp_peer_ast_get_pdev_id((struct dp_soc *)soc_hdl,
620 					(struct dp_ast_entry *)ast_entry_hdl);
621 }
622 
623 static uint8_t dp_peer_ast_get_next_hop_wifi3(struct cdp_soc_t *soc_hdl,
624 							void *ast_entry_hdl)
625 {
626 	return dp_peer_ast_get_next_hop((struct dp_soc *)soc_hdl,
627 					(struct dp_ast_entry *)ast_entry_hdl);
628 }
629 
630 static void dp_peer_ast_set_type_wifi3(
631 					struct cdp_soc_t *soc_hdl,
632 					void *ast_entry_hdl,
633 					enum cdp_txrx_ast_entry_type type)
634 {
635 	dp_peer_ast_set_type((struct dp_soc *)soc_hdl,
636 				(struct dp_ast_entry *)ast_entry_hdl,
637 				type);
638 }
639 
640 static enum cdp_txrx_ast_entry_type dp_peer_ast_get_type_wifi3(
641 					struct cdp_soc_t *soc_hdl,
642 					void *ast_entry_hdl)
643 {
644 	return ((struct dp_ast_entry *)ast_entry_hdl)->type;
645 }
646 
647 /**
648  * dp_srng_find_ring_in_mask() - find which ext_group a ring belongs
649  * @ring_num: ring num of the ring being queried
650  * @grp_mask: the grp_mask array for the ring type in question.
651  *
652  * The grp_mask array is indexed by group number and the bit fields correspond
653  * to ring numbers.  We are finding which interrupt group a ring belongs to.
654  *
655  * Return: the index in the grp_mask array with the ring number.
656  * -QDF_STATUS_E_NOENT if no entry is found
657  */
658 static int dp_srng_find_ring_in_mask(int ring_num, int *grp_mask)
659 {
660 	int ext_group_num;
661 	int mask = 1 << ring_num;
662 
663 	for (ext_group_num = 0; ext_group_num < WLAN_CFG_INT_NUM_CONTEXTS;
664 	     ext_group_num++) {
665 		if (mask & grp_mask[ext_group_num])
666 			return ext_group_num;
667 	}
668 
669 	return -QDF_STATUS_E_NOENT;
670 }
671 
672 static int dp_srng_calculate_msi_group(struct dp_soc *soc,
673 				       enum hal_ring_type ring_type,
674 				       int ring_num)
675 {
676 	int *grp_mask;
677 
678 	switch (ring_type) {
679 	case WBM2SW_RELEASE:
680 		/* dp_tx_comp_handler - soc->tx_comp_ring */
681 		if (ring_num < 3)
682 			grp_mask = &soc->wlan_cfg_ctx->int_tx_ring_mask[0];
683 
684 		/* dp_rx_wbm_err_process - soc->rx_rel_ring */
685 		else if (ring_num == 3) {
686 			/* sw treats this as a separate ring type */
687 			grp_mask = &soc->wlan_cfg_ctx->
688 				int_rx_wbm_rel_ring_mask[0];
689 			ring_num = 0;
690 		} else {
691 			qdf_assert(0);
692 			return -QDF_STATUS_E_NOENT;
693 		}
694 	break;
695 
696 	case REO_EXCEPTION:
697 		/* dp_rx_err_process - &soc->reo_exception_ring */
698 		grp_mask = &soc->wlan_cfg_ctx->int_rx_err_ring_mask[0];
699 	break;
700 
701 	case REO_DST:
702 		/* dp_rx_process - soc->reo_dest_ring */
703 		grp_mask = &soc->wlan_cfg_ctx->int_rx_ring_mask[0];
704 	break;
705 
706 	case REO_STATUS:
707 		/* dp_reo_status_ring_handler - soc->reo_status_ring */
708 		grp_mask = &soc->wlan_cfg_ctx->int_reo_status_ring_mask[0];
709 	break;
710 
711 	/* dp_rx_mon_status_srng_process - pdev->rxdma_mon_status_ring*/
712 	case RXDMA_MONITOR_STATUS:
713 	/* dp_rx_mon_dest_process - pdev->rxdma_mon_dst_ring */
714 	case RXDMA_MONITOR_DST:
715 		/* dp_mon_process */
716 		grp_mask = &soc->wlan_cfg_ctx->int_rx_mon_ring_mask[0];
717 	break;
718 	case RXDMA_DST:
719 		/* dp_rxdma_err_process */
720 		grp_mask = &soc->wlan_cfg_ctx->int_rxdma2host_ring_mask[0];
721 	break;
722 
723 	case RXDMA_BUF:
724 		grp_mask = &soc->wlan_cfg_ctx->int_host2rxdma_ring_mask[0];
725 	break;
726 
727 	case RXDMA_MONITOR_BUF:
728 		/* TODO: support low_thresh interrupt */
729 		return -QDF_STATUS_E_NOENT;
730 	break;
731 
732 	case TCL_DATA:
733 	case TCL_CMD:
734 	case REO_CMD:
735 	case SW2WBM_RELEASE:
736 	case WBM_IDLE_LINK:
737 		/* normally empty SW_TO_HW rings */
738 		return -QDF_STATUS_E_NOENT;
739 	break;
740 
741 	case TCL_STATUS:
742 	case REO_REINJECT:
743 		/* misc unused rings */
744 		return -QDF_STATUS_E_NOENT;
745 	break;
746 
747 	case CE_SRC:
748 	case CE_DST:
749 	case CE_DST_STATUS:
750 		/* CE_rings - currently handled by hif */
751 	default:
752 		return -QDF_STATUS_E_NOENT;
753 	break;
754 	}
755 
756 	return dp_srng_find_ring_in_mask(ring_num, grp_mask);
757 }
758 
759 static void dp_srng_msi_setup(struct dp_soc *soc, struct hal_srng_params
760 			      *ring_params, int ring_type, int ring_num)
761 {
762 	int msi_group_number;
763 	int msi_data_count;
764 	int ret;
765 	uint32_t msi_data_start, msi_irq_start, addr_low, addr_high;
766 
767 	ret = pld_get_user_msi_assignment(soc->osdev->dev, "DP",
768 					    &msi_data_count, &msi_data_start,
769 					    &msi_irq_start);
770 
771 	if (ret)
772 		return;
773 
774 	msi_group_number = dp_srng_calculate_msi_group(soc, ring_type,
775 						       ring_num);
776 	if (msi_group_number < 0) {
777 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW,
778 			FL("ring not part of an ext_group; ring_type: %d,ring_num %d"),
779 			ring_type, ring_num);
780 		ring_params->msi_addr = 0;
781 		ring_params->msi_data = 0;
782 		return;
783 	}
784 
785 	if (msi_group_number > msi_data_count) {
786 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_WARN,
787 			FL("2 msi_groups will share an msi; msi_group_num %d"),
788 			msi_group_number);
789 
790 		QDF_ASSERT(0);
791 	}
792 
793 	pld_get_msi_address(soc->osdev->dev, &addr_low, &addr_high);
794 
795 	ring_params->msi_addr = addr_low;
796 	ring_params->msi_addr |= (qdf_dma_addr_t)(((uint64_t)addr_high) << 32);
797 	ring_params->msi_data = (msi_group_number % msi_data_count)
798 		+ msi_data_start;
799 	ring_params->flags |= HAL_SRNG_MSI_INTR;
800 }
801 
802 /**
803  * dp_print_ast_stats() - Dump AST table contents
804  * @soc: Datapath soc handle
805  *
806  * return void
807  */
808 #ifdef FEATURE_AST
809 static void dp_print_ast_stats(struct dp_soc *soc)
810 {
811 	uint8_t i;
812 	uint8_t num_entries = 0;
813 	struct dp_vdev *vdev;
814 	struct dp_pdev *pdev;
815 	struct dp_peer *peer;
816 	struct dp_ast_entry *ase, *tmp_ase;
817 	char type[CDP_TXRX_AST_TYPE_MAX][10] = {
818 			"NONE", "STATIC", "SELF", "WDS", "MEC", "HMWDS"};
819 
820 	DP_PRINT_STATS("AST Stats:");
821 	DP_PRINT_STATS("	Entries Added   = %d", soc->stats.ast.added);
822 	DP_PRINT_STATS("	Entries Deleted = %d", soc->stats.ast.deleted);
823 	DP_PRINT_STATS("	Entries Agedout = %d", soc->stats.ast.aged_out);
824 	DP_PRINT_STATS("AST Table:");
825 	for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) {
826 		pdev = soc->pdev_list[i];
827 		qdf_spin_lock_bh(&pdev->vdev_list_lock);
828 		DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
829 			DP_VDEV_ITERATE_PEER_LIST(vdev, peer) {
830 				DP_PEER_ITERATE_ASE_LIST(peer, ase, tmp_ase) {
831 					DP_PRINT_STATS("%6d mac_addr = %pM"
832 							" peer_mac_addr = %pM"
833 							" type = %s"
834 							" next_hop = %d"
835 							" is_active = %d"
836 							" is_bss = %d"
837 							" ast_idx = %d"
838 							" pdev_id = %d"
839 							" vdev_id = %d",
840 							++num_entries,
841 							ase->mac_addr.raw,
842 							ase->peer->mac_addr.raw,
843 							type[ase->type],
844 							ase->next_hop,
845 							ase->is_active,
846 							ase->is_bss,
847 							ase->ast_idx,
848 							ase->pdev_id,
849 							ase->vdev_id);
850 				}
851 			}
852 		}
853 		qdf_spin_unlock_bh(&pdev->vdev_list_lock);
854 	}
855 }
856 #else
857 static void dp_print_ast_stats(struct dp_soc *soc)
858 {
859 	DP_PRINT_STATS("AST Stats not available.Enable FEATURE_AST");
860 	return;
861 }
862 #endif
863 
864 static void dp_print_peer_table(struct dp_vdev *vdev)
865 {
866 	struct dp_peer *peer = NULL;
867 
868 	DP_PRINT_STATS("Dumping Peer Table  Stats:");
869 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
870 		if (!peer) {
871 			DP_PRINT_STATS("Invalid Peer");
872 			return;
873 		}
874 		DP_PRINT_STATS("    peer_mac_addr = %pM"
875 			" nawds_enabled = %d"
876 			" bss_peer = %d"
877 			" wapi = %d"
878 			" wds_enabled = %d"
879 			" delete in progress = %d",
880 			peer->mac_addr.raw,
881 			peer->nawds_enabled,
882 			peer->bss_peer,
883 			peer->wapi,
884 			peer->wds_enabled,
885 			peer->delete_in_progress);
886 	}
887 }
888 
889 /*
890  * dp_setup_srng - Internal function to setup SRNG rings used by data path
891  */
892 static int dp_srng_setup(struct dp_soc *soc, struct dp_srng *srng,
893 	int ring_type, int ring_num, int mac_id, uint32_t num_entries)
894 {
895 	void *hal_soc = soc->hal_soc;
896 	uint32_t entry_size = hal_srng_get_entrysize(hal_soc, ring_type);
897 	/* TODO: See if we should get align size from hal */
898 	uint32_t ring_base_align = 8;
899 	struct hal_srng_params ring_params;
900 	uint32_t max_entries = hal_srng_max_entries(hal_soc, ring_type);
901 
902 	/* TODO: Currently hal layer takes care of endianness related settings.
903 	 * See if these settings need to passed from DP layer
904 	 */
905 	ring_params.flags = 0;
906 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW,
907 		  FL("Ring type: %d, num:%d"), ring_type, ring_num);
908 
909 	num_entries = (num_entries > max_entries) ? max_entries : num_entries;
910 	srng->hal_srng = NULL;
911 	srng->alloc_size = (num_entries * entry_size) + ring_base_align - 1;
912 	srng->num_entries = num_entries;
913 	srng->base_vaddr_unaligned = qdf_mem_alloc_consistent(
914 		soc->osdev, soc->osdev->dev, srng->alloc_size,
915 		&(srng->base_paddr_unaligned));
916 
917 	if (!srng->base_vaddr_unaligned) {
918 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
919 			FL("alloc failed - ring_type: %d, ring_num %d"),
920 			ring_type, ring_num);
921 		return QDF_STATUS_E_NOMEM;
922 	}
923 
924 	ring_params.ring_base_vaddr = srng->base_vaddr_unaligned +
925 		((unsigned long)srng->base_vaddr_unaligned % ring_base_align);
926 	ring_params.ring_base_paddr = srng->base_paddr_unaligned +
927 		((unsigned long)(ring_params.ring_base_vaddr) -
928 		(unsigned long)srng->base_vaddr_unaligned);
929 	ring_params.num_entries = num_entries;
930 
931 	if (soc->intr_mode == DP_INTR_MSI) {
932 		dp_srng_msi_setup(soc, &ring_params, ring_type, ring_num);
933 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
934 			  FL("Using MSI for ring_type: %d, ring_num %d"),
935 			  ring_type, ring_num);
936 
937 	} else {
938 		ring_params.msi_data = 0;
939 		ring_params.msi_addr = 0;
940 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
941 			  FL("Skipping MSI for ring_type: %d, ring_num %d"),
942 			  ring_type, ring_num);
943 	}
944 
945 	/*
946 	 * Setup interrupt timer and batch counter thresholds for
947 	 * interrupt mitigation based on ring type
948 	 */
949 	if (ring_type == REO_DST) {
950 		ring_params.intr_timer_thres_us =
951 			wlan_cfg_get_int_timer_threshold_rx(soc->wlan_cfg_ctx);
952 		ring_params.intr_batch_cntr_thres_entries =
953 			wlan_cfg_get_int_batch_threshold_rx(soc->wlan_cfg_ctx);
954 	} else if (ring_type == WBM2SW_RELEASE && (ring_num < 3)) {
955 		ring_params.intr_timer_thres_us =
956 			wlan_cfg_get_int_timer_threshold_tx(soc->wlan_cfg_ctx);
957 		ring_params.intr_batch_cntr_thres_entries =
958 			wlan_cfg_get_int_batch_threshold_tx(soc->wlan_cfg_ctx);
959 	} else {
960 		ring_params.intr_timer_thres_us =
961 			wlan_cfg_get_int_timer_threshold_other(soc->wlan_cfg_ctx);
962 		ring_params.intr_batch_cntr_thres_entries =
963 			wlan_cfg_get_int_batch_threshold_other(soc->wlan_cfg_ctx);
964 	}
965 
966 	/* Enable low threshold interrupts for rx buffer rings (regular and
967 	 * monitor buffer rings.
968 	 * TODO: See if this is required for any other ring
969 	 */
970 	if ((ring_type == RXDMA_BUF) || (ring_type == RXDMA_MONITOR_BUF) ||
971 		(ring_type == RXDMA_MONITOR_STATUS)) {
972 		/* TODO: Setting low threshold to 1/8th of ring size
973 		 * see if this needs to be configurable
974 		 */
975 		ring_params.low_threshold = num_entries >> 3;
976 		ring_params.flags |= HAL_SRNG_LOW_THRES_INTR_ENABLE;
977 		ring_params.intr_timer_thres_us =
978 			wlan_cfg_get_int_timer_threshold_rx(soc->wlan_cfg_ctx);
979 		ring_params.intr_batch_cntr_thres_entries = 0;
980 	}
981 
982 	srng->hal_srng = hal_srng_setup(hal_soc, ring_type, ring_num,
983 		mac_id, &ring_params);
984 
985 	if (!srng->hal_srng) {
986 		qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
987 				srng->alloc_size,
988 				srng->base_vaddr_unaligned,
989 				srng->base_paddr_unaligned, 0);
990 	}
991 
992 	return 0;
993 }
994 
995 /**
996  * dp_srng_cleanup - Internal function to cleanup SRNG rings used by data path
997  * Any buffers allocated and attached to ring entries are expected to be freed
998  * before calling this function.
999  */
1000 static void dp_srng_cleanup(struct dp_soc *soc, struct dp_srng *srng,
1001 	int ring_type, int ring_num)
1002 {
1003 	if (!srng->hal_srng) {
1004 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1005 			FL("Ring type: %d, num:%d not setup"),
1006 			ring_type, ring_num);
1007 		return;
1008 	}
1009 
1010 	hal_srng_cleanup(soc->hal_soc, srng->hal_srng);
1011 
1012 	qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
1013 				srng->alloc_size,
1014 				srng->base_vaddr_unaligned,
1015 				srng->base_paddr_unaligned, 0);
1016 	srng->hal_srng = NULL;
1017 }
1018 
1019 /* TODO: Need this interface from HIF */
1020 void *hif_get_hal_handle(void *hif_handle);
1021 
1022 /*
1023  * dp_service_srngs() - Top level interrupt handler for DP Ring interrupts
1024  * @dp_ctx: DP SOC handle
1025  * @budget: Number of frames/descriptors that can be processed in one shot
1026  *
1027  * Return: remaining budget/quota for the soc device
1028  */
1029 static uint32_t dp_service_srngs(void *dp_ctx, uint32_t dp_budget)
1030 {
1031 	struct dp_intr *int_ctx = (struct dp_intr *)dp_ctx;
1032 	struct dp_soc *soc = int_ctx->soc;
1033 	int ring = 0;
1034 	uint32_t work_done  = 0;
1035 	int budget = dp_budget;
1036 	uint8_t tx_mask = int_ctx->tx_ring_mask;
1037 	uint8_t rx_mask = int_ctx->rx_ring_mask;
1038 	uint8_t rx_err_mask = int_ctx->rx_err_ring_mask;
1039 	uint8_t rx_wbm_rel_mask = int_ctx->rx_wbm_rel_ring_mask;
1040 	uint8_t reo_status_mask = int_ctx->reo_status_ring_mask;
1041 	uint32_t remaining_quota = dp_budget;
1042 	struct dp_pdev *pdev = NULL;
1043 	int mac_id;
1044 
1045 	/* Process Tx completion interrupts first to return back buffers */
1046 	while (tx_mask) {
1047 		if (tx_mask & 0x1) {
1048 			work_done = dp_tx_comp_handler(soc,
1049 					soc->tx_comp_ring[ring].hal_srng,
1050 					remaining_quota);
1051 
1052 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
1053 				"tx mask 0x%x ring %d, budget %d, work_done %d",
1054 				tx_mask, ring, budget, work_done);
1055 
1056 			budget -= work_done;
1057 			if (budget <= 0)
1058 				goto budget_done;
1059 
1060 			remaining_quota = budget;
1061 		}
1062 		tx_mask = tx_mask >> 1;
1063 		ring++;
1064 	}
1065 
1066 
1067 	/* Process REO Exception ring interrupt */
1068 	if (rx_err_mask) {
1069 		work_done = dp_rx_err_process(soc,
1070 				soc->reo_exception_ring.hal_srng,
1071 				remaining_quota);
1072 
1073 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
1074 			"REO Exception Ring: work_done %d budget %d",
1075 			work_done, budget);
1076 
1077 		budget -=  work_done;
1078 		if (budget <= 0) {
1079 			goto budget_done;
1080 		}
1081 		remaining_quota = budget;
1082 	}
1083 
1084 	/* Process Rx WBM release ring interrupt */
1085 	if (rx_wbm_rel_mask) {
1086 		work_done = dp_rx_wbm_err_process(soc,
1087 				soc->rx_rel_ring.hal_srng, remaining_quota);
1088 
1089 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
1090 			"WBM Release Ring: work_done %d budget %d",
1091 			work_done, budget);
1092 
1093 		budget -=  work_done;
1094 		if (budget <= 0) {
1095 			goto budget_done;
1096 		}
1097 		remaining_quota = budget;
1098 	}
1099 
1100 	/* Process Rx interrupts */
1101 	if (rx_mask) {
1102 		for (ring = 0; ring < soc->num_reo_dest_rings; ring++) {
1103 			if (rx_mask & (1 << ring)) {
1104 				work_done = dp_rx_process(int_ctx,
1105 					    soc->reo_dest_ring[ring].hal_srng,
1106 					    ring,
1107 					    remaining_quota);
1108 
1109 				QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
1110 					"rx mask 0x%x ring %d, work_done %d budget %d",
1111 					rx_mask, ring, work_done, budget);
1112 
1113 				budget -=  work_done;
1114 				if (budget <= 0)
1115 					goto budget_done;
1116 				remaining_quota = budget;
1117 			}
1118 		}
1119 		for (ring = 0; ring < MAX_RX_MAC_RINGS; ring++) {
1120 			work_done = dp_rxdma_err_process(soc, ring,
1121 						remaining_quota);
1122 			budget -= work_done;
1123 		}
1124 	}
1125 
1126 	if (reo_status_mask)
1127 		dp_reo_status_ring_handler(soc);
1128 
1129 	/* Process LMAC interrupts */
1130 	for  (ring = 0 ; ring < MAX_PDEV_CNT; ring++) {
1131 		pdev = soc->pdev_list[ring];
1132 		if (pdev == NULL)
1133 			continue;
1134 		for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
1135 			int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id,
1136 								pdev->pdev_id);
1137 
1138 			if (int_ctx->rx_mon_ring_mask & (1 << mac_for_pdev)) {
1139 				work_done = dp_mon_process(soc, mac_for_pdev,
1140 						remaining_quota);
1141 				budget -= work_done;
1142 				if (budget <= 0)
1143 					goto budget_done;
1144 				remaining_quota = budget;
1145 			}
1146 
1147 			if (int_ctx->rxdma2host_ring_mask &
1148 					(1 << mac_for_pdev)) {
1149 				work_done = dp_rxdma_err_process(soc,
1150 							mac_for_pdev,
1151 							remaining_quota);
1152 				budget -=  work_done;
1153 				if (budget <= 0)
1154 					goto budget_done;
1155 				remaining_quota = budget;
1156 			}
1157 
1158 			if (int_ctx->host2rxdma_ring_mask &
1159 						(1 << mac_for_pdev)) {
1160 				union dp_rx_desc_list_elem_t *desc_list = NULL;
1161 				union dp_rx_desc_list_elem_t *tail = NULL;
1162 				struct dp_srng *rx_refill_buf_ring =
1163 					&pdev->rx_refill_buf_ring;
1164 
1165 				DP_STATS_INC(pdev, replenish.low_thresh_intrs,
1166 						1);
1167 				dp_rx_buffers_replenish(soc, mac_for_pdev,
1168 					rx_refill_buf_ring,
1169 					&soc->rx_desc_buf[mac_for_pdev], 0,
1170 					&desc_list, &tail);
1171 			}
1172 		}
1173 	}
1174 
1175 	qdf_lro_flush(int_ctx->lro_ctx);
1176 
1177 budget_done:
1178 	return dp_budget - budget;
1179 }
1180 
1181 /* dp_interrupt_timer()- timer poll for interrupts
1182  *
1183  * @arg: SoC Handle
1184  *
1185  * Return:
1186  *
1187  */
1188 static void dp_interrupt_timer(void *arg)
1189 {
1190 	struct dp_soc *soc = (struct dp_soc *) arg;
1191 	int i;
1192 
1193 	if (qdf_atomic_read(&soc->cmn_init_done)) {
1194 		for (i = 0;
1195 			i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++)
1196 			dp_service_srngs(&soc->intr_ctx[i], 0xffff);
1197 
1198 		qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
1199 	}
1200 }
1201 
1202 /*
1203  * dp_soc_interrupt_attach_poll() - Register handlers for DP interrupts
1204  * @txrx_soc: DP SOC handle
1205  *
1206  * Host driver will register for “DP_NUM_INTERRUPT_CONTEXTS” number of NAPI
1207  * contexts. Each NAPI context will have a tx_ring_mask , rx_ring_mask ,and
1208  * rx_monitor_ring mask to indicate the rings that are processed by the handler.
1209  *
1210  * Return: 0 for success. nonzero for failure.
1211  */
1212 static QDF_STATUS dp_soc_attach_poll(void *txrx_soc)
1213 {
1214 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
1215 	int i;
1216 
1217 	soc->intr_mode = DP_INTR_POLL;
1218 
1219 	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
1220 		soc->intr_ctx[i].dp_intr_id = i;
1221 		soc->intr_ctx[i].tx_ring_mask =
1222 			wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, i);
1223 		soc->intr_ctx[i].rx_ring_mask =
1224 			wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, i);
1225 		soc->intr_ctx[i].rx_mon_ring_mask =
1226 			wlan_cfg_get_rx_mon_ring_mask(soc->wlan_cfg_ctx, i);
1227 		soc->intr_ctx[i].rx_err_ring_mask =
1228 			wlan_cfg_get_rx_err_ring_mask(soc->wlan_cfg_ctx, i);
1229 		soc->intr_ctx[i].rx_wbm_rel_ring_mask =
1230 			wlan_cfg_get_rx_wbm_rel_ring_mask(soc->wlan_cfg_ctx, i);
1231 		soc->intr_ctx[i].reo_status_ring_mask =
1232 			wlan_cfg_get_reo_status_ring_mask(soc->wlan_cfg_ctx, i);
1233 		soc->intr_ctx[i].rxdma2host_ring_mask =
1234 			wlan_cfg_get_rxdma2host_ring_mask(soc->wlan_cfg_ctx, i);
1235 		soc->intr_ctx[i].soc = soc;
1236 		soc->intr_ctx[i].lro_ctx = qdf_lro_init();
1237 	}
1238 
1239 	qdf_timer_init(soc->osdev, &soc->int_timer,
1240 			dp_interrupt_timer, (void *)soc,
1241 			QDF_TIMER_TYPE_WAKE_APPS);
1242 
1243 	return QDF_STATUS_SUCCESS;
1244 }
1245 
1246 static QDF_STATUS dp_soc_interrupt_attach(void *txrx_soc);
1247 #if defined(CONFIG_MCL)
1248 extern int con_mode_monitor;
1249 /*
1250  * dp_soc_interrupt_attach_wrapper() - Register handlers for DP interrupts
1251  * @txrx_soc: DP SOC handle
1252  *
1253  * Call the appropriate attach function based on the mode of operation.
1254  * This is a WAR for enabling monitor mode.
1255  *
1256  * Return: 0 for success. nonzero for failure.
1257  */
1258 static QDF_STATUS dp_soc_interrupt_attach_wrapper(void *txrx_soc)
1259 {
1260 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
1261 
1262 	if (!(soc->wlan_cfg_ctx->napi_enabled) ||
1263 	     con_mode_monitor == QDF_GLOBAL_MONITOR_MODE) {
1264 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
1265 				  "%s: Poll mode", __func__);
1266 		return dp_soc_attach_poll(txrx_soc);
1267 	} else {
1268 
1269 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
1270 				  "%s: Interrupt  mode", __func__);
1271 		return dp_soc_interrupt_attach(txrx_soc);
1272 	}
1273 }
1274 #else
1275 #if defined(DP_INTR_POLL_BASED) && DP_INTR_POLL_BASED
1276 static QDF_STATUS dp_soc_interrupt_attach_wrapper(void *txrx_soc)
1277 {
1278 	return dp_soc_attach_poll(txrx_soc);
1279 }
1280 #else
1281 static QDF_STATUS dp_soc_interrupt_attach_wrapper(void *txrx_soc)
1282 {
1283 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
1284 
1285 	if (hif_is_polled_mode_enabled(soc->hif_handle))
1286 		return dp_soc_attach_poll(txrx_soc);
1287 	else
1288 		return dp_soc_interrupt_attach(txrx_soc);
1289 }
1290 #endif
1291 #endif
1292 
1293 static void dp_soc_interrupt_map_calculate_integrated(struct dp_soc *soc,
1294 		int intr_ctx_num, int *irq_id_map, int *num_irq_r)
1295 {
1296 	int j;
1297 	int num_irq = 0;
1298 
1299 	int tx_mask =
1300 		wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
1301 	int rx_mask =
1302 		wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
1303 	int rx_mon_mask =
1304 		wlan_cfg_get_rx_mon_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
1305 	int rx_err_ring_mask = wlan_cfg_get_rx_err_ring_mask(
1306 					soc->wlan_cfg_ctx, intr_ctx_num);
1307 	int rx_wbm_rel_ring_mask = wlan_cfg_get_rx_wbm_rel_ring_mask(
1308 					soc->wlan_cfg_ctx, intr_ctx_num);
1309 	int reo_status_ring_mask = wlan_cfg_get_reo_status_ring_mask(
1310 					soc->wlan_cfg_ctx, intr_ctx_num);
1311 	int rxdma2host_ring_mask = wlan_cfg_get_rxdma2host_ring_mask(
1312 					soc->wlan_cfg_ctx, intr_ctx_num);
1313 	int host2rxdma_ring_mask = wlan_cfg_get_host2rxdma_ring_mask(
1314 					soc->wlan_cfg_ctx, intr_ctx_num);
1315 
1316 	for (j = 0; j < HIF_MAX_GRP_IRQ; j++) {
1317 
1318 		if (tx_mask & (1 << j)) {
1319 			irq_id_map[num_irq++] =
1320 				(wbm2host_tx_completions_ring1 - j);
1321 		}
1322 
1323 		if (rx_mask & (1 << j)) {
1324 			irq_id_map[num_irq++] =
1325 				(reo2host_destination_ring1 - j);
1326 		}
1327 
1328 		if (rxdma2host_ring_mask & (1 << j)) {
1329 			irq_id_map[num_irq++] =
1330 				rxdma2host_destination_ring_mac1 -
1331 				wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, j);
1332 		}
1333 
1334 		if (host2rxdma_ring_mask & (1 << j)) {
1335 			irq_id_map[num_irq++] =
1336 				host2rxdma_host_buf_ring_mac1 -
1337 				wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, j);
1338 		}
1339 
1340 		if (rx_mon_mask & (1 << j)) {
1341 			irq_id_map[num_irq++] =
1342 				ppdu_end_interrupts_mac1 -
1343 				wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, j);
1344 			irq_id_map[num_irq++] =
1345 				rxdma2host_monitor_status_ring_mac1 -
1346 				wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, j);
1347 		}
1348 
1349 		if (rx_wbm_rel_ring_mask & (1 << j))
1350 			irq_id_map[num_irq++] = wbm2host_rx_release;
1351 
1352 		if (rx_err_ring_mask & (1 << j))
1353 			irq_id_map[num_irq++] = reo2host_exception;
1354 
1355 		if (reo_status_ring_mask & (1 << j))
1356 			irq_id_map[num_irq++] = reo2host_status;
1357 
1358 	}
1359 	*num_irq_r = num_irq;
1360 }
1361 
1362 static void dp_soc_interrupt_map_calculate_msi(struct dp_soc *soc,
1363 		int intr_ctx_num, int *irq_id_map, int *num_irq_r,
1364 		int msi_vector_count, int msi_vector_start)
1365 {
1366 	int tx_mask = wlan_cfg_get_tx_ring_mask(
1367 					soc->wlan_cfg_ctx, intr_ctx_num);
1368 	int rx_mask = wlan_cfg_get_rx_ring_mask(
1369 					soc->wlan_cfg_ctx, intr_ctx_num);
1370 	int rx_mon_mask = wlan_cfg_get_rx_mon_ring_mask(
1371 					soc->wlan_cfg_ctx, intr_ctx_num);
1372 	int rx_err_ring_mask = wlan_cfg_get_rx_err_ring_mask(
1373 					soc->wlan_cfg_ctx, intr_ctx_num);
1374 	int rx_wbm_rel_ring_mask = wlan_cfg_get_rx_wbm_rel_ring_mask(
1375 					soc->wlan_cfg_ctx, intr_ctx_num);
1376 	int reo_status_ring_mask = wlan_cfg_get_reo_status_ring_mask(
1377 					soc->wlan_cfg_ctx, intr_ctx_num);
1378 	int rxdma2host_ring_mask = wlan_cfg_get_rxdma2host_ring_mask(
1379 					soc->wlan_cfg_ctx, intr_ctx_num);
1380 
1381 	unsigned int vector =
1382 		(intr_ctx_num % msi_vector_count) + msi_vector_start;
1383 	int num_irq = 0;
1384 
1385 	soc->intr_mode = DP_INTR_MSI;
1386 
1387 	if (tx_mask | rx_mask | rx_mon_mask | rx_err_ring_mask |
1388 	    rx_wbm_rel_ring_mask | reo_status_ring_mask | rxdma2host_ring_mask)
1389 		irq_id_map[num_irq++] =
1390 			pld_get_msi_irq(soc->osdev->dev, vector);
1391 
1392 	*num_irq_r = num_irq;
1393 }
1394 
1395 static void dp_soc_interrupt_map_calculate(struct dp_soc *soc, int intr_ctx_num,
1396 				    int *irq_id_map, int *num_irq)
1397 {
1398 	int msi_vector_count, ret;
1399 	uint32_t msi_base_data, msi_vector_start;
1400 
1401 	ret = pld_get_user_msi_assignment(soc->osdev->dev, "DP",
1402 					    &msi_vector_count,
1403 					    &msi_base_data,
1404 					    &msi_vector_start);
1405 	if (ret)
1406 		return dp_soc_interrupt_map_calculate_integrated(soc,
1407 				intr_ctx_num, irq_id_map, num_irq);
1408 
1409 	else
1410 		dp_soc_interrupt_map_calculate_msi(soc,
1411 				intr_ctx_num, irq_id_map, num_irq,
1412 				msi_vector_count, msi_vector_start);
1413 }
1414 
1415 /*
1416  * dp_soc_interrupt_attach() - Register handlers for DP interrupts
1417  * @txrx_soc: DP SOC handle
1418  *
1419  * Host driver will register for “DP_NUM_INTERRUPT_CONTEXTS” number of NAPI
1420  * contexts. Each NAPI context will have a tx_ring_mask , rx_ring_mask ,and
1421  * rx_monitor_ring mask to indicate the rings that are processed by the handler.
1422  *
1423  * Return: 0 for success. nonzero for failure.
1424  */
1425 static QDF_STATUS dp_soc_interrupt_attach(void *txrx_soc)
1426 {
1427 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
1428 
1429 	int i = 0;
1430 	int num_irq = 0;
1431 
1432 	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
1433 		int ret = 0;
1434 
1435 		/* Map of IRQ ids registered with one interrupt context */
1436 		int irq_id_map[HIF_MAX_GRP_IRQ];
1437 
1438 		int tx_mask =
1439 			wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, i);
1440 		int rx_mask =
1441 			wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, i);
1442 		int rx_mon_mask =
1443 			dp_soc_get_mon_mask_for_interrupt_mode(soc, i);
1444 		int rx_err_ring_mask =
1445 			wlan_cfg_get_rx_err_ring_mask(soc->wlan_cfg_ctx, i);
1446 		int rx_wbm_rel_ring_mask =
1447 			wlan_cfg_get_rx_wbm_rel_ring_mask(soc->wlan_cfg_ctx, i);
1448 		int reo_status_ring_mask =
1449 			wlan_cfg_get_reo_status_ring_mask(soc->wlan_cfg_ctx, i);
1450 		int rxdma2host_ring_mask =
1451 			wlan_cfg_get_rxdma2host_ring_mask(soc->wlan_cfg_ctx, i);
1452 		int host2rxdma_ring_mask =
1453 			wlan_cfg_get_host2rxdma_ring_mask(soc->wlan_cfg_ctx, i);
1454 
1455 
1456 		soc->intr_ctx[i].dp_intr_id = i;
1457 		soc->intr_ctx[i].tx_ring_mask = tx_mask;
1458 		soc->intr_ctx[i].rx_ring_mask = rx_mask;
1459 		soc->intr_ctx[i].rx_mon_ring_mask = rx_mon_mask;
1460 		soc->intr_ctx[i].rx_err_ring_mask = rx_err_ring_mask;
1461 		soc->intr_ctx[i].rxdma2host_ring_mask = rxdma2host_ring_mask;
1462 		soc->intr_ctx[i].host2rxdma_ring_mask = host2rxdma_ring_mask;
1463 		soc->intr_ctx[i].rx_wbm_rel_ring_mask = rx_wbm_rel_ring_mask;
1464 		soc->intr_ctx[i].reo_status_ring_mask = reo_status_ring_mask;
1465 
1466 		soc->intr_ctx[i].soc = soc;
1467 
1468 		num_irq = 0;
1469 
1470 		dp_soc_interrupt_map_calculate(soc, i, &irq_id_map[0],
1471 					       &num_irq);
1472 
1473 		ret = hif_register_ext_group(soc->hif_handle,
1474 				num_irq, irq_id_map, dp_service_srngs,
1475 				&soc->intr_ctx[i], "dp_intr",
1476 				HIF_EXEC_NAPI_TYPE, QCA_NAPI_DEF_SCALE_BIN_SHIFT);
1477 
1478 		if (ret) {
1479 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1480 			FL("failed, ret = %d"), ret);
1481 
1482 			return QDF_STATUS_E_FAILURE;
1483 		}
1484 		soc->intr_ctx[i].lro_ctx = qdf_lro_init();
1485 	}
1486 
1487 	hif_configure_ext_group_interrupts(soc->hif_handle);
1488 
1489 	return QDF_STATUS_SUCCESS;
1490 }
1491 
1492 /*
1493  * dp_soc_interrupt_detach() - Deregister any allocations done for interrupts
1494  * @txrx_soc: DP SOC handle
1495  *
1496  * Return: void
1497  */
1498 static void dp_soc_interrupt_detach(void *txrx_soc)
1499 {
1500 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
1501 	int i;
1502 
1503 	if (soc->intr_mode == DP_INTR_POLL) {
1504 		qdf_timer_stop(&soc->int_timer);
1505 		qdf_timer_free(&soc->int_timer);
1506 	} else {
1507 		hif_deregister_exec_group(soc->hif_handle, "dp_intr");
1508 	}
1509 
1510 	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
1511 		soc->intr_ctx[i].tx_ring_mask = 0;
1512 		soc->intr_ctx[i].rx_ring_mask = 0;
1513 		soc->intr_ctx[i].rx_mon_ring_mask = 0;
1514 		soc->intr_ctx[i].rx_err_ring_mask = 0;
1515 		soc->intr_ctx[i].rx_wbm_rel_ring_mask = 0;
1516 		soc->intr_ctx[i].reo_status_ring_mask = 0;
1517 		soc->intr_ctx[i].rxdma2host_ring_mask = 0;
1518 		soc->intr_ctx[i].host2rxdma_ring_mask = 0;
1519 
1520 		qdf_lro_deinit(soc->intr_ctx[i].lro_ctx);
1521 	}
1522 }
1523 
1524 #define AVG_MAX_MPDUS_PER_TID 128
1525 #define AVG_TIDS_PER_CLIENT 2
1526 #define AVG_FLOWS_PER_TID 2
1527 #define AVG_MSDUS_PER_FLOW 128
1528 #define AVG_MSDUS_PER_MPDU 4
1529 
1530 /*
1531  * Allocate and setup link descriptor pool that will be used by HW for
1532  * various link and queue descriptors and managed by WBM
1533  */
1534 static int dp_hw_link_desc_pool_setup(struct dp_soc *soc)
1535 {
1536 	int link_desc_size = hal_get_link_desc_size(soc->hal_soc);
1537 	int link_desc_align = hal_get_link_desc_align(soc->hal_soc);
1538 	uint32_t max_clients = wlan_cfg_get_max_clients(soc->wlan_cfg_ctx);
1539 	uint32_t num_mpdus_per_link_desc =
1540 		hal_num_mpdus_per_link_desc(soc->hal_soc);
1541 	uint32_t num_msdus_per_link_desc =
1542 		hal_num_msdus_per_link_desc(soc->hal_soc);
1543 	uint32_t num_mpdu_links_per_queue_desc =
1544 		hal_num_mpdu_links_per_queue_desc(soc->hal_soc);
1545 	uint32_t max_alloc_size = wlan_cfg_max_alloc_size(soc->wlan_cfg_ctx);
1546 	uint32_t total_link_descs, total_mem_size;
1547 	uint32_t num_mpdu_link_descs, num_mpdu_queue_descs;
1548 	uint32_t num_tx_msdu_link_descs, num_rx_msdu_link_descs;
1549 	uint32_t num_link_desc_banks;
1550 	uint32_t last_bank_size = 0;
1551 	uint32_t entry_size, num_entries;
1552 	int i;
1553 	uint32_t desc_id = 0;
1554 
1555 	/* Only Tx queue descriptors are allocated from common link descriptor
1556 	 * pool Rx queue descriptors are not included in this because (REO queue
1557 	 * extension descriptors) they are expected to be allocated contiguously
1558 	 * with REO queue descriptors
1559 	 */
1560 	num_mpdu_link_descs = (max_clients * AVG_TIDS_PER_CLIENT *
1561 		AVG_MAX_MPDUS_PER_TID) / num_mpdus_per_link_desc;
1562 
1563 	num_mpdu_queue_descs = num_mpdu_link_descs /
1564 		num_mpdu_links_per_queue_desc;
1565 
1566 	num_tx_msdu_link_descs = (max_clients * AVG_TIDS_PER_CLIENT *
1567 		AVG_FLOWS_PER_TID * AVG_MSDUS_PER_FLOW) /
1568 		num_msdus_per_link_desc;
1569 
1570 	num_rx_msdu_link_descs = (max_clients * AVG_TIDS_PER_CLIENT *
1571 		AVG_MAX_MPDUS_PER_TID * AVG_MSDUS_PER_MPDU) / 6;
1572 
1573 	num_entries = num_mpdu_link_descs + num_mpdu_queue_descs +
1574 		num_tx_msdu_link_descs + num_rx_msdu_link_descs;
1575 
1576 	/* Round up to power of 2 */
1577 	total_link_descs = 1;
1578 	while (total_link_descs < num_entries)
1579 		total_link_descs <<= 1;
1580 
1581 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
1582 		FL("total_link_descs: %u, link_desc_size: %d"),
1583 		total_link_descs, link_desc_size);
1584 	total_mem_size =  total_link_descs * link_desc_size;
1585 
1586 	total_mem_size += link_desc_align;
1587 
1588 	if (total_mem_size <= max_alloc_size) {
1589 		num_link_desc_banks = 0;
1590 		last_bank_size = total_mem_size;
1591 	} else {
1592 		num_link_desc_banks = (total_mem_size) /
1593 			(max_alloc_size - link_desc_align);
1594 		last_bank_size = total_mem_size %
1595 			(max_alloc_size - link_desc_align);
1596 	}
1597 
1598 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
1599 		FL("total_mem_size: %d, num_link_desc_banks: %u"),
1600 		total_mem_size, num_link_desc_banks);
1601 
1602 	for (i = 0; i < num_link_desc_banks; i++) {
1603 		soc->link_desc_banks[i].base_vaddr_unaligned =
1604 			qdf_mem_alloc_consistent(soc->osdev, soc->osdev->dev,
1605 			max_alloc_size,
1606 			&(soc->link_desc_banks[i].base_paddr_unaligned));
1607 		soc->link_desc_banks[i].size = max_alloc_size;
1608 
1609 		soc->link_desc_banks[i].base_vaddr = (void *)((unsigned long)(
1610 			soc->link_desc_banks[i].base_vaddr_unaligned) +
1611 			((unsigned long)(
1612 			soc->link_desc_banks[i].base_vaddr_unaligned) %
1613 			link_desc_align));
1614 
1615 		soc->link_desc_banks[i].base_paddr = (unsigned long)(
1616 			soc->link_desc_banks[i].base_paddr_unaligned) +
1617 			((unsigned long)(soc->link_desc_banks[i].base_vaddr) -
1618 			(unsigned long)(
1619 			soc->link_desc_banks[i].base_vaddr_unaligned));
1620 
1621 		if (!soc->link_desc_banks[i].base_vaddr_unaligned) {
1622 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1623 				FL("Link descriptor memory alloc failed"));
1624 			goto fail;
1625 		}
1626 	}
1627 
1628 	if (last_bank_size) {
1629 		/* Allocate last bank in case total memory required is not exact
1630 		 * multiple of max_alloc_size
1631 		 */
1632 		soc->link_desc_banks[i].base_vaddr_unaligned =
1633 			qdf_mem_alloc_consistent(soc->osdev, soc->osdev->dev,
1634 			last_bank_size,
1635 			&(soc->link_desc_banks[i].base_paddr_unaligned));
1636 		soc->link_desc_banks[i].size = last_bank_size;
1637 
1638 		soc->link_desc_banks[i].base_vaddr = (void *)((unsigned long)
1639 			(soc->link_desc_banks[i].base_vaddr_unaligned) +
1640 			((unsigned long)(
1641 			soc->link_desc_banks[i].base_vaddr_unaligned) %
1642 			link_desc_align));
1643 
1644 		soc->link_desc_banks[i].base_paddr =
1645 			(unsigned long)(
1646 			soc->link_desc_banks[i].base_paddr_unaligned) +
1647 			((unsigned long)(soc->link_desc_banks[i].base_vaddr) -
1648 			(unsigned long)(
1649 			soc->link_desc_banks[i].base_vaddr_unaligned));
1650 	}
1651 
1652 
1653 	/* Allocate and setup link descriptor idle list for HW internal use */
1654 	entry_size = hal_srng_get_entrysize(soc->hal_soc, WBM_IDLE_LINK);
1655 	total_mem_size = entry_size * total_link_descs;
1656 
1657 	if (total_mem_size <= max_alloc_size) {
1658 		void *desc;
1659 
1660 		if (dp_srng_setup(soc, &soc->wbm_idle_link_ring,
1661 			WBM_IDLE_LINK, 0, 0, total_link_descs)) {
1662 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1663 				FL("Link desc idle ring setup failed"));
1664 			goto fail;
1665 		}
1666 
1667 		hal_srng_access_start_unlocked(soc->hal_soc,
1668 			soc->wbm_idle_link_ring.hal_srng);
1669 
1670 		for (i = 0; i < MAX_LINK_DESC_BANKS &&
1671 			soc->link_desc_banks[i].base_paddr; i++) {
1672 			uint32_t num_entries = (soc->link_desc_banks[i].size -
1673 				((unsigned long)(
1674 				soc->link_desc_banks[i].base_vaddr) -
1675 				(unsigned long)(
1676 				soc->link_desc_banks[i].base_vaddr_unaligned)))
1677 				/ link_desc_size;
1678 			unsigned long paddr = (unsigned long)(
1679 				soc->link_desc_banks[i].base_paddr);
1680 
1681 			while (num_entries && (desc = hal_srng_src_get_next(
1682 				soc->hal_soc,
1683 				soc->wbm_idle_link_ring.hal_srng))) {
1684 				hal_set_link_desc_addr(desc,
1685 					LINK_DESC_COOKIE(desc_id, i), paddr);
1686 				num_entries--;
1687 				desc_id++;
1688 				paddr += link_desc_size;
1689 			}
1690 		}
1691 		hal_srng_access_end_unlocked(soc->hal_soc,
1692 			soc->wbm_idle_link_ring.hal_srng);
1693 	} else {
1694 		uint32_t num_scatter_bufs;
1695 		uint32_t num_entries_per_buf;
1696 		uint32_t rem_entries;
1697 		uint8_t *scatter_buf_ptr;
1698 		uint16_t scatter_buf_num;
1699 
1700 		soc->wbm_idle_scatter_buf_size =
1701 			hal_idle_list_scatter_buf_size(soc->hal_soc);
1702 		num_entries_per_buf = hal_idle_scatter_buf_num_entries(
1703 			soc->hal_soc, soc->wbm_idle_scatter_buf_size);
1704 		num_scatter_bufs = hal_idle_list_num_scatter_bufs(
1705 					soc->hal_soc, total_mem_size,
1706 					soc->wbm_idle_scatter_buf_size);
1707 
1708 		if (num_scatter_bufs > MAX_IDLE_SCATTER_BUFS) {
1709 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1710 					FL("scatter bufs size out of bounds"));
1711 			goto fail;
1712 		}
1713 
1714 		for (i = 0; i < num_scatter_bufs; i++) {
1715 			soc->wbm_idle_scatter_buf_base_vaddr[i] =
1716 				qdf_mem_alloc_consistent(soc->osdev,
1717 							soc->osdev->dev,
1718 				soc->wbm_idle_scatter_buf_size,
1719 				&(soc->wbm_idle_scatter_buf_base_paddr[i]));
1720 			if (soc->wbm_idle_scatter_buf_base_vaddr[i] == NULL) {
1721 				QDF_TRACE(QDF_MODULE_ID_DP,
1722 						QDF_TRACE_LEVEL_ERROR,
1723 					FL("Scatter list memory alloc failed"));
1724 				goto fail;
1725 			}
1726 		}
1727 
1728 		/* Populate idle list scatter buffers with link descriptor
1729 		 * pointers
1730 		 */
1731 		scatter_buf_num = 0;
1732 		scatter_buf_ptr = (uint8_t *)(
1733 			soc->wbm_idle_scatter_buf_base_vaddr[scatter_buf_num]);
1734 		rem_entries = num_entries_per_buf;
1735 
1736 		for (i = 0; i < MAX_LINK_DESC_BANKS &&
1737 			soc->link_desc_banks[i].base_paddr; i++) {
1738 			uint32_t num_link_descs =
1739 				(soc->link_desc_banks[i].size -
1740 				((unsigned long)(
1741 				soc->link_desc_banks[i].base_vaddr) -
1742 				(unsigned long)(
1743 				soc->link_desc_banks[i].base_vaddr_unaligned)))
1744 				/ link_desc_size;
1745 			unsigned long paddr = (unsigned long)(
1746 				soc->link_desc_banks[i].base_paddr);
1747 
1748 			while (num_link_descs) {
1749 				hal_set_link_desc_addr((void *)scatter_buf_ptr,
1750 					LINK_DESC_COOKIE(desc_id, i), paddr);
1751 				num_link_descs--;
1752 				desc_id++;
1753 				paddr += link_desc_size;
1754 				rem_entries--;
1755 				if (rem_entries) {
1756 					scatter_buf_ptr += entry_size;
1757 				} else {
1758 					rem_entries = num_entries_per_buf;
1759 					scatter_buf_num++;
1760 
1761 					if (scatter_buf_num >= num_scatter_bufs)
1762 						break;
1763 
1764 					scatter_buf_ptr = (uint8_t *)(
1765 						soc->wbm_idle_scatter_buf_base_vaddr[
1766 						scatter_buf_num]);
1767 				}
1768 			}
1769 		}
1770 		/* Setup link descriptor idle list in HW */
1771 		hal_setup_link_idle_list(soc->hal_soc,
1772 			soc->wbm_idle_scatter_buf_base_paddr,
1773 			soc->wbm_idle_scatter_buf_base_vaddr,
1774 			num_scatter_bufs, soc->wbm_idle_scatter_buf_size,
1775 			(uint32_t)(scatter_buf_ptr -
1776 			(uint8_t *)(soc->wbm_idle_scatter_buf_base_vaddr[
1777 			scatter_buf_num-1])), total_link_descs);
1778 	}
1779 	return 0;
1780 
1781 fail:
1782 	if (soc->wbm_idle_link_ring.hal_srng) {
1783 		dp_srng_cleanup(soc, &soc->wbm_idle_link_ring,
1784 				WBM_IDLE_LINK, 0);
1785 	}
1786 
1787 	for (i = 0; i < MAX_IDLE_SCATTER_BUFS; i++) {
1788 		if (soc->wbm_idle_scatter_buf_base_vaddr[i]) {
1789 			qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
1790 				soc->wbm_idle_scatter_buf_size,
1791 				soc->wbm_idle_scatter_buf_base_vaddr[i],
1792 				soc->wbm_idle_scatter_buf_base_paddr[i], 0);
1793 			soc->wbm_idle_scatter_buf_base_vaddr[i] = NULL;
1794 		}
1795 	}
1796 
1797 	for (i = 0; i < MAX_LINK_DESC_BANKS; i++) {
1798 		if (soc->link_desc_banks[i].base_vaddr_unaligned) {
1799 			qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
1800 				soc->link_desc_banks[i].size,
1801 				soc->link_desc_banks[i].base_vaddr_unaligned,
1802 				soc->link_desc_banks[i].base_paddr_unaligned,
1803 				0);
1804 			soc->link_desc_banks[i].base_vaddr_unaligned = NULL;
1805 		}
1806 	}
1807 	return QDF_STATUS_E_FAILURE;
1808 }
1809 
1810 /*
1811  * Free link descriptor pool that was setup HW
1812  */
1813 static void dp_hw_link_desc_pool_cleanup(struct dp_soc *soc)
1814 {
1815 	int i;
1816 
1817 	if (soc->wbm_idle_link_ring.hal_srng) {
1818 		dp_srng_cleanup(soc, &soc->wbm_idle_link_ring,
1819 			WBM_IDLE_LINK, 0);
1820 	}
1821 
1822 	for (i = 0; i < MAX_IDLE_SCATTER_BUFS; i++) {
1823 		if (soc->wbm_idle_scatter_buf_base_vaddr[i]) {
1824 			qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
1825 				soc->wbm_idle_scatter_buf_size,
1826 				soc->wbm_idle_scatter_buf_base_vaddr[i],
1827 				soc->wbm_idle_scatter_buf_base_paddr[i], 0);
1828 			soc->wbm_idle_scatter_buf_base_vaddr[i] = NULL;
1829 		}
1830 	}
1831 
1832 	for (i = 0; i < MAX_LINK_DESC_BANKS; i++) {
1833 		if (soc->link_desc_banks[i].base_vaddr_unaligned) {
1834 			qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
1835 				soc->link_desc_banks[i].size,
1836 				soc->link_desc_banks[i].base_vaddr_unaligned,
1837 				soc->link_desc_banks[i].base_paddr_unaligned,
1838 				0);
1839 			soc->link_desc_banks[i].base_vaddr_unaligned = NULL;
1840 		}
1841 	}
1842 }
1843 
1844 #define REO_DST_RING_SIZE_QCA6290 1024
1845 #define REO_DST_RING_SIZE_QCA8074 2048
1846 
1847 /*
1848  * dp_wds_aging_timer_fn() - Timer callback function for WDS aging
1849  * @soc: Datapath SOC handle
1850  *
1851  * This is a timer function used to age out stale AST nodes from
1852  * AST table
1853  */
1854 #ifdef FEATURE_WDS
1855 static void dp_wds_aging_timer_fn(void *soc_hdl)
1856 {
1857 	struct dp_soc *soc = (struct dp_soc *) soc_hdl;
1858 	struct dp_pdev *pdev;
1859 	struct dp_vdev *vdev;
1860 	struct dp_peer *peer;
1861 	struct dp_ast_entry *ase, *temp_ase;
1862 	int i;
1863 
1864 	qdf_spin_lock_bh(&soc->ast_lock);
1865 
1866 	for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) {
1867 		pdev = soc->pdev_list[i];
1868 		qdf_spin_lock_bh(&pdev->vdev_list_lock);
1869 		DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
1870 			DP_VDEV_ITERATE_PEER_LIST(vdev, peer) {
1871 				DP_PEER_ITERATE_ASE_LIST(peer, ase, temp_ase) {
1872 					/*
1873 					 * Do not expire static ast entries
1874 					 * and HM WDS entries
1875 					 */
1876 					if (ase->type != CDP_TXRX_AST_TYPE_WDS)
1877 						continue;
1878 
1879 					if (ase->is_active) {
1880 						ase->is_active = FALSE;
1881 						continue;
1882 					}
1883 
1884 					DP_STATS_INC(soc, ast.aged_out, 1);
1885 					dp_peer_del_ast(soc, ase);
1886 				}
1887 			}
1888 		}
1889 		qdf_spin_unlock_bh(&pdev->vdev_list_lock);
1890 	}
1891 
1892 	qdf_spin_unlock_bh(&soc->ast_lock);
1893 
1894 	if (qdf_atomic_read(&soc->cmn_init_done))
1895 		qdf_timer_mod(&soc->wds_aging_timer, DP_WDS_AGING_TIMER_DEFAULT_MS);
1896 }
1897 
1898 
1899 /*
1900  * dp_soc_wds_attach() - Setup WDS timer and AST table
1901  * @soc:		Datapath SOC handle
1902  *
1903  * Return: None
1904  */
1905 static void dp_soc_wds_attach(struct dp_soc *soc)
1906 {
1907 	qdf_timer_init(soc->osdev, &soc->wds_aging_timer,
1908 			dp_wds_aging_timer_fn, (void *)soc,
1909 			QDF_TIMER_TYPE_WAKE_APPS);
1910 
1911 	qdf_timer_mod(&soc->wds_aging_timer, DP_WDS_AGING_TIMER_DEFAULT_MS);
1912 }
1913 
1914 /*
1915  * dp_soc_wds_detach() - Detach WDS data structures and timers
1916  * @txrx_soc: DP SOC handle
1917  *
1918  * Return: None
1919  */
1920 static void dp_soc_wds_detach(struct dp_soc *soc)
1921 {
1922 	qdf_timer_stop(&soc->wds_aging_timer);
1923 	qdf_timer_free(&soc->wds_aging_timer);
1924 }
1925 #else
1926 static void dp_soc_wds_attach(struct dp_soc *soc)
1927 {
1928 }
1929 
1930 static void dp_soc_wds_detach(struct dp_soc *soc)
1931 {
1932 }
1933 #endif
1934 
1935 /*
1936  * dp_soc_reset_ring_map() - Reset cpu ring map
1937  * @soc: Datapath soc handler
1938  *
1939  * This api resets the default cpu ring map
1940  */
1941 
1942 static void dp_soc_reset_cpu_ring_map(struct dp_soc *soc)
1943 {
1944 	uint8_t i;
1945 	int nss_config = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
1946 
1947 	for (i = 0; i < WLAN_CFG_INT_NUM_CONTEXTS; i++) {
1948 		if (nss_config == 1) {
1949 			/*
1950 			 * Setting Tx ring map for one nss offloaded radio
1951 			 */
1952 			soc->tx_ring_map[i] = dp_cpu_ring_map[DP_NSS_FIRST_RADIO_OFFLOADED_MAP][i];
1953 		} else if (nss_config == 2) {
1954 			/*
1955 			 * Setting Tx ring for two nss offloaded radios
1956 			 */
1957 			soc->tx_ring_map[i] = dp_cpu_ring_map[DP_NSS_SECOND_RADIO_OFFLOADED_MAP][i];
1958 		} else {
1959 			/*
1960 			 * Setting Tx ring map for all nss offloaded radios
1961 			 */
1962 			soc->tx_ring_map[i] = dp_cpu_ring_map[DP_NSS_ALL_RADIO_OFFLOADED_MAP][i];
1963 		}
1964 	}
1965 }
1966 
1967 /*
1968  * dp_soc_ring_if_nss_offloaded() - find if ring is offloaded to NSS
1969  * @dp_soc - DP soc handle
1970  * @ring_type - ring type
1971  * @ring_num - ring_num
1972  *
1973  * return 0 or 1
1974  */
1975 static uint8_t dp_soc_ring_if_nss_offloaded(struct dp_soc *soc, enum hal_ring_type ring_type, int ring_num)
1976 {
1977 	uint8_t nss_config = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
1978 	uint8_t status = 0;
1979 
1980 	switch (ring_type) {
1981 	case WBM2SW_RELEASE:
1982 	case REO_DST:
1983 	case RXDMA_BUF:
1984 		status = ((nss_config) & (1 << ring_num));
1985 		break;
1986 	default:
1987 		break;
1988 	}
1989 
1990 	return status;
1991 }
1992 
1993 /*
1994  * dp_soc_reset_intr_mask() - reset interrupt mask
1995  * @dp_soc - DP Soc handle
1996  *
1997  * Return: Return void
1998  */
1999 static void dp_soc_reset_intr_mask(struct dp_soc *soc)
2000 {
2001 	uint8_t j;
2002 	int *grp_mask = NULL;
2003 	int group_number, mask, num_ring;
2004 
2005 	/* number of tx ring */
2006 	num_ring = wlan_cfg_num_tcl_data_rings(soc->wlan_cfg_ctx);
2007 
2008 	/*
2009 	 * group mask for tx completion  ring.
2010 	 */
2011 	grp_mask =  &soc->wlan_cfg_ctx->int_tx_ring_mask[0];
2012 
2013 	/* loop and reset the mask for only offloaded ring */
2014 	for (j = 0; j < num_ring; j++) {
2015 		if (!dp_soc_ring_if_nss_offloaded(soc, WBM2SW_RELEASE, j)) {
2016 			continue;
2017 		}
2018 
2019 		/*
2020 		 * Group number corresponding to tx offloaded ring.
2021 		 */
2022 		group_number = dp_srng_find_ring_in_mask(j, grp_mask);
2023 		if (group_number < 0) {
2024 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2025 					FL("ring not part of any group; ring_type: %d,ring_num %d"),
2026 					WBM2SW_RELEASE, j);
2027 			return;
2028 		}
2029 
2030 		/* reset the tx mask for offloaded ring */
2031 		mask = wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, group_number);
2032 		mask &= (~(1 << j));
2033 
2034 		/*
2035 		 * reset the interrupt mask for offloaded ring.
2036 		 */
2037 		wlan_cfg_set_tx_ring_mask(soc->wlan_cfg_ctx, group_number, mask);
2038 	}
2039 
2040 	/* number of rx rings */
2041 	num_ring = wlan_cfg_num_reo_dest_rings(soc->wlan_cfg_ctx);
2042 
2043 	/*
2044 	 * group mask for reo destination ring.
2045 	 */
2046 	grp_mask = &soc->wlan_cfg_ctx->int_rx_ring_mask[0];
2047 
2048 	/* loop and reset the mask for only offloaded ring */
2049 	for (j = 0; j < num_ring; j++) {
2050 		if (!dp_soc_ring_if_nss_offloaded(soc, REO_DST, j)) {
2051 			continue;
2052 		}
2053 
2054 		/*
2055 		 * Group number corresponding to rx offloaded ring.
2056 		 */
2057 		group_number = dp_srng_find_ring_in_mask(j, grp_mask);
2058 		if (group_number < 0) {
2059 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2060 					FL("ring not part of any group; ring_type: %d,ring_num %d"),
2061 					REO_DST, j);
2062 			return;
2063 		}
2064 
2065 		/* set the interrupt mask for offloaded ring */
2066 		mask =  wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, group_number);
2067 		mask &= (~(1 << j));
2068 
2069 		/*
2070 		 * set the interrupt mask to zero for rx offloaded radio.
2071 		 */
2072 		wlan_cfg_set_rx_ring_mask(soc->wlan_cfg_ctx, group_number, mask);
2073 	}
2074 
2075 	/*
2076 	 * group mask for Rx buffer refill ring
2077 	 */
2078 	grp_mask = &soc->wlan_cfg_ctx->int_host2rxdma_ring_mask[0];
2079 
2080 	/* loop and reset the mask for only offloaded ring */
2081 	for (j = 0; j < MAX_PDEV_CNT; j++) {
2082 		if (!dp_soc_ring_if_nss_offloaded(soc, RXDMA_BUF, j)) {
2083 			continue;
2084 		}
2085 
2086 		/*
2087 		 * Group number corresponding to rx offloaded ring.
2088 		 */
2089 		group_number = dp_srng_find_ring_in_mask(j, grp_mask);
2090 		if (group_number < 0) {
2091 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2092 					FL("ring not part of any group; ring_type: %d,ring_num %d"),
2093 					REO_DST, j);
2094 			return;
2095 		}
2096 
2097 		/* set the interrupt mask for offloaded ring */
2098 		mask =  wlan_cfg_get_host2rxdma_ring_mask(soc->wlan_cfg_ctx,
2099 				group_number);
2100 		mask &= (~(1 << j));
2101 
2102 		/*
2103 		 * set the interrupt mask to zero for rx offloaded radio.
2104 		 */
2105 		wlan_cfg_set_host2rxdma_ring_mask(soc->wlan_cfg_ctx,
2106 			group_number, mask);
2107 	}
2108 }
2109 
2110 #ifdef IPA_OFFLOAD
2111 /**
2112  * dp_reo_remap_config() - configure reo remap register value based
2113  *                         nss configuration.
2114  *		based on offload_radio value below remap configuration
2115  *		get applied.
2116  *		0 - both Radios handled by host (remap rings 1, 2, 3 & 4)
2117  *		1 - 1st Radio handled by NSS (remap rings 2, 3 & 4)
2118  *		2 - 2nd Radio handled by NSS (remap rings 1, 2 & 4)
2119  *		3 - both Radios handled by NSS (remap not required)
2120  *		4 - IPA OFFLOAD enabled (remap rings 1,2 & 3)
2121  *
2122  * @remap1: output parameter indicates reo remap 1 register value
2123  * @remap2: output parameter indicates reo remap 2 register value
2124  * Return: bool type, true if remap is configured else false.
2125  */
2126 static bool dp_reo_remap_config(struct dp_soc *soc,
2127 				uint32_t *remap1,
2128 				uint32_t *remap2)
2129 {
2130 
2131 	*remap1 = ((0x1 << 0) | (0x2 << 3) | (0x3 << 6) | (0x1 << 9) |
2132 		(0x2 << 12) | (0x3 << 15) | (0x1 << 18) | (0x2 << 21)) << 8;
2133 
2134 	*remap2 = ((0x3 << 0) | (0x1 << 3) | (0x2 << 6) | (0x3 << 9) |
2135 		(0x1 << 12) | (0x2 << 15) | (0x3 << 18) | (0x1 << 21)) << 8;
2136 
2137 	return true;
2138 }
2139 #else
2140 static bool dp_reo_remap_config(struct dp_soc *soc,
2141 				uint32_t *remap1,
2142 				uint32_t *remap2)
2143 {
2144 	uint8_t offload_radio = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
2145 
2146 	switch (offload_radio) {
2147 	case 0:
2148 		*remap1 = ((0x1 << 0) | (0x2 << 3) | (0x3 << 6) |
2149 			(0x4 << 9) | (0x1 << 12) | (0x2 << 15) |
2150 			(0x3 << 18) | (0x4 << 21)) << 8;
2151 
2152 		*remap2 = ((0x1 << 0) | (0x2 << 3) | (0x3 << 6) |
2153 			(0x4 << 9) | (0x1 << 12) | (0x2 << 15) |
2154 			(0x3 << 18) | (0x4 << 21)) << 8;
2155 		break;
2156 
2157 	case 1:
2158 		*remap1 = ((0x2 << 0) | (0x3 << 3) | (0x4 << 6) |
2159 			(0x2 << 9) | (0x3 << 12) | (0x4 << 15) |
2160 			(0x2 << 18) | (0x3 << 21)) << 8;
2161 
2162 		*remap2 = ((0x4 << 0) | (0x2 << 3) | (0x3 << 6) |
2163 			(0x4 << 9) | (0x2 << 12) | (0x3 << 15) |
2164 			(0x4 << 18) | (0x2 << 21)) << 8;
2165 		break;
2166 
2167 	case 2:
2168 		*remap1 = ((0x1 << 0) | (0x3 << 3) | (0x4 << 6) |
2169 			(0x1 << 9) | (0x3 << 12) | (0x4 << 15) |
2170 			(0x1 << 18) | (0x3 << 21)) << 8;
2171 
2172 		*remap2 = ((0x4 << 0) | (0x1 << 3) | (0x3 << 6) |
2173 			(0x4 << 9) | (0x1 << 12) | (0x3 << 15) |
2174 			(0x4 << 18) | (0x1 << 21)) << 8;
2175 		break;
2176 
2177 	case 3:
2178 		/* return false if both radios are offloaded to NSS */
2179 		return false;
2180 	}
2181 	return true;
2182 }
2183 #endif
2184 
2185 /*
2186  * dp_reo_frag_dst_set() - configure reo register to set the
2187  *                        fragment destination ring
2188  * @soc : Datapath soc
2189  * @frag_dst_ring : output parameter to set fragment destination ring
2190  *
2191  * Based on offload_radio below fragment destination rings is selected
2192  * 0 - TCL
2193  * 1 - SW1
2194  * 2 - SW2
2195  * 3 - SW3
2196  * 4 - SW4
2197  * 5 - Release
2198  * 6 - FW
2199  * 7 - alternate select
2200  *
2201  * return: void
2202  */
2203 static void dp_reo_frag_dst_set(struct dp_soc *soc, uint8_t *frag_dst_ring)
2204 {
2205 	uint8_t offload_radio = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
2206 
2207 	switch (offload_radio) {
2208 	case 0:
2209 		*frag_dst_ring = HAL_SRNG_REO_EXCEPTION;
2210 		break;
2211 	case 3:
2212 		*frag_dst_ring = HAL_SRNG_REO_ALTERNATE_SELECT;
2213 		break;
2214 	default:
2215 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2216 				FL("dp_reo_frag_dst_set invalid offload radio config"));
2217 		break;
2218 	}
2219 }
2220 
2221 /*
2222  * dp_soc_cmn_setup() - Common SoC level initializion
2223  * @soc:		Datapath SOC handle
2224  *
2225  * This is an internal function used to setup common SOC data structures,
2226  * to be called from PDEV attach after receiving HW mode capabilities from FW
2227  */
2228 static int dp_soc_cmn_setup(struct dp_soc *soc)
2229 {
2230 	int i;
2231 	struct hal_reo_params reo_params;
2232 	int tx_ring_size;
2233 	int tx_comp_ring_size;
2234 	int reo_dst_ring_size;
2235 	uint32_t entries;
2236 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
2237 
2238 	if (qdf_atomic_read(&soc->cmn_init_done))
2239 		return 0;
2240 
2241 	if (dp_hw_link_desc_pool_setup(soc))
2242 		goto fail1;
2243 
2244 	soc_cfg_ctx = soc->wlan_cfg_ctx;
2245 	/* Setup SRNG rings */
2246 	/* Common rings */
2247 	if (dp_srng_setup(soc, &soc->wbm_desc_rel_ring, SW2WBM_RELEASE, 0, 0,
2248 		wlan_cfg_get_dp_soc_wbm_release_ring_size(soc_cfg_ctx))) {
2249 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2250 			FL("dp_srng_setup failed for wbm_desc_rel_ring"));
2251 		goto fail1;
2252 	}
2253 
2254 
2255 	soc->num_tcl_data_rings = 0;
2256 	/* Tx data rings */
2257 	if (!wlan_cfg_per_pdev_tx_ring(soc_cfg_ctx)) {
2258 		soc->num_tcl_data_rings =
2259 			wlan_cfg_num_tcl_data_rings(soc_cfg_ctx);
2260 		tx_comp_ring_size =
2261 			wlan_cfg_tx_comp_ring_size(soc_cfg_ctx);
2262 		tx_ring_size =
2263 			wlan_cfg_tx_ring_size(soc_cfg_ctx);
2264 		for (i = 0; i < soc->num_tcl_data_rings; i++) {
2265 			if (dp_srng_setup(soc, &soc->tcl_data_ring[i],
2266 				TCL_DATA, i, 0, tx_ring_size)) {
2267 				QDF_TRACE(QDF_MODULE_ID_DP,
2268 					QDF_TRACE_LEVEL_ERROR,
2269 					FL("dp_srng_setup failed for tcl_data_ring[%d]"), i);
2270 				goto fail1;
2271 			}
2272 			/*
2273 			 * TBD: Set IPA WBM ring size with ini IPA UC tx buffer
2274 			 * count
2275 			 */
2276 			if (dp_srng_setup(soc, &soc->tx_comp_ring[i],
2277 				WBM2SW_RELEASE, i, 0, tx_comp_ring_size)) {
2278 				QDF_TRACE(QDF_MODULE_ID_DP,
2279 					QDF_TRACE_LEVEL_ERROR,
2280 					FL("dp_srng_setup failed for tx_comp_ring[%d]"), i);
2281 				goto fail1;
2282 			}
2283 		}
2284 	} else {
2285 		/* This will be incremented during per pdev ring setup */
2286 		soc->num_tcl_data_rings = 0;
2287 	}
2288 
2289 	if (dp_tx_soc_attach(soc)) {
2290 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2291 				FL("dp_tx_soc_attach failed"));
2292 		goto fail1;
2293 	}
2294 
2295 	entries = wlan_cfg_get_dp_soc_tcl_cmd_ring_size(soc_cfg_ctx);
2296 	/* TCL command and status rings */
2297 	if (dp_srng_setup(soc, &soc->tcl_cmd_ring, TCL_CMD, 0, 0,
2298 			  entries)) {
2299 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2300 			FL("dp_srng_setup failed for tcl_cmd_ring"));
2301 		goto fail1;
2302 	}
2303 
2304 	entries = wlan_cfg_get_dp_soc_tcl_status_ring_size(soc_cfg_ctx);
2305 	if (dp_srng_setup(soc, &soc->tcl_status_ring, TCL_STATUS, 0, 0,
2306 			  entries)) {
2307 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2308 			FL("dp_srng_setup failed for tcl_status_ring"));
2309 		goto fail1;
2310 	}
2311 
2312 	reo_dst_ring_size = wlan_cfg_get_reo_dst_ring_size(soc->wlan_cfg_ctx);
2313 
2314 	/* TBD: call dp_tx_init to setup Tx SW descriptors and MSDU extension
2315 	 * descriptors
2316 	 */
2317 
2318 	/* Rx data rings */
2319 	if (!wlan_cfg_per_pdev_rx_ring(soc_cfg_ctx)) {
2320 		soc->num_reo_dest_rings =
2321 			wlan_cfg_num_reo_dest_rings(soc_cfg_ctx);
2322 		QDF_TRACE(QDF_MODULE_ID_DP,
2323 			QDF_TRACE_LEVEL_INFO,
2324 			FL("num_reo_dest_rings %d"), soc->num_reo_dest_rings);
2325 		for (i = 0; i < soc->num_reo_dest_rings; i++) {
2326 			if (dp_srng_setup(soc, &soc->reo_dest_ring[i], REO_DST,
2327 				i, 0, reo_dst_ring_size)) {
2328 				QDF_TRACE(QDF_MODULE_ID_DP,
2329 					  QDF_TRACE_LEVEL_ERROR,
2330 					  FL(RNG_ERR "reo_dest_ring [%d]"), i);
2331 				goto fail1;
2332 			}
2333 		}
2334 	} else {
2335 		/* This will be incremented during per pdev ring setup */
2336 		soc->num_reo_dest_rings = 0;
2337 	}
2338 
2339 	entries = wlan_cfg_get_dp_soc_rxdma_err_dst_ring_size(soc_cfg_ctx);
2340 	/* LMAC RxDMA to SW Rings configuration */
2341 	if (!wlan_cfg_per_pdev_lmac_ring(soc_cfg_ctx)) {
2342 		/* Only valid for MCL */
2343 		struct dp_pdev *pdev = soc->pdev_list[0];
2344 
2345 		for (i = 0; i < MAX_RX_MAC_RINGS; i++) {
2346 			if (dp_srng_setup(soc, &pdev->rxdma_err_dst_ring[i],
2347 					  RXDMA_DST, 0, i,
2348 					  entries)) {
2349 				QDF_TRACE(QDF_MODULE_ID_DP,
2350 					  QDF_TRACE_LEVEL_ERROR,
2351 					  FL(RNG_ERR "rxdma_err_dst_ring"));
2352 				goto fail1;
2353 			}
2354 		}
2355 	}
2356 	/* TBD: call dp_rx_init to setup Rx SW descriptors */
2357 
2358 	/* REO reinjection ring */
2359 	entries = wlan_cfg_get_dp_soc_reo_reinject_ring_size(soc_cfg_ctx);
2360 	if (dp_srng_setup(soc, &soc->reo_reinject_ring, REO_REINJECT, 0, 0,
2361 			  entries)) {
2362 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2363 			  FL("dp_srng_setup failed for reo_reinject_ring"));
2364 		goto fail1;
2365 	}
2366 
2367 
2368 	/* Rx release ring */
2369 	if (dp_srng_setup(soc, &soc->rx_rel_ring, WBM2SW_RELEASE, 3, 0,
2370 		wlan_cfg_get_dp_soc_rx_release_ring_size(soc_cfg_ctx))) {
2371 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2372 			  FL("dp_srng_setup failed for rx_rel_ring"));
2373 		goto fail1;
2374 	}
2375 
2376 
2377 	/* Rx exception ring */
2378 	entries = wlan_cfg_get_dp_soc_reo_exception_ring_size(soc_cfg_ctx);
2379 	if (dp_srng_setup(soc, &soc->reo_exception_ring,
2380 			  REO_EXCEPTION, 0, MAX_REO_DEST_RINGS, entries)) {
2381 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2382 			  FL("dp_srng_setup failed for reo_exception_ring"));
2383 		goto fail1;
2384 	}
2385 
2386 
2387 	/* REO command and status rings */
2388 	if (dp_srng_setup(soc, &soc->reo_cmd_ring, REO_CMD, 0, 0,
2389 		wlan_cfg_get_dp_soc_reo_cmd_ring_size(soc_cfg_ctx))) {
2390 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2391 			FL("dp_srng_setup failed for reo_cmd_ring"));
2392 		goto fail1;
2393 	}
2394 
2395 	hal_reo_init_cmd_ring(soc->hal_soc, soc->reo_cmd_ring.hal_srng);
2396 	TAILQ_INIT(&soc->rx.reo_cmd_list);
2397 	qdf_spinlock_create(&soc->rx.reo_cmd_lock);
2398 
2399 	if (dp_srng_setup(soc, &soc->reo_status_ring, REO_STATUS, 0, 0,
2400 		wlan_cfg_get_dp_soc_reo_status_ring_size(soc_cfg_ctx))) {
2401 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2402 			FL("dp_srng_setup failed for reo_status_ring"));
2403 		goto fail1;
2404 	}
2405 
2406 	qdf_spinlock_create(&soc->ast_lock);
2407 	dp_soc_wds_attach(soc);
2408 
2409 	/* Reset the cpu ring map if radio is NSS offloaded */
2410 	if (wlan_cfg_get_dp_soc_nss_cfg(soc_cfg_ctx)) {
2411 		dp_soc_reset_cpu_ring_map(soc);
2412 		dp_soc_reset_intr_mask(soc);
2413 	}
2414 
2415 	/* Setup HW REO */
2416 	qdf_mem_zero(&reo_params, sizeof(reo_params));
2417 
2418 	if (wlan_cfg_is_rx_hash_enabled(soc_cfg_ctx)) {
2419 
2420 		/*
2421 		 * Reo ring remap is not required if both radios
2422 		 * are offloaded to NSS
2423 		 */
2424 		if (!dp_reo_remap_config(soc,
2425 					&reo_params.remap1,
2426 					&reo_params.remap2))
2427 			goto out;
2428 
2429 		reo_params.rx_hash_enabled = true;
2430 	}
2431 
2432 	/* setup the global rx defrag waitlist */
2433 	TAILQ_INIT(&soc->rx.defrag.waitlist);
2434 	soc->rx.defrag.timeout_ms =
2435 		wlan_cfg_get_rx_defrag_min_timeout(soc_cfg_ctx);
2436 	soc->rx.flags.defrag_timeout_check =
2437 		wlan_cfg_get_defrag_timeout_check(soc_cfg_ctx);
2438 	qdf_spinlock_create(&soc->rx.defrag.defrag_lock);
2439 
2440 out:
2441 	/*
2442 	 * set the fragment destination ring
2443 	 */
2444 	dp_reo_frag_dst_set(soc, &reo_params.frag_dst_ring);
2445 
2446 	hal_reo_setup(soc->hal_soc, &reo_params);
2447 
2448 	qdf_atomic_set(&soc->cmn_init_done, 1);
2449 	qdf_nbuf_queue_init(&soc->htt_stats.msg);
2450 	return 0;
2451 fail1:
2452 	/*
2453 	 * Cleanup will be done as part of soc_detach, which will
2454 	 * be called on pdev attach failure
2455 	 */
2456 	return QDF_STATUS_E_FAILURE;
2457 }
2458 
2459 static void dp_pdev_detach_wifi3(struct cdp_pdev *txrx_pdev, int force);
2460 
2461 static void dp_lro_hash_setup(struct dp_soc *soc)
2462 {
2463 	struct cdp_lro_hash_config lro_hash;
2464 
2465 	if (!wlan_cfg_is_lro_enabled(soc->wlan_cfg_ctx) &&
2466 		!wlan_cfg_is_rx_hash_enabled(soc->wlan_cfg_ctx)) {
2467 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2468 			 FL("LRO disabled RX hash disabled"));
2469 		return;
2470 	}
2471 
2472 	qdf_mem_zero(&lro_hash, sizeof(lro_hash));
2473 
2474 	if (wlan_cfg_is_lro_enabled(soc->wlan_cfg_ctx)) {
2475 		lro_hash.lro_enable = 1;
2476 		lro_hash.tcp_flag = QDF_TCPHDR_ACK;
2477 		lro_hash.tcp_flag_mask = QDF_TCPHDR_FIN | QDF_TCPHDR_SYN |
2478 			 QDF_TCPHDR_RST | QDF_TCPHDR_ACK | QDF_TCPHDR_URG |
2479 			 QDF_TCPHDR_ECE | QDF_TCPHDR_CWR;
2480 	}
2481 
2482 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW, FL("enabled"));
2483 	qdf_get_random_bytes(lro_hash.toeplitz_hash_ipv4,
2484 		 (sizeof(lro_hash.toeplitz_hash_ipv4[0]) *
2485 		 LRO_IPV4_SEED_ARR_SZ));
2486 	qdf_get_random_bytes(lro_hash.toeplitz_hash_ipv6,
2487 		 (sizeof(lro_hash.toeplitz_hash_ipv6[0]) *
2488 		 LRO_IPV6_SEED_ARR_SZ));
2489 
2490 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW,
2491 		 "lro_hash: lro_enable: 0x%x tcp_flag 0x%x tcp_flag_mask 0x%x",
2492 		 lro_hash.lro_enable, lro_hash.tcp_flag,
2493 		 lro_hash.tcp_flag_mask);
2494 
2495 	qdf_trace_hex_dump(QDF_MODULE_ID_DP,
2496 		 QDF_TRACE_LEVEL_ERROR,
2497 		 (void *)lro_hash.toeplitz_hash_ipv4,
2498 		 (sizeof(lro_hash.toeplitz_hash_ipv4[0]) *
2499 		 LRO_IPV4_SEED_ARR_SZ));
2500 
2501 	qdf_trace_hex_dump(QDF_MODULE_ID_DP,
2502 		 QDF_TRACE_LEVEL_ERROR,
2503 		 (void *)lro_hash.toeplitz_hash_ipv6,
2504 		 (sizeof(lro_hash.toeplitz_hash_ipv6[0]) *
2505 		 LRO_IPV6_SEED_ARR_SZ));
2506 
2507 	qdf_assert(soc->cdp_soc.ol_ops->lro_hash_config);
2508 
2509 	if (soc->cdp_soc.ol_ops->lro_hash_config)
2510 		(void)soc->cdp_soc.ol_ops->lro_hash_config
2511 			(soc->ctrl_psoc, &lro_hash);
2512 }
2513 
2514 /*
2515 * dp_rxdma_ring_setup() - configure the RX DMA rings
2516 * @soc: data path SoC handle
2517 * @pdev: Physical device handle
2518 *
2519 * Return: 0 - success, > 0 - failure
2520 */
2521 #ifdef QCA_HOST2FW_RXBUF_RING
2522 static int dp_rxdma_ring_setup(struct dp_soc *soc,
2523 	 struct dp_pdev *pdev)
2524 {
2525 	struct wlan_cfg_dp_pdev_ctxt *pdev_cfg_ctx;
2526 	int max_mac_rings;
2527 	int i;
2528 
2529 	pdev_cfg_ctx = pdev->wlan_cfg_ctx;
2530 	max_mac_rings = wlan_cfg_get_num_mac_rings(pdev_cfg_ctx);
2531 
2532 	for (i = 0; i < max_mac_rings; i++) {
2533 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
2534 			 "%s: pdev_id %d mac_id %d",
2535 			 __func__, pdev->pdev_id, i);
2536 		if (dp_srng_setup(soc, &pdev->rx_mac_buf_ring[i],
2537 			RXDMA_BUF, 1, i,
2538 			wlan_cfg_get_rx_dma_buf_ring_size(pdev_cfg_ctx))) {
2539 			QDF_TRACE(QDF_MODULE_ID_DP,
2540 				 QDF_TRACE_LEVEL_ERROR,
2541 				 FL("failed rx mac ring setup"));
2542 			return QDF_STATUS_E_FAILURE;
2543 		}
2544 	}
2545 	return QDF_STATUS_SUCCESS;
2546 }
2547 #else
2548 static int dp_rxdma_ring_setup(struct dp_soc *soc,
2549 	 struct dp_pdev *pdev)
2550 {
2551 	return QDF_STATUS_SUCCESS;
2552 }
2553 #endif
2554 
2555 /**
2556  * dp_dscp_tid_map_setup(): Initialize the dscp-tid maps
2557  * @pdev - DP_PDEV handle
2558  *
2559  * Return: void
2560  */
2561 static inline void
2562 dp_dscp_tid_map_setup(struct dp_pdev *pdev)
2563 {
2564 	uint8_t map_id;
2565 	for (map_id = 0; map_id < DP_MAX_TID_MAPS; map_id++) {
2566 		qdf_mem_copy(pdev->dscp_tid_map[map_id], default_dscp_tid_map,
2567 				sizeof(default_dscp_tid_map));
2568 	}
2569 	for (map_id = 0; map_id < HAL_MAX_HW_DSCP_TID_MAPS; map_id++) {
2570 		hal_tx_set_dscp_tid_map(pdev->soc->hal_soc,
2571 				pdev->dscp_tid_map[map_id],
2572 				map_id);
2573 	}
2574 }
2575 
2576 #ifdef QCA_SUPPORT_SON
2577 /**
2578  * dp_mark_peer_inact(): Update peer inactivity status
2579  * @peer_handle - datapath peer handle
2580  *
2581  * Return: void
2582  */
2583 void dp_mark_peer_inact(void *peer_handle, bool inactive)
2584 {
2585 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
2586 	struct dp_pdev *pdev;
2587 	struct dp_soc *soc;
2588 	bool inactive_old;
2589 
2590 	if (!peer)
2591 		return;
2592 
2593 	pdev = peer->vdev->pdev;
2594 	soc = pdev->soc;
2595 
2596 	inactive_old = peer->peer_bs_inact_flag == 1;
2597 	if (!inactive)
2598 		peer->peer_bs_inact = soc->pdev_bs_inact_reload;
2599 	peer->peer_bs_inact_flag = inactive ? 1 : 0;
2600 
2601 	if (inactive_old != inactive) {
2602 		/**
2603 		 * Note: a node lookup can happen in RX datapath context
2604 		 * when a node changes from inactive to active (at most once
2605 		 * per inactivity timeout threshold)
2606 		 */
2607 		if (soc->cdp_soc.ol_ops->record_act_change) {
2608 			soc->cdp_soc.ol_ops->record_act_change(
2609 					(void *)pdev->ctrl_pdev,
2610 					peer->mac_addr.raw, !inactive);
2611 		}
2612 	}
2613 }
2614 
2615 /**
2616  * dp_txrx_peer_find_inact_timeout_handler(): Inactivity timeout function
2617  *
2618  * Periodically checks the inactivity status
2619  */
2620 static os_timer_func(dp_txrx_peer_find_inact_timeout_handler)
2621 {
2622 	struct dp_pdev *pdev;
2623 	struct dp_vdev *vdev;
2624 	struct dp_peer *peer;
2625 	struct dp_soc *soc;
2626 	int i;
2627 
2628 	OS_GET_TIMER_ARG(soc, struct dp_soc *);
2629 
2630 	qdf_spin_lock(&soc->peer_ref_mutex);
2631 
2632 	for (i = 0; i < soc->pdev_count; i++) {
2633 	pdev = soc->pdev_list[i];
2634 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
2635 	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
2636 		if (vdev->opmode != wlan_op_mode_ap)
2637 			continue;
2638 
2639 		TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
2640 			if (!peer->authorize) {
2641 				/**
2642 				 * Inactivity check only interested in
2643 				 * connected node
2644 				 */
2645 				continue;
2646 			}
2647 			if (peer->peer_bs_inact > soc->pdev_bs_inact_reload) {
2648 				/**
2649 				 * This check ensures we do not wait extra long
2650 				 * due to the potential race condition
2651 				 */
2652 				peer->peer_bs_inact = soc->pdev_bs_inact_reload;
2653 			}
2654 			if (peer->peer_bs_inact > 0) {
2655 				/* Do not let it wrap around */
2656 				peer->peer_bs_inact--;
2657 			}
2658 			if (peer->peer_bs_inact == 0)
2659 				dp_mark_peer_inact(peer, true);
2660 		}
2661 	}
2662 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
2663 	}
2664 
2665 	qdf_spin_unlock(&soc->peer_ref_mutex);
2666 	qdf_timer_mod(&soc->pdev_bs_inact_timer,
2667 		      soc->pdev_bs_inact_interval * 1000);
2668 }
2669 
2670 
2671 /**
2672  * dp_free_inact_timer(): free inact timer
2673  * @timer - inact timer handle
2674  *
2675  * Return: bool
2676  */
2677 void dp_free_inact_timer(struct dp_soc *soc)
2678 {
2679 	qdf_timer_free(&soc->pdev_bs_inact_timer);
2680 }
2681 #else
2682 
2683 void dp_mark_peer_inact(void *peer, bool inactive)
2684 {
2685 	return;
2686 }
2687 
2688 void dp_free_inact_timer(struct dp_soc *soc)
2689 {
2690 	return;
2691 }
2692 
2693 #endif
2694 
2695 #ifdef IPA_OFFLOAD
2696 /**
2697  * dp_setup_ipa_rx_refill_buf_ring - Setup second Rx refill buffer ring
2698  * @soc: data path instance
2699  * @pdev: core txrx pdev context
2700  *
2701  * Return: QDF_STATUS_SUCCESS: success
2702  *         QDF_STATUS_E_RESOURCES: Error return
2703  */
2704 static int dp_setup_ipa_rx_refill_buf_ring(struct dp_soc *soc,
2705 					   struct dp_pdev *pdev)
2706 {
2707 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
2708 	int entries;
2709 
2710 	soc_cfg_ctx = soc->wlan_cfg_ctx;
2711 	entries = wlan_cfg_get_dp_soc_rxdma_refill_ring_size(soc_cfg_ctx);
2712 
2713 	/* Setup second Rx refill buffer ring */
2714 	if (dp_srng_setup(soc, &pdev->rx_refill_buf_ring2, RXDMA_BUF,
2715 			  IPA_RX_REFILL_BUF_RING_IDX,
2716 			  pdev->pdev_id,
2717 			  entries)) {
2718 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2719 			FL("dp_srng_setup failed second rx refill ring"));
2720 		return QDF_STATUS_E_FAILURE;
2721 	}
2722 	return QDF_STATUS_SUCCESS;
2723 }
2724 
2725 /**
2726  * dp_cleanup_ipa_rx_refill_buf_ring - Cleanup second Rx refill buffer ring
2727  * @soc: data path instance
2728  * @pdev: core txrx pdev context
2729  *
2730  * Return: void
2731  */
2732 static void dp_cleanup_ipa_rx_refill_buf_ring(struct dp_soc *soc,
2733 					      struct dp_pdev *pdev)
2734 {
2735 	dp_srng_cleanup(soc, &pdev->rx_refill_buf_ring2, RXDMA_BUF,
2736 			IPA_RX_REFILL_BUF_RING_IDX);
2737 }
2738 
2739 #else
2740 static int dp_setup_ipa_rx_refill_buf_ring(struct dp_soc *soc,
2741 					   struct dp_pdev *pdev)
2742 {
2743 	return QDF_STATUS_SUCCESS;
2744 }
2745 
2746 static void dp_cleanup_ipa_rx_refill_buf_ring(struct dp_soc *soc,
2747 					      struct dp_pdev *pdev)
2748 {
2749 }
2750 #endif
2751 
2752 #ifndef QCA_WIFI_QCA6390
2753 static
2754 QDF_STATUS dp_mon_rings_setup(struct dp_soc *soc, struct dp_pdev *pdev)
2755 {
2756 	int mac_id = 0;
2757 	int pdev_id = pdev->pdev_id;
2758 	int entries;
2759 	struct wlan_cfg_dp_pdev_ctxt *pdev_cfg_ctx;
2760 
2761 	pdev_cfg_ctx = pdev->wlan_cfg_ctx;
2762 
2763 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
2764 		int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, pdev_id);
2765 
2766 		entries = wlan_cfg_get_dma_mon_buf_ring_size(pdev_cfg_ctx);
2767 		if (dp_srng_setup(soc, &pdev->rxdma_mon_buf_ring[mac_id],
2768 				  RXDMA_MONITOR_BUF, 0, mac_for_pdev,
2769 				  entries)) {
2770 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2771 				  FL(RNG_ERR "rxdma_mon_buf_ring "));
2772 			return QDF_STATUS_E_NOMEM;
2773 		}
2774 
2775 		entries = wlan_cfg_get_dma_mon_dest_ring_size(pdev_cfg_ctx);
2776 		if (dp_srng_setup(soc, &pdev->rxdma_mon_dst_ring[mac_id],
2777 				  RXDMA_MONITOR_DST, 0, mac_for_pdev,
2778 				  entries)) {
2779 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2780 				  FL(RNG_ERR "rxdma_mon_dst_ring"));
2781 			return QDF_STATUS_E_NOMEM;
2782 		}
2783 
2784 		entries = wlan_cfg_get_dma_mon_stat_ring_size(pdev_cfg_ctx);
2785 		if (dp_srng_setup(soc, &pdev->rxdma_mon_status_ring[mac_id],
2786 				  RXDMA_MONITOR_STATUS, 0, mac_for_pdev,
2787 				  entries)) {
2788 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2789 				  FL(RNG_ERR "rxdma_mon_status_ring"));
2790 			return QDF_STATUS_E_NOMEM;
2791 		}
2792 
2793 		entries = wlan_cfg_get_dma_mon_desc_ring_size(pdev_cfg_ctx);
2794 		if (dp_srng_setup(soc, &pdev->rxdma_mon_desc_ring[mac_id],
2795 				  RXDMA_MONITOR_DESC, 0, mac_for_pdev,
2796 				  entries)) {
2797 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2798 				  FL(RNG_ERR "rxdma_mon_desc_ring"));
2799 			return QDF_STATUS_E_NOMEM;
2800 		}
2801 	}
2802 	return QDF_STATUS_SUCCESS;
2803 }
2804 #else
2805 static QDF_STATUS dp_mon_rings_setup(struct dp_soc *soc, struct dp_pdev *pdev)
2806 {
2807 	return QDF_STATUS_SUCCESS;
2808 }
2809 #endif
2810 
2811 /*dp_iterate_update_peer_list - update peer stats on cal client timer
2812  * @pdev_hdl: pdev handle
2813  */
2814 #ifdef ATH_SUPPORT_EXT_STAT
2815 void  dp_iterate_update_peer_list(void *pdev_hdl)
2816 {
2817 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_hdl;
2818 	struct dp_vdev *vdev = NULL;
2819 	struct dp_peer *peer = NULL;
2820 
2821 	DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
2822 		DP_VDEV_ITERATE_PEER_LIST(vdev, peer) {
2823 			dp_cal_client_update_peer_stats(&peer->stats);
2824 		}
2825 	}
2826 }
2827 #else
2828 void  dp_iterate_update_peer_list(void *pdev_hdl)
2829 {
2830 }
2831 #endif
2832 
2833 /*
2834 * dp_pdev_attach_wifi3() - attach txrx pdev
2835 * @ctrl_pdev: Opaque PDEV object
2836 * @txrx_soc: Datapath SOC handle
2837 * @htc_handle: HTC handle for host-target interface
2838 * @qdf_osdev: QDF OS device
2839 * @pdev_id: PDEV ID
2840 *
2841 * Return: DP PDEV handle on success, NULL on failure
2842 */
2843 static struct cdp_pdev *dp_pdev_attach_wifi3(struct cdp_soc_t *txrx_soc,
2844 	struct cdp_ctrl_objmgr_pdev *ctrl_pdev,
2845 	HTC_HANDLE htc_handle, qdf_device_t qdf_osdev, uint8_t pdev_id)
2846 {
2847 	int tx_ring_size;
2848 	int tx_comp_ring_size;
2849 	int reo_dst_ring_size;
2850 	int entries;
2851 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
2852 	int nss_cfg;
2853 
2854 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
2855 	struct dp_pdev *pdev = qdf_mem_malloc(sizeof(*pdev));
2856 
2857 	if (!pdev) {
2858 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2859 			FL("DP PDEV memory allocation failed"));
2860 		goto fail0;
2861 	}
2862 
2863 	soc_cfg_ctx = soc->wlan_cfg_ctx;
2864 	pdev->wlan_cfg_ctx = wlan_cfg_pdev_attach(soc->ctrl_psoc);
2865 
2866 	if (!pdev->wlan_cfg_ctx) {
2867 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2868 			FL("pdev cfg_attach failed"));
2869 
2870 		qdf_mem_free(pdev);
2871 		goto fail0;
2872 	}
2873 
2874 	/*
2875 	 * set nss pdev config based on soc config
2876 	 */
2877 	nss_cfg = wlan_cfg_get_dp_soc_nss_cfg(soc_cfg_ctx);
2878 	wlan_cfg_set_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx,
2879 			(nss_cfg & (1 << pdev_id)));
2880 
2881 	pdev->soc = soc;
2882 	pdev->ctrl_pdev = ctrl_pdev;
2883 	pdev->pdev_id = pdev_id;
2884 	soc->pdev_list[pdev_id] = pdev;
2885 	soc->pdev_count++;
2886 
2887 	TAILQ_INIT(&pdev->vdev_list);
2888 	qdf_spinlock_create(&pdev->vdev_list_lock);
2889 	pdev->vdev_count = 0;
2890 
2891 	qdf_spinlock_create(&pdev->tx_mutex);
2892 	qdf_spinlock_create(&pdev->neighbour_peer_mutex);
2893 	TAILQ_INIT(&pdev->neighbour_peers_list);
2894 	pdev->neighbour_peers_added = false;
2895 
2896 	if (dp_soc_cmn_setup(soc)) {
2897 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2898 			FL("dp_soc_cmn_setup failed"));
2899 		goto fail1;
2900 	}
2901 
2902 	/* Setup per PDEV TCL rings if configured */
2903 	if (wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
2904 		tx_ring_size =
2905 			wlan_cfg_tx_ring_size(soc_cfg_ctx);
2906 		tx_comp_ring_size =
2907 			wlan_cfg_tx_comp_ring_size(soc_cfg_ctx);
2908 
2909 		if (dp_srng_setup(soc, &soc->tcl_data_ring[pdev_id], TCL_DATA,
2910 			pdev_id, pdev_id, tx_ring_size)) {
2911 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2912 				FL("dp_srng_setup failed for tcl_data_ring"));
2913 			goto fail1;
2914 		}
2915 		if (dp_srng_setup(soc, &soc->tx_comp_ring[pdev_id],
2916 			WBM2SW_RELEASE, pdev_id, pdev_id, tx_comp_ring_size)) {
2917 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2918 				FL("dp_srng_setup failed for tx_comp_ring"));
2919 			goto fail1;
2920 		}
2921 		soc->num_tcl_data_rings++;
2922 	}
2923 
2924 	/* Tx specific init */
2925 	if (dp_tx_pdev_attach(pdev)) {
2926 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2927 			FL("dp_tx_pdev_attach failed"));
2928 		goto fail1;
2929 	}
2930 
2931 	reo_dst_ring_size = wlan_cfg_get_reo_dst_ring_size(soc->wlan_cfg_ctx);
2932 	/* Setup per PDEV REO rings if configured */
2933 	if (wlan_cfg_per_pdev_rx_ring(soc_cfg_ctx)) {
2934 		if (dp_srng_setup(soc, &soc->reo_dest_ring[pdev_id], REO_DST,
2935 			pdev_id, pdev_id, reo_dst_ring_size)) {
2936 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2937 				FL("dp_srng_setup failed for reo_dest_ringn"));
2938 			goto fail1;
2939 		}
2940 		soc->num_reo_dest_rings++;
2941 
2942 	}
2943 	if (dp_srng_setup(soc, &pdev->rx_refill_buf_ring, RXDMA_BUF, 0, pdev_id,
2944 		wlan_cfg_get_dp_soc_rxdma_refill_ring_size(soc_cfg_ctx))) {
2945 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2946 			 FL("dp_srng_setup failed rx refill ring"));
2947 		goto fail1;
2948 	}
2949 
2950 	if (dp_rxdma_ring_setup(soc, pdev)) {
2951 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2952 			 FL("RXDMA ring config failed"));
2953 		goto fail1;
2954 	}
2955 
2956 	if (dp_mon_rings_setup(soc, pdev)) {
2957 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2958 			  FL("MONITOR rings setup failed"));
2959 		goto fail1;
2960 	}
2961 
2962 	entries = wlan_cfg_get_dp_soc_rxdma_err_dst_ring_size(soc_cfg_ctx);
2963 	if (wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx)) {
2964 		if (dp_srng_setup(soc, &pdev->rxdma_err_dst_ring[0], RXDMA_DST,
2965 				  0, pdev_id,
2966 				  entries)) {
2967 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2968 				  FL(RNG_ERR "rxdma_err_dst_ring"));
2969 			goto fail1;
2970 		}
2971 	}
2972 
2973 	if (dp_setup_ipa_rx_refill_buf_ring(soc, pdev))
2974 		goto fail1;
2975 
2976 	if (dp_ipa_ring_resource_setup(soc, pdev))
2977 		goto fail1;
2978 
2979 	if (dp_ipa_uc_attach(soc, pdev) != QDF_STATUS_SUCCESS) {
2980 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2981 			FL("dp_ipa_uc_attach failed"));
2982 		goto fail1;
2983 	}
2984 
2985 	/* Rx specific init */
2986 	if (dp_rx_pdev_attach(pdev)) {
2987 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2988 			FL("dp_rx_pdev_attach failed"));
2989 		goto fail0;
2990 	}
2991 	DP_STATS_INIT(pdev);
2992 
2993 	/* Monitor filter init */
2994 	pdev->mon_filter_mode = MON_FILTER_ALL;
2995 	pdev->fp_mgmt_filter = FILTER_MGMT_ALL;
2996 	pdev->fp_ctrl_filter = FILTER_CTRL_ALL;
2997 	pdev->fp_data_filter = FILTER_DATA_ALL;
2998 	pdev->mo_mgmt_filter = FILTER_MGMT_ALL;
2999 	pdev->mo_ctrl_filter = FILTER_CTRL_ALL;
3000 	pdev->mo_data_filter = FILTER_DATA_ALL;
3001 
3002 	dp_local_peer_id_pool_init(pdev);
3003 
3004 	dp_dscp_tid_map_setup(pdev);
3005 
3006 	/* Rx monitor mode specific init */
3007 	if (dp_rx_pdev_mon_attach(pdev)) {
3008 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
3009 				"dp_rx_pdev_attach failed");
3010 		goto fail1;
3011 	}
3012 
3013 	if (dp_wdi_event_attach(pdev)) {
3014 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
3015 				"dp_wdi_evet_attach failed");
3016 		goto fail1;
3017 	}
3018 
3019 	/* set the reo destination during initialization */
3020 	pdev->reo_dest = pdev->pdev_id + 1;
3021 
3022 	/*
3023 	 * initialize ppdu tlv list
3024 	 */
3025 	TAILQ_INIT(&pdev->ppdu_info_list);
3026 	pdev->tlv_count = 0;
3027 	pdev->list_depth = 0;
3028 
3029 	qdf_mem_zero(&pdev->sojourn_stats, sizeof(struct cdp_tx_sojourn_stats));
3030 
3031 	pdev->sojourn_buf = qdf_nbuf_alloc(pdev->soc->osdev,
3032 			      sizeof(struct cdp_tx_sojourn_stats), 0, 4,
3033 			      TRUE);
3034 
3035 	/* initlialize cal client timer */
3036 	dp_cal_client_attach(&pdev->cal_client_ctx, pdev, pdev->soc->osdev,
3037 			     &dp_iterate_update_peer_list);
3038 
3039 	return (struct cdp_pdev *)pdev;
3040 
3041 fail1:
3042 	dp_pdev_detach_wifi3((struct cdp_pdev *)pdev, 0);
3043 
3044 fail0:
3045 	return NULL;
3046 }
3047 
3048 /*
3049 * dp_rxdma_ring_cleanup() - configure the RX DMA rings
3050 * @soc: data path SoC handle
3051 * @pdev: Physical device handle
3052 *
3053 * Return: void
3054 */
3055 #ifdef QCA_HOST2FW_RXBUF_RING
3056 static void dp_rxdma_ring_cleanup(struct dp_soc *soc,
3057 	 struct dp_pdev *pdev)
3058 {
3059 	int max_mac_rings =
3060 		 wlan_cfg_get_num_mac_rings(pdev->wlan_cfg_ctx);
3061 	int i;
3062 
3063 	max_mac_rings = max_mac_rings < MAX_RX_MAC_RINGS ?
3064 				max_mac_rings : MAX_RX_MAC_RINGS;
3065 	for (i = 0; i < MAX_RX_MAC_RINGS; i++)
3066 		dp_srng_cleanup(soc, &pdev->rx_mac_buf_ring[i],
3067 			 RXDMA_BUF, 1);
3068 
3069 	qdf_timer_free(&soc->mon_reap_timer);
3070 }
3071 #else
3072 static void dp_rxdma_ring_cleanup(struct dp_soc *soc,
3073 	 struct dp_pdev *pdev)
3074 {
3075 }
3076 #endif
3077 
3078 /*
3079  * dp_neighbour_peers_detach() - Detach neighbour peers(nac clients)
3080  * @pdev: device object
3081  *
3082  * Return: void
3083  */
3084 static void dp_neighbour_peers_detach(struct dp_pdev *pdev)
3085 {
3086 	struct dp_neighbour_peer *peer = NULL;
3087 	struct dp_neighbour_peer *temp_peer = NULL;
3088 
3089 	TAILQ_FOREACH_SAFE(peer, &pdev->neighbour_peers_list,
3090 			neighbour_peer_list_elem, temp_peer) {
3091 		/* delete this peer from the list */
3092 		TAILQ_REMOVE(&pdev->neighbour_peers_list,
3093 				peer, neighbour_peer_list_elem);
3094 		qdf_mem_free(peer);
3095 	}
3096 
3097 	qdf_spinlock_destroy(&pdev->neighbour_peer_mutex);
3098 }
3099 
3100 /**
3101 * dp_htt_ppdu_stats_detach() - detach stats resources
3102 * @pdev: Datapath PDEV handle
3103 *
3104 * Return: void
3105 */
3106 static void dp_htt_ppdu_stats_detach(struct dp_pdev *pdev)
3107 {
3108 	struct ppdu_info *ppdu_info, *ppdu_info_next;
3109 
3110 	TAILQ_FOREACH_SAFE(ppdu_info, &pdev->ppdu_info_list,
3111 			ppdu_info_list_elem, ppdu_info_next) {
3112 		if (!ppdu_info)
3113 			break;
3114 		qdf_assert_always(ppdu_info->nbuf);
3115 		qdf_nbuf_free(ppdu_info->nbuf);
3116 		qdf_mem_free(ppdu_info);
3117 	}
3118 }
3119 
3120 #ifndef QCA_WIFI_QCA6390
3121 static
3122 void dp_mon_ring_deinit(struct dp_soc *soc, struct dp_pdev *pdev,
3123 			int mac_id)
3124 {
3125 		dp_srng_cleanup(soc, &pdev->rxdma_mon_buf_ring[mac_id],
3126 				RXDMA_MONITOR_BUF, 0);
3127 		dp_srng_cleanup(soc, &pdev->rxdma_mon_dst_ring[mac_id],
3128 				RXDMA_MONITOR_DST, 0);
3129 
3130 		dp_srng_cleanup(soc, &pdev->rxdma_mon_status_ring[mac_id],
3131 				RXDMA_MONITOR_STATUS, 0);
3132 
3133 		dp_srng_cleanup(soc, &pdev->rxdma_mon_desc_ring[mac_id],
3134 				RXDMA_MONITOR_DESC, 0);
3135 		dp_srng_cleanup(soc, &pdev->rxdma_err_dst_ring[mac_id],
3136 				RXDMA_DST, 0);
3137 }
3138 #else
3139 static void dp_mon_ring_deinit(struct dp_soc *soc, struct dp_pdev *pdev,
3140 			       int mac_id)
3141 {
3142 }
3143 #endif
3144 
3145 /*
3146 * dp_pdev_detach_wifi3() - detach txrx pdev
3147 * @txrx_pdev: Datapath PDEV handle
3148 * @force: Force detach
3149 *
3150 */
3151 static void dp_pdev_detach_wifi3(struct cdp_pdev *txrx_pdev, int force)
3152 {
3153 	struct dp_pdev *pdev = (struct dp_pdev *)txrx_pdev;
3154 	struct dp_soc *soc = pdev->soc;
3155 	qdf_nbuf_t curr_nbuf, next_nbuf;
3156 	int mac_id;
3157 
3158 	dp_wdi_event_detach(pdev);
3159 
3160 	dp_tx_pdev_detach(pdev);
3161 
3162 	if (wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
3163 		dp_srng_cleanup(soc, &soc->tcl_data_ring[pdev->pdev_id],
3164 			TCL_DATA, pdev->pdev_id);
3165 		dp_srng_cleanup(soc, &soc->tx_comp_ring[pdev->pdev_id],
3166 			WBM2SW_RELEASE, pdev->pdev_id);
3167 	}
3168 
3169 	dp_pktlogmod_exit(pdev);
3170 
3171 	dp_rx_pdev_detach(pdev);
3172 	dp_rx_pdev_mon_detach(pdev);
3173 	dp_neighbour_peers_detach(pdev);
3174 	qdf_spinlock_destroy(&pdev->tx_mutex);
3175 	qdf_spinlock_destroy(&pdev->vdev_list_lock);
3176 
3177 	dp_ipa_uc_detach(soc, pdev);
3178 
3179 	dp_cleanup_ipa_rx_refill_buf_ring(soc, pdev);
3180 
3181 	/* Cleanup per PDEV REO rings if configured */
3182 	if (wlan_cfg_per_pdev_rx_ring(soc->wlan_cfg_ctx)) {
3183 		dp_srng_cleanup(soc, &soc->reo_dest_ring[pdev->pdev_id],
3184 			REO_DST, pdev->pdev_id);
3185 	}
3186 
3187 	dp_srng_cleanup(soc, &pdev->rx_refill_buf_ring, RXDMA_BUF, 0);
3188 
3189 	dp_rxdma_ring_cleanup(soc, pdev);
3190 
3191 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
3192 		dp_mon_ring_deinit(soc, pdev, mac_id);
3193 		dp_srng_cleanup(soc, &pdev->rxdma_err_dst_ring[mac_id],
3194 			RXDMA_DST, 0);
3195 	}
3196 
3197 	curr_nbuf = pdev->invalid_peer_head_msdu;
3198 	while (curr_nbuf) {
3199 		next_nbuf = qdf_nbuf_next(curr_nbuf);
3200 		qdf_nbuf_free(curr_nbuf);
3201 		curr_nbuf = next_nbuf;
3202 	}
3203 
3204 	dp_htt_ppdu_stats_detach(pdev);
3205 
3206 	qdf_nbuf_free(pdev->sojourn_buf);
3207 
3208 	dp_cal_client_detach(&pdev->cal_client_ctx);
3209 	soc->pdev_list[pdev->pdev_id] = NULL;
3210 	soc->pdev_count--;
3211 	wlan_cfg_pdev_detach(pdev->wlan_cfg_ctx);
3212 	qdf_mem_free(pdev->dp_txrx_handle);
3213 	qdf_mem_free(pdev);
3214 }
3215 
3216 /*
3217  * dp_reo_desc_freelist_destroy() - Flush REO descriptors from deferred freelist
3218  * @soc: DP SOC handle
3219  */
3220 static inline void dp_reo_desc_freelist_destroy(struct dp_soc *soc)
3221 {
3222 	struct reo_desc_list_node *desc;
3223 	struct dp_rx_tid *rx_tid;
3224 
3225 	qdf_spin_lock_bh(&soc->reo_desc_freelist_lock);
3226 	while (qdf_list_remove_front(&soc->reo_desc_freelist,
3227 		(qdf_list_node_t **)&desc) == QDF_STATUS_SUCCESS) {
3228 		rx_tid = &desc->rx_tid;
3229 		qdf_mem_unmap_nbytes_single(soc->osdev,
3230 			rx_tid->hw_qdesc_paddr,
3231 			QDF_DMA_BIDIRECTIONAL,
3232 			rx_tid->hw_qdesc_alloc_size);
3233 		qdf_mem_free(rx_tid->hw_qdesc_vaddr_unaligned);
3234 		qdf_mem_free(desc);
3235 	}
3236 	qdf_spin_unlock_bh(&soc->reo_desc_freelist_lock);
3237 	qdf_list_destroy(&soc->reo_desc_freelist);
3238 	qdf_spinlock_destroy(&soc->reo_desc_freelist_lock);
3239 }
3240 
3241 /*
3242  * dp_soc_detach_wifi3() - Detach txrx SOC
3243  * @txrx_soc: DP SOC handle, struct cdp_soc_t is first element of struct dp_soc.
3244  */
3245 static void dp_soc_detach_wifi3(void *txrx_soc)
3246 {
3247 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
3248 	int i;
3249 
3250 	qdf_atomic_set(&soc->cmn_init_done, 0);
3251 
3252 	qdf_flush_work(&soc->htt_stats.work);
3253 	qdf_disable_work(&soc->htt_stats.work);
3254 
3255 	/* Free pending htt stats messages */
3256 	qdf_nbuf_queue_free(&soc->htt_stats.msg);
3257 
3258 	dp_free_inact_timer(soc);
3259 
3260 	for (i = 0; i < MAX_PDEV_CNT; i++) {
3261 		if (soc->pdev_list[i])
3262 			dp_pdev_detach_wifi3(
3263 				(struct cdp_pdev *)soc->pdev_list[i], 1);
3264 	}
3265 
3266 	dp_peer_find_detach(soc);
3267 
3268 	/* TBD: Call Tx and Rx cleanup functions to free buffers and
3269 	 * SW descriptors
3270 	 */
3271 
3272 	/* Free the ring memories */
3273 	/* Common rings */
3274 	dp_srng_cleanup(soc, &soc->wbm_desc_rel_ring, SW2WBM_RELEASE, 0);
3275 
3276 	dp_tx_soc_detach(soc);
3277 	/* Tx data rings */
3278 	if (!wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
3279 		for (i = 0; i < soc->num_tcl_data_rings; i++) {
3280 			dp_srng_cleanup(soc, &soc->tcl_data_ring[i],
3281 				TCL_DATA, i);
3282 			dp_srng_cleanup(soc, &soc->tx_comp_ring[i],
3283 				WBM2SW_RELEASE, i);
3284 		}
3285 	}
3286 
3287 	/* TCL command and status rings */
3288 	dp_srng_cleanup(soc, &soc->tcl_cmd_ring, TCL_CMD, 0);
3289 	dp_srng_cleanup(soc, &soc->tcl_status_ring, TCL_STATUS, 0);
3290 
3291 	/* Rx data rings */
3292 	if (!wlan_cfg_per_pdev_rx_ring(soc->wlan_cfg_ctx)) {
3293 		soc->num_reo_dest_rings =
3294 			wlan_cfg_num_reo_dest_rings(soc->wlan_cfg_ctx);
3295 		for (i = 0; i < soc->num_reo_dest_rings; i++) {
3296 			/* TODO: Get number of rings and ring sizes
3297 			 * from wlan_cfg
3298 			 */
3299 			dp_srng_cleanup(soc, &soc->reo_dest_ring[i],
3300 				REO_DST, i);
3301 		}
3302 	}
3303 	/* REO reinjection ring */
3304 	dp_srng_cleanup(soc, &soc->reo_reinject_ring, REO_REINJECT, 0);
3305 
3306 	/* Rx release ring */
3307 	dp_srng_cleanup(soc, &soc->rx_rel_ring, WBM2SW_RELEASE, 0);
3308 
3309 	/* Rx exception ring */
3310 	/* TODO: Better to store ring_type and ring_num in
3311 	 * dp_srng during setup
3312 	 */
3313 	dp_srng_cleanup(soc, &soc->reo_exception_ring, REO_EXCEPTION, 0);
3314 
3315 	/* REO command and status rings */
3316 	dp_srng_cleanup(soc, &soc->reo_cmd_ring, REO_CMD, 0);
3317 	dp_srng_cleanup(soc, &soc->reo_status_ring, REO_STATUS, 0);
3318 	dp_hw_link_desc_pool_cleanup(soc);
3319 
3320 	qdf_spinlock_destroy(&soc->peer_ref_mutex);
3321 	qdf_spinlock_destroy(&soc->htt_stats.lock);
3322 
3323 	htt_soc_detach(soc->htt_handle);
3324 
3325 	qdf_spinlock_destroy(&soc->rx.defrag.defrag_lock);
3326 
3327 	dp_reo_cmdlist_destroy(soc);
3328 	qdf_spinlock_destroy(&soc->rx.reo_cmd_lock);
3329 	dp_reo_desc_freelist_destroy(soc);
3330 
3331 	wlan_cfg_soc_detach(soc->wlan_cfg_ctx);
3332 
3333 	dp_soc_wds_detach(soc);
3334 	qdf_spinlock_destroy(&soc->ast_lock);
3335 
3336 	qdf_mem_free(soc);
3337 }
3338 
3339 #ifndef QCA_WIFI_QCA6390
3340 static void dp_mon_htt_srng_setup(struct dp_soc *soc,
3341 				  struct dp_pdev *pdev,
3342 				  int mac_id,
3343 				  int mac_for_pdev)
3344 {
3345 	htt_srng_setup(soc->htt_handle, mac_for_pdev,
3346 		       pdev->rxdma_mon_buf_ring[mac_id].hal_srng,
3347 		       RXDMA_MONITOR_BUF);
3348 
3349 	htt_srng_setup(soc->htt_handle, mac_for_pdev,
3350 		       pdev->rxdma_mon_dst_ring[mac_id].hal_srng,
3351 		       RXDMA_MONITOR_DST);
3352 
3353 	htt_srng_setup(soc->htt_handle, mac_for_pdev,
3354 		       pdev->rxdma_mon_status_ring[mac_id].hal_srng,
3355 		       RXDMA_MONITOR_STATUS);
3356 
3357 	htt_srng_setup(soc->htt_handle, mac_for_pdev,
3358 		       pdev->rxdma_mon_desc_ring[mac_id].hal_srng,
3359 		       RXDMA_MONITOR_DESC);
3360 }
3361 #else
3362 static void dp_mon_htt_srng_setup(struct dp_soc *soc,
3363 				  struct dp_pdev *pdev,
3364 				  int mac_id,
3365 				  int mac_for_pdev)
3366 {
3367 }
3368 #endif
3369 /*
3370  * dp_rxdma_ring_config() - configure the RX DMA rings
3371  *
3372  * This function is used to configure the MAC rings.
3373  * On MCL host provides buffers in Host2FW ring
3374  * FW refills (copies) buffers to the ring and updates
3375  * ring_idx in register
3376  *
3377  * @soc: data path SoC handle
3378  *
3379  * Return: void
3380  */
3381 #ifdef QCA_HOST2FW_RXBUF_RING
3382 static void dp_rxdma_ring_config(struct dp_soc *soc)
3383 {
3384 	int i;
3385 
3386 	for (i = 0; i < MAX_PDEV_CNT; i++) {
3387 		struct dp_pdev *pdev = soc->pdev_list[i];
3388 
3389 		if (pdev) {
3390 			int mac_id;
3391 			bool dbs_enable = 0;
3392 			int max_mac_rings =
3393 				 wlan_cfg_get_num_mac_rings
3394 				(pdev->wlan_cfg_ctx);
3395 
3396 			htt_srng_setup(soc->htt_handle, 0,
3397 				 pdev->rx_refill_buf_ring.hal_srng,
3398 				 RXDMA_BUF);
3399 
3400 			if (pdev->rx_refill_buf_ring2.hal_srng)
3401 				htt_srng_setup(soc->htt_handle, 0,
3402 					pdev->rx_refill_buf_ring2.hal_srng,
3403 					RXDMA_BUF);
3404 
3405 			if (soc->cdp_soc.ol_ops->
3406 				is_hw_dbs_2x2_capable) {
3407 				dbs_enable = soc->cdp_soc.ol_ops->
3408 					is_hw_dbs_2x2_capable(soc->ctrl_psoc);
3409 			}
3410 
3411 			if (dbs_enable) {
3412 				QDF_TRACE(QDF_MODULE_ID_TXRX,
3413 				QDF_TRACE_LEVEL_ERROR,
3414 				FL("DBS enabled max_mac_rings %d"),
3415 					 max_mac_rings);
3416 			} else {
3417 				max_mac_rings = 1;
3418 				QDF_TRACE(QDF_MODULE_ID_TXRX,
3419 					 QDF_TRACE_LEVEL_ERROR,
3420 					 FL("DBS disabled, max_mac_rings %d"),
3421 					 max_mac_rings);
3422 			}
3423 
3424 			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
3425 					 FL("pdev_id %d max_mac_rings %d"),
3426 					 pdev->pdev_id, max_mac_rings);
3427 
3428 			for (mac_id = 0; mac_id < max_mac_rings; mac_id++) {
3429 				int mac_for_pdev = dp_get_mac_id_for_pdev(
3430 							mac_id, pdev->pdev_id);
3431 
3432 				QDF_TRACE(QDF_MODULE_ID_TXRX,
3433 					 QDF_TRACE_LEVEL_ERROR,
3434 					 FL("mac_id %d"), mac_for_pdev);
3435 				htt_srng_setup(soc->htt_handle, mac_for_pdev,
3436 					 pdev->rx_mac_buf_ring[mac_id]
3437 						.hal_srng,
3438 					 RXDMA_BUF);
3439 				htt_srng_setup(soc->htt_handle, mac_for_pdev,
3440 					pdev->rxdma_err_dst_ring[mac_id]
3441 						.hal_srng,
3442 					RXDMA_DST);
3443 
3444 				/* Configure monitor mode rings */
3445 				dp_mon_htt_srng_setup(soc, pdev, mac_id,
3446 						      mac_for_pdev);
3447 
3448 			}
3449 		}
3450 	}
3451 
3452 	/*
3453 	 * Timer to reap rxdma status rings.
3454 	 * Needed until we enable ppdu end interrupts
3455 	 */
3456 	qdf_timer_init(soc->osdev, &soc->mon_reap_timer,
3457 			dp_service_mon_rings, (void *)soc,
3458 			QDF_TIMER_TYPE_WAKE_APPS);
3459 	soc->reap_timer_init = 1;
3460 }
3461 #else
3462 /* This is only for WIN */
3463 static void dp_rxdma_ring_config(struct dp_soc *soc)
3464 {
3465 	int i;
3466 	int mac_id;
3467 
3468 	for (i = 0; i < MAX_PDEV_CNT; i++) {
3469 		struct dp_pdev *pdev = soc->pdev_list[i];
3470 
3471 		if (pdev == NULL)
3472 			continue;
3473 
3474 		for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
3475 			int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, i);
3476 
3477 			htt_srng_setup(soc->htt_handle, mac_for_pdev,
3478 				pdev->rx_refill_buf_ring.hal_srng, RXDMA_BUF);
3479 
3480 			htt_srng_setup(soc->htt_handle, mac_for_pdev,
3481 				pdev->rxdma_mon_buf_ring[mac_id].hal_srng,
3482 				RXDMA_MONITOR_BUF);
3483 			htt_srng_setup(soc->htt_handle, mac_for_pdev,
3484 				pdev->rxdma_mon_dst_ring[mac_id].hal_srng,
3485 				RXDMA_MONITOR_DST);
3486 			htt_srng_setup(soc->htt_handle, mac_for_pdev,
3487 				pdev->rxdma_mon_status_ring[mac_id].hal_srng,
3488 				RXDMA_MONITOR_STATUS);
3489 			htt_srng_setup(soc->htt_handle, mac_for_pdev,
3490 				pdev->rxdma_mon_desc_ring[mac_id].hal_srng,
3491 				RXDMA_MONITOR_DESC);
3492 			htt_srng_setup(soc->htt_handle, mac_for_pdev,
3493 				pdev->rxdma_err_dst_ring[mac_id].hal_srng,
3494 				RXDMA_DST);
3495 		}
3496 	}
3497 }
3498 #endif
3499 
3500 /*
3501  * dp_soc_attach_target_wifi3() - SOC initialization in the target
3502  * @txrx_soc: Datapath SOC handle
3503  */
3504 static int dp_soc_attach_target_wifi3(struct cdp_soc_t *cdp_soc)
3505 {
3506 	struct dp_soc *soc = (struct dp_soc *)cdp_soc;
3507 
3508 	htt_soc_attach_target(soc->htt_handle);
3509 
3510 	dp_rxdma_ring_config(soc);
3511 
3512 	DP_STATS_INIT(soc);
3513 
3514 	/* initialize work queue for stats processing */
3515 	qdf_create_work(0, &soc->htt_stats.work, htt_t2h_stats_handler, soc);
3516 
3517 	return 0;
3518 }
3519 
3520 /*
3521  * dp_soc_get_nss_cfg_wifi3() - SOC get nss config
3522  * @txrx_soc: Datapath SOC handle
3523  */
3524 static int dp_soc_get_nss_cfg_wifi3(struct cdp_soc_t *cdp_soc)
3525 {
3526 	struct dp_soc *dsoc = (struct dp_soc *)cdp_soc;
3527 	return wlan_cfg_get_dp_soc_nss_cfg(dsoc->wlan_cfg_ctx);
3528 }
3529 /*
3530  * dp_soc_set_nss_cfg_wifi3() - SOC set nss config
3531  * @txrx_soc: Datapath SOC handle
3532  * @nss_cfg: nss config
3533  */
3534 static void dp_soc_set_nss_cfg_wifi3(struct cdp_soc_t *cdp_soc, int config)
3535 {
3536 	struct dp_soc *dsoc = (struct dp_soc *)cdp_soc;
3537 	struct wlan_cfg_dp_soc_ctxt *wlan_cfg_ctx = dsoc->wlan_cfg_ctx;
3538 
3539 	wlan_cfg_set_dp_soc_nss_cfg(wlan_cfg_ctx, config);
3540 
3541 	/*
3542 	 * TODO: masked out based on the per offloaded radio
3543 	 */
3544 	if (config == dp_nss_cfg_dbdc) {
3545 		wlan_cfg_set_num_tx_desc_pool(wlan_cfg_ctx, 0);
3546 		wlan_cfg_set_num_tx_ext_desc_pool(wlan_cfg_ctx, 0);
3547 		wlan_cfg_set_num_tx_desc(wlan_cfg_ctx, 0);
3548 		wlan_cfg_set_num_tx_ext_desc(wlan_cfg_ctx, 0);
3549 	}
3550 
3551 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
3552 		  FL("nss-wifi<0> nss config is enabled"));
3553 }
3554 /*
3555 * dp_vdev_attach_wifi3() - attach txrx vdev
3556 * @txrx_pdev: Datapath PDEV handle
3557 * @vdev_mac_addr: MAC address of the virtual interface
3558 * @vdev_id: VDEV Id
3559 * @wlan_op_mode: VDEV operating mode
3560 *
3561 * Return: DP VDEV handle on success, NULL on failure
3562 */
3563 static struct cdp_vdev *dp_vdev_attach_wifi3(struct cdp_pdev *txrx_pdev,
3564 	uint8_t *vdev_mac_addr, uint8_t vdev_id, enum wlan_op_mode op_mode)
3565 {
3566 	struct dp_pdev *pdev = (struct dp_pdev *)txrx_pdev;
3567 	struct dp_soc *soc = pdev->soc;
3568 	struct dp_vdev *vdev = qdf_mem_malloc(sizeof(*vdev));
3569 
3570 	if (!vdev) {
3571 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3572 			FL("DP VDEV memory allocation failed"));
3573 		goto fail0;
3574 	}
3575 
3576 	vdev->pdev = pdev;
3577 	vdev->vdev_id = vdev_id;
3578 	vdev->opmode = op_mode;
3579 	vdev->osdev = soc->osdev;
3580 
3581 	vdev->osif_rx = NULL;
3582 	vdev->osif_rsim_rx_decap = NULL;
3583 	vdev->osif_get_key = NULL;
3584 	vdev->osif_rx_mon = NULL;
3585 	vdev->osif_tx_free_ext = NULL;
3586 	vdev->osif_vdev = NULL;
3587 
3588 	vdev->delete.pending = 0;
3589 	vdev->safemode = 0;
3590 	vdev->drop_unenc = 1;
3591 	vdev->sec_type = cdp_sec_type_none;
3592 #ifdef notyet
3593 	vdev->filters_num = 0;
3594 #endif
3595 
3596 	qdf_mem_copy(
3597 		&vdev->mac_addr.raw[0], vdev_mac_addr, OL_TXRX_MAC_ADDR_LEN);
3598 
3599 	vdev->tx_encap_type = wlan_cfg_pkt_type(soc->wlan_cfg_ctx);
3600 	vdev->rx_decap_type = wlan_cfg_pkt_type(soc->wlan_cfg_ctx);
3601 	vdev->dscp_tid_map_id = 0;
3602 	vdev->mcast_enhancement_en = 0;
3603 	vdev->raw_mode_war = wlan_cfg_get_raw_mode_war(soc->wlan_cfg_ctx);
3604 
3605 	/* TODO: Initialize default HTT meta data that will be used in
3606 	 * TCL descriptors for packets transmitted from this VDEV
3607 	 */
3608 
3609 	TAILQ_INIT(&vdev->peer_list);
3610 
3611 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
3612 	/* add this vdev into the pdev's list */
3613 	TAILQ_INSERT_TAIL(&pdev->vdev_list, vdev, vdev_list_elem);
3614 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
3615 	pdev->vdev_count++;
3616 
3617 	dp_tx_vdev_attach(vdev);
3618 
3619 
3620 	if ((soc->intr_mode == DP_INTR_POLL) &&
3621 			wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx) != 0) {
3622 		if (pdev->vdev_count == 1)
3623 			qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
3624 	}
3625 
3626 	dp_lro_hash_setup(soc);
3627 
3628 	/* LRO */
3629 	if (wlan_cfg_is_lro_enabled(soc->wlan_cfg_ctx) &&
3630 		wlan_op_mode_sta == vdev->opmode)
3631 		vdev->lro_enable = true;
3632 
3633 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
3634 		 "LRO: vdev_id %d lro_enable %d", vdev_id, vdev->lro_enable);
3635 
3636 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3637 		"Created vdev %pK (%pM)", vdev, vdev->mac_addr.raw);
3638 	DP_STATS_INIT(vdev);
3639 
3640 	if (wlan_op_mode_sta == vdev->opmode)
3641 		dp_peer_create_wifi3((struct cdp_vdev *)vdev,
3642 							vdev->mac_addr.raw,
3643 							NULL);
3644 
3645 	return (struct cdp_vdev *)vdev;
3646 
3647 fail0:
3648 	return NULL;
3649 }
3650 
3651 /**
3652  * dp_vdev_register_wifi3() - Register VDEV operations from osif layer
3653  * @vdev: Datapath VDEV handle
3654  * @osif_vdev: OSIF vdev handle
3655  * @ctrl_vdev: UMAC vdev handle
3656  * @txrx_ops: Tx and Rx operations
3657  *
3658  * Return: DP VDEV handle on success, NULL on failure
3659  */
3660 static void dp_vdev_register_wifi3(struct cdp_vdev *vdev_handle,
3661 	void *osif_vdev, struct cdp_ctrl_objmgr_vdev *ctrl_vdev,
3662 	struct ol_txrx_ops *txrx_ops)
3663 {
3664 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
3665 	vdev->osif_vdev = osif_vdev;
3666 	vdev->ctrl_vdev = ctrl_vdev;
3667 	vdev->osif_rx = txrx_ops->rx.rx;
3668 	vdev->osif_rx_stack = txrx_ops->rx.rx_stack;
3669 	vdev->osif_rsim_rx_decap = txrx_ops->rx.rsim_rx_decap;
3670 	vdev->osif_get_key = txrx_ops->get_key;
3671 	vdev->osif_rx_mon = txrx_ops->rx.mon;
3672 	vdev->osif_tx_free_ext = txrx_ops->tx.tx_free_ext;
3673 #ifdef notyet
3674 #if ATH_SUPPORT_WAPI
3675 	vdev->osif_check_wai = txrx_ops->rx.wai_check;
3676 #endif
3677 #endif
3678 #ifdef UMAC_SUPPORT_PROXY_ARP
3679 	vdev->osif_proxy_arp = txrx_ops->proxy_arp;
3680 #endif
3681 	vdev->me_convert = txrx_ops->me_convert;
3682 
3683 	/* TODO: Enable the following once Tx code is integrated */
3684 	if (vdev->mesh_vdev)
3685 		txrx_ops->tx.tx = dp_tx_send_mesh;
3686 	else
3687 		txrx_ops->tx.tx = dp_tx_send;
3688 
3689 	txrx_ops->tx.tx_exception = dp_tx_send_exception;
3690 
3691 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW,
3692 		"DP Vdev Register success");
3693 }
3694 
3695 /**
3696  * dp_vdev_flush_peers() - Forcibily Flush peers of vdev
3697  * @vdev: Datapath VDEV handle
3698  *
3699  * Return: void
3700  */
3701 static void dp_vdev_flush_peers(struct dp_vdev *vdev)
3702 {
3703 	struct dp_pdev *pdev = vdev->pdev;
3704 	struct dp_soc *soc = pdev->soc;
3705 	struct dp_peer *peer;
3706 	uint16_t *peer_ids;
3707 	uint8_t i = 0, j = 0;
3708 
3709 	peer_ids = qdf_mem_malloc(soc->max_peers * sizeof(peer_ids[0]));
3710 	if (!peer_ids) {
3711 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3712 			"DP alloc failure - unable to flush peers");
3713 		return;
3714 	}
3715 
3716 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
3717 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
3718 		for (i = 0; i < MAX_NUM_PEER_ID_PER_PEER; i++)
3719 			if (peer->peer_ids[i] != HTT_INVALID_PEER)
3720 				if (j < soc->max_peers)
3721 					peer_ids[j++] = peer->peer_ids[i];
3722 	}
3723 	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
3724 
3725 	for (i = 0; i < j ; i++)
3726 		dp_rx_peer_unmap_handler(soc, peer_ids[i]);
3727 
3728 	qdf_mem_free(peer_ids);
3729 
3730 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
3731 		FL("Flushed peers for vdev object %pK "), vdev);
3732 }
3733 
3734 /*
3735  * dp_vdev_detach_wifi3() - Detach txrx vdev
3736  * @txrx_vdev:		Datapath VDEV handle
3737  * @callback:		Callback OL_IF on completion of detach
3738  * @cb_context:	Callback context
3739  *
3740  */
3741 static void dp_vdev_detach_wifi3(struct cdp_vdev *vdev_handle,
3742 	ol_txrx_vdev_delete_cb callback, void *cb_context)
3743 {
3744 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
3745 	struct dp_pdev *pdev = vdev->pdev;
3746 	struct dp_soc *soc = pdev->soc;
3747 	struct dp_neighbour_peer *peer = NULL;
3748 
3749 	/* preconditions */
3750 	qdf_assert(vdev);
3751 
3752 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
3753 	/* remove the vdev from its parent pdev's list */
3754 	TAILQ_REMOVE(&pdev->vdev_list, vdev, vdev_list_elem);
3755 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
3756 
3757 	if (wlan_op_mode_sta == vdev->opmode)
3758 		dp_peer_delete_wifi3(vdev->vap_bss_peer, 0);
3759 
3760 	/*
3761 	 * If Target is hung, flush all peers before detaching vdev
3762 	 * this will free all references held due to missing
3763 	 * unmap commands from Target
3764 	 */
3765 	if (hif_get_target_status(soc->hif_handle) == TARGET_STATUS_RESET)
3766 		dp_vdev_flush_peers(vdev);
3767 
3768 	/*
3769 	 * Use peer_ref_mutex while accessing peer_list, in case
3770 	 * a peer is in the process of being removed from the list.
3771 	 */
3772 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
3773 	/* check that the vdev has no peers allocated */
3774 	if (!TAILQ_EMPTY(&vdev->peer_list)) {
3775 		/* debug print - will be removed later */
3776 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_WARN,
3777 			FL("not deleting vdev object %pK (%pM)"
3778 			"until deletion finishes for all its peers"),
3779 			vdev, vdev->mac_addr.raw);
3780 		/* indicate that the vdev needs to be deleted */
3781 		vdev->delete.pending = 1;
3782 		vdev->delete.callback = callback;
3783 		vdev->delete.context = cb_context;
3784 		qdf_spin_unlock_bh(&soc->peer_ref_mutex);
3785 		return;
3786 	}
3787 	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
3788 
3789 	qdf_spin_lock_bh(&pdev->neighbour_peer_mutex);
3790 	TAILQ_FOREACH(peer, &pdev->neighbour_peers_list,
3791 		      neighbour_peer_list_elem) {
3792 		QDF_ASSERT(peer->vdev != vdev);
3793 	}
3794 	qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex);
3795 
3796 	dp_tx_vdev_detach(vdev);
3797 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
3798 		FL("deleting vdev object %pK (%pM)"), vdev, vdev->mac_addr.raw);
3799 
3800 	qdf_mem_free(vdev);
3801 
3802 	if (callback)
3803 		callback(cb_context);
3804 }
3805 
3806 /*
3807  * dp_peer_delete_ast_entries(): Delete all AST entries for a peer
3808  * @soc - datapath soc handle
3809  * @peer - datapath peer handle
3810  *
3811  * Delete the AST entries belonging to a peer
3812  */
3813 #ifdef FEATURE_AST
3814 static inline void dp_peer_delete_ast_entries(struct dp_soc *soc,
3815 					      struct dp_peer *peer)
3816 {
3817 	struct dp_ast_entry *ast_entry, *temp_ast_entry;
3818 
3819 	qdf_spin_lock_bh(&soc->ast_lock);
3820 	DP_PEER_ITERATE_ASE_LIST(peer, ast_entry, temp_ast_entry)
3821 		dp_peer_del_ast(soc, ast_entry);
3822 
3823 	peer->self_ast_entry = NULL;
3824 	TAILQ_INIT(&peer->ast_entry_list);
3825 	qdf_spin_unlock_bh(&soc->ast_lock);
3826 }
3827 #else
3828 static inline void dp_peer_delete_ast_entries(struct dp_soc *soc,
3829 					      struct dp_peer *peer)
3830 {
3831 }
3832 #endif
3833 
3834 #if ATH_SUPPORT_WRAP
3835 static inline struct dp_peer *dp_peer_can_reuse(struct dp_vdev *vdev,
3836 						uint8_t *peer_mac_addr)
3837 {
3838 	struct dp_peer *peer;
3839 
3840 	peer = dp_peer_find_hash_find(vdev->pdev->soc, peer_mac_addr,
3841 				      0, vdev->vdev_id);
3842 	if (!peer)
3843 		return NULL;
3844 
3845 	if (peer->bss_peer)
3846 		return peer;
3847 
3848 	qdf_atomic_dec(&peer->ref_cnt);
3849 	return NULL;
3850 }
3851 #else
3852 static inline struct dp_peer *dp_peer_can_reuse(struct dp_vdev *vdev,
3853 						uint8_t *peer_mac_addr)
3854 {
3855 	struct dp_peer *peer;
3856 
3857 	peer = dp_peer_find_hash_find(vdev->pdev->soc, peer_mac_addr,
3858 				      0, vdev->vdev_id);
3859 	if (!peer)
3860 		return NULL;
3861 
3862 	if (peer->bss_peer && (peer->vdev->vdev_id == vdev->vdev_id))
3863 		return peer;
3864 
3865 	qdf_atomic_dec(&peer->ref_cnt);
3866 	return NULL;
3867 }
3868 #endif
3869 
3870 /*
3871  * dp_peer_create_wifi3() - attach txrx peer
3872  * @txrx_vdev: Datapath VDEV handle
3873  * @peer_mac_addr: Peer MAC address
3874  *
3875  * Return: DP peeer handle on success, NULL on failure
3876  */
3877 static void *dp_peer_create_wifi3(struct cdp_vdev *vdev_handle,
3878 		uint8_t *peer_mac_addr, struct cdp_ctrl_objmgr_peer *ctrl_peer)
3879 {
3880 	struct dp_peer *peer;
3881 	int i;
3882 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
3883 	struct dp_pdev *pdev;
3884 	struct dp_soc *soc;
3885 	struct dp_ast_entry *ast_entry;
3886 	enum cdp_txrx_ast_entry_type ast_type = CDP_TXRX_AST_TYPE_STATIC;
3887 
3888 	/* preconditions */
3889 	qdf_assert(vdev);
3890 	qdf_assert(peer_mac_addr);
3891 
3892 	pdev = vdev->pdev;
3893 	soc = pdev->soc;
3894 
3895 	/*
3896 	 * If a peer entry with given MAC address already exists,
3897 	 * reuse the peer and reset the state of peer.
3898 	 */
3899 	peer = dp_peer_can_reuse(vdev, peer_mac_addr);
3900 
3901 	if (peer) {
3902 		peer->delete_in_progress = false;
3903 
3904 		dp_peer_delete_ast_entries(soc, peer);
3905 
3906 		if ((vdev->opmode == wlan_op_mode_sta) &&
3907 		    !qdf_mem_cmp(peer_mac_addr, &vdev->mac_addr.raw[0],
3908 		     DP_MAC_ADDR_LEN)) {
3909 			ast_type = CDP_TXRX_AST_TYPE_SELF;
3910 		}
3911 
3912 		dp_peer_add_ast(soc, peer, peer_mac_addr, ast_type, 0);
3913 
3914 		/*
3915 		* Control path maintains a node count which is incremented
3916 		* for every new peer create command. Since new peer is not being
3917 		* created and earlier reference is reused here,
3918 		* peer_unref_delete event is sent to control path to
3919 		* increment the count back.
3920 		*/
3921 		if (soc->cdp_soc.ol_ops->peer_unref_delete) {
3922 			soc->cdp_soc.ol_ops->peer_unref_delete(pdev->ctrl_pdev,
3923 				vdev->vdev_id, peer->mac_addr.raw);
3924 		}
3925 		peer->ctrl_peer = ctrl_peer;
3926 
3927 		dp_local_peer_id_alloc(pdev, peer);
3928 		DP_STATS_INIT(peer);
3929 
3930 		return (void *)peer;
3931 	} else {
3932 		/*
3933 		 * When a STA roams from RPTR AP to ROOT AP and vice versa, we
3934 		 * need to remove the AST entry which was earlier added as a WDS
3935 		 * entry.
3936 		 * If an AST entry exists, but no peer entry exists with a given
3937 		 * MAC addresses, we could deduce it as a WDS entry
3938 		 */
3939 		qdf_spin_lock_bh(&soc->ast_lock);
3940 		ast_entry = dp_peer_ast_hash_find(soc, peer_mac_addr);
3941 		if (ast_entry)
3942 			dp_peer_del_ast(soc, ast_entry);
3943 		qdf_spin_unlock_bh(&soc->ast_lock);
3944 	}
3945 
3946 #ifdef notyet
3947 	peer = (struct dp_peer *)qdf_mempool_alloc(soc->osdev,
3948 		soc->mempool_ol_ath_peer);
3949 #else
3950 	peer = (struct dp_peer *)qdf_mem_malloc(sizeof(*peer));
3951 #endif
3952 
3953 	if (!peer)
3954 		return NULL; /* failure */
3955 
3956 	qdf_mem_zero(peer, sizeof(struct dp_peer));
3957 
3958 	TAILQ_INIT(&peer->ast_entry_list);
3959 
3960 	/* store provided params */
3961 	peer->vdev = vdev;
3962 	peer->ctrl_peer = ctrl_peer;
3963 
3964 	if ((vdev->opmode == wlan_op_mode_sta) &&
3965 	    !qdf_mem_cmp(peer_mac_addr, &vdev->mac_addr.raw[0],
3966 			 DP_MAC_ADDR_LEN)) {
3967 		ast_type = CDP_TXRX_AST_TYPE_SELF;
3968 	}
3969 
3970 	dp_peer_add_ast(soc, peer, peer_mac_addr, ast_type, 0);
3971 
3972 	qdf_spinlock_create(&peer->peer_info_lock);
3973 
3974 	qdf_mem_copy(
3975 		&peer->mac_addr.raw[0], peer_mac_addr, OL_TXRX_MAC_ADDR_LEN);
3976 
3977 	/* TODO: See of rx_opt_proc is really required */
3978 	peer->rx_opt_proc = soc->rx_opt_proc;
3979 
3980 	/* initialize the peer_id */
3981 	for (i = 0; i < MAX_NUM_PEER_ID_PER_PEER; i++)
3982 		peer->peer_ids[i] = HTT_INVALID_PEER;
3983 
3984 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
3985 
3986 	qdf_atomic_init(&peer->ref_cnt);
3987 
3988 	/* keep one reference for attach */
3989 	qdf_atomic_inc(&peer->ref_cnt);
3990 
3991 	/* add this peer into the vdev's list */
3992 	if (wlan_op_mode_sta == vdev->opmode)
3993 		TAILQ_INSERT_HEAD(&vdev->peer_list, peer, peer_list_elem);
3994 	else
3995 		TAILQ_INSERT_TAIL(&vdev->peer_list, peer, peer_list_elem);
3996 
3997 	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
3998 
3999 	/* TODO: See if hash based search is required */
4000 	dp_peer_find_hash_add(soc, peer);
4001 
4002 	/* Initialize the peer state */
4003 	peer->state = OL_TXRX_PEER_STATE_DISC;
4004 
4005 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
4006 		"vdev %pK created peer %pK (%pM) ref_cnt: %d",
4007 		vdev, peer, peer->mac_addr.raw,
4008 		qdf_atomic_read(&peer->ref_cnt));
4009 	/*
4010 	 * For every peer MAp message search and set if bss_peer
4011 	 */
4012 	if (memcmp(peer->mac_addr.raw, vdev->mac_addr.raw, 6) == 0) {
4013 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
4014 			"vdev bss_peer!!!!");
4015 		peer->bss_peer = 1;
4016 		vdev->vap_bss_peer = peer;
4017 	}
4018 	for (i = 0; i < DP_MAX_TIDS; i++)
4019 		qdf_spinlock_create(&peer->rx_tid[i].tid_lock);
4020 
4021 	dp_local_peer_id_alloc(pdev, peer);
4022 	DP_STATS_INIT(peer);
4023 	return (void *)peer;
4024 }
4025 
4026 /*
4027  * dp_peer_setup_wifi3() - initialize the peer
4028  * @vdev_hdl: virtual device object
4029  * @peer: Peer object
4030  *
4031  * Return: void
4032  */
4033 static void dp_peer_setup_wifi3(struct cdp_vdev *vdev_hdl, void *peer_hdl)
4034 {
4035 	struct dp_peer *peer = (struct dp_peer *)peer_hdl;
4036 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_hdl;
4037 	struct dp_pdev *pdev;
4038 	struct dp_soc *soc;
4039 	bool hash_based = 0;
4040 	enum cdp_host_reo_dest_ring reo_dest;
4041 
4042 	/* preconditions */
4043 	qdf_assert(vdev);
4044 	qdf_assert(peer);
4045 
4046 	pdev = vdev->pdev;
4047 	soc = pdev->soc;
4048 
4049 	peer->last_assoc_rcvd = 0;
4050 	peer->last_disassoc_rcvd = 0;
4051 	peer->last_deauth_rcvd = 0;
4052 
4053 	/*
4054 	 * hash based steering is disabled for Radios which are offloaded
4055 	 * to NSS
4056 	 */
4057 	if (!wlan_cfg_get_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx))
4058 		hash_based = wlan_cfg_is_rx_hash_enabled(soc->wlan_cfg_ctx);
4059 
4060 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
4061 		FL("hash based steering for pdev: %d is %d"),
4062 		pdev->pdev_id, hash_based);
4063 
4064 	/*
4065 	 * Below line of code will ensure the proper reo_dest ring is chosen
4066 	 * for cases where toeplitz hash cannot be generated (ex: non TCP/UDP)
4067 	 */
4068 	reo_dest = pdev->reo_dest;
4069 
4070 	if (soc->cdp_soc.ol_ops->peer_set_default_routing) {
4071 		/* TODO: Check the destination ring number to be passed to FW */
4072 		soc->cdp_soc.ol_ops->peer_set_default_routing(
4073 				pdev->ctrl_pdev, peer->mac_addr.raw,
4074 				peer->vdev->vdev_id, hash_based, reo_dest);
4075 	}
4076 
4077 	dp_peer_rx_init(pdev, peer);
4078 	return;
4079 }
4080 
4081 /*
4082  * dp_set_vdev_tx_encap_type() - set the encap type of the vdev
4083  * @vdev_handle: virtual device object
4084  * @htt_pkt_type: type of pkt
4085  *
4086  * Return: void
4087  */
4088 static void dp_set_vdev_tx_encap_type(struct cdp_vdev *vdev_handle,
4089 	 enum htt_cmn_pkt_type val)
4090 {
4091 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
4092 	vdev->tx_encap_type = val;
4093 }
4094 
4095 /*
4096  * dp_set_vdev_rx_decap_type() - set the decap type of the vdev
4097  * @vdev_handle: virtual device object
4098  * @htt_pkt_type: type of pkt
4099  *
4100  * Return: void
4101  */
4102 static void dp_set_vdev_rx_decap_type(struct cdp_vdev *vdev_handle,
4103 	 enum htt_cmn_pkt_type val)
4104 {
4105 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
4106 	vdev->rx_decap_type = val;
4107 }
4108 
4109 /*
4110  * dp_set_ba_aging_timeout() - set ba aging timeout per AC
4111  * @txrx_soc: cdp soc handle
4112  * @ac: Access category
4113  * @value: timeout value in millisec
4114  *
4115  * Return: void
4116  */
4117 static void dp_set_ba_aging_timeout(struct cdp_soc_t *txrx_soc,
4118 				    uint8_t ac, uint32_t value)
4119 {
4120 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
4121 
4122 	hal_set_ba_aging_timeout(soc->hal_soc, ac, value);
4123 }
4124 
4125 /*
4126  * dp_get_ba_aging_timeout() - get ba aging timeout per AC
4127  * @txrx_soc: cdp soc handle
4128  * @ac: access category
4129  * @value: timeout value in millisec
4130  *
4131  * Return: void
4132  */
4133 static void dp_get_ba_aging_timeout(struct cdp_soc_t *txrx_soc,
4134 				    uint8_t ac, uint32_t *value)
4135 {
4136 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
4137 
4138 	hal_get_ba_aging_timeout(soc->hal_soc, ac, value);
4139 }
4140 
4141 /*
4142  * dp_set_pdev_reo_dest() - set the reo destination ring for this pdev
4143  * @pdev_handle: physical device object
4144  * @val: reo destination ring index (1 - 4)
4145  *
4146  * Return: void
4147  */
4148 static void dp_set_pdev_reo_dest(struct cdp_pdev *pdev_handle,
4149 	 enum cdp_host_reo_dest_ring val)
4150 {
4151 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
4152 
4153 	if (pdev)
4154 		pdev->reo_dest = val;
4155 }
4156 
4157 /*
4158  * dp_get_pdev_reo_dest() - get the reo destination for this pdev
4159  * @pdev_handle: physical device object
4160  *
4161  * Return: reo destination ring index
4162  */
4163 static enum cdp_host_reo_dest_ring
4164 dp_get_pdev_reo_dest(struct cdp_pdev *pdev_handle)
4165 {
4166 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
4167 
4168 	if (pdev)
4169 		return pdev->reo_dest;
4170 	else
4171 		return cdp_host_reo_dest_ring_unknown;
4172 }
4173 
4174 #ifdef QCA_SUPPORT_SON
4175 static void dp_son_peer_authorize(struct dp_peer *peer)
4176 {
4177 	struct dp_soc *soc;
4178 	soc = peer->vdev->pdev->soc;
4179 	peer->peer_bs_inact_flag = 0;
4180 	peer->peer_bs_inact = soc->pdev_bs_inact_reload;
4181 	return;
4182 }
4183 #else
4184 static void dp_son_peer_authorize(struct dp_peer *peer)
4185 {
4186 	return;
4187 }
4188 #endif
4189 /*
4190  * dp_set_filter_neighbour_peers() - set filter neighbour peers for smart mesh
4191  * @pdev_handle: device object
4192  * @val: value to be set
4193  *
4194  * Return: void
4195  */
4196 static int dp_set_filter_neighbour_peers(struct cdp_pdev *pdev_handle,
4197 	 uint32_t val)
4198 {
4199 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
4200 
4201 	/* Enable/Disable smart mesh filtering. This flag will be checked
4202 	 * during rx processing to check if packets are from NAC clients.
4203 	 */
4204 	pdev->filter_neighbour_peers = val;
4205 	return 0;
4206 }
4207 
4208 /*
4209  * dp_update_filter_neighbour_peers() - set neighbour peers(nac clients)
4210  * address for smart mesh filtering
4211  * @vdev_handle: virtual device object
4212  * @cmd: Add/Del command
4213  * @macaddr: nac client mac address
4214  *
4215  * Return: void
4216  */
4217 static int dp_update_filter_neighbour_peers(struct cdp_vdev *vdev_handle,
4218 					    uint32_t cmd, uint8_t *macaddr)
4219 {
4220 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
4221 	struct dp_pdev *pdev = vdev->pdev;
4222 	struct dp_neighbour_peer *peer = NULL;
4223 
4224 	if (!macaddr)
4225 		goto fail0;
4226 
4227 	/* Store address of NAC (neighbour peer) which will be checked
4228 	 * against TA of received packets.
4229 	 */
4230 	if (cmd == DP_NAC_PARAM_ADD) {
4231 		peer = (struct dp_neighbour_peer *) qdf_mem_malloc(
4232 				sizeof(*peer));
4233 
4234 		if (!peer) {
4235 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
4236 				FL("DP neighbour peer node memory allocation failed"));
4237 			goto fail0;
4238 		}
4239 
4240 		qdf_mem_copy(&peer->neighbour_peers_macaddr.raw[0],
4241 			macaddr, DP_MAC_ADDR_LEN);
4242 		peer->vdev = vdev;
4243 
4244 		qdf_spin_lock_bh(&pdev->neighbour_peer_mutex);
4245 
4246 		/* add this neighbour peer into the list */
4247 		TAILQ_INSERT_TAIL(&pdev->neighbour_peers_list, peer,
4248 				neighbour_peer_list_elem);
4249 		qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex);
4250 
4251 		/* first neighbour */
4252 		if (!pdev->neighbour_peers_added) {
4253 			pdev->neighbour_peers_added = true;
4254 			dp_ppdu_ring_cfg(pdev);
4255 		}
4256 		return 1;
4257 
4258 	} else if (cmd == DP_NAC_PARAM_DEL) {
4259 		qdf_spin_lock_bh(&pdev->neighbour_peer_mutex);
4260 		TAILQ_FOREACH(peer, &pdev->neighbour_peers_list,
4261 				neighbour_peer_list_elem) {
4262 			if (!qdf_mem_cmp(&peer->neighbour_peers_macaddr.raw[0],
4263 				macaddr, DP_MAC_ADDR_LEN)) {
4264 				/* delete this peer from the list */
4265 				TAILQ_REMOVE(&pdev->neighbour_peers_list,
4266 					peer, neighbour_peer_list_elem);
4267 				qdf_mem_free(peer);
4268 				break;
4269 			}
4270 		}
4271 		/* last neighbour deleted */
4272 		if (TAILQ_EMPTY(&pdev->neighbour_peers_list))
4273 			pdev->neighbour_peers_added = false;
4274 
4275 		qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex);
4276 
4277 		if (!pdev->mcopy_mode && !pdev->neighbour_peers_added &&
4278 		    !pdev->enhanced_stats_en)
4279 			dp_ppdu_ring_reset(pdev);
4280 		return 1;
4281 
4282 	}
4283 
4284 fail0:
4285 	return 0;
4286 }
4287 
4288 /*
4289  * dp_get_sec_type() - Get the security type
4290  * @peer:		Datapath peer handle
4291  * @sec_idx:    Security id (mcast, ucast)
4292  *
4293  * return sec_type: Security type
4294  */
4295 static int dp_get_sec_type(struct cdp_peer *peer, uint8_t sec_idx)
4296 {
4297 	struct dp_peer *dpeer = (struct dp_peer *)peer;
4298 
4299 	return dpeer->security[sec_idx].sec_type;
4300 }
4301 
4302 /*
4303  * dp_peer_authorize() - authorize txrx peer
4304  * @peer_handle:		Datapath peer handle
4305  * @authorize
4306  *
4307  */
4308 static void dp_peer_authorize(struct cdp_peer *peer_handle, uint32_t authorize)
4309 {
4310 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
4311 	struct dp_soc *soc;
4312 
4313 	if (peer != NULL) {
4314 		soc = peer->vdev->pdev->soc;
4315 		qdf_spin_lock_bh(&soc->peer_ref_mutex);
4316 		dp_son_peer_authorize(peer);
4317 		peer->authorize = authorize ? 1 : 0;
4318 		qdf_spin_unlock_bh(&soc->peer_ref_mutex);
4319 	}
4320 }
4321 
4322 #ifdef QCA_SUPPORT_SON
4323 /*
4324  * dp_txrx_update_inact_threshold() - Update inact timer threshold
4325  * @pdev_handle: Device handle
4326  * @new_threshold : updated threshold value
4327  *
4328  */
4329 static void
4330 dp_txrx_update_inact_threshold(struct cdp_pdev *pdev_handle,
4331 			       u_int16_t new_threshold)
4332 {
4333 	struct dp_vdev *vdev;
4334 	struct dp_peer *peer;
4335 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
4336 	struct dp_soc *soc = pdev->soc;
4337 	u_int16_t old_threshold = soc->pdev_bs_inact_reload;
4338 
4339 	if (old_threshold == new_threshold)
4340 		return;
4341 
4342 	soc->pdev_bs_inact_reload = new_threshold;
4343 
4344 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
4345 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
4346 	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
4347 		if (vdev->opmode != wlan_op_mode_ap)
4348 			continue;
4349 
4350 		TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
4351 			if (!peer->authorize)
4352 				continue;
4353 
4354 			if (old_threshold - peer->peer_bs_inact >=
4355 					new_threshold) {
4356 				dp_mark_peer_inact((void *)peer, true);
4357 				peer->peer_bs_inact = 0;
4358 			} else {
4359 				peer->peer_bs_inact = new_threshold -
4360 					(old_threshold - peer->peer_bs_inact);
4361 			}
4362 		}
4363 	}
4364 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
4365 	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
4366 }
4367 
4368 /**
4369  * dp_txrx_reset_inact_count(): Reset inact count
4370  * @pdev_handle - device handle
4371  *
4372  * Return: void
4373  */
4374 static void
4375 dp_txrx_reset_inact_count(struct cdp_pdev *pdev_handle)
4376 {
4377 	struct dp_vdev *vdev = NULL;
4378 	struct dp_peer *peer = NULL;
4379 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
4380 	struct dp_soc *soc = pdev->soc;
4381 
4382 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
4383 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
4384 	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
4385 		if (vdev->opmode != wlan_op_mode_ap)
4386 			continue;
4387 
4388 		TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
4389 			if (!peer->authorize)
4390 				continue;
4391 
4392 			peer->peer_bs_inact = soc->pdev_bs_inact_reload;
4393 		}
4394 	}
4395 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
4396 	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
4397 }
4398 
4399 /**
4400  * dp_set_inact_params(): set inactivity params
4401  * @pdev_handle - device handle
4402  * @inact_check_interval - inactivity interval
4403  * @inact_normal - Inactivity normal
4404  * @inact_overload - Inactivity overload
4405  *
4406  * Return: bool
4407  */
4408 bool dp_set_inact_params(struct cdp_pdev *pdev_handle,
4409 			 u_int16_t inact_check_interval,
4410 			 u_int16_t inact_normal, u_int16_t inact_overload)
4411 {
4412 	struct dp_soc *soc;
4413 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
4414 
4415 	if (!pdev)
4416 		return false;
4417 
4418 	soc = pdev->soc;
4419 	if (!soc)
4420 		return false;
4421 
4422 	soc->pdev_bs_inact_interval = inact_check_interval;
4423 	soc->pdev_bs_inact_normal = inact_normal;
4424 	soc->pdev_bs_inact_overload = inact_overload;
4425 
4426 	dp_txrx_update_inact_threshold((struct cdp_pdev *)pdev,
4427 					soc->pdev_bs_inact_normal);
4428 
4429 	return true;
4430 }
4431 
4432 /**
4433  * dp_start_inact_timer(): Inactivity timer start
4434  * @pdev_handle - device handle
4435  * @enable - Inactivity timer start/stop
4436  *
4437  * Return: bool
4438  */
4439 bool dp_start_inact_timer(struct cdp_pdev *pdev_handle, bool enable)
4440 {
4441 	struct dp_soc *soc;
4442 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
4443 
4444 	if (!pdev)
4445 		return false;
4446 
4447 	soc = pdev->soc;
4448 	if (!soc)
4449 		return false;
4450 
4451 	if (enable) {
4452 		dp_txrx_reset_inact_count((struct cdp_pdev *)pdev);
4453 		qdf_timer_mod(&soc->pdev_bs_inact_timer,
4454 			      soc->pdev_bs_inact_interval * 1000);
4455 	} else {
4456 		qdf_timer_stop(&soc->pdev_bs_inact_timer);
4457 	}
4458 
4459 	return true;
4460 }
4461 
4462 /**
4463  * dp_set_overload(): Set inactivity overload
4464  * @pdev_handle - device handle
4465  * @overload - overload status
4466  *
4467  * Return: void
4468  */
4469 void dp_set_overload(struct cdp_pdev *pdev_handle, bool overload)
4470 {
4471 	struct dp_soc *soc;
4472 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
4473 
4474 	if (!pdev)
4475 		return;
4476 
4477 	soc = pdev->soc;
4478 	if (!soc)
4479 		return;
4480 
4481 	dp_txrx_update_inact_threshold((struct cdp_pdev *)pdev,
4482 			overload ? soc->pdev_bs_inact_overload :
4483 			soc->pdev_bs_inact_normal);
4484 }
4485 
4486 /**
4487  * dp_peer_is_inact(): check whether peer is inactive
4488  * @peer_handle - datapath peer handle
4489  *
4490  * Return: bool
4491  */
4492 bool dp_peer_is_inact(void *peer_handle)
4493 {
4494 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
4495 
4496 	if (!peer)
4497 		return false;
4498 
4499 	return peer->peer_bs_inact_flag == 1;
4500 }
4501 
4502 /**
4503  * dp_init_inact_timer: initialize the inact timer
4504  * @soc - SOC handle
4505  *
4506  * Return: void
4507  */
4508 void dp_init_inact_timer(struct dp_soc *soc)
4509 {
4510 	qdf_timer_init(soc->osdev, &soc->pdev_bs_inact_timer,
4511 		dp_txrx_peer_find_inact_timeout_handler,
4512 		(void *)soc, QDF_TIMER_TYPE_WAKE_APPS);
4513 }
4514 
4515 #else
4516 
4517 bool dp_set_inact_params(struct cdp_pdev *pdev, u_int16_t inact_check_interval,
4518 			 u_int16_t inact_normal, u_int16_t inact_overload)
4519 {
4520 	return false;
4521 }
4522 
4523 bool dp_start_inact_timer(struct cdp_pdev *pdev, bool enable)
4524 {
4525 	return false;
4526 }
4527 
4528 void dp_set_overload(struct cdp_pdev *pdev, bool overload)
4529 {
4530 	return;
4531 }
4532 
4533 void dp_init_inact_timer(struct dp_soc *soc)
4534 {
4535 	return;
4536 }
4537 
4538 bool dp_peer_is_inact(void *peer)
4539 {
4540 	return false;
4541 }
4542 #endif
4543 
4544 /*
4545  * dp_peer_unref_delete() - unref and delete peer
4546  * @peer_handle:		Datapath peer handle
4547  *
4548  */
4549 void dp_peer_unref_delete(void *peer_handle)
4550 {
4551 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
4552 	struct dp_peer *bss_peer = NULL;
4553 	struct dp_vdev *vdev = peer->vdev;
4554 	struct dp_pdev *pdev = vdev->pdev;
4555 	struct dp_soc *soc = pdev->soc;
4556 	struct dp_peer *tmppeer;
4557 	int found = 0;
4558 	uint16_t peer_id;
4559 	uint16_t vdev_id;
4560 
4561 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
4562 		  "%s: peer %pK ref_cnt(before decrement): %d\n", __func__,
4563 		  peer, qdf_atomic_read(&peer->ref_cnt));
4564 	/*
4565 	 * Hold the lock all the way from checking if the peer ref count
4566 	 * is zero until the peer references are removed from the hash
4567 	 * table and vdev list (if the peer ref count is zero).
4568 	 * This protects against a new HL tx operation starting to use the
4569 	 * peer object just after this function concludes it's done being used.
4570 	 * Furthermore, the lock needs to be held while checking whether the
4571 	 * vdev's list of peers is empty, to make sure that list is not modified
4572 	 * concurrently with the empty check.
4573 	 */
4574 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
4575 	if (qdf_atomic_dec_and_test(&peer->ref_cnt)) {
4576 		peer_id = peer->peer_ids[0];
4577 		vdev_id = vdev->vdev_id;
4578 
4579 		/*
4580 		 * Make sure that the reference to the peer in
4581 		 * peer object map is removed
4582 		 */
4583 		if (peer_id != HTT_INVALID_PEER)
4584 			soc->peer_id_to_obj_map[peer_id] = NULL;
4585 
4586 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
4587 			"Deleting peer %pK (%pM)", peer, peer->mac_addr.raw);
4588 
4589 		/* remove the reference to the peer from the hash table */
4590 		dp_peer_find_hash_remove(soc, peer);
4591 
4592 		qdf_spin_lock_bh(&soc->ast_lock);
4593 		if (peer->self_ast_entry) {
4594 			dp_peer_del_ast(soc, peer->self_ast_entry);
4595 			peer->self_ast_entry = NULL;
4596 		}
4597 		qdf_spin_unlock_bh(&soc->ast_lock);
4598 
4599 		TAILQ_FOREACH(tmppeer, &peer->vdev->peer_list, peer_list_elem) {
4600 			if (tmppeer == peer) {
4601 				found = 1;
4602 				break;
4603 			}
4604 		}
4605 		if (found) {
4606 			TAILQ_REMOVE(&peer->vdev->peer_list, peer,
4607 				peer_list_elem);
4608 		} else {
4609 			/*Ignoring the remove operation as peer not found*/
4610 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
4611 				  "peer:%pK not found in vdev:%pK peerlist:%pK",
4612 				  peer, vdev, &peer->vdev->peer_list);
4613 		}
4614 
4615 		/* cleanup the peer data */
4616 		dp_peer_cleanup(vdev, peer);
4617 
4618 		/* check whether the parent vdev has no peers left */
4619 		if (TAILQ_EMPTY(&vdev->peer_list)) {
4620 			/*
4621 			 * Now that there are no references to the peer, we can
4622 			 * release the peer reference lock.
4623 			 */
4624 			qdf_spin_unlock_bh(&soc->peer_ref_mutex);
4625 			/*
4626 			 * Check if the parent vdev was waiting for its peers
4627 			 * to be deleted, in order for it to be deleted too.
4628 			 */
4629 			if (vdev->delete.pending) {
4630 				ol_txrx_vdev_delete_cb vdev_delete_cb =
4631 					vdev->delete.callback;
4632 				void *vdev_delete_context =
4633 					vdev->delete.context;
4634 
4635 				QDF_TRACE(QDF_MODULE_ID_DP,
4636 					QDF_TRACE_LEVEL_INFO_HIGH,
4637 					FL("deleting vdev object %pK (%pM)"
4638 					" - its last peer is done"),
4639 					vdev, vdev->mac_addr.raw);
4640 				/* all peers are gone, go ahead and delete it */
4641 				dp_tx_flow_pool_unmap_handler(pdev, vdev_id,
4642 								FLOW_TYPE_VDEV,
4643 								vdev_id);
4644 				dp_tx_vdev_detach(vdev);
4645 				QDF_TRACE(QDF_MODULE_ID_DP,
4646 					QDF_TRACE_LEVEL_INFO_HIGH,
4647 					FL("deleting vdev object %pK (%pM)"),
4648 					vdev, vdev->mac_addr.raw);
4649 
4650 				qdf_mem_free(vdev);
4651 				vdev = NULL;
4652 				if (vdev_delete_cb)
4653 					vdev_delete_cb(vdev_delete_context);
4654 			}
4655 		} else {
4656 			qdf_spin_unlock_bh(&soc->peer_ref_mutex);
4657 		}
4658 
4659 		if (vdev) {
4660 			if (vdev->vap_bss_peer == peer) {
4661 				vdev->vap_bss_peer = NULL;
4662 			}
4663 		}
4664 
4665 		if (soc->cdp_soc.ol_ops->peer_unref_delete) {
4666 			soc->cdp_soc.ol_ops->peer_unref_delete(pdev->ctrl_pdev,
4667 					vdev_id, peer->mac_addr.raw);
4668 		}
4669 
4670 		if (!vdev || !vdev->vap_bss_peer) {
4671 			goto free_peer;
4672 		}
4673 
4674 #ifdef notyet
4675 		qdf_mempool_free(soc->osdev, soc->mempool_ol_ath_peer, peer);
4676 #else
4677 		bss_peer = vdev->vap_bss_peer;
4678 		DP_UPDATE_STATS(vdev, peer);
4679 
4680 free_peer:
4681 		qdf_mem_free(peer);
4682 
4683 #endif
4684 	} else {
4685 		qdf_spin_unlock_bh(&soc->peer_ref_mutex);
4686 	}
4687 }
4688 
4689 /*
4690  * dp_peer_detach_wifi3() – Detach txrx peer
4691  * @peer_handle: Datapath peer handle
4692  * @bitmap: bitmap indicating special handling of request.
4693  *
4694  */
4695 static void dp_peer_delete_wifi3(void *peer_handle, uint32_t bitmap)
4696 {
4697 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
4698 
4699 	/* redirect the peer's rx delivery function to point to a
4700 	 * discard func
4701 	 */
4702 
4703 	peer->rx_opt_proc = dp_rx_discard;
4704 	peer->ctrl_peer = NULL;
4705 
4706 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
4707 		FL("peer %pK (%pM)"),  peer, peer->mac_addr.raw);
4708 
4709 	dp_local_peer_id_free(peer->vdev->pdev, peer);
4710 	qdf_spinlock_destroy(&peer->peer_info_lock);
4711 
4712 	/*
4713 	 * Remove the reference added during peer_attach.
4714 	 * The peer will still be left allocated until the
4715 	 * PEER_UNMAP message arrives to remove the other
4716 	 * reference, added by the PEER_MAP message.
4717 	 */
4718 	dp_peer_unref_delete(peer_handle);
4719 }
4720 
4721 /*
4722  * dp_get_vdev_mac_addr_wifi3() – Detach txrx peer
4723  * @peer_handle:		Datapath peer handle
4724  *
4725  */
4726 static uint8 *dp_get_vdev_mac_addr_wifi3(struct cdp_vdev *pvdev)
4727 {
4728 	struct dp_vdev *vdev = (struct dp_vdev *)pvdev;
4729 	return vdev->mac_addr.raw;
4730 }
4731 
4732 /*
4733  * dp_vdev_set_wds() - Enable per packet stats
4734  * @vdev_handle: DP VDEV handle
4735  * @val: value
4736  *
4737  * Return: none
4738  */
4739 static int dp_vdev_set_wds(void *vdev_handle, uint32_t val)
4740 {
4741 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
4742 
4743 	vdev->wds_enabled = val;
4744 	return 0;
4745 }
4746 
4747 /*
4748  * dp_get_vdev_from_vdev_id_wifi3() – Detach txrx peer
4749  * @peer_handle:		Datapath peer handle
4750  *
4751  */
4752 static struct cdp_vdev *dp_get_vdev_from_vdev_id_wifi3(struct cdp_pdev *dev,
4753 						uint8_t vdev_id)
4754 {
4755 	struct dp_pdev *pdev = (struct dp_pdev *)dev;
4756 	struct dp_vdev *vdev = NULL;
4757 
4758 	if (qdf_unlikely(!pdev))
4759 		return NULL;
4760 
4761 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
4762 	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
4763 		if (vdev->vdev_id == vdev_id)
4764 			break;
4765 	}
4766 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
4767 
4768 	return (struct cdp_vdev *)vdev;
4769 }
4770 
4771 static int dp_get_opmode(struct cdp_vdev *vdev_handle)
4772 {
4773 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
4774 
4775 	return vdev->opmode;
4776 }
4777 
4778 static
4779 void dp_get_os_rx_handles_from_vdev_wifi3(struct cdp_vdev *pvdev,
4780 					  ol_txrx_rx_fp *stack_fn_p,
4781 					  ol_osif_vdev_handle *osif_vdev_p)
4782 {
4783 	struct dp_vdev *vdev = dp_get_dp_vdev_from_cdp_vdev(pvdev);
4784 
4785 	qdf_assert(vdev);
4786 	*stack_fn_p = vdev->osif_rx_stack;
4787 	*osif_vdev_p = vdev->osif_vdev;
4788 }
4789 
4790 static struct cdp_cfg *dp_get_ctrl_pdev_from_vdev_wifi3(struct cdp_vdev *pvdev)
4791 {
4792 	struct dp_vdev *vdev = (struct dp_vdev *)pvdev;
4793 	struct dp_pdev *pdev = vdev->pdev;
4794 
4795 	return (struct cdp_cfg *)pdev->wlan_cfg_ctx;
4796 }
4797 
4798 /**
4799  * dp_reset_monitor_mode() - Disable monitor mode
4800  * @pdev_handle: Datapath PDEV handle
4801  *
4802  * Return: 0 on success, not 0 on failure
4803  */
4804 static int dp_reset_monitor_mode(struct cdp_pdev *pdev_handle)
4805 {
4806 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
4807 	struct htt_rx_ring_tlv_filter htt_tlv_filter;
4808 	struct dp_soc *soc = pdev->soc;
4809 	uint8_t pdev_id;
4810 	int mac_id;
4811 
4812 	pdev_id = pdev->pdev_id;
4813 	soc = pdev->soc;
4814 
4815 	qdf_spin_lock_bh(&pdev->mon_lock);
4816 
4817 	qdf_mem_set(&(htt_tlv_filter), sizeof(htt_tlv_filter), 0x0);
4818 
4819 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
4820 		int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, pdev_id);
4821 
4822 		htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
4823 			pdev->rxdma_mon_buf_ring[mac_id].hal_srng,
4824 			RXDMA_MONITOR_BUF, RX_BUFFER_SIZE, &htt_tlv_filter);
4825 
4826 		htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
4827 			pdev->rxdma_mon_status_ring[mac_id].hal_srng,
4828 			RXDMA_MONITOR_STATUS, RX_BUFFER_SIZE, &htt_tlv_filter);
4829 	}
4830 
4831 	pdev->monitor_vdev = NULL;
4832 
4833 	qdf_spin_unlock_bh(&pdev->mon_lock);
4834 
4835 	return 0;
4836 }
4837 
4838 /**
4839  * dp_set_nac() - set peer_nac
4840  * @peer_handle: Datapath PEER handle
4841  *
4842  * Return: void
4843  */
4844 static void dp_set_nac(struct cdp_peer *peer_handle)
4845 {
4846 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
4847 
4848 	peer->nac = 1;
4849 }
4850 
4851 /**
4852  * dp_get_tx_pending() - read pending tx
4853  * @pdev_handle: Datapath PDEV handle
4854  *
4855  * Return: outstanding tx
4856  */
4857 static int dp_get_tx_pending(struct cdp_pdev *pdev_handle)
4858 {
4859 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
4860 
4861 	return qdf_atomic_read(&pdev->num_tx_outstanding);
4862 }
4863 
4864 /**
4865  * dp_get_peer_mac_from_peer_id() - get peer mac
4866  * @pdev_handle: Datapath PDEV handle
4867  * @peer_id: Peer ID
4868  * @peer_mac: MAC addr of PEER
4869  *
4870  * Return: void
4871  */
4872 static void dp_get_peer_mac_from_peer_id(struct cdp_pdev *pdev_handle,
4873 	uint32_t peer_id, uint8_t *peer_mac)
4874 {
4875 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
4876 	struct dp_peer *peer;
4877 
4878 	if (pdev && peer_mac) {
4879 		peer = dp_peer_find_by_id(pdev->soc, (uint16_t)peer_id);
4880 		if (peer && peer->mac_addr.raw) {
4881 			qdf_mem_copy(peer_mac, peer->mac_addr.raw,
4882 					DP_MAC_ADDR_LEN);
4883 		}
4884 	}
4885 }
4886 
4887 /**
4888  * dp_vdev_set_monitor_mode() - Set DP VDEV to monitor mode
4889  * @vdev_handle: Datapath VDEV handle
4890  * @smart_monitor: Flag to denote if its smart monitor mode
4891  *
4892  * Return: 0 on success, not 0 on failure
4893  */
4894 static int dp_vdev_set_monitor_mode(struct cdp_vdev *vdev_handle,
4895 		uint8_t smart_monitor)
4896 {
4897 	/* Many monitor VAPs can exists in a system but only one can be up at
4898 	 * anytime
4899 	 */
4900 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
4901 	struct dp_pdev *pdev;
4902 	struct htt_rx_ring_tlv_filter htt_tlv_filter;
4903 	struct dp_soc *soc;
4904 	uint8_t pdev_id;
4905 	int mac_id;
4906 
4907 	qdf_assert(vdev);
4908 
4909 	pdev = vdev->pdev;
4910 	pdev_id = pdev->pdev_id;
4911 	soc = pdev->soc;
4912 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_WARN,
4913 		"pdev=%pK, pdev_id=%d, soc=%pK vdev=%pK",
4914 		pdev, pdev_id, soc, vdev);
4915 
4916 	/*Check if current pdev's monitor_vdev exists */
4917 	if (pdev->monitor_vdev) {
4918 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
4919 			"vdev=%pK", vdev);
4920 		qdf_assert(vdev);
4921 	}
4922 
4923 	pdev->monitor_vdev = vdev;
4924 
4925 	/* If smart monitor mode, do not configure monitor ring */
4926 	if (smart_monitor)
4927 		return QDF_STATUS_SUCCESS;
4928 
4929 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
4930 		"MODE[%x] FP[%02x|%02x|%02x] MO[%02x|%02x|%02x]",
4931 		pdev->mon_filter_mode, pdev->fp_mgmt_filter,
4932 		pdev->fp_ctrl_filter, pdev->fp_data_filter,
4933 		pdev->mo_mgmt_filter, pdev->mo_ctrl_filter,
4934 		pdev->mo_data_filter);
4935 
4936 	qdf_mem_set(&(htt_tlv_filter), sizeof(htt_tlv_filter), 0x0);
4937 
4938 	htt_tlv_filter.mpdu_start = 1;
4939 	htt_tlv_filter.msdu_start = 1;
4940 	htt_tlv_filter.packet = 1;
4941 	htt_tlv_filter.msdu_end = 1;
4942 	htt_tlv_filter.mpdu_end = 1;
4943 	htt_tlv_filter.packet_header = 1;
4944 	htt_tlv_filter.attention = 1;
4945 	htt_tlv_filter.ppdu_start = 0;
4946 	htt_tlv_filter.ppdu_end = 0;
4947 	htt_tlv_filter.ppdu_end_user_stats = 0;
4948 	htt_tlv_filter.ppdu_end_user_stats_ext = 0;
4949 	htt_tlv_filter.ppdu_end_status_done = 0;
4950 	htt_tlv_filter.header_per_msdu = 1;
4951 	htt_tlv_filter.enable_fp =
4952 		(pdev->mon_filter_mode & MON_FILTER_PASS) ? 1 : 0;
4953 	htt_tlv_filter.enable_md = 0;
4954 	htt_tlv_filter.enable_mo =
4955 		(pdev->mon_filter_mode & MON_FILTER_OTHER) ? 1 : 0;
4956 	htt_tlv_filter.fp_mgmt_filter = pdev->fp_mgmt_filter;
4957 	htt_tlv_filter.fp_ctrl_filter = pdev->fp_ctrl_filter;
4958 	htt_tlv_filter.fp_data_filter = pdev->fp_data_filter;
4959 	htt_tlv_filter.mo_mgmt_filter = pdev->mo_mgmt_filter;
4960 	htt_tlv_filter.mo_ctrl_filter = pdev->mo_ctrl_filter;
4961 	htt_tlv_filter.mo_data_filter = pdev->mo_data_filter;
4962 
4963 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
4964 		int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, pdev_id);
4965 
4966 		htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
4967 			pdev->rxdma_mon_buf_ring[mac_id].hal_srng,
4968 			RXDMA_MONITOR_BUF, RX_BUFFER_SIZE, &htt_tlv_filter);
4969 	}
4970 
4971 	qdf_mem_set(&(htt_tlv_filter), sizeof(htt_tlv_filter), 0x0);
4972 
4973 	htt_tlv_filter.mpdu_start = 1;
4974 	htt_tlv_filter.msdu_start = 0;
4975 	htt_tlv_filter.packet = 0;
4976 	htt_tlv_filter.msdu_end = 0;
4977 	htt_tlv_filter.mpdu_end = 0;
4978 	htt_tlv_filter.attention = 0;
4979 	htt_tlv_filter.ppdu_start = 1;
4980 	htt_tlv_filter.ppdu_end = 1;
4981 	htt_tlv_filter.ppdu_end_user_stats = 1;
4982 	htt_tlv_filter.ppdu_end_user_stats_ext = 1;
4983 	htt_tlv_filter.ppdu_end_status_done = 1;
4984 	htt_tlv_filter.enable_fp = 1;
4985 	htt_tlv_filter.enable_md = 0;
4986 	htt_tlv_filter.enable_mo = 1;
4987 	if (pdev->mcopy_mode) {
4988 		htt_tlv_filter.packet_header = 1;
4989 	}
4990 	htt_tlv_filter.fp_mgmt_filter = FILTER_MGMT_ALL;
4991 	htt_tlv_filter.fp_ctrl_filter = FILTER_CTRL_ALL;
4992 	htt_tlv_filter.fp_data_filter = FILTER_DATA_ALL;
4993 	htt_tlv_filter.mo_mgmt_filter = FILTER_MGMT_ALL;
4994 	htt_tlv_filter.mo_ctrl_filter = FILTER_CTRL_ALL;
4995 	htt_tlv_filter.mo_data_filter = FILTER_DATA_ALL;
4996 
4997 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
4998 		int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id,
4999 						pdev->pdev_id);
5000 
5001 		htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
5002 			pdev->rxdma_mon_status_ring[mac_id].hal_srng,
5003 			RXDMA_MONITOR_STATUS, RX_BUFFER_SIZE, &htt_tlv_filter);
5004 	}
5005 
5006 	return QDF_STATUS_SUCCESS;
5007 }
5008 
5009 /**
5010  * dp_pdev_set_advance_monitor_filter() - Set DP PDEV monitor filter
5011  * @pdev_handle: Datapath PDEV handle
5012  * @filter_val: Flag to select Filter for monitor mode
5013  * Return: 0 on success, not 0 on failure
5014  */
5015 static int dp_pdev_set_advance_monitor_filter(struct cdp_pdev *pdev_handle,
5016 	struct cdp_monitor_filter *filter_val)
5017 {
5018 	/* Many monitor VAPs can exists in a system but only one can be up at
5019 	 * anytime
5020 	 */
5021 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
5022 	struct dp_vdev *vdev = pdev->monitor_vdev;
5023 	struct htt_rx_ring_tlv_filter htt_tlv_filter;
5024 	struct dp_soc *soc;
5025 	uint8_t pdev_id;
5026 	int mac_id;
5027 
5028 	pdev_id = pdev->pdev_id;
5029 	soc = pdev->soc;
5030 
5031 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_WARN,
5032 		"pdev=%pK, pdev_id=%d, soc=%pK vdev=%pK",
5033 		pdev, pdev_id, soc, vdev);
5034 
5035 	/*Check if current pdev's monitor_vdev exists */
5036 	if (!pdev->monitor_vdev) {
5037 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
5038 			"vdev=%pK", vdev);
5039 		qdf_assert(vdev);
5040 	}
5041 
5042 	/* update filter mode, type in pdev structure */
5043 	pdev->mon_filter_mode = filter_val->mode;
5044 	pdev->fp_mgmt_filter = filter_val->fp_mgmt;
5045 	pdev->fp_ctrl_filter = filter_val->fp_ctrl;
5046 	pdev->fp_data_filter = filter_val->fp_data;
5047 	pdev->mo_mgmt_filter = filter_val->mo_mgmt;
5048 	pdev->mo_ctrl_filter = filter_val->mo_ctrl;
5049 	pdev->mo_data_filter = filter_val->mo_data;
5050 
5051 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
5052 		"MODE[%x] FP[%02x|%02x|%02x] MO[%02x|%02x|%02x]",
5053 		pdev->mon_filter_mode, pdev->fp_mgmt_filter,
5054 		pdev->fp_ctrl_filter, pdev->fp_data_filter,
5055 		pdev->mo_mgmt_filter, pdev->mo_ctrl_filter,
5056 		pdev->mo_data_filter);
5057 
5058 	qdf_mem_set(&(htt_tlv_filter), sizeof(htt_tlv_filter), 0x0);
5059 
5060 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
5061 		int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, pdev_id);
5062 
5063 		htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
5064 			pdev->rxdma_mon_buf_ring[mac_id].hal_srng,
5065 			RXDMA_MONITOR_BUF, RX_BUFFER_SIZE, &htt_tlv_filter);
5066 
5067 		htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
5068 			pdev->rxdma_mon_status_ring[mac_id].hal_srng,
5069 			RXDMA_MONITOR_STATUS, RX_BUFFER_SIZE, &htt_tlv_filter);
5070 	}
5071 
5072 	htt_tlv_filter.mpdu_start = 1;
5073 	htt_tlv_filter.msdu_start = 1;
5074 	htt_tlv_filter.packet = 1;
5075 	htt_tlv_filter.msdu_end = 1;
5076 	htt_tlv_filter.mpdu_end = 1;
5077 	htt_tlv_filter.packet_header = 1;
5078 	htt_tlv_filter.attention = 1;
5079 	htt_tlv_filter.ppdu_start = 0;
5080 	htt_tlv_filter.ppdu_end = 0;
5081 	htt_tlv_filter.ppdu_end_user_stats = 0;
5082 	htt_tlv_filter.ppdu_end_user_stats_ext = 0;
5083 	htt_tlv_filter.ppdu_end_status_done = 0;
5084 	htt_tlv_filter.header_per_msdu = 1;
5085 	htt_tlv_filter.enable_fp =
5086 		(pdev->mon_filter_mode & MON_FILTER_PASS) ? 1 : 0;
5087 	htt_tlv_filter.enable_md = 0;
5088 	htt_tlv_filter.enable_mo =
5089 		(pdev->mon_filter_mode & MON_FILTER_OTHER) ? 1 : 0;
5090 	htt_tlv_filter.fp_mgmt_filter = pdev->fp_mgmt_filter;
5091 	htt_tlv_filter.fp_ctrl_filter = pdev->fp_ctrl_filter;
5092 	htt_tlv_filter.fp_data_filter = pdev->fp_data_filter;
5093 	htt_tlv_filter.mo_mgmt_filter = pdev->mo_mgmt_filter;
5094 	htt_tlv_filter.mo_ctrl_filter = pdev->mo_ctrl_filter;
5095 	htt_tlv_filter.mo_data_filter = pdev->mo_data_filter;
5096 
5097 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
5098 		int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, pdev_id);
5099 
5100 		htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
5101 			pdev->rxdma_mon_buf_ring[mac_id].hal_srng,
5102 			RXDMA_MONITOR_BUF, RX_BUFFER_SIZE, &htt_tlv_filter);
5103 	}
5104 
5105 	qdf_mem_set(&(htt_tlv_filter), sizeof(htt_tlv_filter), 0x0);
5106 
5107 	htt_tlv_filter.mpdu_start = 1;
5108 	htt_tlv_filter.msdu_start = 0;
5109 	htt_tlv_filter.packet = 0;
5110 	htt_tlv_filter.msdu_end = 0;
5111 	htt_tlv_filter.mpdu_end = 0;
5112 	htt_tlv_filter.attention = 0;
5113 	htt_tlv_filter.ppdu_start = 1;
5114 	htt_tlv_filter.ppdu_end = 1;
5115 	htt_tlv_filter.ppdu_end_user_stats = 1;
5116 	htt_tlv_filter.ppdu_end_user_stats_ext = 1;
5117 	htt_tlv_filter.ppdu_end_status_done = 1;
5118 	htt_tlv_filter.enable_fp = 1;
5119 	htt_tlv_filter.enable_md = 0;
5120 	htt_tlv_filter.enable_mo = 1;
5121 	if (pdev->mcopy_mode) {
5122 		htt_tlv_filter.packet_header = 1;
5123 	}
5124 	htt_tlv_filter.fp_mgmt_filter = FILTER_MGMT_ALL;
5125 	htt_tlv_filter.fp_ctrl_filter = FILTER_CTRL_ALL;
5126 	htt_tlv_filter.fp_data_filter = FILTER_DATA_ALL;
5127 	htt_tlv_filter.mo_mgmt_filter = FILTER_MGMT_ALL;
5128 	htt_tlv_filter.mo_ctrl_filter = FILTER_CTRL_ALL;
5129 	htt_tlv_filter.mo_data_filter = FILTER_DATA_ALL;
5130 
5131 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
5132 		int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id,
5133 						pdev->pdev_id);
5134 
5135 		htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
5136 			pdev->rxdma_mon_status_ring[mac_id].hal_srng,
5137 			RXDMA_MONITOR_STATUS, RX_BUFFER_SIZE, &htt_tlv_filter);
5138 	}
5139 
5140 	return QDF_STATUS_SUCCESS;
5141 }
5142 
5143 /**
5144  * dp_get_pdev_id_frm_pdev() - get pdev_id
5145  * @pdev_handle: Datapath PDEV handle
5146  *
5147  * Return: pdev_id
5148  */
5149 static
5150 uint8_t dp_get_pdev_id_frm_pdev(struct cdp_pdev *pdev_handle)
5151 {
5152 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
5153 
5154 	return pdev->pdev_id;
5155 }
5156 
5157 /**
5158  * dp_pdev_set_chan_noise_floor() - set channel noise floor
5159  * @pdev_handle: Datapath PDEV handle
5160  * @chan_noise_floor: Channel Noise Floor
5161  *
5162  * Return: void
5163  */
5164 static
5165 void dp_pdev_set_chan_noise_floor(struct cdp_pdev *pdev_handle,
5166 				  int16_t chan_noise_floor)
5167 {
5168 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
5169 
5170 	pdev->chan_noise_floor = chan_noise_floor;
5171 }
5172 
5173 /**
5174  * dp_vdev_get_filter_ucast_data() - get DP VDEV monitor ucast filter
5175  * @vdev_handle: Datapath VDEV handle
5176  * Return: true on ucast filter flag set
5177  */
5178 static bool dp_vdev_get_filter_ucast_data(struct cdp_vdev *vdev_handle)
5179 {
5180 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
5181 	struct dp_pdev *pdev;
5182 
5183 	pdev = vdev->pdev;
5184 
5185 	if ((pdev->fp_data_filter & FILTER_DATA_UCAST) ||
5186 	    (pdev->mo_data_filter & FILTER_DATA_UCAST))
5187 		return true;
5188 
5189 	return false;
5190 }
5191 
5192 /**
5193  * dp_vdev_get_filter_mcast_data() - get DP VDEV monitor mcast filter
5194  * @vdev_handle: Datapath VDEV handle
5195  * Return: true on mcast filter flag set
5196  */
5197 static bool dp_vdev_get_filter_mcast_data(struct cdp_vdev *vdev_handle)
5198 {
5199 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
5200 	struct dp_pdev *pdev;
5201 
5202 	pdev = vdev->pdev;
5203 
5204 	if ((pdev->fp_data_filter & FILTER_DATA_MCAST) ||
5205 	    (pdev->mo_data_filter & FILTER_DATA_MCAST))
5206 		return true;
5207 
5208 	return false;
5209 }
5210 
5211 /**
5212  * dp_vdev_get_filter_non_data() - get DP VDEV monitor non_data filter
5213  * @vdev_handle: Datapath VDEV handle
5214  * Return: true on non data filter flag set
5215  */
5216 static bool dp_vdev_get_filter_non_data(struct cdp_vdev *vdev_handle)
5217 {
5218 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
5219 	struct dp_pdev *pdev;
5220 
5221 	pdev = vdev->pdev;
5222 
5223 	if ((pdev->fp_mgmt_filter & FILTER_MGMT_ALL) ||
5224 	    (pdev->mo_mgmt_filter & FILTER_MGMT_ALL)) {
5225 		if ((pdev->fp_ctrl_filter & FILTER_CTRL_ALL) ||
5226 		    (pdev->mo_ctrl_filter & FILTER_CTRL_ALL)) {
5227 			return true;
5228 		}
5229 	}
5230 
5231 	return false;
5232 }
5233 
5234 #ifdef MESH_MODE_SUPPORT
5235 void dp_peer_set_mesh_mode(struct cdp_vdev *vdev_hdl, uint32_t val)
5236 {
5237 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_hdl;
5238 
5239 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
5240 		FL("val %d"), val);
5241 	vdev->mesh_vdev = val;
5242 }
5243 
5244 /*
5245  * dp_peer_set_mesh_rx_filter() - to set the mesh rx filter
5246  * @vdev_hdl: virtual device object
5247  * @val: value to be set
5248  *
5249  * Return: void
5250  */
5251 void dp_peer_set_mesh_rx_filter(struct cdp_vdev *vdev_hdl, uint32_t val)
5252 {
5253 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_hdl;
5254 
5255 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
5256 		FL("val %d"), val);
5257 	vdev->mesh_rx_filter = val;
5258 }
5259 #endif
5260 
5261 /*
5262  * dp_aggregate_pdev_ctrl_frames_stats()- function to agreegate peer stats
5263  * Current scope is bar received count
5264  *
5265  * @pdev_handle: DP_PDEV handle
5266  *
5267  * Return: void
5268  */
5269 #define STATS_PROC_TIMEOUT        (HZ/1000)
5270 
5271 static void
5272 dp_aggregate_pdev_ctrl_frames_stats(struct dp_pdev *pdev)
5273 {
5274 	struct dp_vdev *vdev;
5275 	struct dp_peer *peer;
5276 	uint32_t waitcnt;
5277 
5278 	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
5279 		TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
5280 			if (!peer) {
5281 				QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
5282 					FL("DP Invalid Peer refernce"));
5283 				return;
5284 			}
5285 
5286 			if (peer->delete_in_progress) {
5287 				QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
5288 					FL("DP Peer deletion in progress"));
5289 				continue;
5290 			}
5291 
5292 			qdf_atomic_inc(&peer->ref_cnt);
5293 			waitcnt = 0;
5294 			dp_peer_rxtid_stats(peer, dp_rx_bar_stats_cb, pdev);
5295 			while (!(qdf_atomic_read(&(pdev->stats_cmd_complete)))
5296 				&& waitcnt < 10) {
5297 				schedule_timeout_interruptible(
5298 						STATS_PROC_TIMEOUT);
5299 				waitcnt++;
5300 			}
5301 			qdf_atomic_set(&(pdev->stats_cmd_complete), 0);
5302 			dp_peer_unref_delete(peer);
5303 		}
5304 	}
5305 }
5306 
5307 /**
5308  * dp_rx_bar_stats_cb(): BAR received stats callback
5309  * @soc: SOC handle
5310  * @cb_ctxt: Call back context
5311  * @reo_status: Reo status
5312  *
5313  * return: void
5314  */
5315 void dp_rx_bar_stats_cb(struct dp_soc *soc, void *cb_ctxt,
5316 	union hal_reo_status *reo_status)
5317 {
5318 	struct dp_pdev *pdev = (struct dp_pdev *)cb_ctxt;
5319 	struct hal_reo_queue_status *queue_status = &(reo_status->queue_status);
5320 
5321 	if (queue_status->header.status != HAL_REO_CMD_SUCCESS) {
5322 		DP_TRACE_STATS(FATAL, "REO stats failure %d \n",
5323 			queue_status->header.status);
5324 		qdf_atomic_set(&(pdev->stats_cmd_complete), 1);
5325 		return;
5326 	}
5327 
5328 	pdev->stats.rx.bar_recv_cnt += queue_status->bar_rcvd_cnt;
5329 	qdf_atomic_set(&(pdev->stats_cmd_complete), 1);
5330 
5331 }
5332 
5333 /**
5334  * dp_aggregate_vdev_stats(): Consolidate stats at VDEV level
5335  * @vdev: DP VDEV handle
5336  *
5337  * return: void
5338  */
5339 void dp_aggregate_vdev_stats(struct dp_vdev *vdev,
5340 			     struct cdp_vdev_stats *vdev_stats)
5341 {
5342 	struct dp_peer *peer = NULL;
5343 	struct dp_soc *soc = vdev->pdev->soc;
5344 
5345 	qdf_mem_copy(vdev_stats, &vdev->stats, sizeof(vdev->stats));
5346 
5347 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem)
5348 		dp_update_vdev_stats(vdev_stats, peer);
5349 
5350 	if (soc->cdp_soc.ol_ops->update_dp_stats)
5351 		soc->cdp_soc.ol_ops->update_dp_stats(vdev->pdev->ctrl_pdev,
5352 			&vdev->stats, (uint16_t) vdev->vdev_id,
5353 			UPDATE_VDEV_STATS);
5354 
5355 }
5356 
5357 /**
5358  * dp_aggregate_pdev_stats(): Consolidate stats at PDEV level
5359  * @pdev: DP PDEV handle
5360  *
5361  * return: void
5362  */
5363 static inline void dp_aggregate_pdev_stats(struct dp_pdev *pdev)
5364 {
5365 	struct dp_vdev *vdev = NULL;
5366 	struct dp_soc *soc = pdev->soc;
5367 	struct cdp_vdev_stats *vdev_stats =
5368 			qdf_mem_malloc(sizeof(struct cdp_vdev_stats));
5369 
5370 	if (!vdev_stats) {
5371 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
5372 			  "DP alloc failure - unable to get alloc vdev stats");
5373 		return;
5374 	}
5375 
5376 	qdf_mem_set(&(pdev->stats.tx), sizeof(pdev->stats.tx), 0x0);
5377 	qdf_mem_set(&(pdev->stats.rx), sizeof(pdev->stats.rx), 0x0);
5378 	qdf_mem_set(&(pdev->stats.tx_i), sizeof(pdev->stats.tx_i), 0x0);
5379 
5380 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
5381 	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
5382 
5383 		dp_aggregate_vdev_stats(vdev, vdev_stats);
5384 		dp_update_pdev_stats(pdev, vdev_stats);
5385 
5386 		DP_STATS_AGGR_PKT(pdev, vdev, tx_i.nawds_mcast);
5387 
5388 		DP_STATS_AGGR_PKT(pdev, vdev, tx_i.rcvd);
5389 		DP_STATS_AGGR_PKT(pdev, vdev, tx_i.processed);
5390 		DP_STATS_AGGR_PKT(pdev, vdev, tx_i.reinject_pkts);
5391 		DP_STATS_AGGR_PKT(pdev, vdev, tx_i.inspect_pkts);
5392 		DP_STATS_AGGR_PKT(pdev, vdev, tx_i.raw.raw_pkt);
5393 		DP_STATS_AGGR(pdev, vdev, tx_i.raw.dma_map_error);
5394 		DP_STATS_AGGR_PKT(pdev, vdev, tx_i.tso.tso_pkt);
5395 		DP_STATS_AGGR(pdev, vdev, tx_i.tso.dropped_host.num);
5396 		DP_STATS_AGGR(pdev, vdev, tx_i.tso.dropped_target);
5397 		DP_STATS_AGGR(pdev, vdev, tx_i.sg.dropped_host.num);
5398 		DP_STATS_AGGR(pdev, vdev, tx_i.sg.dropped_target);
5399 		DP_STATS_AGGR_PKT(pdev, vdev, tx_i.sg.sg_pkt);
5400 		DP_STATS_AGGR_PKT(pdev, vdev, tx_i.mcast_en.mcast_pkt);
5401 		DP_STATS_AGGR(pdev, vdev,
5402 				tx_i.mcast_en.dropped_map_error);
5403 		DP_STATS_AGGR(pdev, vdev,
5404 				tx_i.mcast_en.dropped_self_mac);
5405 		DP_STATS_AGGR(pdev, vdev,
5406 				tx_i.mcast_en.dropped_send_fail);
5407 		DP_STATS_AGGR(pdev, vdev, tx_i.mcast_en.ucast);
5408 		DP_STATS_AGGR(pdev, vdev, tx_i.dropped.dma_error);
5409 		DP_STATS_AGGR(pdev, vdev, tx_i.dropped.ring_full);
5410 		DP_STATS_AGGR(pdev, vdev, tx_i.dropped.enqueue_fail);
5411 		DP_STATS_AGGR(pdev, vdev, tx_i.dropped.desc_na.num);
5412 		DP_STATS_AGGR(pdev, vdev, tx_i.dropped.res_full);
5413 		DP_STATS_AGGR(pdev, vdev, tx_i.cce_classified);
5414 		DP_STATS_AGGR(pdev, vdev, tx_i.cce_classified_raw);
5415 		DP_STATS_AGGR(pdev, vdev, tx_i.mesh.exception_fw);
5416 		DP_STATS_AGGR(pdev, vdev, tx_i.mesh.completion_fw);
5417 
5418 		pdev->stats.tx_i.dropped.dropped_pkt.num =
5419 			pdev->stats.tx_i.dropped.dma_error +
5420 			pdev->stats.tx_i.dropped.ring_full +
5421 			pdev->stats.tx_i.dropped.enqueue_fail +
5422 			pdev->stats.tx_i.dropped.desc_na.num +
5423 			pdev->stats.tx_i.dropped.res_full;
5424 
5425 		pdev->stats.tx.last_ack_rssi =
5426 			vdev->stats.tx.last_ack_rssi;
5427 		pdev->stats.tx_i.tso.num_seg =
5428 			vdev->stats.tx_i.tso.num_seg;
5429 	}
5430 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
5431 	qdf_mem_free(vdev_stats);
5432 
5433 	if (soc->cdp_soc.ol_ops->update_dp_stats)
5434 		soc->cdp_soc.ol_ops->update_dp_stats(pdev->ctrl_pdev,
5435 				&pdev->stats, pdev->pdev_id, UPDATE_PDEV_STATS);
5436 
5437 }
5438 
5439 /**
5440  * dp_vdev_getstats() - get vdev packet level stats
5441  * @vdev_handle: Datapath VDEV handle
5442  * @stats: cdp network device stats structure
5443  *
5444  * Return: void
5445  */
5446 static void dp_vdev_getstats(void *vdev_handle,
5447 		struct cdp_dev_stats *stats)
5448 {
5449 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
5450 	struct cdp_vdev_stats *vdev_stats =
5451 			qdf_mem_malloc(sizeof(struct cdp_vdev_stats));
5452 
5453 	if (!vdev_stats) {
5454 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
5455 			  "DP alloc failure - unable to get alloc vdev stats");
5456 		return;
5457 	}
5458 
5459 	dp_aggregate_vdev_stats(vdev, vdev_stats);
5460 
5461 	stats->tx_packets = vdev_stats->tx_i.rcvd.num;
5462 	stats->tx_bytes = vdev_stats->tx_i.rcvd.bytes;
5463 
5464 	stats->tx_errors = vdev_stats->tx.tx_failed +
5465 		vdev_stats->tx_i.dropped.dropped_pkt.num;
5466 	stats->tx_dropped = stats->tx_errors;
5467 
5468 	stats->rx_packets = vdev_stats->rx.unicast.num +
5469 		vdev_stats->rx.multicast.num +
5470 		vdev_stats->rx.bcast.num;
5471 	stats->rx_bytes = vdev_stats->rx.unicast.bytes +
5472 		vdev_stats->rx.multicast.bytes +
5473 		vdev_stats->rx.bcast.bytes;
5474 
5475 }
5476 
5477 
5478 /**
5479  * dp_pdev_getstats() - get pdev packet level stats
5480  * @pdev_handle: Datapath PDEV handle
5481  * @stats: cdp network device stats structure
5482  *
5483  * Return: void
5484  */
5485 static void dp_pdev_getstats(void *pdev_handle,
5486 		struct cdp_dev_stats *stats)
5487 {
5488 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
5489 
5490 	dp_aggregate_pdev_stats(pdev);
5491 
5492 	stats->tx_packets = pdev->stats.tx_i.rcvd.num;
5493 	stats->tx_bytes = pdev->stats.tx_i.rcvd.bytes;
5494 
5495 	stats->tx_errors = pdev->stats.tx.tx_failed +
5496 		pdev->stats.tx_i.dropped.dropped_pkt.num;
5497 	stats->tx_dropped = stats->tx_errors;
5498 
5499 	stats->rx_packets = pdev->stats.rx.unicast.num +
5500 		pdev->stats.rx.multicast.num +
5501 		pdev->stats.rx.bcast.num;
5502 	stats->rx_bytes = pdev->stats.rx.unicast.bytes +
5503 		pdev->stats.rx.multicast.bytes +
5504 		pdev->stats.rx.bcast.bytes;
5505 }
5506 
5507 /**
5508  * dp_get_device_stats() - get interface level packet stats
5509  * @handle: device handle
5510  * @stats: cdp network device stats structure
5511  * @type: device type pdev/vdev
5512  *
5513  * Return: void
5514  */
5515 static void dp_get_device_stats(void *handle,
5516 		struct cdp_dev_stats *stats, uint8_t type)
5517 {
5518 	switch (type) {
5519 	case UPDATE_VDEV_STATS:
5520 		dp_vdev_getstats(handle, stats);
5521 		break;
5522 	case UPDATE_PDEV_STATS:
5523 		dp_pdev_getstats(handle, stats);
5524 		break;
5525 	default:
5526 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
5527 			"apstats cannot be updated for this input "
5528 			"type %d", type);
5529 		break;
5530 	}
5531 
5532 }
5533 
5534 
5535 /**
5536  * dp_print_pdev_tx_stats(): Print Pdev level TX stats
5537  * @pdev: DP_PDEV Handle
5538  *
5539  * Return:void
5540  */
5541 static inline void
5542 dp_print_pdev_tx_stats(struct dp_pdev *pdev)
5543 {
5544 	uint8_t index = 0;
5545 	DP_PRINT_STATS("PDEV Tx Stats:\n");
5546 	DP_PRINT_STATS("Received From Stack:");
5547 	DP_PRINT_STATS("	Packets = %d",
5548 			pdev->stats.tx_i.rcvd.num);
5549 	DP_PRINT_STATS("	Bytes = %llu",
5550 			pdev->stats.tx_i.rcvd.bytes);
5551 	DP_PRINT_STATS("Processed:");
5552 	DP_PRINT_STATS("	Packets = %d",
5553 			pdev->stats.tx_i.processed.num);
5554 	DP_PRINT_STATS("	Bytes = %llu",
5555 			pdev->stats.tx_i.processed.bytes);
5556 	DP_PRINT_STATS("Total Completions:");
5557 	DP_PRINT_STATS("	Packets = %u",
5558 			pdev->stats.tx.comp_pkt.num);
5559 	DP_PRINT_STATS("	Bytes = %llu",
5560 			pdev->stats.tx.comp_pkt.bytes);
5561 	DP_PRINT_STATS("Successful Completions:");
5562 	DP_PRINT_STATS("	Packets = %u",
5563 			pdev->stats.tx.tx_success.num);
5564 	DP_PRINT_STATS("	Bytes = %llu",
5565 			pdev->stats.tx.tx_success.bytes);
5566 	DP_PRINT_STATS("Dropped:");
5567 	DP_PRINT_STATS("	Total = %d",
5568 			pdev->stats.tx_i.dropped.dropped_pkt.num);
5569 	DP_PRINT_STATS("	Dma_map_error = %d",
5570 			pdev->stats.tx_i.dropped.dma_error);
5571 	DP_PRINT_STATS("	Ring Full = %d",
5572 			pdev->stats.tx_i.dropped.ring_full);
5573 	DP_PRINT_STATS("	Descriptor Not available = %d",
5574 			pdev->stats.tx_i.dropped.desc_na.num);
5575 	DP_PRINT_STATS("	HW enqueue failed= %d",
5576 			pdev->stats.tx_i.dropped.enqueue_fail);
5577 	DP_PRINT_STATS("	Resources Full = %d",
5578 			pdev->stats.tx_i.dropped.res_full);
5579 	DP_PRINT_STATS("	FW removed = %d",
5580 			pdev->stats.tx.dropped.fw_rem);
5581 	DP_PRINT_STATS("	FW removed transmitted = %d",
5582 			pdev->stats.tx.dropped.fw_rem_tx);
5583 	DP_PRINT_STATS("	FW removed untransmitted = %d",
5584 			pdev->stats.tx.dropped.fw_rem_notx);
5585 	DP_PRINT_STATS("	FW removed untransmitted fw_reason1 = %d",
5586 			pdev->stats.tx.dropped.fw_reason1);
5587 	DP_PRINT_STATS("	FW removed untransmitted fw_reason2 = %d",
5588 			pdev->stats.tx.dropped.fw_reason2);
5589 	DP_PRINT_STATS("	FW removed untransmitted fw_reason3 = %d",
5590 			pdev->stats.tx.dropped.fw_reason3);
5591 	DP_PRINT_STATS("	Aged Out from msdu/mpdu queues = %d",
5592 			pdev->stats.tx.dropped.age_out);
5593 	DP_PRINT_STATS("	Multicast:");
5594 	DP_PRINT_STATS("	Packets: %u",
5595 		       pdev->stats.tx.mcast.num);
5596 	DP_PRINT_STATS("	Bytes: %llu",
5597 		       pdev->stats.tx.mcast.bytes);
5598 	DP_PRINT_STATS("Scatter Gather:");
5599 	DP_PRINT_STATS("	Packets = %d",
5600 			pdev->stats.tx_i.sg.sg_pkt.num);
5601 	DP_PRINT_STATS("	Bytes = %llu",
5602 			pdev->stats.tx_i.sg.sg_pkt.bytes);
5603 	DP_PRINT_STATS("	Dropped By Host = %d",
5604 			pdev->stats.tx_i.sg.dropped_host.num);
5605 	DP_PRINT_STATS("	Dropped By Target = %d",
5606 			pdev->stats.tx_i.sg.dropped_target);
5607 	DP_PRINT_STATS("TSO:");
5608 	DP_PRINT_STATS("	Number of Segments = %d",
5609 			pdev->stats.tx_i.tso.num_seg);
5610 	DP_PRINT_STATS("	Packets = %d",
5611 			pdev->stats.tx_i.tso.tso_pkt.num);
5612 	DP_PRINT_STATS("	Bytes = %llu",
5613 			pdev->stats.tx_i.tso.tso_pkt.bytes);
5614 	DP_PRINT_STATS("	Dropped By Host = %d",
5615 			pdev->stats.tx_i.tso.dropped_host.num);
5616 	DP_PRINT_STATS("Mcast Enhancement:");
5617 	DP_PRINT_STATS("	Packets = %d",
5618 			pdev->stats.tx_i.mcast_en.mcast_pkt.num);
5619 	DP_PRINT_STATS("	Bytes = %llu",
5620 			pdev->stats.tx_i.mcast_en.mcast_pkt.bytes);
5621 	DP_PRINT_STATS("	Dropped: Map Errors = %d",
5622 			pdev->stats.tx_i.mcast_en.dropped_map_error);
5623 	DP_PRINT_STATS("	Dropped: Self Mac = %d",
5624 			pdev->stats.tx_i.mcast_en.dropped_self_mac);
5625 	DP_PRINT_STATS("	Dropped: Send Fail = %d",
5626 			pdev->stats.tx_i.mcast_en.dropped_send_fail);
5627 	DP_PRINT_STATS("	Unicast sent = %d",
5628 			pdev->stats.tx_i.mcast_en.ucast);
5629 	DP_PRINT_STATS("Raw:");
5630 	DP_PRINT_STATS("	Packets = %d",
5631 			pdev->stats.tx_i.raw.raw_pkt.num);
5632 	DP_PRINT_STATS("	Bytes = %llu",
5633 			pdev->stats.tx_i.raw.raw_pkt.bytes);
5634 	DP_PRINT_STATS("	DMA map error = %d",
5635 			pdev->stats.tx_i.raw.dma_map_error);
5636 	DP_PRINT_STATS("Reinjected:");
5637 	DP_PRINT_STATS("	Packets = %d",
5638 			pdev->stats.tx_i.reinject_pkts.num);
5639 	DP_PRINT_STATS("	Bytes = %llu\n",
5640 			pdev->stats.tx_i.reinject_pkts.bytes);
5641 	DP_PRINT_STATS("Inspected:");
5642 	DP_PRINT_STATS("	Packets = %d",
5643 			pdev->stats.tx_i.inspect_pkts.num);
5644 	DP_PRINT_STATS("	Bytes = %llu",
5645 			pdev->stats.tx_i.inspect_pkts.bytes);
5646 	DP_PRINT_STATS("Nawds Multicast:");
5647 	DP_PRINT_STATS("	Packets = %d",
5648 			pdev->stats.tx_i.nawds_mcast.num);
5649 	DP_PRINT_STATS("	Bytes = %llu",
5650 			pdev->stats.tx_i.nawds_mcast.bytes);
5651 	DP_PRINT_STATS("CCE Classified:");
5652 	DP_PRINT_STATS("	CCE Classified Packets: %u",
5653 			pdev->stats.tx_i.cce_classified);
5654 	DP_PRINT_STATS("	RAW CCE Classified Packets: %u",
5655 			pdev->stats.tx_i.cce_classified_raw);
5656 	DP_PRINT_STATS("Mesh stats:");
5657 	DP_PRINT_STATS("	frames to firmware: %u",
5658 			pdev->stats.tx_i.mesh.exception_fw);
5659 	DP_PRINT_STATS("	completions from fw: %u",
5660 			pdev->stats.tx_i.mesh.completion_fw);
5661 	DP_PRINT_STATS("PPDU stats counter");
5662 	for (index = 0; index < CDP_PPDU_STATS_MAX_TAG; index++) {
5663 		DP_PRINT_STATS("	Tag[%d] = %llu", index,
5664 				pdev->stats.ppdu_stats_counter[index]);
5665 	}
5666 }
5667 
5668 /**
5669  * dp_print_pdev_rx_stats(): Print Pdev level RX stats
5670  * @pdev: DP_PDEV Handle
5671  *
5672  * Return: void
5673  */
5674 static inline void
5675 dp_print_pdev_rx_stats(struct dp_pdev *pdev)
5676 {
5677 	DP_PRINT_STATS("PDEV Rx Stats:\n");
5678 	DP_PRINT_STATS("Received From HW (Per Rx Ring):");
5679 	DP_PRINT_STATS("	Packets = %d %d %d %d",
5680 			pdev->stats.rx.rcvd_reo[0].num,
5681 			pdev->stats.rx.rcvd_reo[1].num,
5682 			pdev->stats.rx.rcvd_reo[2].num,
5683 			pdev->stats.rx.rcvd_reo[3].num);
5684 	DP_PRINT_STATS("	Bytes = %llu %llu %llu %llu",
5685 			pdev->stats.rx.rcvd_reo[0].bytes,
5686 			pdev->stats.rx.rcvd_reo[1].bytes,
5687 			pdev->stats.rx.rcvd_reo[2].bytes,
5688 			pdev->stats.rx.rcvd_reo[3].bytes);
5689 	DP_PRINT_STATS("Replenished:");
5690 	DP_PRINT_STATS("	Packets = %d",
5691 			pdev->stats.replenish.pkts.num);
5692 	DP_PRINT_STATS("	Bytes = %llu",
5693 			pdev->stats.replenish.pkts.bytes);
5694 	DP_PRINT_STATS("	Buffers Added To Freelist = %d",
5695 			pdev->stats.buf_freelist);
5696 	DP_PRINT_STATS("	Low threshold intr = %d",
5697 			pdev->stats.replenish.low_thresh_intrs);
5698 	DP_PRINT_STATS("Dropped:");
5699 	DP_PRINT_STATS("	msdu_not_done = %d",
5700 			pdev->stats.dropped.msdu_not_done);
5701 	DP_PRINT_STATS("        mon_rx_drop = %d",
5702 			pdev->stats.dropped.mon_rx_drop);
5703 	DP_PRINT_STATS("Sent To Stack:");
5704 	DP_PRINT_STATS("	Packets = %d",
5705 			pdev->stats.rx.to_stack.num);
5706 	DP_PRINT_STATS("	Bytes = %llu",
5707 			pdev->stats.rx.to_stack.bytes);
5708 	DP_PRINT_STATS("Multicast/Broadcast:");
5709 	DP_PRINT_STATS("	Packets = %d",
5710 			(pdev->stats.rx.multicast.num +
5711 			pdev->stats.rx.bcast.num));
5712 	DP_PRINT_STATS("	Bytes = %llu",
5713 			(pdev->stats.rx.multicast.bytes +
5714 			pdev->stats.rx.bcast.bytes));
5715 	DP_PRINT_STATS("Errors:");
5716 	DP_PRINT_STATS("	Rxdma Ring Un-inititalized = %d",
5717 			pdev->stats.replenish.rxdma_err);
5718 	DP_PRINT_STATS("	Desc Alloc Failed: = %d",
5719 			pdev->stats.err.desc_alloc_fail);
5720 	DP_PRINT_STATS("	IP checksum error = %d",
5721 		       pdev->stats.err.ip_csum_err);
5722 	DP_PRINT_STATS("	TCP/UDP checksum error = %d",
5723 		       pdev->stats.err.tcp_udp_csum_err);
5724 
5725 	/* Get bar_recv_cnt */
5726 	dp_aggregate_pdev_ctrl_frames_stats(pdev);
5727 	DP_PRINT_STATS("BAR Received Count: = %d",
5728 			pdev->stats.rx.bar_recv_cnt);
5729 
5730 }
5731 
5732 /**
5733  * dp_print_pdev_rx_mon_stats(): Print Pdev level RX monitor stats
5734  * @pdev: DP_PDEV Handle
5735  *
5736  * Return: void
5737  */
5738 static inline void
5739 dp_print_pdev_rx_mon_stats(struct dp_pdev *pdev)
5740 {
5741 	struct cdp_pdev_mon_stats *rx_mon_stats;
5742 
5743 	rx_mon_stats = &pdev->rx_mon_stats;
5744 
5745 	DP_PRINT_STATS("PDEV Rx Monitor Stats:\n");
5746 
5747 	dp_rx_mon_print_dbg_ppdu_stats(rx_mon_stats);
5748 
5749 	DP_PRINT_STATS("status_ppdu_done_cnt = %d",
5750 		       rx_mon_stats->status_ppdu_done);
5751 	DP_PRINT_STATS("dest_ppdu_done_cnt = %d",
5752 		       rx_mon_stats->dest_ppdu_done);
5753 	DP_PRINT_STATS("dest_mpdu_done_cnt = %d",
5754 		       rx_mon_stats->dest_mpdu_done);
5755 	DP_PRINT_STATS("dest_mpdu_drop_cnt = %d",
5756 		       rx_mon_stats->dest_mpdu_drop);
5757 }
5758 
5759 /**
5760  * dp_print_soc_tx_stats(): Print SOC level  stats
5761  * @soc DP_SOC Handle
5762  *
5763  * Return: void
5764  */
5765 static inline void
5766 dp_print_soc_tx_stats(struct dp_soc *soc)
5767 {
5768 	uint8_t desc_pool_id;
5769 	soc->stats.tx.desc_in_use = 0;
5770 
5771 	DP_PRINT_STATS("SOC Tx Stats:\n");
5772 
5773 	for (desc_pool_id = 0;
5774 	     desc_pool_id < wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
5775 	     desc_pool_id++)
5776 		soc->stats.tx.desc_in_use +=
5777 			soc->tx_desc[desc_pool_id].num_allocated;
5778 
5779 	DP_PRINT_STATS("Tx Descriptors In Use = %d",
5780 			soc->stats.tx.desc_in_use);
5781 	DP_PRINT_STATS("Invalid peer:");
5782 	DP_PRINT_STATS("	Packets = %d",
5783 			soc->stats.tx.tx_invalid_peer.num);
5784 	DP_PRINT_STATS("	Bytes = %llu",
5785 			soc->stats.tx.tx_invalid_peer.bytes);
5786 	DP_PRINT_STATS("Packets dropped due to TCL ring full = %d %d %d",
5787 			soc->stats.tx.tcl_ring_full[0],
5788 			soc->stats.tx.tcl_ring_full[1],
5789 			soc->stats.tx.tcl_ring_full[2]);
5790 
5791 }
5792 /**
5793  * dp_print_soc_rx_stats: Print SOC level Rx stats
5794  * @soc: DP_SOC Handle
5795  *
5796  * Return:void
5797  */
5798 static inline void
5799 dp_print_soc_rx_stats(struct dp_soc *soc)
5800 {
5801 	uint32_t i;
5802 	char reo_error[DP_REO_ERR_LENGTH];
5803 	char rxdma_error[DP_RXDMA_ERR_LENGTH];
5804 	uint8_t index = 0;
5805 
5806 	DP_PRINT_STATS("SOC Rx Stats:\n");
5807 	DP_PRINT_STATS("Fragmented packets: %u",
5808 		       soc->stats.rx.rx_frags);
5809 	DP_PRINT_STATS("Reo reinjected packets: %u",
5810 		       soc->stats.rx.reo_reinject);
5811 	DP_PRINT_STATS("Errors:\n");
5812 	DP_PRINT_STATS("Rx Decrypt Errors = %d",
5813 			(soc->stats.rx.err.rxdma_error[HAL_RXDMA_ERR_DECRYPT] +
5814 			soc->stats.rx.err.rxdma_error[HAL_RXDMA_ERR_TKIP_MIC]));
5815 	DP_PRINT_STATS("Invalid RBM = %d",
5816 			soc->stats.rx.err.invalid_rbm);
5817 	DP_PRINT_STATS("Invalid Vdev = %d",
5818 			soc->stats.rx.err.invalid_vdev);
5819 	DP_PRINT_STATS("Invalid Pdev = %d",
5820 			soc->stats.rx.err.invalid_pdev);
5821 	DP_PRINT_STATS("Invalid Peer = %d",
5822 			soc->stats.rx.err.rx_invalid_peer.num);
5823 	DP_PRINT_STATS("HAL Ring Access Fail = %d",
5824 			soc->stats.rx.err.hal_ring_access_fail);
5825 
5826 	for (i = 0; i < HAL_RXDMA_ERR_MAX; i++) {
5827 		index += qdf_snprint(&rxdma_error[index],
5828 				DP_RXDMA_ERR_LENGTH - index,
5829 				" %d", soc->stats.rx.err.rxdma_error[i]);
5830 	}
5831 	DP_PRINT_STATS("RXDMA Error (0-31):%s",
5832 			rxdma_error);
5833 
5834 	index = 0;
5835 	for (i = 0; i < HAL_REO_ERR_MAX; i++) {
5836 		index += qdf_snprint(&reo_error[index],
5837 				DP_REO_ERR_LENGTH - index,
5838 				" %d", soc->stats.rx.err.reo_error[i]);
5839 	}
5840 	DP_PRINT_STATS("REO Error(0-14):%s",
5841 			reo_error);
5842 }
5843 
5844 
5845 /**
5846  * dp_print_ring_stat_from_hal(): Print hal level ring stats
5847  * @soc: DP_SOC handle
5848  * @srng: DP_SRNG handle
5849  * @ring_name: SRNG name
5850  *
5851  * Return: void
5852  */
5853 static inline void
5854 dp_print_ring_stat_from_hal(struct dp_soc *soc,  struct dp_srng *srng,
5855 	char *ring_name)
5856 {
5857 	uint32_t tailp;
5858 	uint32_t headp;
5859 
5860 	if (srng->hal_srng != NULL) {
5861 		hal_api_get_tphp(soc->hal_soc, srng->hal_srng, &tailp, &headp);
5862 		DP_PRINT_STATS("%s : Head pointer = %d  Tail Pointer = %d\n",
5863 				ring_name, headp, tailp);
5864 	}
5865 }
5866 
5867 /**
5868  * dp_print_ring_stats(): Print tail and head pointer
5869  * @pdev: DP_PDEV handle
5870  *
5871  * Return:void
5872  */
5873 static inline void
5874 dp_print_ring_stats(struct dp_pdev *pdev)
5875 {
5876 	uint32_t i;
5877 	char ring_name[STR_MAXLEN + 1];
5878 	int mac_id;
5879 
5880 	dp_print_ring_stat_from_hal(pdev->soc,
5881 			&pdev->soc->reo_exception_ring,
5882 			"Reo Exception Ring");
5883 	dp_print_ring_stat_from_hal(pdev->soc,
5884 			&pdev->soc->reo_reinject_ring,
5885 			"Reo Inject Ring");
5886 	dp_print_ring_stat_from_hal(pdev->soc,
5887 			&pdev->soc->reo_cmd_ring,
5888 			"Reo Command Ring");
5889 	dp_print_ring_stat_from_hal(pdev->soc,
5890 			&pdev->soc->reo_status_ring,
5891 			"Reo Status Ring");
5892 	dp_print_ring_stat_from_hal(pdev->soc,
5893 			&pdev->soc->rx_rel_ring,
5894 			"Rx Release ring");
5895 	dp_print_ring_stat_from_hal(pdev->soc,
5896 			&pdev->soc->tcl_cmd_ring,
5897 			"Tcl command Ring");
5898 	dp_print_ring_stat_from_hal(pdev->soc,
5899 			&pdev->soc->tcl_status_ring,
5900 			"Tcl Status Ring");
5901 	dp_print_ring_stat_from_hal(pdev->soc,
5902 			&pdev->soc->wbm_desc_rel_ring,
5903 			"Wbm Desc Rel Ring");
5904 	for (i = 0; i < MAX_REO_DEST_RINGS; i++) {
5905 		snprintf(ring_name, STR_MAXLEN, "Reo Dest Ring %d", i);
5906 		dp_print_ring_stat_from_hal(pdev->soc,
5907 				&pdev->soc->reo_dest_ring[i],
5908 				ring_name);
5909 	}
5910 	for (i = 0; i < pdev->soc->num_tcl_data_rings; i++) {
5911 		snprintf(ring_name, STR_MAXLEN, "Tcl Data Ring %d", i);
5912 		dp_print_ring_stat_from_hal(pdev->soc,
5913 				&pdev->soc->tcl_data_ring[i],
5914 				ring_name);
5915 	}
5916 	for (i = 0; i < MAX_TCL_DATA_RINGS; i++) {
5917 		snprintf(ring_name, STR_MAXLEN, "Tx Comp Ring %d", i);
5918 		dp_print_ring_stat_from_hal(pdev->soc,
5919 				&pdev->soc->tx_comp_ring[i],
5920 				ring_name);
5921 	}
5922 	dp_print_ring_stat_from_hal(pdev->soc,
5923 			&pdev->rx_refill_buf_ring,
5924 			"Rx Refill Buf Ring");
5925 
5926 	dp_print_ring_stat_from_hal(pdev->soc,
5927 			&pdev->rx_refill_buf_ring2,
5928 			"Second Rx Refill Buf Ring");
5929 
5930 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
5931 		dp_print_ring_stat_from_hal(pdev->soc,
5932 				&pdev->rxdma_mon_buf_ring[mac_id],
5933 				"Rxdma Mon Buf Ring");
5934 		dp_print_ring_stat_from_hal(pdev->soc,
5935 				&pdev->rxdma_mon_dst_ring[mac_id],
5936 				"Rxdma Mon Dst Ring");
5937 		dp_print_ring_stat_from_hal(pdev->soc,
5938 				&pdev->rxdma_mon_status_ring[mac_id],
5939 				"Rxdma Mon Status Ring");
5940 		dp_print_ring_stat_from_hal(pdev->soc,
5941 				&pdev->rxdma_mon_desc_ring[mac_id],
5942 				"Rxdma mon desc Ring");
5943 	}
5944 
5945 	for (i = 0; i < NUM_RXDMA_RINGS_PER_PDEV; i++) {
5946 		snprintf(ring_name, STR_MAXLEN, "Rxdma err dst ring %d", i);
5947 		dp_print_ring_stat_from_hal(pdev->soc,
5948 			&pdev->rxdma_err_dst_ring[i],
5949 			ring_name);
5950 	}
5951 
5952 	for (i = 0; i < MAX_RX_MAC_RINGS; i++) {
5953 		snprintf(ring_name, STR_MAXLEN, "Rx mac buf ring %d", i);
5954 		dp_print_ring_stat_from_hal(pdev->soc,
5955 				&pdev->rx_mac_buf_ring[i],
5956 				ring_name);
5957 	}
5958 }
5959 
5960 /**
5961  * dp_txrx_host_stats_clr(): Reinitialize the txrx stats
5962  * @vdev: DP_VDEV handle
5963  *
5964  * Return:void
5965  */
5966 static inline void
5967 dp_txrx_host_stats_clr(struct dp_vdev *vdev)
5968 {
5969 	struct dp_peer *peer = NULL;
5970 	struct dp_soc *soc = (struct dp_soc *)vdev->pdev->soc;
5971 
5972 	DP_STATS_CLR(vdev->pdev);
5973 	DP_STATS_CLR(vdev->pdev->soc);
5974 	DP_STATS_CLR(vdev);
5975 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
5976 		if (!peer)
5977 			return;
5978 		DP_STATS_CLR(peer);
5979 
5980 		if (soc->cdp_soc.ol_ops->update_dp_stats) {
5981 			soc->cdp_soc.ol_ops->update_dp_stats(
5982 					vdev->pdev->ctrl_pdev,
5983 					&peer->stats,
5984 					peer->peer_ids[0],
5985 					UPDATE_PEER_STATS);
5986 		}
5987 
5988 	}
5989 
5990 	if (soc->cdp_soc.ol_ops->update_dp_stats)
5991 		soc->cdp_soc.ol_ops->update_dp_stats(vdev->pdev->ctrl_pdev,
5992 				&vdev->stats, (uint16_t)vdev->vdev_id,
5993 				UPDATE_VDEV_STATS);
5994 }
5995 
5996 /**
5997  * dp_print_common_rates_info(): Print common rate for tx or rx
5998  * @pkt_type_array: rate type array contains rate info
5999  *
6000  * Return:void
6001  */
6002 static inline void
6003 dp_print_common_rates_info(struct cdp_pkt_type *pkt_type_array)
6004 {
6005 	uint8_t mcs, pkt_type;
6006 
6007 	for (pkt_type = 0; pkt_type < DOT11_MAX; pkt_type++) {
6008 		for (mcs = 0; mcs < MAX_MCS; mcs++) {
6009 			if (!dp_rate_string[pkt_type][mcs].valid)
6010 				continue;
6011 
6012 			DP_PRINT_STATS("	%s = %d",
6013 				       dp_rate_string[pkt_type][mcs].mcs_type,
6014 				       pkt_type_array[pkt_type].mcs_count[mcs]);
6015 		}
6016 
6017 		DP_PRINT_STATS("\n");
6018 	}
6019 }
6020 
6021 /**
6022  * dp_print_rx_rates(): Print Rx rate stats
6023  * @vdev: DP_VDEV handle
6024  *
6025  * Return:void
6026  */
6027 static inline void
6028 dp_print_rx_rates(struct dp_vdev *vdev)
6029 {
6030 	struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;
6031 	uint8_t i;
6032 	uint8_t index = 0;
6033 	char nss[DP_NSS_LENGTH];
6034 
6035 	DP_PRINT_STATS("Rx Rate Info:\n");
6036 	dp_print_common_rates_info(pdev->stats.rx.pkt_type);
6037 
6038 
6039 	index = 0;
6040 	for (i = 0; i < SS_COUNT; i++) {
6041 		index += qdf_snprint(&nss[index], DP_NSS_LENGTH - index,
6042 				" %d", pdev->stats.rx.nss[i]);
6043 	}
6044 	DP_PRINT_STATS("NSS(1-8) = %s",
6045 			nss);
6046 
6047 	DP_PRINT_STATS("SGI ="
6048 			" 0.8us %d,"
6049 			" 0.4us %d,"
6050 			" 1.6us %d,"
6051 			" 3.2us %d,",
6052 			pdev->stats.rx.sgi_count[0],
6053 			pdev->stats.rx.sgi_count[1],
6054 			pdev->stats.rx.sgi_count[2],
6055 			pdev->stats.rx.sgi_count[3]);
6056 	DP_PRINT_STATS("BW Counts = 20MHZ %d, 40MHZ %d, 80MHZ %d, 160MHZ %d",
6057 			pdev->stats.rx.bw[0], pdev->stats.rx.bw[1],
6058 			pdev->stats.rx.bw[2], pdev->stats.rx.bw[3]);
6059 	DP_PRINT_STATS("Reception Type ="
6060 			" SU: %d,"
6061 			" MU_MIMO:%d,"
6062 			" MU_OFDMA:%d,"
6063 			" MU_OFDMA_MIMO:%d\n",
6064 			pdev->stats.rx.reception_type[0],
6065 			pdev->stats.rx.reception_type[1],
6066 			pdev->stats.rx.reception_type[2],
6067 			pdev->stats.rx.reception_type[3]);
6068 	DP_PRINT_STATS("Aggregation:\n");
6069 	DP_PRINT_STATS("Number of Msdu's Part of Ampdus = %d",
6070 			pdev->stats.rx.ampdu_cnt);
6071 	DP_PRINT_STATS("Number of Msdu's With No Mpdu Level Aggregation : %d",
6072 			pdev->stats.rx.non_ampdu_cnt);
6073 	DP_PRINT_STATS("Number of Msdu's Part of Amsdu: %d",
6074 			pdev->stats.rx.amsdu_cnt);
6075 	DP_PRINT_STATS("Number of Msdu's With No Msdu Level Aggregation: %d",
6076 			pdev->stats.rx.non_amsdu_cnt);
6077 }
6078 
6079 /**
6080  * dp_print_tx_rates(): Print tx rates
6081  * @vdev: DP_VDEV handle
6082  *
6083  * Return:void
6084  */
6085 static inline void
6086 dp_print_tx_rates(struct dp_vdev *vdev)
6087 {
6088 	struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;
6089 	uint8_t index;
6090 	char nss[DP_NSS_LENGTH];
6091 	int nss_index;
6092 
6093 	DP_PRINT_STATS("Tx Rate Info:\n");
6094 	dp_print_common_rates_info(pdev->stats.tx.pkt_type);
6095 
6096 	DP_PRINT_STATS("SGI ="
6097 			" 0.8us %d"
6098 			" 0.4us %d"
6099 			" 1.6us %d"
6100 			" 3.2us %d",
6101 			pdev->stats.tx.sgi_count[0],
6102 			pdev->stats.tx.sgi_count[1],
6103 			pdev->stats.tx.sgi_count[2],
6104 			pdev->stats.tx.sgi_count[3]);
6105 
6106 	DP_PRINT_STATS("BW Counts = 20MHZ %d, 40MHZ %d, 80MHZ %d, 160MHZ %d",
6107 			pdev->stats.tx.bw[0], pdev->stats.tx.bw[1],
6108 			pdev->stats.tx.bw[2], pdev->stats.tx.bw[3]);
6109 
6110 	index = 0;
6111 	for (nss_index = 0; nss_index < SS_COUNT; nss_index++) {
6112 		index += qdf_snprint(&nss[index], DP_NSS_LENGTH - index,
6113 				" %d", pdev->stats.tx.nss[nss_index]);
6114 	}
6115 
6116 	DP_PRINT_STATS("NSS(1-8) = %s", nss);
6117 	DP_PRINT_STATS("OFDMA = %d", pdev->stats.tx.ofdma);
6118 	DP_PRINT_STATS("STBC = %d", pdev->stats.tx.stbc);
6119 	DP_PRINT_STATS("LDPC = %d", pdev->stats.tx.ldpc);
6120 	DP_PRINT_STATS("Retries = %d", pdev->stats.tx.retries);
6121 	DP_PRINT_STATS("Last ack rssi = %d\n", pdev->stats.tx.last_ack_rssi);
6122 
6123 	DP_PRINT_STATS("Aggregation:\n");
6124 	DP_PRINT_STATS("Number of Msdu's Part of Amsdu = %d",
6125 			pdev->stats.tx.amsdu_cnt);
6126 	DP_PRINT_STATS("Number of Msdu's With No Msdu Level Aggregation = %d",
6127 			pdev->stats.tx.non_amsdu_cnt);
6128 }
6129 
6130 /**
6131  * dp_print_peer_stats():print peer stats
6132  * @peer: DP_PEER handle
6133  *
6134  * return void
6135  */
6136 static inline void dp_print_peer_stats(struct dp_peer *peer)
6137 {
6138 	uint8_t i;
6139 	uint32_t index;
6140 	char nss[DP_NSS_LENGTH];
6141 	DP_PRINT_STATS("Node Tx Stats:\n");
6142 	DP_PRINT_STATS("Total Packet Completions = %d",
6143 			peer->stats.tx.comp_pkt.num);
6144 	DP_PRINT_STATS("Total Bytes Completions = %llu",
6145 			peer->stats.tx.comp_pkt.bytes);
6146 	DP_PRINT_STATS("Success Packets = %d",
6147 			peer->stats.tx.tx_success.num);
6148 	DP_PRINT_STATS("Success Bytes = %llu",
6149 			peer->stats.tx.tx_success.bytes);
6150 	DP_PRINT_STATS("Unicast Success Packets = %d",
6151 			peer->stats.tx.ucast.num);
6152 	DP_PRINT_STATS("Unicast Success Bytes = %llu",
6153 			peer->stats.tx.ucast.bytes);
6154 	DP_PRINT_STATS("Multicast Success Packets = %d",
6155 			peer->stats.tx.mcast.num);
6156 	DP_PRINT_STATS("Multicast Success Bytes = %llu",
6157 			peer->stats.tx.mcast.bytes);
6158 	DP_PRINT_STATS("Broadcast Success Packets = %d",
6159 			peer->stats.tx.bcast.num);
6160 	DP_PRINT_STATS("Broadcast Success Bytes = %llu",
6161 			peer->stats.tx.bcast.bytes);
6162 	DP_PRINT_STATS("Packets Failed = %d",
6163 			peer->stats.tx.tx_failed);
6164 	DP_PRINT_STATS("Packets In OFDMA = %d",
6165 			peer->stats.tx.ofdma);
6166 	DP_PRINT_STATS("Packets In STBC = %d",
6167 			peer->stats.tx.stbc);
6168 	DP_PRINT_STATS("Packets In LDPC = %d",
6169 			peer->stats.tx.ldpc);
6170 	DP_PRINT_STATS("Packet Retries = %d",
6171 			peer->stats.tx.retries);
6172 	DP_PRINT_STATS("MSDU's Part of AMSDU = %d",
6173 			peer->stats.tx.amsdu_cnt);
6174 	DP_PRINT_STATS("Last Packet RSSI = %d",
6175 			peer->stats.tx.last_ack_rssi);
6176 	DP_PRINT_STATS("Dropped At FW: Removed = %d",
6177 			peer->stats.tx.dropped.fw_rem);
6178 	DP_PRINT_STATS("Dropped At FW: Removed transmitted = %d",
6179 			peer->stats.tx.dropped.fw_rem_tx);
6180 	DP_PRINT_STATS("Dropped At FW: Removed Untransmitted = %d",
6181 			peer->stats.tx.dropped.fw_rem_notx);
6182 	DP_PRINT_STATS("Dropped : Age Out = %d",
6183 			peer->stats.tx.dropped.age_out);
6184 	DP_PRINT_STATS("NAWDS : ");
6185 	DP_PRINT_STATS("	Nawds multicast Drop Tx Packet = %d",
6186 			peer->stats.tx.nawds_mcast_drop);
6187 	DP_PRINT_STATS("	Nawds multicast  Tx Packet Count = %d",
6188 			peer->stats.tx.nawds_mcast.num);
6189 	DP_PRINT_STATS("	Nawds multicast  Tx Packet Bytes = %llu",
6190 			peer->stats.tx.nawds_mcast.bytes);
6191 
6192 	DP_PRINT_STATS("Rate Info:");
6193 	dp_print_common_rates_info(peer->stats.tx.pkt_type);
6194 
6195 
6196 	DP_PRINT_STATS("SGI = "
6197 			" 0.8us %d"
6198 			" 0.4us %d"
6199 			" 1.6us %d"
6200 			" 3.2us %d",
6201 			peer->stats.tx.sgi_count[0],
6202 			peer->stats.tx.sgi_count[1],
6203 			peer->stats.tx.sgi_count[2],
6204 			peer->stats.tx.sgi_count[3]);
6205 	DP_PRINT_STATS("Excess Retries per AC ");
6206 	DP_PRINT_STATS("	 Best effort = %d",
6207 			peer->stats.tx.excess_retries_per_ac[0]);
6208 	DP_PRINT_STATS("	 Background= %d",
6209 			peer->stats.tx.excess_retries_per_ac[1]);
6210 	DP_PRINT_STATS("	 Video = %d",
6211 			peer->stats.tx.excess_retries_per_ac[2]);
6212 	DP_PRINT_STATS("	 Voice = %d",
6213 			peer->stats.tx.excess_retries_per_ac[3]);
6214 	DP_PRINT_STATS("BW Counts = 20MHZ %d 40MHZ %d 80MHZ %d 160MHZ %d\n",
6215 			peer->stats.tx.bw[2], peer->stats.tx.bw[3],
6216 			peer->stats.tx.bw[4], peer->stats.tx.bw[5]);
6217 
6218 	index = 0;
6219 	for (i = 0; i < SS_COUNT; i++) {
6220 		index += qdf_snprint(&nss[index], DP_NSS_LENGTH - index,
6221 				" %d", peer->stats.tx.nss[i]);
6222 	}
6223 	DP_PRINT_STATS("NSS(1-8) = %s",
6224 			nss);
6225 
6226 	DP_PRINT_STATS("Aggregation:");
6227 	DP_PRINT_STATS("	Number of Msdu's Part of Amsdu = %d",
6228 			peer->stats.tx.amsdu_cnt);
6229 	DP_PRINT_STATS("	Number of Msdu's With No Msdu Level Aggregation = %d\n",
6230 			peer->stats.tx.non_amsdu_cnt);
6231 
6232 	DP_PRINT_STATS("Bytes and Packets transmitted  in last one sec:");
6233 	DP_PRINT_STATS("	Bytes transmitted in last sec: %d",
6234 		       peer->stats.tx.tx_byte_rate);
6235 	DP_PRINT_STATS("	Data transmitted in last sec: %d",
6236 		       peer->stats.tx.tx_data_rate);
6237 
6238 	DP_PRINT_STATS("Node Rx Stats:");
6239 	DP_PRINT_STATS("Packets Sent To Stack = %d",
6240 			peer->stats.rx.to_stack.num);
6241 	DP_PRINT_STATS("Bytes Sent To Stack = %llu",
6242 			peer->stats.rx.to_stack.bytes);
6243 	for (i = 0; i <  CDP_MAX_RX_RINGS; i++) {
6244 		DP_PRINT_STATS("Ring Id = %d", i);
6245 		DP_PRINT_STATS("	Packets Received = %d",
6246 				peer->stats.rx.rcvd_reo[i].num);
6247 		DP_PRINT_STATS("	Bytes Received = %llu",
6248 				peer->stats.rx.rcvd_reo[i].bytes);
6249 	}
6250 	DP_PRINT_STATS("Multicast Packets Received = %d",
6251 			peer->stats.rx.multicast.num);
6252 	DP_PRINT_STATS("Multicast Bytes Received = %llu",
6253 			peer->stats.rx.multicast.bytes);
6254 	DP_PRINT_STATS("Broadcast Packets Received = %d",
6255 			peer->stats.rx.bcast.num);
6256 	DP_PRINT_STATS("Broadcast Bytes Received = %llu",
6257 			peer->stats.rx.bcast.bytes);
6258 	DP_PRINT_STATS("Intra BSS Packets Received = %d",
6259 			peer->stats.rx.intra_bss.pkts.num);
6260 	DP_PRINT_STATS("Intra BSS Bytes Received = %llu",
6261 			peer->stats.rx.intra_bss.pkts.bytes);
6262 	DP_PRINT_STATS("Raw Packets Received = %d",
6263 			peer->stats.rx.raw.num);
6264 	DP_PRINT_STATS("Raw Bytes Received = %llu",
6265 			peer->stats.rx.raw.bytes);
6266 	DP_PRINT_STATS("Errors: MIC Errors = %d",
6267 			peer->stats.rx.err.mic_err);
6268 	DP_PRINT_STATS("Erros: Decryption Errors = %d",
6269 			peer->stats.rx.err.decrypt_err);
6270 	DP_PRINT_STATS("Msdu's Received As Part of Ampdu = %d",
6271 			peer->stats.rx.non_ampdu_cnt);
6272 	DP_PRINT_STATS("Msdu's Recived As Ampdu = %d",
6273 			peer->stats.rx.ampdu_cnt);
6274 	DP_PRINT_STATS("Msdu's Received Not Part of Amsdu's = %d",
6275 			peer->stats.rx.non_amsdu_cnt);
6276 	DP_PRINT_STATS("MSDUs Received As Part of Amsdu = %d",
6277 			peer->stats.rx.amsdu_cnt);
6278 	DP_PRINT_STATS("NAWDS : ");
6279 	DP_PRINT_STATS("	Nawds multicast Drop Rx Packet = %d",
6280 			peer->stats.rx.nawds_mcast_drop);
6281 	DP_PRINT_STATS("SGI ="
6282 			" 0.8us %d"
6283 			" 0.4us %d"
6284 			" 1.6us %d"
6285 			" 3.2us %d",
6286 			peer->stats.rx.sgi_count[0],
6287 			peer->stats.rx.sgi_count[1],
6288 			peer->stats.rx.sgi_count[2],
6289 			peer->stats.rx.sgi_count[3]);
6290 	DP_PRINT_STATS("BW Counts = 20MHZ %d 40MHZ %d 80MHZ %d 160MHZ %d",
6291 			peer->stats.rx.bw[0], peer->stats.rx.bw[1],
6292 			peer->stats.rx.bw[2], peer->stats.rx.bw[3]);
6293 	DP_PRINT_STATS("Reception Type ="
6294 			" SU %d,"
6295 			" MU_MIMO %d,"
6296 			" MU_OFDMA %d,"
6297 			" MU_OFDMA_MIMO %d",
6298 			peer->stats.rx.reception_type[0],
6299 			peer->stats.rx.reception_type[1],
6300 			peer->stats.rx.reception_type[2],
6301 			peer->stats.rx.reception_type[3]);
6302 
6303 	dp_print_common_rates_info(peer->stats.rx.pkt_type);
6304 
6305 	index = 0;
6306 	for (i = 0; i < SS_COUNT; i++) {
6307 		index += qdf_snprint(&nss[index], DP_NSS_LENGTH - index,
6308 				" %d", peer->stats.rx.nss[i]);
6309 	}
6310 	DP_PRINT_STATS("NSS(1-8) = %s",
6311 			nss);
6312 
6313 	DP_PRINT_STATS("Aggregation:");
6314 	DP_PRINT_STATS("	Msdu's Part of Ampdu = %d",
6315 			peer->stats.rx.ampdu_cnt);
6316 	DP_PRINT_STATS("	Msdu's With No Mpdu Level Aggregation = %d",
6317 			peer->stats.rx.non_ampdu_cnt);
6318 	DP_PRINT_STATS("	Msdu's Part of Amsdu = %d",
6319 			peer->stats.rx.amsdu_cnt);
6320 	DP_PRINT_STATS("	Msdu's With No Msdu Level Aggregation = %d",
6321 			peer->stats.rx.non_amsdu_cnt);
6322 
6323 	DP_PRINT_STATS("Bytes and Packets received in last one sec:");
6324 	DP_PRINT_STATS("	Bytes received in last sec: %d",
6325 		       peer->stats.rx.rx_byte_rate);
6326 	DP_PRINT_STATS("	Data received in last sec: %d",
6327 		       peer->stats.rx.rx_data_rate);
6328 }
6329 
6330 /*
6331  * dp_get_host_peer_stats()- function to print peer stats
6332  * @pdev_handle: DP_PDEV handle
6333  * @mac_addr: mac address of the peer
6334  *
6335  * Return: void
6336  */
6337 static void
6338 dp_get_host_peer_stats(struct cdp_pdev *pdev_handle, char *mac_addr)
6339 {
6340 	struct dp_peer *peer;
6341 	uint8_t local_id;
6342 
6343 	peer = (struct dp_peer *)dp_find_peer_by_addr(pdev_handle, mac_addr,
6344 			&local_id);
6345 
6346 	if (!peer) {
6347 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
6348 			  "%s: Invalid peer\n", __func__);
6349 		return;
6350 	}
6351 
6352 	dp_print_peer_stats(peer);
6353 	dp_peer_rxtid_stats(peer, dp_rx_tid_stats_cb, NULL);
6354 }
6355 
6356 /**
6357  * dp_print_host_stats()- Function to print the stats aggregated at host
6358  * @vdev_handle: DP_VDEV handle
6359  * @type: host stats type
6360  *
6361  * Available Stat types
6362  * TXRX_CLEAR_STATS  : Clear the stats
6363  * TXRX_RX_RATE_STATS: Print Rx Rate Info
6364  * TXRX_TX_RATE_STATS: Print Tx Rate Info
6365  * TXRX_TX_HOST_STATS: Print Tx Stats
6366  * TXRX_RX_HOST_STATS: Print Rx Stats
6367  * TXRX_AST_STATS: Print AST Stats
6368  * TXRX_SRNG_PTR_STATS: Print SRNG ring pointer stats
6369  *
6370  * Return: 0 on success, print error message in case of failure
6371  */
6372 static int
6373 dp_print_host_stats(struct cdp_vdev *vdev_handle,
6374 		    struct cdp_txrx_stats_req *req)
6375 {
6376 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
6377 	struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;
6378 	enum cdp_host_txrx_stats type =
6379 			dp_stats_mapping_table[req->stats][STATS_HOST];
6380 
6381 	dp_aggregate_pdev_stats(pdev);
6382 
6383 	switch (type) {
6384 	case TXRX_CLEAR_STATS:
6385 		dp_txrx_host_stats_clr(vdev);
6386 		break;
6387 	case TXRX_RX_RATE_STATS:
6388 		dp_print_rx_rates(vdev);
6389 		break;
6390 	case TXRX_TX_RATE_STATS:
6391 		dp_print_tx_rates(vdev);
6392 		break;
6393 	case TXRX_TX_HOST_STATS:
6394 		dp_print_pdev_tx_stats(pdev);
6395 		dp_print_soc_tx_stats(pdev->soc);
6396 		break;
6397 	case TXRX_RX_HOST_STATS:
6398 		dp_print_pdev_rx_stats(pdev);
6399 		dp_print_soc_rx_stats(pdev->soc);
6400 		break;
6401 	case TXRX_AST_STATS:
6402 		dp_print_ast_stats(pdev->soc);
6403 		dp_print_peer_table(vdev);
6404 		break;
6405 	case TXRX_SRNG_PTR_STATS:
6406 		dp_print_ring_stats(pdev);
6407 		break;
6408 	case TXRX_RX_MON_STATS:
6409 		dp_print_pdev_rx_mon_stats(pdev);
6410 		break;
6411 	case TXRX_REO_QUEUE_STATS:
6412 		dp_get_host_peer_stats((struct cdp_pdev *)pdev, req->peer_addr);
6413 		break;
6414 	default:
6415 		DP_TRACE(FATAL, "Wrong Input For TxRx Host Stats");
6416 		break;
6417 	}
6418 	return 0;
6419 }
6420 
6421 /*
6422  * dp_ppdu_ring_reset()- Reset PPDU Stats ring
6423  * @pdev: DP_PDEV handle
6424  *
6425  * Return: void
6426  */
6427 static void
6428 dp_ppdu_ring_reset(struct dp_pdev *pdev)
6429 {
6430 	struct htt_rx_ring_tlv_filter htt_tlv_filter;
6431 	int mac_id;
6432 
6433 	qdf_mem_set(&(htt_tlv_filter), sizeof(htt_tlv_filter), 0x0);
6434 
6435 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
6436 		int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id,
6437 							pdev->pdev_id);
6438 
6439 		htt_h2t_rx_ring_cfg(pdev->soc->htt_handle, mac_for_pdev,
6440 			pdev->rxdma_mon_status_ring[mac_id].hal_srng,
6441 			RXDMA_MONITOR_STATUS, RX_BUFFER_SIZE, &htt_tlv_filter);
6442 	}
6443 }
6444 
6445 /*
6446  * dp_ppdu_ring_cfg()- Configure PPDU Stats ring
6447  * @pdev: DP_PDEV handle
6448  *
6449  * Return: void
6450  */
6451 static void
6452 dp_ppdu_ring_cfg(struct dp_pdev *pdev)
6453 {
6454 	struct htt_rx_ring_tlv_filter htt_tlv_filter = {0};
6455 	int mac_id;
6456 
6457 	htt_tlv_filter.mpdu_start = 1;
6458 	htt_tlv_filter.msdu_start = 0;
6459 	htt_tlv_filter.packet = 0;
6460 	htt_tlv_filter.msdu_end = 0;
6461 	htt_tlv_filter.mpdu_end = 0;
6462 	htt_tlv_filter.attention = 0;
6463 	htt_tlv_filter.ppdu_start = 1;
6464 	htt_tlv_filter.ppdu_end = 1;
6465 	htt_tlv_filter.ppdu_end_user_stats = 1;
6466 	htt_tlv_filter.ppdu_end_user_stats_ext = 1;
6467 	htt_tlv_filter.ppdu_end_status_done = 1;
6468 	htt_tlv_filter.enable_fp = 1;
6469 	htt_tlv_filter.enable_md = 0;
6470 	if (pdev->neighbour_peers_added &&
6471 	    pdev->soc->hw_nac_monitor_support) {
6472 		htt_tlv_filter.enable_md = 1;
6473 		htt_tlv_filter.packet_header = 1;
6474 	}
6475 	if (pdev->mcopy_mode) {
6476 		htt_tlv_filter.packet_header = 1;
6477 		htt_tlv_filter.enable_mo = 1;
6478 	}
6479 	htt_tlv_filter.fp_mgmt_filter = FILTER_MGMT_ALL;
6480 	htt_tlv_filter.fp_ctrl_filter = FILTER_CTRL_ALL;
6481 	htt_tlv_filter.fp_data_filter = FILTER_DATA_ALL;
6482 	htt_tlv_filter.mo_mgmt_filter = FILTER_MGMT_ALL;
6483 	htt_tlv_filter.mo_ctrl_filter = FILTER_CTRL_ALL;
6484 	htt_tlv_filter.mo_data_filter = FILTER_DATA_ALL;
6485 	if (pdev->neighbour_peers_added &&
6486 	    pdev->soc->hw_nac_monitor_support)
6487 		htt_tlv_filter.md_data_filter = FILTER_DATA_ALL;
6488 
6489 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
6490 		int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id,
6491 						pdev->pdev_id);
6492 
6493 		htt_h2t_rx_ring_cfg(pdev->soc->htt_handle, mac_for_pdev,
6494 			pdev->rxdma_mon_status_ring[mac_id].hal_srng,
6495 			RXDMA_MONITOR_STATUS, RX_BUFFER_SIZE, &htt_tlv_filter);
6496 	}
6497 }
6498 
6499 /*
6500  * is_ppdu_txrx_capture_enabled() - API to check both pktlog and debug_sniffer
6501  *                              modes are enabled or not.
6502  * @dp_pdev: dp pdev handle.
6503  *
6504  * Return: bool
6505  */
6506 static inline bool is_ppdu_txrx_capture_enabled(struct dp_pdev *pdev)
6507 {
6508 	if (!pdev->pktlog_ppdu_stats && !pdev->tx_sniffer_enable &&
6509 	    !pdev->mcopy_mode)
6510 		return true;
6511 	else
6512 		return false;
6513 }
6514 
6515 /*
6516  *dp_set_bpr_enable() - API to enable/disable bpr feature
6517  *@pdev_handle: DP_PDEV handle.
6518  *@val: Provided value.
6519  *
6520  *Return: void
6521  */
6522 static void
6523 dp_set_bpr_enable(struct cdp_pdev *pdev_handle, int val)
6524 {
6525 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
6526 
6527 	switch (val) {
6528 	case CDP_BPR_DISABLE:
6529 		pdev->bpr_enable = CDP_BPR_DISABLE;
6530 		if (!pdev->pktlog_ppdu_stats && !pdev->enhanced_stats_en &&
6531 		    !pdev->tx_sniffer_enable && !pdev->mcopy_mode) {
6532 			dp_h2t_cfg_stats_msg_send(pdev, 0, pdev->pdev_id);
6533 		} else if (pdev->enhanced_stats_en &&
6534 			   !pdev->tx_sniffer_enable && !pdev->mcopy_mode &&
6535 			   !pdev->pktlog_ppdu_stats) {
6536 			dp_h2t_cfg_stats_msg_send(pdev,
6537 						  DP_PPDU_STATS_CFG_ENH_STATS,
6538 						  pdev->pdev_id);
6539 		}
6540 		break;
6541 	case CDP_BPR_ENABLE:
6542 		pdev->bpr_enable = CDP_BPR_ENABLE;
6543 		if (!pdev->enhanced_stats_en && !pdev->tx_sniffer_enable &&
6544 		    !pdev->mcopy_mode && !pdev->pktlog_ppdu_stats) {
6545 			dp_h2t_cfg_stats_msg_send(pdev,
6546 						  DP_PPDU_STATS_CFG_BPR,
6547 						  pdev->pdev_id);
6548 		} else if (pdev->enhanced_stats_en &&
6549 			   !pdev->tx_sniffer_enable && !pdev->mcopy_mode &&
6550 			   !pdev->pktlog_ppdu_stats) {
6551 			dp_h2t_cfg_stats_msg_send(pdev,
6552 						  DP_PPDU_STATS_CFG_BPR_ENH,
6553 						  pdev->pdev_id);
6554 		} else if (pdev->pktlog_ppdu_stats) {
6555 			dp_h2t_cfg_stats_msg_send(pdev,
6556 						  DP_PPDU_STATS_CFG_BPR_PKTLOG,
6557 						  pdev->pdev_id);
6558 		}
6559 		break;
6560 	default:
6561 		break;
6562 	}
6563 }
6564 
6565 /*
6566  * dp_config_debug_sniffer()- API to enable/disable debug sniffer
6567  * @pdev_handle: DP_PDEV handle
6568  * @val: user provided value
6569  *
6570  * Return: void
6571  */
6572 static void
6573 dp_config_debug_sniffer(struct cdp_pdev *pdev_handle, int val)
6574 {
6575 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
6576 
6577 	switch (val) {
6578 	case 0:
6579 		pdev->tx_sniffer_enable = 0;
6580 		pdev->mcopy_mode = 0;
6581 
6582 		if (!pdev->pktlog_ppdu_stats && !pdev->enhanced_stats_en &&
6583 		    !pdev->bpr_enable) {
6584 			dp_h2t_cfg_stats_msg_send(pdev, 0, pdev->pdev_id);
6585 			dp_ppdu_ring_reset(pdev);
6586 		} else if (pdev->enhanced_stats_en && !pdev->bpr_enable) {
6587 			dp_h2t_cfg_stats_msg_send(pdev,
6588 				DP_PPDU_STATS_CFG_ENH_STATS, pdev->pdev_id);
6589 		} else if (!pdev->enhanced_stats_en && pdev->bpr_enable) {
6590 			dp_h2t_cfg_stats_msg_send(pdev,
6591 						  DP_PPDU_STATS_CFG_BPR_ENH,
6592 						  pdev->pdev_id);
6593 		} else {
6594 			dp_h2t_cfg_stats_msg_send(pdev,
6595 						  DP_PPDU_STATS_CFG_BPR,
6596 						  pdev->pdev_id);
6597 		}
6598 		break;
6599 
6600 	case 1:
6601 		pdev->tx_sniffer_enable = 1;
6602 		pdev->mcopy_mode = 0;
6603 
6604 		if (!pdev->pktlog_ppdu_stats)
6605 			dp_h2t_cfg_stats_msg_send(pdev,
6606 				DP_PPDU_STATS_CFG_SNIFFER, pdev->pdev_id);
6607 		break;
6608 	case 2:
6609 		pdev->mcopy_mode = 1;
6610 		pdev->tx_sniffer_enable = 0;
6611 		dp_ppdu_ring_cfg(pdev);
6612 
6613 		if (!pdev->pktlog_ppdu_stats)
6614 			dp_h2t_cfg_stats_msg_send(pdev,
6615 				DP_PPDU_STATS_CFG_SNIFFER, pdev->pdev_id);
6616 		break;
6617 	default:
6618 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
6619 			"Invalid value");
6620 		break;
6621 	}
6622 }
6623 
6624 /*
6625  * dp_enable_enhanced_stats()- API to enable enhanced statistcs
6626  * @pdev_handle: DP_PDEV handle
6627  *
6628  * Return: void
6629  */
6630 static void
6631 dp_enable_enhanced_stats(struct cdp_pdev *pdev_handle)
6632 {
6633 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
6634 
6635 	if (pdev->enhanced_stats_en == 0)
6636 		dp_cal_client_timer_start(pdev->cal_client_ctx);
6637 
6638 	pdev->enhanced_stats_en = 1;
6639 
6640 	if (!pdev->mcopy_mode && !pdev->neighbour_peers_added)
6641 		dp_ppdu_ring_cfg(pdev);
6642 
6643 	if (is_ppdu_txrx_capture_enabled(pdev) && !pdev->bpr_enable) {
6644 		dp_h2t_cfg_stats_msg_send(pdev, DP_PPDU_STATS_CFG_ENH_STATS, pdev->pdev_id);
6645 	} else if (is_ppdu_txrx_capture_enabled(pdev) && pdev->bpr_enable) {
6646 		dp_h2t_cfg_stats_msg_send(pdev,
6647 					  DP_PPDU_STATS_CFG_BPR_ENH,
6648 					  pdev->pdev_id);
6649 	}
6650 }
6651 
6652 /*
6653  * dp_disable_enhanced_stats()- API to disable enhanced statistcs
6654  * @pdev_handle: DP_PDEV handle
6655  *
6656  * Return: void
6657  */
6658 static void
6659 dp_disable_enhanced_stats(struct cdp_pdev *pdev_handle)
6660 {
6661 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
6662 
6663 	if (pdev->enhanced_stats_en == 1)
6664 		dp_cal_client_timer_stop(pdev->cal_client_ctx);
6665 
6666 	pdev->enhanced_stats_en = 0;
6667 
6668 	if (is_ppdu_txrx_capture_enabled(pdev) && !pdev->bpr_enable) {
6669 		dp_h2t_cfg_stats_msg_send(pdev, 0, pdev->pdev_id);
6670 	} else if (is_ppdu_txrx_capture_enabled(pdev) && pdev->bpr_enable) {
6671 		dp_h2t_cfg_stats_msg_send(pdev,
6672 					  DP_PPDU_STATS_CFG_BPR,
6673 					  pdev->pdev_id);
6674 	}
6675 
6676 	if (!pdev->mcopy_mode && !pdev->neighbour_peers_added)
6677 		dp_ppdu_ring_reset(pdev);
6678 }
6679 
6680 /*
6681  * dp_get_fw_peer_stats()- function to print peer stats
6682  * @pdev_handle: DP_PDEV handle
6683  * @mac_addr: mac address of the peer
6684  * @cap: Type of htt stats requested
6685  *
6686  * Currently Supporting only MAC ID based requests Only
6687  *	1: HTT_PEER_STATS_REQ_MODE_NO_QUERY
6688  *	2: HTT_PEER_STATS_REQ_MODE_QUERY_TQM
6689  *	3: HTT_PEER_STATS_REQ_MODE_FLUSH_TQM
6690  *
6691  * Return: void
6692  */
6693 static void
6694 dp_get_fw_peer_stats(struct cdp_pdev *pdev_handle, uint8_t *mac_addr,
6695 		uint32_t cap)
6696 {
6697 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
6698 	int i;
6699 	uint32_t config_param0 = 0;
6700 	uint32_t config_param1 = 0;
6701 	uint32_t config_param2 = 0;
6702 	uint32_t config_param3 = 0;
6703 
6704 	HTT_DBG_EXT_STATS_PEER_INFO_IS_MAC_ADDR_SET(config_param0, 1);
6705 	config_param0 |= (1 << (cap + 1));
6706 
6707 	for (i = 0; i < HTT_PEER_STATS_MAX_TLV; i++) {
6708 		config_param1 |= (1 << i);
6709 	}
6710 
6711 	config_param2 |= (mac_addr[0] & 0x000000ff);
6712 	config_param2 |= ((mac_addr[1] << 8) & 0x0000ff00);
6713 	config_param2 |= ((mac_addr[2] << 16) & 0x00ff0000);
6714 	config_param2 |= ((mac_addr[3] << 24) & 0xff000000);
6715 
6716 	config_param3 |= (mac_addr[4] & 0x000000ff);
6717 	config_param3 |= ((mac_addr[5] << 8) & 0x0000ff00);
6718 
6719 	dp_h2t_ext_stats_msg_send(pdev, HTT_DBG_EXT_STATS_PEER_INFO,
6720 			config_param0, config_param1, config_param2,
6721 			config_param3, 0, 0, 0);
6722 
6723 }
6724 
6725 /* This struct definition will be removed from here
6726  * once it get added in FW headers*/
6727 struct httstats_cmd_req {
6728     uint32_t    config_param0;
6729     uint32_t    config_param1;
6730     uint32_t    config_param2;
6731     uint32_t    config_param3;
6732     int cookie;
6733     u_int8_t    stats_id;
6734 };
6735 
6736 /*
6737  * dp_get_htt_stats: function to process the httstas request
6738  * @pdev_handle: DP pdev handle
6739  * @data: pointer to request data
6740  * @data_len: length for request data
6741  *
6742  * return: void
6743  */
6744 static void
6745 dp_get_htt_stats(struct cdp_pdev *pdev_handle, void *data, uint32_t data_len)
6746 {
6747 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
6748 	struct httstats_cmd_req *req = (struct httstats_cmd_req *)data;
6749 
6750 	QDF_ASSERT(data_len == sizeof(struct httstats_cmd_req));
6751 	dp_h2t_ext_stats_msg_send(pdev, req->stats_id,
6752 				req->config_param0, req->config_param1,
6753 				req->config_param2, req->config_param3,
6754 				req->cookie, 0, 0);
6755 }
6756 
6757 /*
6758  * dp_set_pdev_param: function to set parameters in pdev
6759  * @pdev_handle: DP pdev handle
6760  * @param: parameter type to be set
6761  * @val: value of parameter to be set
6762  *
6763  * return: void
6764  */
6765 static void dp_set_pdev_param(struct cdp_pdev *pdev_handle,
6766 		enum cdp_pdev_param_type param, uint8_t val)
6767 {
6768 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
6769 	switch (param) {
6770 	case CDP_CONFIG_DEBUG_SNIFFER:
6771 		dp_config_debug_sniffer(pdev_handle, val);
6772 		break;
6773 	case CDP_CONFIG_BPR_ENABLE:
6774 		dp_set_bpr_enable(pdev_handle, val);
6775 		break;
6776 	case CDP_CONFIG_PRIMARY_RADIO:
6777 		pdev->is_primary = val;
6778 		break;
6779 	default:
6780 		break;
6781 	}
6782 }
6783 
6784 /*
6785  * dp_set_vdev_param: function to set parameters in vdev
6786  * @param: parameter type to be set
6787  * @val: value of parameter to be set
6788  *
6789  * return: void
6790  */
6791 static void dp_set_vdev_param(struct cdp_vdev *vdev_handle,
6792 		enum cdp_vdev_param_type param, uint32_t val)
6793 {
6794 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
6795 	switch (param) {
6796 	case CDP_ENABLE_WDS:
6797 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
6798 			  "wds_enable %d for vdev(%p) id(%d)\n",
6799 			  val, vdev, vdev->vdev_id);
6800 		vdev->wds_enabled = val;
6801 		break;
6802 	case CDP_ENABLE_NAWDS:
6803 		vdev->nawds_enabled = val;
6804 		break;
6805 	case CDP_ENABLE_MCAST_EN:
6806 		vdev->mcast_enhancement_en = val;
6807 		break;
6808 	case CDP_ENABLE_PROXYSTA:
6809 		vdev->proxysta_vdev = val;
6810 		break;
6811 	case CDP_UPDATE_TDLS_FLAGS:
6812 		vdev->tdls_link_connected = val;
6813 		break;
6814 	case CDP_CFG_WDS_AGING_TIMER:
6815 		if (val == 0)
6816 			qdf_timer_stop(&vdev->pdev->soc->wds_aging_timer);
6817 		else if (val != vdev->wds_aging_timer_val)
6818 			qdf_timer_mod(&vdev->pdev->soc->wds_aging_timer, val);
6819 
6820 		vdev->wds_aging_timer_val = val;
6821 		break;
6822 	case CDP_ENABLE_AP_BRIDGE:
6823 		if (wlan_op_mode_sta != vdev->opmode)
6824 			vdev->ap_bridge_enabled = val;
6825 		else
6826 			vdev->ap_bridge_enabled = false;
6827 		break;
6828 	case CDP_ENABLE_CIPHER:
6829 		vdev->sec_type = val;
6830 		break;
6831 	case CDP_ENABLE_QWRAP_ISOLATION:
6832 		vdev->isolation_vdev = val;
6833 		break;
6834 	default:
6835 		break;
6836 	}
6837 
6838 	dp_tx_vdev_update_search_flags(vdev);
6839 }
6840 
6841 /**
6842  * dp_peer_set_nawds: set nawds bit in peer
6843  * @peer_handle: pointer to peer
6844  * @value: enable/disable nawds
6845  *
6846  * return: void
6847  */
6848 static void dp_peer_set_nawds(struct cdp_peer *peer_handle, uint8_t value)
6849 {
6850 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
6851 	peer->nawds_enabled = value;
6852 }
6853 
6854 /*
6855  * dp_set_vdev_dscp_tid_map_wifi3(): Update Map ID selected for particular vdev
6856  * @vdev_handle: DP_VDEV handle
6857  * @map_id:ID of map that needs to be updated
6858  *
6859  * Return: void
6860  */
6861 static void dp_set_vdev_dscp_tid_map_wifi3(struct cdp_vdev *vdev_handle,
6862 		uint8_t map_id)
6863 {
6864 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
6865 	vdev->dscp_tid_map_id = map_id;
6866 	return;
6867 }
6868 
6869 /* dp_txrx_get_peer_stats - will return cdp_peer_stats
6870  * @peer_handle: DP_PEER handle
6871  *
6872  * return : cdp_peer_stats pointer
6873  */
6874 static struct cdp_peer_stats*
6875 		dp_txrx_get_peer_stats(struct cdp_peer *peer_handle)
6876 {
6877 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
6878 
6879 	qdf_assert(peer);
6880 
6881 	return &peer->stats;
6882 }
6883 
6884 /* dp_txrx_reset_peer_stats - reset cdp_peer_stats for particular peer
6885  * @peer_handle: DP_PEER handle
6886  *
6887  * return : void
6888  */
6889 static void dp_txrx_reset_peer_stats(struct cdp_peer *peer_handle)
6890 {
6891 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
6892 
6893 	qdf_assert(peer);
6894 
6895 	qdf_mem_set(&peer->stats, sizeof(peer->stats), 0);
6896 }
6897 
6898 /* dp_txrx_get_vdev_stats - Update buffer with cdp_vdev_stats
6899  * @vdev_handle: DP_VDEV handle
6900  * @buf: buffer for vdev stats
6901  *
6902  * return : int
6903  */
6904 static int  dp_txrx_get_vdev_stats(struct cdp_vdev *vdev_handle, void *buf,
6905 				   bool is_aggregate)
6906 {
6907 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
6908 	struct cdp_vdev_stats *vdev_stats = (struct cdp_vdev_stats *)buf;
6909 
6910 	if (is_aggregate)
6911 		dp_aggregate_vdev_stats(vdev, buf);
6912 	else
6913 		qdf_mem_copy(vdev_stats, &vdev->stats, sizeof(vdev->stats));
6914 
6915 	return 0;
6916 }
6917 
6918 /*
6919  * dp_txrx_stats_publish(): publish pdev stats into a buffer
6920  * @pdev_handle: DP_PDEV handle
6921  * @buf: to hold pdev_stats
6922  *
6923  * Return: int
6924  */
6925 static int
6926 dp_txrx_stats_publish(struct cdp_pdev *pdev_handle, void *buf)
6927 {
6928 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
6929 	struct cdp_pdev_stats *buffer = (struct cdp_pdev_stats *) buf;
6930 	struct cdp_txrx_stats_req req = {0,};
6931 
6932 	dp_aggregate_pdev_stats(pdev);
6933 	req.stats = HTT_DBG_EXT_STATS_PDEV_TX;
6934 	req.cookie_val = 1;
6935 	dp_h2t_ext_stats_msg_send(pdev, req.stats, req.param0,
6936 				req.param1, req.param2, req.param3, 0,
6937 				req.cookie_val, 0);
6938 
6939 	msleep(DP_MAX_SLEEP_TIME);
6940 
6941 	req.stats = HTT_DBG_EXT_STATS_PDEV_RX;
6942 	req.cookie_val = 1;
6943 	dp_h2t_ext_stats_msg_send(pdev, req.stats, req.param0,
6944 				req.param1, req.param2, req.param3, 0,
6945 				req.cookie_val, 0);
6946 
6947 	msleep(DP_MAX_SLEEP_TIME);
6948 	qdf_mem_copy(buffer, &pdev->stats, sizeof(pdev->stats));
6949 
6950 	return TXRX_STATS_LEVEL;
6951 }
6952 
6953 /**
6954  * dp_set_pdev_dscp_tid_map_wifi3(): update dscp tid map in pdev
6955  * @pdev: DP_PDEV handle
6956  * @map_id: ID of map that needs to be updated
6957  * @tos: index value in map
6958  * @tid: tid value passed by the user
6959  *
6960  * Return: void
6961  */
6962 static void dp_set_pdev_dscp_tid_map_wifi3(struct cdp_pdev *pdev_handle,
6963 		uint8_t map_id, uint8_t tos, uint8_t tid)
6964 {
6965 	uint8_t dscp;
6966 	struct dp_pdev *pdev = (struct dp_pdev *) pdev_handle;
6967 	dscp = (tos >> DP_IP_DSCP_SHIFT) & DP_IP_DSCP_MASK;
6968 	pdev->dscp_tid_map[map_id][dscp] = tid;
6969 	if (map_id < HAL_MAX_HW_DSCP_TID_MAPS)
6970 		hal_tx_update_dscp_tid(pdev->soc->hal_soc, tid,
6971 			map_id, dscp);
6972 	return;
6973 }
6974 
6975 /**
6976  * dp_fw_stats_process(): Process TxRX FW stats request
6977  * @vdev_handle: DP VDEV handle
6978  * @req: stats request
6979  *
6980  * return: int
6981  */
6982 static int dp_fw_stats_process(struct cdp_vdev *vdev_handle,
6983 		struct cdp_txrx_stats_req *req)
6984 {
6985 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
6986 	struct dp_pdev *pdev = NULL;
6987 	uint32_t stats = req->stats;
6988 	uint8_t mac_id = req->mac_id;
6989 
6990 	if (!vdev) {
6991 		DP_TRACE(NONE, "VDEV not found");
6992 		return 1;
6993 	}
6994 	pdev = vdev->pdev;
6995 
6996 	/*
6997 	 * For HTT_DBG_EXT_STATS_RESET command, FW need to config
6998 	 * from param0 to param3 according to below rule:
6999 	 *
7000 	 * PARAM:
7001 	 *   - config_param0 : start_offset (stats type)
7002 	 *   - config_param1 : stats bmask from start offset
7003 	 *   - config_param2 : stats bmask from start offset + 32
7004 	 *   - config_param3 : stats bmask from start offset + 64
7005 	 */
7006 	if (req->stats == CDP_TXRX_STATS_0) {
7007 		req->param0 = HTT_DBG_EXT_STATS_PDEV_TX;
7008 		req->param1 = 0xFFFFFFFF;
7009 		req->param2 = 0xFFFFFFFF;
7010 		req->param3 = 0xFFFFFFFF;
7011 	}
7012 
7013 	return dp_h2t_ext_stats_msg_send(pdev, stats, req->param0,
7014 				req->param1, req->param2, req->param3,
7015 				0, 0, mac_id);
7016 }
7017 
7018 /**
7019  * dp_txrx_stats_request - function to map to firmware and host stats
7020  * @vdev: virtual handle
7021  * @req: stats request
7022  *
7023  * Return: QDF_STATUS
7024  */
7025 static
7026 QDF_STATUS dp_txrx_stats_request(struct cdp_vdev *vdev,
7027 				 struct cdp_txrx_stats_req *req)
7028 {
7029 	int host_stats;
7030 	int fw_stats;
7031 	enum cdp_stats stats;
7032 	int num_stats;
7033 
7034 	if (!vdev || !req) {
7035 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
7036 				"Invalid vdev/req instance");
7037 		return QDF_STATUS_E_INVAL;
7038 	}
7039 
7040 	stats = req->stats;
7041 	if (stats >= CDP_TXRX_MAX_STATS)
7042 		return QDF_STATUS_E_INVAL;
7043 
7044 	/*
7045 	 * DP_CURR_FW_STATS_AVAIL: no of FW stats currently available
7046 	 *			has to be updated if new FW HTT stats added
7047 	 */
7048 	if (stats > CDP_TXRX_STATS_HTT_MAX)
7049 		stats = stats + DP_CURR_FW_STATS_AVAIL - DP_HTT_DBG_EXT_STATS_MAX;
7050 
7051 	num_stats  = QDF_ARRAY_SIZE(dp_stats_mapping_table);
7052 
7053 	if (stats >= num_stats) {
7054 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
7055 			  "%s: Invalid stats option: %d", __func__, stats);
7056 		return QDF_STATUS_E_INVAL;
7057 	}
7058 
7059 	req->stats = stats;
7060 	fw_stats = dp_stats_mapping_table[stats][STATS_FW];
7061 	host_stats = dp_stats_mapping_table[stats][STATS_HOST];
7062 
7063 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
7064 		 "stats: %u fw_stats_type: %d host_stats: %d",
7065 		  stats, fw_stats, host_stats);
7066 
7067 	if (fw_stats != TXRX_FW_STATS_INVALID) {
7068 		/* update request with FW stats type */
7069 		req->stats = fw_stats;
7070 		return dp_fw_stats_process(vdev, req);
7071 	}
7072 
7073 	if ((host_stats != TXRX_HOST_STATS_INVALID) &&
7074 			(host_stats <= TXRX_HOST_STATS_MAX))
7075 		return dp_print_host_stats(vdev, req);
7076 	else
7077 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
7078 				"Wrong Input for TxRx Stats");
7079 
7080 	return QDF_STATUS_SUCCESS;
7081 }
7082 
7083 /*
7084  * dp_print_napi_stats(): NAPI stats
7085  * @soc - soc handle
7086  */
7087 static void dp_print_napi_stats(struct dp_soc *soc)
7088 {
7089 	hif_print_napi_stats(soc->hif_handle);
7090 }
7091 
7092 /*
7093  * dp_print_per_ring_stats(): Packet count per ring
7094  * @soc - soc handle
7095  */
7096 static void dp_print_per_ring_stats(struct dp_soc *soc)
7097 {
7098 	uint8_t ring;
7099 	uint16_t core;
7100 	uint64_t total_packets;
7101 
7102 	DP_TRACE_STATS(INFO_HIGH, "Reo packets per ring:");
7103 	for (ring = 0; ring < MAX_REO_DEST_RINGS; ring++) {
7104 		total_packets = 0;
7105 		DP_TRACE_STATS(INFO_HIGH,
7106 			       "Packets on ring %u:", ring);
7107 		for (core = 0; core < NR_CPUS; core++) {
7108 			DP_TRACE_STATS(INFO_HIGH,
7109 				       "Packets arriving on core %u: %llu",
7110 				       core,
7111 				       soc->stats.rx.ring_packets[core][ring]);
7112 			total_packets += soc->stats.rx.ring_packets[core][ring];
7113 		}
7114 		DP_TRACE_STATS(INFO_HIGH,
7115 			       "Total packets on ring %u: %llu",
7116 			       ring, total_packets);
7117 	}
7118 }
7119 
7120 /*
7121  * dp_txrx_path_stats() - Function to display dump stats
7122  * @soc - soc handle
7123  *
7124  * return: none
7125  */
7126 static void dp_txrx_path_stats(struct dp_soc *soc)
7127 {
7128 	uint8_t error_code;
7129 	uint8_t loop_pdev;
7130 	struct dp_pdev *pdev;
7131 	uint8_t i;
7132 
7133 	if (!soc) {
7134 		DP_TRACE(ERROR, "%s: Invalid access",
7135 			 __func__);
7136 		return;
7137 	}
7138 
7139 	for (loop_pdev = 0; loop_pdev < soc->pdev_count; loop_pdev++) {
7140 
7141 		pdev = soc->pdev_list[loop_pdev];
7142 		dp_aggregate_pdev_stats(pdev);
7143 		DP_TRACE_STATS(INFO_HIGH, "Tx path Statistics:");
7144 		DP_TRACE_STATS(INFO_HIGH, "from stack: %u msdus (%llu bytes)",
7145 			       pdev->stats.tx_i.rcvd.num,
7146 			       pdev->stats.tx_i.rcvd.bytes);
7147 		DP_TRACE_STATS(INFO_HIGH,
7148 			       "processed from host: %u msdus (%llu bytes)",
7149 			       pdev->stats.tx_i.processed.num,
7150 			       pdev->stats.tx_i.processed.bytes);
7151 		DP_TRACE_STATS(INFO_HIGH,
7152 			       "successfully transmitted: %u msdus (%llu bytes)",
7153 			       pdev->stats.tx.tx_success.num,
7154 			       pdev->stats.tx.tx_success.bytes);
7155 
7156 		DP_TRACE_STATS(INFO_HIGH, "Dropped in host:");
7157 		DP_TRACE_STATS(INFO_HIGH, "Total packets dropped: %u,",
7158 			       pdev->stats.tx_i.dropped.dropped_pkt.num);
7159 		DP_TRACE_STATS(INFO_HIGH, "Descriptor not available: %u",
7160 			       pdev->stats.tx_i.dropped.desc_na.num);
7161 		DP_TRACE_STATS(INFO_HIGH, "Ring full: %u",
7162 			       pdev->stats.tx_i.dropped.ring_full);
7163 		DP_TRACE_STATS(INFO_HIGH, "Enqueue fail: %u",
7164 			       pdev->stats.tx_i.dropped.enqueue_fail);
7165 		DP_TRACE_STATS(INFO_HIGH, "DMA Error: %u",
7166 			       pdev->stats.tx_i.dropped.dma_error);
7167 
7168 		DP_TRACE_STATS(INFO_HIGH, "Dropped in hardware:");
7169 		DP_TRACE_STATS(INFO_HIGH, "total packets dropped: %u",
7170 			       pdev->stats.tx.tx_failed);
7171 		DP_TRACE_STATS(INFO_HIGH, "mpdu age out: %u",
7172 			       pdev->stats.tx.dropped.age_out);
7173 		DP_TRACE_STATS(INFO_HIGH, "firmware removed: %u",
7174 			       pdev->stats.tx.dropped.fw_rem);
7175 		DP_TRACE_STATS(INFO_HIGH, "firmware removed tx: %u",
7176 			       pdev->stats.tx.dropped.fw_rem_tx);
7177 		DP_TRACE_STATS(INFO_HIGH, "firmware removed notx %u",
7178 			       pdev->stats.tx.dropped.fw_rem_notx);
7179 		DP_TRACE_STATS(INFO_HIGH, "peer_invalid: %u",
7180 			       pdev->soc->stats.tx.tx_invalid_peer.num);
7181 
7182 		DP_TRACE_STATS(INFO_HIGH, "Tx packets sent per interrupt:");
7183 		DP_TRACE_STATS(INFO_HIGH, "Single Packet: %u",
7184 			       pdev->stats.tx_comp_histogram.pkts_1);
7185 		DP_TRACE_STATS(INFO_HIGH, "2-20 Packets:  %u",
7186 			       pdev->stats.tx_comp_histogram.pkts_2_20);
7187 		DP_TRACE_STATS(INFO_HIGH, "21-40 Packets: %u",
7188 			       pdev->stats.tx_comp_histogram.pkts_21_40);
7189 		DP_TRACE_STATS(INFO_HIGH, "41-60 Packets: %u",
7190 			       pdev->stats.tx_comp_histogram.pkts_41_60);
7191 		DP_TRACE_STATS(INFO_HIGH, "61-80 Packets: %u",
7192 			       pdev->stats.tx_comp_histogram.pkts_61_80);
7193 		DP_TRACE_STATS(INFO_HIGH, "81-100 Packets: %u",
7194 			       pdev->stats.tx_comp_histogram.pkts_81_100);
7195 		DP_TRACE_STATS(INFO_HIGH, "101-200 Packets: %u",
7196 			       pdev->stats.tx_comp_histogram.pkts_101_200);
7197 		DP_TRACE_STATS(INFO_HIGH, "   201+ Packets: %u",
7198 			       pdev->stats.tx_comp_histogram.pkts_201_plus);
7199 
7200 		DP_TRACE_STATS(INFO_HIGH, "Rx path statistics");
7201 
7202 		DP_TRACE_STATS(INFO_HIGH,
7203 			       "delivered %u msdus ( %llu bytes),",
7204 			       pdev->stats.rx.to_stack.num,
7205 			       pdev->stats.rx.to_stack.bytes);
7206 		for (i = 0; i <  CDP_MAX_RX_RINGS; i++)
7207 			DP_TRACE_STATS(INFO_HIGH,
7208 				       "received on reo[%d] %u msdus( %llu bytes),",
7209 				       i, pdev->stats.rx.rcvd_reo[i].num,
7210 				       pdev->stats.rx.rcvd_reo[i].bytes);
7211 		DP_TRACE_STATS(INFO_HIGH,
7212 			       "intra-bss packets %u msdus ( %llu bytes),",
7213 			       pdev->stats.rx.intra_bss.pkts.num,
7214 			       pdev->stats.rx.intra_bss.pkts.bytes);
7215 		DP_TRACE_STATS(INFO_HIGH,
7216 			       "intra-bss fails %u msdus ( %llu bytes),",
7217 			       pdev->stats.rx.intra_bss.fail.num,
7218 			       pdev->stats.rx.intra_bss.fail.bytes);
7219 		DP_TRACE_STATS(INFO_HIGH,
7220 			       "raw packets %u msdus ( %llu bytes),",
7221 			       pdev->stats.rx.raw.num,
7222 			       pdev->stats.rx.raw.bytes);
7223 		DP_TRACE_STATS(INFO_HIGH, "dropped: error %u msdus",
7224 			       pdev->stats.rx.err.mic_err);
7225 		DP_TRACE_STATS(INFO_HIGH, "peer invalid %u",
7226 			       pdev->soc->stats.rx.err.rx_invalid_peer.num);
7227 
7228 		DP_TRACE_STATS(INFO_HIGH, "Reo Statistics");
7229 		DP_TRACE_STATS(INFO_HIGH, "rbm error: %u msdus",
7230 			       pdev->soc->stats.rx.err.invalid_rbm);
7231 		DP_TRACE_STATS(INFO_HIGH, "hal ring access fail: %u msdus",
7232 			       pdev->soc->stats.rx.err.hal_ring_access_fail);
7233 
7234 		for (error_code = 0; error_code < HAL_REO_ERR_MAX;
7235 				error_code++) {
7236 			if (!pdev->soc->stats.rx.err.reo_error[error_code])
7237 				continue;
7238 			DP_TRACE_STATS(INFO_HIGH,
7239 				       "Reo error number (%u): %u msdus",
7240 				       error_code,
7241 				       pdev->soc->stats.rx.err
7242 				       .reo_error[error_code]);
7243 		}
7244 
7245 		for (error_code = 0; error_code < HAL_RXDMA_ERR_MAX;
7246 				error_code++) {
7247 			if (!pdev->soc->stats.rx.err.rxdma_error[error_code])
7248 				continue;
7249 			DP_TRACE_STATS(INFO_HIGH,
7250 				       "Rxdma error number (%u): %u msdus",
7251 				       error_code,
7252 				       pdev->soc->stats.rx.err
7253 				       .rxdma_error[error_code]);
7254 		}
7255 
7256 		DP_TRACE_STATS(INFO_HIGH, "Rx packets reaped per interrupt:");
7257 		DP_TRACE_STATS(INFO_HIGH, "Single Packet: %u",
7258 			       pdev->stats.rx_ind_histogram.pkts_1);
7259 		DP_TRACE_STATS(INFO_HIGH, "2-20 Packets:  %u",
7260 			       pdev->stats.rx_ind_histogram.pkts_2_20);
7261 		DP_TRACE_STATS(INFO_HIGH, "21-40 Packets: %u",
7262 			       pdev->stats.rx_ind_histogram.pkts_21_40);
7263 		DP_TRACE_STATS(INFO_HIGH, "41-60 Packets: %u",
7264 			       pdev->stats.rx_ind_histogram.pkts_41_60);
7265 		DP_TRACE_STATS(INFO_HIGH, "61-80 Packets: %u",
7266 			       pdev->stats.rx_ind_histogram.pkts_61_80);
7267 		DP_TRACE_STATS(INFO_HIGH, "81-100 Packets: %u",
7268 			       pdev->stats.rx_ind_histogram.pkts_81_100);
7269 		DP_TRACE_STATS(INFO_HIGH, "101-200 Packets: %u",
7270 			       pdev->stats.rx_ind_histogram.pkts_101_200);
7271 		DP_TRACE_STATS(INFO_HIGH, "   201+ Packets: %u",
7272 			       pdev->stats.rx_ind_histogram.pkts_201_plus);
7273 
7274 		DP_TRACE_STATS(INFO_HIGH, "%s: tso_enable: %u lro_enable: %u rx_hash: %u napi_enable: %u",
7275 			       __func__,
7276 			       pdev->soc->wlan_cfg_ctx
7277 			       ->tso_enabled,
7278 			       pdev->soc->wlan_cfg_ctx
7279 			       ->lro_enabled,
7280 			       pdev->soc->wlan_cfg_ctx
7281 			       ->rx_hash,
7282 			       pdev->soc->wlan_cfg_ctx
7283 			       ->napi_enabled);
7284 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
7285 		DP_TRACE_STATS(INFO_HIGH, "%s: Tx flow stop queue: %u tx flow start queue offset: %u",
7286 			       __func__,
7287 			       pdev->soc->wlan_cfg_ctx
7288 			       ->tx_flow_stop_queue_threshold,
7289 			       pdev->soc->wlan_cfg_ctx
7290 			       ->tx_flow_start_queue_offset);
7291 #endif
7292 	}
7293 }
7294 
7295 /*
7296  * dp_txrx_dump_stats() -  Dump statistics
7297  * @value - Statistics option
7298  */
7299 static QDF_STATUS dp_txrx_dump_stats(void *psoc, uint16_t value,
7300 				     enum qdf_stats_verbosity_level level)
7301 {
7302 	struct dp_soc *soc =
7303 		(struct dp_soc *)psoc;
7304 	QDF_STATUS status = QDF_STATUS_SUCCESS;
7305 
7306 	if (!soc) {
7307 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
7308 			"%s: soc is NULL", __func__);
7309 		return QDF_STATUS_E_INVAL;
7310 	}
7311 
7312 	switch (value) {
7313 	case CDP_TXRX_PATH_STATS:
7314 		dp_txrx_path_stats(soc);
7315 		break;
7316 
7317 	case CDP_RX_RING_STATS:
7318 		dp_print_per_ring_stats(soc);
7319 		break;
7320 
7321 	case CDP_TXRX_TSO_STATS:
7322 		/* TODO: NOT IMPLEMENTED */
7323 		break;
7324 
7325 	case CDP_DUMP_TX_FLOW_POOL_INFO:
7326 		cdp_dump_flow_pool_info((struct cdp_soc_t *)soc);
7327 		break;
7328 
7329 	case CDP_DP_NAPI_STATS:
7330 		dp_print_napi_stats(soc);
7331 		break;
7332 
7333 	case CDP_TXRX_DESC_STATS:
7334 		/* TODO: NOT IMPLEMENTED */
7335 		break;
7336 
7337 	default:
7338 		status = QDF_STATUS_E_INVAL;
7339 		break;
7340 	}
7341 
7342 	return status;
7343 
7344 }
7345 
7346 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
7347 /**
7348  * dp_update_flow_control_parameters() - API to store datapath
7349  *                            config parameters
7350  * @soc: soc handle
7351  * @cfg: ini parameter handle
7352  *
7353  * Return: void
7354  */
7355 static inline
7356 void dp_update_flow_control_parameters(struct dp_soc *soc,
7357 				struct cdp_config_params *params)
7358 {
7359 	soc->wlan_cfg_ctx->tx_flow_stop_queue_threshold =
7360 					params->tx_flow_stop_queue_threshold;
7361 	soc->wlan_cfg_ctx->tx_flow_start_queue_offset =
7362 					params->tx_flow_start_queue_offset;
7363 }
7364 #else
7365 static inline
7366 void dp_update_flow_control_parameters(struct dp_soc *soc,
7367 				struct cdp_config_params *params)
7368 {
7369 }
7370 #endif
7371 
7372 /**
7373  * dp_update_config_parameters() - API to store datapath
7374  *                            config parameters
7375  * @soc: soc handle
7376  * @cfg: ini parameter handle
7377  *
7378  * Return: status
7379  */
7380 static
7381 QDF_STATUS dp_update_config_parameters(struct cdp_soc *psoc,
7382 				struct cdp_config_params *params)
7383 {
7384 	struct dp_soc *soc = (struct dp_soc *)psoc;
7385 
7386 	if (!(soc)) {
7387 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
7388 				"%s: Invalid handle", __func__);
7389 		return QDF_STATUS_E_INVAL;
7390 	}
7391 
7392 	soc->wlan_cfg_ctx->tso_enabled = params->tso_enable;
7393 	soc->wlan_cfg_ctx->lro_enabled = params->lro_enable;
7394 	soc->wlan_cfg_ctx->rx_hash = params->flow_steering_enable;
7395 	soc->wlan_cfg_ctx->tcp_udp_checksumoffload =
7396 				params->tcp_udp_checksumoffload;
7397 	soc->wlan_cfg_ctx->napi_enabled = params->napi_enable;
7398 	dp_update_flow_control_parameters(soc, params);
7399 
7400 	return QDF_STATUS_SUCCESS;
7401 }
7402 
7403 /**
7404  * dp_txrx_set_wds_rx_policy() - API to store datapath
7405  *                            config parameters
7406  * @vdev_handle - datapath vdev handle
7407  * @cfg: ini parameter handle
7408  *
7409  * Return: status
7410  */
7411 #ifdef WDS_VENDOR_EXTENSION
7412 void
7413 dp_txrx_set_wds_rx_policy(
7414 		struct cdp_vdev *vdev_handle,
7415 		u_int32_t val)
7416 {
7417 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
7418 	struct dp_peer *peer;
7419 	if (vdev->opmode == wlan_op_mode_ap) {
7420 		/* for ap, set it on bss_peer */
7421 		TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
7422 			if (peer->bss_peer) {
7423 				peer->wds_ecm.wds_rx_filter = 1;
7424 				peer->wds_ecm.wds_rx_ucast_4addr = (val & WDS_POLICY_RX_UCAST_4ADDR) ? 1:0;
7425 				peer->wds_ecm.wds_rx_mcast_4addr = (val & WDS_POLICY_RX_MCAST_4ADDR) ? 1:0;
7426 				break;
7427 			}
7428 		}
7429 	} else if (vdev->opmode == wlan_op_mode_sta) {
7430 		peer = TAILQ_FIRST(&vdev->peer_list);
7431 		peer->wds_ecm.wds_rx_filter = 1;
7432 		peer->wds_ecm.wds_rx_ucast_4addr = (val & WDS_POLICY_RX_UCAST_4ADDR) ? 1:0;
7433 		peer->wds_ecm.wds_rx_mcast_4addr = (val & WDS_POLICY_RX_MCAST_4ADDR) ? 1:0;
7434 	}
7435 }
7436 
7437 /**
7438  * dp_txrx_peer_wds_tx_policy_update() - API to set tx wds policy
7439  *
7440  * @peer_handle - datapath peer handle
7441  * @wds_tx_ucast: policy for unicast transmission
7442  * @wds_tx_mcast: policy for multicast transmission
7443  *
7444  * Return: void
7445  */
7446 void
7447 dp_txrx_peer_wds_tx_policy_update(struct cdp_peer *peer_handle,
7448 		int wds_tx_ucast, int wds_tx_mcast)
7449 {
7450 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
7451 	if (wds_tx_ucast || wds_tx_mcast) {
7452 		peer->wds_enabled = 1;
7453 		peer->wds_ecm.wds_tx_ucast_4addr = wds_tx_ucast;
7454 		peer->wds_ecm.wds_tx_mcast_4addr = wds_tx_mcast;
7455 	} else {
7456 		peer->wds_enabled = 0;
7457 		peer->wds_ecm.wds_tx_ucast_4addr = 0;
7458 		peer->wds_ecm.wds_tx_mcast_4addr = 0;
7459 	}
7460 
7461 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
7462 			FL("Policy Update set to :\
7463 				peer->wds_enabled %d\
7464 				peer->wds_ecm.wds_tx_ucast_4addr %d\
7465 				peer->wds_ecm.wds_tx_mcast_4addr %d"),
7466 				peer->wds_enabled, peer->wds_ecm.wds_tx_ucast_4addr,
7467 				peer->wds_ecm.wds_tx_mcast_4addr);
7468 	return;
7469 }
7470 #endif
7471 
7472 static struct cdp_wds_ops dp_ops_wds = {
7473 	.vdev_set_wds = dp_vdev_set_wds,
7474 #ifdef WDS_VENDOR_EXTENSION
7475 	.txrx_set_wds_rx_policy = dp_txrx_set_wds_rx_policy,
7476 	.txrx_wds_peer_tx_policy_update = dp_txrx_peer_wds_tx_policy_update,
7477 #endif
7478 };
7479 
7480 /*
7481  * dp_txrx_data_tx_cb_set(): set the callback for non standard tx
7482  * @vdev_handle - datapath vdev handle
7483  * @callback - callback function
7484  * @ctxt: callback context
7485  *
7486  */
7487 static void
7488 dp_txrx_data_tx_cb_set(struct cdp_vdev *vdev_handle,
7489 		       ol_txrx_data_tx_cb callback, void *ctxt)
7490 {
7491 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
7492 
7493 	vdev->tx_non_std_data_callback.func = callback;
7494 	vdev->tx_non_std_data_callback.ctxt = ctxt;
7495 }
7496 
7497 /**
7498  * dp_pdev_get_dp_txrx_handle() - get dp handle from pdev
7499  * @pdev_hdl: datapath pdev handle
7500  *
7501  * Return: opaque pointer to dp txrx handle
7502  */
7503 static void *dp_pdev_get_dp_txrx_handle(struct cdp_pdev *pdev_hdl)
7504 {
7505 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_hdl;
7506 
7507 	return pdev->dp_txrx_handle;
7508 }
7509 
7510 /**
7511  * dp_pdev_set_dp_txrx_handle() - set dp handle in pdev
7512  * @pdev_hdl: datapath pdev handle
7513  * @dp_txrx_hdl: opaque pointer for dp_txrx_handle
7514  *
7515  * Return: void
7516  */
7517 static void
7518 dp_pdev_set_dp_txrx_handle(struct cdp_pdev *pdev_hdl, void *dp_txrx_hdl)
7519 {
7520 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_hdl;
7521 
7522 	pdev->dp_txrx_handle = dp_txrx_hdl;
7523 }
7524 
7525 /**
7526  * dp_soc_get_dp_txrx_handle() - get context for external-dp from dp soc
7527  * @soc_handle: datapath soc handle
7528  *
7529  * Return: opaque pointer to external dp (non-core DP)
7530  */
7531 static void *dp_soc_get_dp_txrx_handle(struct cdp_soc *soc_handle)
7532 {
7533 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
7534 
7535 	return soc->external_txrx_handle;
7536 }
7537 
7538 /**
7539  * dp_soc_set_dp_txrx_handle() - set external dp handle in soc
7540  * @soc_handle: datapath soc handle
7541  * @txrx_handle: opaque pointer to external dp (non-core DP)
7542  *
7543  * Return: void
7544  */
7545 static void
7546 dp_soc_set_dp_txrx_handle(struct cdp_soc *soc_handle, void *txrx_handle)
7547 {
7548 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
7549 
7550 	soc->external_txrx_handle = txrx_handle;
7551 }
7552 
7553 #ifdef FEATURE_AST
7554 static void dp_peer_teardown_wifi3(struct cdp_vdev *vdev_hdl, void *peer_hdl)
7555 {
7556 	struct dp_vdev *vdev = (struct dp_vdev *) vdev_hdl;
7557 	struct dp_peer *peer = (struct dp_peer *) peer_hdl;
7558 	struct dp_soc *soc = (struct dp_soc *) vdev->pdev->soc;
7559 
7560 	/*
7561 	 * For BSS peer, new peer is not created on alloc_node if the
7562 	 * peer with same address already exists , instead refcnt is
7563 	 * increased for existing peer. Correspondingly in delete path,
7564 	 * only refcnt is decreased; and peer is only deleted , when all
7565 	 * references are deleted. So delete_in_progress should not be set
7566 	 * for bss_peer, unless only 2 reference remains (peer map reference
7567 	 * and peer hash table reference).
7568 	 */
7569 	if (peer->bss_peer && (qdf_atomic_read(&peer->ref_cnt) > 2)) {
7570 		return;
7571 	}
7572 
7573 	peer->delete_in_progress = true;
7574 	dp_peer_delete_ast_entries(soc, peer);
7575 }
7576 #endif
7577 
7578 #ifdef ATH_SUPPORT_NAC_RSSI
7579 /**
7580  * dp_vdev_get_neighbour_rssi(): Store RSSI for configured NAC
7581  * @vdev_hdl: DP vdev handle
7582  * @rssi: rssi value
7583  *
7584  * Return: 0 for success. nonzero for failure.
7585  */
7586 QDF_STATUS  dp_vdev_get_neighbour_rssi(struct cdp_vdev *vdev_hdl,
7587 				       char *mac_addr,
7588 				       uint8_t *rssi)
7589 {
7590 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_hdl;
7591 	struct dp_pdev *pdev = vdev->pdev;
7592 	struct dp_neighbour_peer *peer = NULL;
7593 	QDF_STATUS status = QDF_STATUS_E_FAILURE;
7594 
7595 	*rssi = 0;
7596 	qdf_spin_lock_bh(&pdev->neighbour_peer_mutex);
7597 	TAILQ_FOREACH(peer, &pdev->neighbour_peers_list,
7598 		      neighbour_peer_list_elem) {
7599 		if (qdf_mem_cmp(&peer->neighbour_peers_macaddr.raw[0],
7600 				mac_addr, DP_MAC_ADDR_LEN) == 0) {
7601 			*rssi = peer->rssi;
7602 			status = QDF_STATUS_SUCCESS;
7603 			break;
7604 		}
7605 	}
7606 	qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex);
7607 	return status;
7608 }
7609 
7610 static QDF_STATUS dp_config_for_nac_rssi(struct cdp_vdev *vdev_handle,
7611 		enum cdp_nac_param_cmd cmd, char *bssid, char *client_macaddr,
7612 		uint8_t chan_num)
7613 {
7614 
7615 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
7616 	struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;
7617 	struct dp_soc *soc = (struct dp_soc *) vdev->pdev->soc;
7618 
7619 	pdev->nac_rssi_filtering = 1;
7620 	/* Store address of NAC (neighbour peer) which will be checked
7621 	 * against TA of received packets.
7622 	 */
7623 
7624 	if (cmd == CDP_NAC_PARAM_ADD) {
7625 		dp_update_filter_neighbour_peers(vdev_handle, DP_NAC_PARAM_ADD,
7626 						 client_macaddr);
7627 	} else if (cmd == CDP_NAC_PARAM_DEL) {
7628 		dp_update_filter_neighbour_peers(vdev_handle,
7629 						 DP_NAC_PARAM_DEL,
7630 						 client_macaddr);
7631 	}
7632 
7633 	if (soc->cdp_soc.ol_ops->config_bssid_in_fw_for_nac_rssi)
7634 		soc->cdp_soc.ol_ops->config_bssid_in_fw_for_nac_rssi
7635 			((void *)vdev->pdev->ctrl_pdev,
7636 			 vdev->vdev_id, cmd, bssid);
7637 
7638 	return QDF_STATUS_SUCCESS;
7639 }
7640 #endif
7641 
7642 static QDF_STATUS dp_peer_map_attach_wifi3(struct cdp_soc_t  *soc_hdl,
7643 		uint32_t max_peers)
7644 {
7645 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
7646 
7647 	soc->max_peers = max_peers;
7648 
7649 	qdf_print ("%s max_peers %u\n", __func__, max_peers);
7650 
7651 	if (dp_peer_find_attach(soc))
7652 		return QDF_STATUS_E_FAILURE;
7653 
7654 	return QDF_STATUS_SUCCESS;
7655 }
7656 
7657 /**
7658  * dp_pdev_set_ctrl_pdev() - set ctrl pdev handle in dp pdev
7659  * @dp_pdev: dp pdev handle
7660  * @ctrl_pdev: UMAC ctrl pdev handle
7661  *
7662  * Return: void
7663  */
7664 static void dp_pdev_set_ctrl_pdev(struct cdp_pdev *dp_pdev,
7665 				  struct cdp_ctrl_objmgr_pdev *ctrl_pdev)
7666 {
7667 	struct dp_pdev *pdev = (struct dp_pdev *)dp_pdev;
7668 
7669 	pdev->ctrl_pdev = ctrl_pdev;
7670 }
7671 
7672 static struct cdp_cmn_ops dp_ops_cmn = {
7673 	.txrx_soc_attach_target = dp_soc_attach_target_wifi3,
7674 	.txrx_vdev_attach = dp_vdev_attach_wifi3,
7675 	.txrx_vdev_detach = dp_vdev_detach_wifi3,
7676 	.txrx_pdev_attach = dp_pdev_attach_wifi3,
7677 	.txrx_pdev_detach = dp_pdev_detach_wifi3,
7678 	.txrx_peer_create = dp_peer_create_wifi3,
7679 	.txrx_peer_setup = dp_peer_setup_wifi3,
7680 #ifdef FEATURE_AST
7681 	.txrx_peer_teardown = dp_peer_teardown_wifi3,
7682 #else
7683 	.txrx_peer_teardown = NULL,
7684 #endif
7685 	.txrx_peer_add_ast = dp_peer_add_ast_wifi3,
7686 	.txrx_peer_del_ast = dp_peer_del_ast_wifi3,
7687 	.txrx_peer_update_ast = dp_peer_update_ast_wifi3,
7688 	.txrx_peer_ast_hash_find = dp_peer_ast_hash_find_wifi3,
7689 	.txrx_peer_ast_get_pdev_id = dp_peer_ast_get_pdev_id_wifi3,
7690 	.txrx_peer_ast_get_next_hop = dp_peer_ast_get_next_hop_wifi3,
7691 	.txrx_peer_ast_set_type = dp_peer_ast_set_type_wifi3,
7692 	.txrx_peer_ast_get_type = dp_peer_ast_get_type_wifi3,
7693 	.txrx_peer_delete = dp_peer_delete_wifi3,
7694 	.txrx_vdev_register = dp_vdev_register_wifi3,
7695 	.txrx_soc_detach = dp_soc_detach_wifi3,
7696 	.txrx_get_vdev_mac_addr = dp_get_vdev_mac_addr_wifi3,
7697 	.txrx_get_vdev_from_vdev_id = dp_get_vdev_from_vdev_id_wifi3,
7698 	.txrx_get_ctrl_pdev_from_vdev = dp_get_ctrl_pdev_from_vdev_wifi3,
7699 	.txrx_ath_getstats = dp_get_device_stats,
7700 	.addba_requestprocess = dp_addba_requestprocess_wifi3,
7701 	.addba_responsesetup = dp_addba_responsesetup_wifi3,
7702 	.addba_resp_tx_completion = dp_addba_resp_tx_completion_wifi3,
7703 	.delba_process = dp_delba_process_wifi3,
7704 	.set_addba_response = dp_set_addba_response,
7705 	.get_peer_mac_addr_frm_id = dp_get_peer_mac_addr_frm_id,
7706 	.flush_cache_rx_queue = NULL,
7707 	/* TODO: get API's for dscp-tid need to be added*/
7708 	.set_vdev_dscp_tid_map = dp_set_vdev_dscp_tid_map_wifi3,
7709 	.set_pdev_dscp_tid_map = dp_set_pdev_dscp_tid_map_wifi3,
7710 	.txrx_stats_request = dp_txrx_stats_request,
7711 	.txrx_set_monitor_mode = dp_vdev_set_monitor_mode,
7712 	.txrx_get_pdev_id_frm_pdev = dp_get_pdev_id_frm_pdev,
7713 	.txrx_pdev_set_chan_noise_floor = dp_pdev_set_chan_noise_floor,
7714 	.txrx_set_nac = dp_set_nac,
7715 	.txrx_get_tx_pending = dp_get_tx_pending,
7716 	.txrx_set_pdev_tx_capture = dp_config_debug_sniffer,
7717 	.txrx_get_peer_mac_from_peer_id = dp_get_peer_mac_from_peer_id,
7718 	.display_stats = dp_txrx_dump_stats,
7719 	.txrx_soc_set_nss_cfg = dp_soc_set_nss_cfg_wifi3,
7720 	.txrx_soc_get_nss_cfg = dp_soc_get_nss_cfg_wifi3,
7721 	.txrx_intr_attach = dp_soc_interrupt_attach_wrapper,
7722 	.txrx_intr_detach = dp_soc_interrupt_detach,
7723 	.set_pn_check = dp_set_pn_check_wifi3,
7724 	.update_config_parameters = dp_update_config_parameters,
7725 	/* TODO: Add other functions */
7726 	.txrx_data_tx_cb_set = dp_txrx_data_tx_cb_set,
7727 	.get_dp_txrx_handle = dp_pdev_get_dp_txrx_handle,
7728 	.set_dp_txrx_handle = dp_pdev_set_dp_txrx_handle,
7729 	.get_soc_dp_txrx_handle = dp_soc_get_dp_txrx_handle,
7730 	.set_soc_dp_txrx_handle = dp_soc_set_dp_txrx_handle,
7731 	.txrx_set_ba_aging_timeout = dp_set_ba_aging_timeout,
7732 	.txrx_get_ba_aging_timeout = dp_get_ba_aging_timeout,
7733 	.tx_send = dp_tx_send,
7734 	.txrx_peer_reset_ast = dp_wds_reset_ast_wifi3,
7735 	.txrx_peer_reset_ast_table = dp_wds_reset_ast_table_wifi3,
7736 	.txrx_peer_flush_ast_table = dp_wds_flush_ast_table_wifi3,
7737 	.txrx_peer_map_attach = dp_peer_map_attach_wifi3,
7738 	.txrx_pdev_set_ctrl_pdev = dp_pdev_set_ctrl_pdev,
7739 	.txrx_get_os_rx_handles_from_vdev =
7740 					dp_get_os_rx_handles_from_vdev_wifi3,
7741 	.delba_tx_completion = dp_delba_tx_completion_wifi3,
7742 };
7743 
7744 static struct cdp_ctrl_ops dp_ops_ctrl = {
7745 	.txrx_peer_authorize = dp_peer_authorize,
7746 #ifdef QCA_SUPPORT_SON
7747 	.txrx_set_inact_params = dp_set_inact_params,
7748 	.txrx_start_inact_timer = dp_start_inact_timer,
7749 	.txrx_set_overload = dp_set_overload,
7750 	.txrx_peer_is_inact = dp_peer_is_inact,
7751 	.txrx_mark_peer_inact = dp_mark_peer_inact,
7752 #endif
7753 	.txrx_set_vdev_rx_decap_type = dp_set_vdev_rx_decap_type,
7754 	.txrx_set_tx_encap_type = dp_set_vdev_tx_encap_type,
7755 #ifdef MESH_MODE_SUPPORT
7756 	.txrx_set_mesh_mode  = dp_peer_set_mesh_mode,
7757 	.txrx_set_mesh_rx_filter = dp_peer_set_mesh_rx_filter,
7758 #endif
7759 	.txrx_set_vdev_param = dp_set_vdev_param,
7760 	.txrx_peer_set_nawds = dp_peer_set_nawds,
7761 	.txrx_set_pdev_reo_dest = dp_set_pdev_reo_dest,
7762 	.txrx_get_pdev_reo_dest = dp_get_pdev_reo_dest,
7763 	.txrx_set_filter_neighbour_peers = dp_set_filter_neighbour_peers,
7764 	.txrx_update_filter_neighbour_peers =
7765 		dp_update_filter_neighbour_peers,
7766 	.txrx_get_sec_type = dp_get_sec_type,
7767 	/* TODO: Add other functions */
7768 	.txrx_wdi_event_sub = dp_wdi_event_sub,
7769 	.txrx_wdi_event_unsub = dp_wdi_event_unsub,
7770 #ifdef WDI_EVENT_ENABLE
7771 	.txrx_get_pldev = dp_get_pldev,
7772 #endif
7773 	.txrx_set_pdev_param = dp_set_pdev_param,
7774 #ifdef ATH_SUPPORT_NAC_RSSI
7775 	.txrx_vdev_config_for_nac_rssi = dp_config_for_nac_rssi,
7776 	.txrx_vdev_get_neighbour_rssi = dp_vdev_get_neighbour_rssi,
7777 #endif
7778 	.set_key = dp_set_michael_key,
7779 };
7780 
7781 static struct cdp_me_ops dp_ops_me = {
7782 #ifdef ATH_SUPPORT_IQUE
7783 	.tx_me_alloc_descriptor = dp_tx_me_alloc_descriptor,
7784 	.tx_me_free_descriptor = dp_tx_me_free_descriptor,
7785 	.tx_me_convert_ucast = dp_tx_me_send_convert_ucast,
7786 #endif
7787 };
7788 
7789 static struct cdp_mon_ops dp_ops_mon = {
7790 	.txrx_monitor_set_filter_ucast_data = NULL,
7791 	.txrx_monitor_set_filter_mcast_data = NULL,
7792 	.txrx_monitor_set_filter_non_data = NULL,
7793 	.txrx_monitor_get_filter_ucast_data = dp_vdev_get_filter_ucast_data,
7794 	.txrx_monitor_get_filter_mcast_data = dp_vdev_get_filter_mcast_data,
7795 	.txrx_monitor_get_filter_non_data = dp_vdev_get_filter_non_data,
7796 	.txrx_reset_monitor_mode = dp_reset_monitor_mode,
7797 	/* Added support for HK advance filter */
7798 	.txrx_set_advance_monitor_filter = dp_pdev_set_advance_monitor_filter,
7799 };
7800 
7801 static struct cdp_host_stats_ops dp_ops_host_stats = {
7802 	.txrx_per_peer_stats = dp_get_host_peer_stats,
7803 	.get_fw_peer_stats = dp_get_fw_peer_stats,
7804 	.get_htt_stats = dp_get_htt_stats,
7805 	.txrx_enable_enhanced_stats = dp_enable_enhanced_stats,
7806 	.txrx_disable_enhanced_stats = dp_disable_enhanced_stats,
7807 	.txrx_stats_publish = dp_txrx_stats_publish,
7808 	.txrx_get_vdev_stats  = dp_txrx_get_vdev_stats,
7809 	.txrx_get_peer_stats = dp_txrx_get_peer_stats,
7810 	.txrx_reset_peer_stats = dp_txrx_reset_peer_stats,
7811 	/* TODO */
7812 };
7813 
7814 static struct cdp_raw_ops dp_ops_raw = {
7815 	/* TODO */
7816 };
7817 
7818 #ifdef CONFIG_WIN
7819 static struct cdp_pflow_ops dp_ops_pflow = {
7820 	/* TODO */
7821 };
7822 #endif /* CONFIG_WIN */
7823 
7824 #ifdef FEATURE_RUNTIME_PM
7825 /**
7826  * dp_runtime_suspend() - ensure DP is ready to runtime suspend
7827  * @opaque_pdev: DP pdev context
7828  *
7829  * DP is ready to runtime suspend if there are no pending TX packets.
7830  *
7831  * Return: QDF_STATUS
7832  */
7833 static QDF_STATUS dp_runtime_suspend(struct cdp_pdev *opaque_pdev)
7834 {
7835 	struct dp_pdev *pdev = (struct dp_pdev *)opaque_pdev;
7836 	struct dp_soc *soc = pdev->soc;
7837 
7838 	/* Abort if there are any pending TX packets */
7839 	if (dp_get_tx_pending(opaque_pdev) > 0) {
7840 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
7841 			  FL("Abort suspend due to pending TX packets"));
7842 		return QDF_STATUS_E_AGAIN;
7843 	}
7844 
7845 	if (soc->intr_mode == DP_INTR_POLL)
7846 		qdf_timer_stop(&soc->int_timer);
7847 
7848 	return QDF_STATUS_SUCCESS;
7849 }
7850 
7851 /**
7852  * dp_runtime_resume() - ensure DP is ready to runtime resume
7853  * @opaque_pdev: DP pdev context
7854  *
7855  * Resume DP for runtime PM.
7856  *
7857  * Return: QDF_STATUS
7858  */
7859 static QDF_STATUS dp_runtime_resume(struct cdp_pdev *opaque_pdev)
7860 {
7861 	struct dp_pdev *pdev = (struct dp_pdev *)opaque_pdev;
7862 	struct dp_soc *soc = pdev->soc;
7863 	void *hal_srng;
7864 	int i;
7865 
7866 	if (soc->intr_mode == DP_INTR_POLL)
7867 		qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
7868 
7869 	for (i = 0; i < MAX_TCL_DATA_RINGS; i++) {
7870 		hal_srng = soc->tcl_data_ring[i].hal_srng;
7871 		if (hal_srng) {
7872 			/* We actually only need to acquire the lock */
7873 			hal_srng_access_start(soc->hal_soc, hal_srng);
7874 			/* Update SRC ring head pointer for HW to send
7875 			   all pending packets */
7876 			hal_srng_access_end(soc->hal_soc, hal_srng);
7877 		}
7878 	}
7879 
7880 	return QDF_STATUS_SUCCESS;
7881 }
7882 #endif /* FEATURE_RUNTIME_PM */
7883 
7884 static QDF_STATUS dp_bus_suspend(struct cdp_pdev *opaque_pdev)
7885 {
7886 	struct dp_pdev *pdev = (struct dp_pdev *)opaque_pdev;
7887 	struct dp_soc *soc = pdev->soc;
7888 
7889 	if (soc->intr_mode == DP_INTR_POLL)
7890 		qdf_timer_stop(&soc->int_timer);
7891 
7892 	return QDF_STATUS_SUCCESS;
7893 }
7894 
7895 static QDF_STATUS dp_bus_resume(struct cdp_pdev *opaque_pdev)
7896 {
7897 	struct dp_pdev *pdev = (struct dp_pdev *)opaque_pdev;
7898 	struct dp_soc *soc = pdev->soc;
7899 
7900 	if (soc->intr_mode == DP_INTR_POLL)
7901 		qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
7902 
7903 	return QDF_STATUS_SUCCESS;
7904 }
7905 
7906 #ifndef CONFIG_WIN
7907 static struct cdp_misc_ops dp_ops_misc = {
7908 	.tx_non_std = dp_tx_non_std,
7909 	.get_opmode = dp_get_opmode,
7910 #ifdef FEATURE_RUNTIME_PM
7911 	.runtime_suspend = dp_runtime_suspend,
7912 	.runtime_resume = dp_runtime_resume,
7913 #endif /* FEATURE_RUNTIME_PM */
7914 	.pkt_log_init = dp_pkt_log_init,
7915 	.pkt_log_con_service = dp_pkt_log_con_service,
7916 };
7917 
7918 static struct cdp_flowctl_ops dp_ops_flowctl = {
7919 	/* WIFI 3.0 DP implement as required. */
7920 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
7921 	.flow_pool_map_handler = dp_tx_flow_pool_map,
7922 	.flow_pool_unmap_handler = dp_tx_flow_pool_unmap,
7923 	.register_pause_cb = dp_txrx_register_pause_cb,
7924 	.dump_flow_pool_info = dp_tx_dump_flow_pool_info,
7925 #endif /* QCA_LL_TX_FLOW_CONTROL_V2 */
7926 };
7927 
7928 static struct cdp_lflowctl_ops dp_ops_l_flowctl = {
7929 	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
7930 };
7931 
7932 #ifdef IPA_OFFLOAD
7933 static struct cdp_ipa_ops dp_ops_ipa = {
7934 	.ipa_get_resource = dp_ipa_get_resource,
7935 	.ipa_set_doorbell_paddr = dp_ipa_set_doorbell_paddr,
7936 	.ipa_op_response = dp_ipa_op_response,
7937 	.ipa_register_op_cb = dp_ipa_register_op_cb,
7938 	.ipa_get_stat = dp_ipa_get_stat,
7939 	.ipa_tx_data_frame = dp_tx_send_ipa_data_frame,
7940 	.ipa_enable_autonomy = dp_ipa_enable_autonomy,
7941 	.ipa_disable_autonomy = dp_ipa_disable_autonomy,
7942 	.ipa_setup = dp_ipa_setup,
7943 	.ipa_cleanup = dp_ipa_cleanup,
7944 	.ipa_setup_iface = dp_ipa_setup_iface,
7945 	.ipa_cleanup_iface = dp_ipa_cleanup_iface,
7946 	.ipa_enable_pipes = dp_ipa_enable_pipes,
7947 	.ipa_disable_pipes = dp_ipa_disable_pipes,
7948 	.ipa_set_perf_level = dp_ipa_set_perf_level
7949 };
7950 #endif
7951 
7952 static struct cdp_bus_ops dp_ops_bus = {
7953 	.bus_suspend = dp_bus_suspend,
7954 	.bus_resume = dp_bus_resume
7955 };
7956 
7957 static struct cdp_ocb_ops dp_ops_ocb = {
7958 	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
7959 };
7960 
7961 
7962 static struct cdp_throttle_ops dp_ops_throttle = {
7963 	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
7964 };
7965 
7966 static struct cdp_mob_stats_ops dp_ops_mob_stats = {
7967 	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
7968 };
7969 
7970 static struct cdp_cfg_ops dp_ops_cfg = {
7971 	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
7972 };
7973 
7974 /*
7975  * dp_peer_get_ref_find_by_addr - get peer with addr by ref count inc
7976  * @dev: physical device instance
7977  * @peer_mac_addr: peer mac address
7978  * @local_id: local id for the peer
7979  * @debug_id: to track enum peer access
7980  *
7981  * Return: peer instance pointer
7982  */
7983 static inline void *
7984 dp_peer_get_ref_find_by_addr(struct cdp_pdev *dev, u8 *peer_mac_addr,
7985 			     u8 *local_id, enum peer_debug_id_type debug_id)
7986 {
7987 	struct dp_pdev *pdev = (struct dp_pdev *)dev;
7988 	struct dp_peer *peer;
7989 
7990 	peer = dp_peer_find_hash_find(pdev->soc, peer_mac_addr, 0, DP_VDEV_ALL);
7991 
7992 	if (!peer)
7993 		return NULL;
7994 
7995 	*local_id = peer->local_id;
7996 	DP_TRACE(INFO, "%s: peer %pK id %d", __func__, peer, *local_id);
7997 
7998 	return peer;
7999 }
8000 
8001 /*
8002  * dp_peer_release_ref - release peer ref count
8003  * @peer: peer handle
8004  * @debug_id: to track enum peer access
8005  *
8006  * Return: None
8007  */
8008 static inline
8009 void dp_peer_release_ref(void *peer, enum peer_debug_id_type debug_id)
8010 {
8011 	dp_peer_unref_delete(peer);
8012 }
8013 
8014 static struct cdp_peer_ops dp_ops_peer = {
8015 	.register_peer = dp_register_peer,
8016 	.clear_peer = dp_clear_peer,
8017 	.find_peer_by_addr = dp_find_peer_by_addr,
8018 	.find_peer_by_addr_and_vdev = dp_find_peer_by_addr_and_vdev,
8019 	.peer_get_ref_by_addr = dp_peer_get_ref_find_by_addr,
8020 	.peer_release_ref = dp_peer_release_ref,
8021 	.local_peer_id = dp_local_peer_id,
8022 	.peer_find_by_local_id = dp_peer_find_by_local_id,
8023 	.peer_state_update = dp_peer_state_update,
8024 	.get_vdevid = dp_get_vdevid,
8025 	.get_vdev_by_sta_id = dp_get_vdev_by_sta_id,
8026 	.peer_get_peer_mac_addr = dp_peer_get_peer_mac_addr,
8027 	.get_vdev_for_peer = dp_get_vdev_for_peer,
8028 	.get_peer_state = dp_get_peer_state,
8029 };
8030 #endif
8031 
8032 static struct cdp_ops dp_txrx_ops = {
8033 	.cmn_drv_ops = &dp_ops_cmn,
8034 	.ctrl_ops = &dp_ops_ctrl,
8035 	.me_ops = &dp_ops_me,
8036 	.mon_ops = &dp_ops_mon,
8037 	.host_stats_ops = &dp_ops_host_stats,
8038 	.wds_ops = &dp_ops_wds,
8039 	.raw_ops = &dp_ops_raw,
8040 #ifdef CONFIG_WIN
8041 	.pflow_ops = &dp_ops_pflow,
8042 #endif /* CONFIG_WIN */
8043 #ifndef CONFIG_WIN
8044 	.misc_ops = &dp_ops_misc,
8045 	.cfg_ops = &dp_ops_cfg,
8046 	.flowctl_ops = &dp_ops_flowctl,
8047 	.l_flowctl_ops = &dp_ops_l_flowctl,
8048 #ifdef IPA_OFFLOAD
8049 	.ipa_ops = &dp_ops_ipa,
8050 #endif
8051 	.bus_ops = &dp_ops_bus,
8052 	.ocb_ops = &dp_ops_ocb,
8053 	.peer_ops = &dp_ops_peer,
8054 	.throttle_ops = &dp_ops_throttle,
8055 	.mob_stats_ops = &dp_ops_mob_stats,
8056 #endif
8057 };
8058 
8059 /*
8060  * dp_soc_set_txrx_ring_map()
8061  * @dp_soc: DP handler for soc
8062  *
8063  * Return: Void
8064  */
8065 static void dp_soc_set_txrx_ring_map(struct dp_soc *soc)
8066 {
8067 	uint32_t i;
8068 	for (i = 0; i < WLAN_CFG_INT_NUM_CONTEXTS; i++) {
8069 		soc->tx_ring_map[i] = dp_cpu_ring_map[DP_DEFAULT_MAP][i];
8070 	}
8071 }
8072 
8073 #ifdef QCA_WIFI_QCA8074
8074 /**
8075  * dp_soc_attach_wifi3() - Attach txrx SOC
8076  * @ctrl_psoc:	Opaque SOC handle from control plane
8077  * @htc_handle:	Opaque HTC handle
8078  * @hif_handle:	Opaque HIF handle
8079  * @qdf_osdev:	QDF device
8080  * @ol_ops:	Offload Operations
8081  * @device_id:	Device ID
8082  *
8083  * Return: DP SOC handle on success, NULL on failure
8084  */
8085 void *dp_soc_attach_wifi3(void *ctrl_psoc, void *hif_handle,
8086 			  HTC_HANDLE htc_handle, qdf_device_t qdf_osdev,
8087 			  struct ol_if_ops *ol_ops, uint16_t device_id)
8088 {
8089 	struct dp_soc *soc = qdf_mem_malloc(sizeof(*soc));
8090 	int target_type;
8091 
8092 	if (!soc) {
8093 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
8094 			FL("DP SOC memory allocation failed"));
8095 		goto fail0;
8096 	}
8097 
8098 	soc->device_id = device_id;
8099 	soc->cdp_soc.ops = &dp_txrx_ops;
8100 	soc->cdp_soc.ol_ops = ol_ops;
8101 	soc->ctrl_psoc = ctrl_psoc;
8102 	soc->osdev = qdf_osdev;
8103 	soc->hif_handle = hif_handle;
8104 
8105 	soc->hal_soc = hif_get_hal_handle(hif_handle);
8106 	soc->htt_handle = htt_soc_attach(soc, ctrl_psoc, htc_handle,
8107 		soc->hal_soc, qdf_osdev);
8108 	if (!soc->htt_handle) {
8109 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
8110 			FL("HTT attach failed"));
8111 		goto fail1;
8112 	}
8113 
8114 	soc->wlan_cfg_ctx = wlan_cfg_soc_attach(soc->ctrl_psoc);
8115 	if (!soc->wlan_cfg_ctx) {
8116 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
8117 			FL("wlan_cfg_soc_attach failed"));
8118 		goto fail2;
8119 	}
8120 	target_type = hal_get_target_type(soc->hal_soc);
8121 	switch (target_type) {
8122 	case TARGET_TYPE_QCA6290:
8123 #ifdef QCA_WIFI_QCA6390
8124 	case TARGET_TYPE_QCA6390:
8125 #endif
8126 		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
8127 					       REO_DST_RING_SIZE_QCA6290);
8128 		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, true);
8129 		break;
8130 	case TARGET_TYPE_QCA8074:
8131 		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
8132 					       REO_DST_RING_SIZE_QCA8074);
8133 		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, true);
8134 		soc->hw_nac_monitor_support = 1;
8135 		break;
8136 	case TARGET_TYPE_QCA8074V2:
8137 		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
8138 					       REO_DST_RING_SIZE_QCA8074);
8139 		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, false);
8140 		soc->hw_nac_monitor_support = 1;
8141 		break;
8142 	default:
8143 		qdf_print("%s: Unknown tgt type %d\n", __func__, target_type);
8144 		qdf_assert_always(0);
8145 		break;
8146 	}
8147 
8148 	wlan_cfg_set_rx_hash(soc->wlan_cfg_ctx,
8149 			     cfg_get(ctrl_psoc, CFG_DP_RX_HASH));
8150 	soc->cce_disable = false;
8151 
8152 	if (soc->cdp_soc.ol_ops->get_dp_cfg_param) {
8153 		int ret = soc->cdp_soc.ol_ops->get_dp_cfg_param(soc->ctrl_psoc,
8154 				CDP_CFG_MAX_PEER_ID);
8155 
8156 		if (ret != -EINVAL) {
8157 			wlan_cfg_set_max_peer_id(soc->wlan_cfg_ctx, ret);
8158 		}
8159 
8160 		ret = soc->cdp_soc.ol_ops->get_dp_cfg_param(soc->ctrl_psoc,
8161 				CDP_CFG_CCE_DISABLE);
8162 		if (ret == 1)
8163 			soc->cce_disable = true;
8164 	}
8165 
8166 	qdf_spinlock_create(&soc->peer_ref_mutex);
8167 
8168 	qdf_spinlock_create(&soc->reo_desc_freelist_lock);
8169 	qdf_list_create(&soc->reo_desc_freelist, REO_DESC_FREELIST_SIZE);
8170 
8171 	/* fill the tx/rx cpu ring map*/
8172 	dp_soc_set_txrx_ring_map(soc);
8173 
8174 	qdf_spinlock_create(&soc->htt_stats.lock);
8175 	/* initialize work queue for stats processing */
8176 	qdf_create_work(0, &soc->htt_stats.work, htt_t2h_stats_handler, soc);
8177 
8178 	/*Initialize inactivity timer for wifison */
8179 	dp_init_inact_timer(soc);
8180 
8181 	return (void *)soc;
8182 
8183 fail2:
8184 	htt_soc_detach(soc->htt_handle);
8185 fail1:
8186 	qdf_mem_free(soc);
8187 fail0:
8188 	return NULL;
8189 }
8190 #endif
8191 
8192 /*
8193  * dp_get_pdev_for_mac_id() -  Return pdev for mac_id
8194  *
8195  * @soc: handle to DP soc
8196  * @mac_id: MAC id
8197  *
8198  * Return: Return pdev corresponding to MAC
8199  */
8200 void *dp_get_pdev_for_mac_id(struct dp_soc *soc, uint32_t mac_id)
8201 {
8202 	if (wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx))
8203 		return soc->pdev_list[mac_id];
8204 
8205 	/* Typically for MCL as there only 1 PDEV*/
8206 	return soc->pdev_list[0];
8207 }
8208 
8209 /*
8210  * dp_is_hw_dbs_enable() - Procedure to check if DBS is supported
8211  * @soc:		DP SoC context
8212  * @max_mac_rings:	No of MAC rings
8213  *
8214  * Return: None
8215  */
8216 static
8217 void dp_is_hw_dbs_enable(struct dp_soc *soc,
8218 				int *max_mac_rings)
8219 {
8220 	bool dbs_enable = false;
8221 	if (soc->cdp_soc.ol_ops->is_hw_dbs_2x2_capable)
8222 		dbs_enable = soc->cdp_soc.ol_ops->
8223 		is_hw_dbs_2x2_capable(soc->ctrl_psoc);
8224 
8225 	*max_mac_rings = (dbs_enable)?(*max_mac_rings):1;
8226 }
8227 
8228 /*
8229 * dp_set_pktlog_wifi3() - attach txrx vdev
8230 * @pdev: Datapath PDEV handle
8231 * @event: which event's notifications are being subscribed to
8232 * @enable: WDI event subscribe or not. (True or False)
8233 *
8234 * Return: Success, NULL on failure
8235 */
8236 #ifdef WDI_EVENT_ENABLE
8237 int dp_set_pktlog_wifi3(struct dp_pdev *pdev, uint32_t event,
8238 	bool enable)
8239 {
8240 	struct dp_soc *soc = pdev->soc;
8241 	struct htt_rx_ring_tlv_filter htt_tlv_filter = {0};
8242 	int max_mac_rings = wlan_cfg_get_num_mac_rings
8243 					(pdev->wlan_cfg_ctx);
8244 	uint8_t mac_id = 0;
8245 
8246 	dp_is_hw_dbs_enable(soc, &max_mac_rings);
8247 
8248 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
8249 			FL("Max_mac_rings %d "),
8250 			max_mac_rings);
8251 
8252 	if (enable) {
8253 		switch (event) {
8254 		case WDI_EVENT_RX_DESC:
8255 			if (pdev->monitor_vdev) {
8256 				/* Nothing needs to be done if monitor mode is
8257 				 * enabled
8258 				 */
8259 				return 0;
8260 			}
8261 			if (pdev->rx_pktlog_mode != DP_RX_PKTLOG_FULL) {
8262 				pdev->rx_pktlog_mode = DP_RX_PKTLOG_FULL;
8263 				htt_tlv_filter.mpdu_start = 1;
8264 				htt_tlv_filter.msdu_start = 1;
8265 				htt_tlv_filter.msdu_end = 1;
8266 				htt_tlv_filter.mpdu_end = 1;
8267 				htt_tlv_filter.packet_header = 1;
8268 				htt_tlv_filter.attention = 1;
8269 				htt_tlv_filter.ppdu_start = 1;
8270 				htt_tlv_filter.ppdu_end = 1;
8271 				htt_tlv_filter.ppdu_end_user_stats = 1;
8272 				htt_tlv_filter.ppdu_end_user_stats_ext = 1;
8273 				htt_tlv_filter.ppdu_end_status_done = 1;
8274 				htt_tlv_filter.enable_fp = 1;
8275 				htt_tlv_filter.fp_mgmt_filter = FILTER_MGMT_ALL;
8276 				htt_tlv_filter.fp_ctrl_filter = FILTER_CTRL_ALL;
8277 				htt_tlv_filter.fp_data_filter = FILTER_DATA_ALL;
8278 				htt_tlv_filter.mo_mgmt_filter = FILTER_MGMT_ALL;
8279 				htt_tlv_filter.mo_ctrl_filter = FILTER_CTRL_ALL;
8280 				htt_tlv_filter.mo_data_filter = FILTER_DATA_ALL;
8281 
8282 				for (mac_id = 0; mac_id < max_mac_rings;
8283 								mac_id++) {
8284 					int mac_for_pdev =
8285 						dp_get_mac_id_for_pdev(mac_id,
8286 								pdev->pdev_id);
8287 
8288 					htt_h2t_rx_ring_cfg(soc->htt_handle,
8289 					 mac_for_pdev,
8290 					 pdev->rxdma_mon_status_ring[mac_id]
8291 					 .hal_srng,
8292 					 RXDMA_MONITOR_STATUS,
8293 					 RX_BUFFER_SIZE,
8294 					 &htt_tlv_filter);
8295 
8296 				}
8297 
8298 				if (soc->reap_timer_init)
8299 					qdf_timer_mod(&soc->mon_reap_timer,
8300 					DP_INTR_POLL_TIMER_MS);
8301 			}
8302 			break;
8303 
8304 		case WDI_EVENT_LITE_RX:
8305 			if (pdev->monitor_vdev) {
8306 				/* Nothing needs to be done if monitor mode is
8307 				 * enabled
8308 				 */
8309 				return 0;
8310 			}
8311 
8312 			if (pdev->rx_pktlog_mode != DP_RX_PKTLOG_LITE) {
8313 				pdev->rx_pktlog_mode = DP_RX_PKTLOG_LITE;
8314 
8315 				htt_tlv_filter.ppdu_start = 1;
8316 				htt_tlv_filter.ppdu_end = 1;
8317 				htt_tlv_filter.ppdu_end_user_stats = 1;
8318 				htt_tlv_filter.ppdu_end_user_stats_ext = 1;
8319 				htt_tlv_filter.ppdu_end_status_done = 1;
8320 				htt_tlv_filter.mpdu_start = 1;
8321 				htt_tlv_filter.enable_fp = 1;
8322 				htt_tlv_filter.fp_mgmt_filter = FILTER_MGMT_ALL;
8323 				htt_tlv_filter.fp_ctrl_filter = FILTER_CTRL_ALL;
8324 				htt_tlv_filter.fp_data_filter = FILTER_DATA_ALL;
8325 				htt_tlv_filter.mo_mgmt_filter = FILTER_MGMT_ALL;
8326 				htt_tlv_filter.mo_ctrl_filter = FILTER_CTRL_ALL;
8327 				htt_tlv_filter.mo_data_filter = FILTER_DATA_ALL;
8328 
8329 				for (mac_id = 0; mac_id < max_mac_rings;
8330 								mac_id++) {
8331 					int mac_for_pdev =
8332 						dp_get_mac_id_for_pdev(mac_id,
8333 								pdev->pdev_id);
8334 
8335 					htt_h2t_rx_ring_cfg(soc->htt_handle,
8336 					mac_for_pdev,
8337 					pdev->rxdma_mon_status_ring[mac_id]
8338 					.hal_srng,
8339 					RXDMA_MONITOR_STATUS,
8340 					RX_BUFFER_SIZE_PKTLOG_LITE,
8341 					&htt_tlv_filter);
8342 				}
8343 
8344 				if (soc->reap_timer_init)
8345 					qdf_timer_mod(&soc->mon_reap_timer,
8346 					DP_INTR_POLL_TIMER_MS);
8347 			}
8348 			break;
8349 
8350 		case WDI_EVENT_LITE_T2H:
8351 			if (pdev->monitor_vdev) {
8352 				/* Nothing needs to be done if monitor mode is
8353 				 * enabled
8354 				 */
8355 				return 0;
8356 			}
8357 
8358 			for (mac_id = 0; mac_id < max_mac_rings; mac_id++) {
8359 				int mac_for_pdev = dp_get_mac_id_for_pdev(
8360 							mac_id,	pdev->pdev_id);
8361 
8362 				pdev->pktlog_ppdu_stats = true;
8363 				dp_h2t_cfg_stats_msg_send(pdev,
8364 					DP_PPDU_TXLITE_STATS_BITMASK_CFG,
8365 					mac_for_pdev);
8366 			}
8367 			break;
8368 
8369 		default:
8370 			/* Nothing needs to be done for other pktlog types */
8371 			break;
8372 		}
8373 	} else {
8374 		switch (event) {
8375 		case WDI_EVENT_RX_DESC:
8376 		case WDI_EVENT_LITE_RX:
8377 			if (pdev->monitor_vdev) {
8378 				/* Nothing needs to be done if monitor mode is
8379 				 * enabled
8380 				 */
8381 				return 0;
8382 			}
8383 			if (pdev->rx_pktlog_mode != DP_RX_PKTLOG_DISABLED) {
8384 				pdev->rx_pktlog_mode = DP_RX_PKTLOG_DISABLED;
8385 
8386 				for (mac_id = 0; mac_id < max_mac_rings;
8387 								mac_id++) {
8388 					int mac_for_pdev =
8389 						dp_get_mac_id_for_pdev(mac_id,
8390 								pdev->pdev_id);
8391 
8392 					htt_h2t_rx_ring_cfg(soc->htt_handle,
8393 					  mac_for_pdev,
8394 					  pdev->rxdma_mon_status_ring[mac_id]
8395 					  .hal_srng,
8396 					  RXDMA_MONITOR_STATUS,
8397 					  RX_BUFFER_SIZE,
8398 					  &htt_tlv_filter);
8399 				}
8400 
8401 				if (soc->reap_timer_init)
8402 					qdf_timer_stop(&soc->mon_reap_timer);
8403 			}
8404 			break;
8405 		case WDI_EVENT_LITE_T2H:
8406 			if (pdev->monitor_vdev) {
8407 				/* Nothing needs to be done if monitor mode is
8408 				 * enabled
8409 				 */
8410 				return 0;
8411 			}
8412 			/* To disable HTT_H2T_MSG_TYPE_PPDU_STATS_CFG in FW
8413 			 * passing value 0. Once these macros will define in htt
8414 			 * header file will use proper macros
8415 			*/
8416 			for (mac_id = 0; mac_id < max_mac_rings; mac_id++) {
8417 				int mac_for_pdev =
8418 						dp_get_mac_id_for_pdev(mac_id,
8419 								pdev->pdev_id);
8420 
8421 				pdev->pktlog_ppdu_stats = false;
8422 				if (!pdev->enhanced_stats_en && !pdev->tx_sniffer_enable && !pdev->mcopy_mode) {
8423 					dp_h2t_cfg_stats_msg_send(pdev, 0,
8424 								mac_for_pdev);
8425 				} else if (pdev->tx_sniffer_enable || pdev->mcopy_mode) {
8426 					dp_h2t_cfg_stats_msg_send(pdev, DP_PPDU_STATS_CFG_SNIFFER,
8427 								mac_for_pdev);
8428 				} else if (pdev->enhanced_stats_en) {
8429 					dp_h2t_cfg_stats_msg_send(pdev, DP_PPDU_STATS_CFG_ENH_STATS,
8430 								mac_for_pdev);
8431 				}
8432 			}
8433 
8434 			break;
8435 		default:
8436 			/* Nothing needs to be done for other pktlog types */
8437 			break;
8438 		}
8439 	}
8440 	return 0;
8441 }
8442 #endif
8443