xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/dp_main.c (revision 1b9674e21e24478fba4530f5ae7396b9555e9c6a)
1 /*
2  * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include <qdf_types.h>
20 #include <qdf_lock.h>
21 #include <qdf_net_types.h>
22 #include <qdf_lro.h>
23 #include <qdf_module.h>
24 #include <hal_hw_headers.h>
25 #include <hal_api.h>
26 #include <hif.h>
27 #include <htt.h>
28 #include <wdi_event.h>
29 #include <queue.h>
30 #include "dp_htt.h"
31 #include "dp_types.h"
32 #include "dp_internal.h"
33 #include "dp_tx.h"
34 #include "dp_tx_desc.h"
35 #include "dp_rx.h"
36 #include <cdp_txrx_handle.h>
37 #include <wlan_cfg.h>
38 #include "cdp_txrx_cmn_struct.h"
39 #include "cdp_txrx_stats_struct.h"
40 #include "cdp_txrx_cmn_reg.h"
41 #include <qdf_util.h>
42 #include "dp_peer.h"
43 #include "dp_rx_mon.h"
44 #include "htt_stats.h"
45 #include "qdf_mem.h"   /* qdf_mem_malloc,free */
46 #include "cfg_ucfg_api.h"
47 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
48 #include "cdp_txrx_flow_ctrl_v2.h"
49 #else
50 static inline void
51 cdp_dump_flow_pool_info(struct cdp_soc_t *soc)
52 {
53 	return;
54 }
55 #endif
56 #include "dp_ipa.h"
57 
58 #ifdef CONFIG_MCL
59 #ifndef REMOVE_PKT_LOG
60 #include <pktlog_ac_api.h>
61 #include <pktlog_ac.h>
62 #endif
63 #endif
64 static void dp_pktlogmod_exit(struct dp_pdev *handle);
65 static void *dp_peer_create_wifi3(struct cdp_vdev *vdev_handle,
66 				uint8_t *peer_mac_addr,
67 				struct cdp_ctrl_objmgr_peer *ctrl_peer);
68 static void dp_peer_delete_wifi3(void *peer_handle, uint32_t bitmap);
69 static void dp_ppdu_ring_reset(struct dp_pdev *pdev);
70 static void dp_ppdu_ring_cfg(struct dp_pdev *pdev);
71 
72 #define DP_INTR_POLL_TIMER_MS	10
73 #define DP_WDS_AGING_TIMER_DEFAULT_MS	120000
74 #define DP_MCS_LENGTH (6*MAX_MCS)
75 #define DP_NSS_LENGTH (6*SS_COUNT)
76 #define DP_RXDMA_ERR_LENGTH (6*HAL_RXDMA_ERR_MAX)
77 #define DP_REO_ERR_LENGTH (6*HAL_REO_ERR_MAX)
78 #define DP_MAX_MCS_STRING_LEN 30
79 #define DP_CURR_FW_STATS_AVAIL 19
80 #define DP_HTT_DBG_EXT_STATS_MAX 256
81 #define DP_MAX_SLEEP_TIME 100
82 
83 #ifdef IPA_OFFLOAD
84 /* Exclude IPA rings from the interrupt context */
85 #define TX_RING_MASK_VAL	0xb
86 #define RX_RING_MASK_VAL	0x7
87 #else
88 #define TX_RING_MASK_VAL	0xF
89 #define RX_RING_MASK_VAL	0xF
90 #endif
91 
92 #define STR_MAXLEN	64
93 
94 #define DP_PPDU_STATS_CFG_ALL 0xFFFF
95 
96 /* PPDU stats mask sent to FW to enable enhanced stats */
97 #define DP_PPDU_STATS_CFG_ENH_STATS 0xE67
98 /* PPDU stats mask sent to FW to support debug sniffer feature */
99 #define DP_PPDU_STATS_CFG_SNIFFER 0x2FFF
100 /* PPDU stats mask sent to FW to support BPR feature*/
101 #define DP_PPDU_STATS_CFG_BPR 0x2000
102 /* PPDU stats mask sent to FW to support BPR and enhanced stats feature */
103 #define DP_PPDU_STATS_CFG_BPR_ENH (DP_PPDU_STATS_CFG_BPR | \
104 				   DP_PPDU_STATS_CFG_ENH_STATS)
105 /* PPDU stats mask sent to FW to support BPR and pcktlog stats feature */
106 #define DP_PPDU_STATS_CFG_BPR_PKTLOG (DP_PPDU_STATS_CFG_BPR | \
107 				      DP_PPDU_TXLITE_STATS_BITMASK_CFG)
108 
109 #define RNG_ERR		"SRNG setup failed for"
110 /**
111  * default_dscp_tid_map - Default DSCP-TID mapping
112  *
113  * DSCP        TID
114  * 000000      0
115  * 001000      1
116  * 010000      2
117  * 011000      3
118  * 100000      4
119  * 101000      5
120  * 110000      6
121  * 111000      7
122  */
123 static uint8_t default_dscp_tid_map[DSCP_TID_MAP_MAX] = {
124 	0, 0, 0, 0, 0, 0, 0, 0,
125 	1, 1, 1, 1, 1, 1, 1, 1,
126 	2, 2, 2, 2, 2, 2, 2, 2,
127 	3, 3, 3, 3, 3, 3, 3, 3,
128 	4, 4, 4, 4, 4, 4, 4, 4,
129 	5, 5, 5, 5, 5, 5, 5, 5,
130 	6, 6, 6, 6, 6, 6, 6, 6,
131 	7, 7, 7, 7, 7, 7, 7, 7,
132 };
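/*
 * The table is indexed by the 6-bit DSCP value: each block of eight
 * consecutive DSCP codepoints shares one TID. For example, DSCP 46
 * (Expedited Forwarding, 101110b) falls in the 101xxx block above and is
 * therefore mapped to TID 5.
 */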
133 
134 /*
135  * struct dp_rate_debug
136  *
137  * @mcs_type: print string for a given mcs
138  * @valid: valid mcs rate?
139  */
140 struct dp_rate_debug {
141 	char mcs_type[DP_MAX_MCS_STRING_LEN];
142 	uint8_t valid;
143 };
144 
145 #define MCS_VALID 1
146 #define MCS_INVALID 0
147 
148 static const struct dp_rate_debug dp_rate_string[DOT11_MAX][MAX_MCS] = {
149 
150 	{
151 		{"OFDM 48 Mbps", MCS_VALID},
152 		{"OFDM 24 Mbps", MCS_VALID},
153 		{"OFDM 12 Mbps", MCS_VALID},
154 		{"OFDM 6 Mbps ", MCS_VALID},
155 		{"OFDM 54 Mbps", MCS_VALID},
156 		{"OFDM 36 Mbps", MCS_VALID},
157 		{"OFDM 18 Mbps", MCS_VALID},
158 		{"OFDM 9 Mbps ", MCS_VALID},
159 		{"INVALID ", MCS_INVALID},
160 		{"INVALID ", MCS_INVALID},
161 		{"INVALID ", MCS_INVALID},
162 		{"INVALID ", MCS_INVALID},
163 		{"INVALID ", MCS_VALID},
164 	},
165 	{
166 		{"CCK 11 Mbps Long  ", MCS_VALID},
167 		{"CCK 5.5 Mbps Long ", MCS_VALID},
168 		{"CCK 2 Mbps Long   ", MCS_VALID},
169 		{"CCK 1 Mbps Long   ", MCS_VALID},
170 		{"CCK 11 Mbps Short ", MCS_VALID},
171 		{"CCK 5.5 Mbps Short", MCS_VALID},
172 		{"CCK 2 Mbps Short  ", MCS_VALID},
173 		{"INVALID ", MCS_INVALID},
174 		{"INVALID ", MCS_INVALID},
175 		{"INVALID ", MCS_INVALID},
176 		{"INVALID ", MCS_INVALID},
177 		{"INVALID ", MCS_INVALID},
178 		{"INVALID ", MCS_VALID},
179 	},
180 	{
181 		{"HT MCS 0 (BPSK 1/2)  ", MCS_VALID},
182 		{"HT MCS 1 (QPSK 1/2)  ", MCS_VALID},
183 		{"HT MCS 2 (QPSK 3/4)  ", MCS_VALID},
184 		{"HT MCS 3 (16-QAM 1/2)", MCS_VALID},
185 		{"HT MCS 4 (16-QAM 3/4)", MCS_VALID},
186 		{"HT MCS 5 (64-QAM 2/3)", MCS_VALID},
187 		{"HT MCS 6 (64-QAM 3/4)", MCS_VALID},
188 		{"HT MCS 7 (64-QAM 5/6)", MCS_VALID},
189 		{"INVALID ", MCS_INVALID},
190 		{"INVALID ", MCS_INVALID},
191 		{"INVALID ", MCS_INVALID},
192 		{"INVALID ", MCS_INVALID},
193 		{"INVALID ", MCS_VALID},
194 	},
195 	{
196 		{"VHT MCS 0 (BPSK 1/2)     ", MCS_VALID},
197 		{"VHT MCS 1 (QPSK 1/2)     ", MCS_VALID},
198 		{"VHT MCS 2 (QPSK 3/4)     ", MCS_VALID},
199 		{"VHT MCS 3 (16-QAM 1/2)   ", MCS_VALID},
200 		{"VHT MCS 4 (16-QAM 3/4)   ", MCS_VALID},
201 		{"VHT MCS 5 (64-QAM 2/3)   ", MCS_VALID},
202 		{"VHT MCS 6 (64-QAM 3/4)   ", MCS_VALID},
203 		{"VHT MCS 7 (64-QAM 5/6)   ", MCS_VALID},
204 		{"VHT MCS 8 (256-QAM 3/4)  ", MCS_VALID},
205 		{"VHT MCS 9 (256-QAM 5/6)  ", MCS_VALID},
206 		{"VHT MCS 10 (1024-QAM 3/4)", MCS_VALID},
207 		{"VHT MCS 11 (1024-QAM 5/6)", MCS_VALID},
208 		{"INVALID ", MCS_VALID},
209 	},
210 	{
211 		{"HE MCS 0 (BPSK 1/2)     ", MCS_VALID},
212 		{"HE MCS 1 (QPSK 1/2)     ", MCS_VALID},
213 		{"HE MCS 2 (QPSK 3/4)     ", MCS_VALID},
214 		{"HE MCS 3 (16-QAM 1/2)   ", MCS_VALID},
215 		{"HE MCS 4 (16-QAM 3/4)   ", MCS_VALID},
216 		{"HE MCS 5 (64-QAM 2/3)   ", MCS_VALID},
217 		{"HE MCS 6 (64-QAM 3/4)   ", MCS_VALID},
218 		{"HE MCS 7 (64-QAM 5/6)   ", MCS_VALID},
219 		{"HE MCS 8 (256-QAM 3/4)  ", MCS_VALID},
220 		{"HE MCS 9 (256-QAM 5/6)  ", MCS_VALID},
221 		{"HE MCS 10 (1024-QAM 3/4)", MCS_VALID},
222 		{"HE MCS 11 (1024-QAM 5/6)", MCS_VALID},
223 		{"INVALID ", MCS_VALID},
224 	}
225 };
226 
227 /**
228  * @brief Cpu ring map types
229  */
230 enum dp_cpu_ring_map_types {
231 	DP_DEFAULT_MAP,
232 	DP_NSS_FIRST_RADIO_OFFLOADED_MAP,
233 	DP_NSS_SECOND_RADIO_OFFLOADED_MAP,
234 	DP_NSS_ALL_RADIO_OFFLOADED_MAP,
235 	DP_CPU_RING_MAP_MAX
236 };
237 
238 /**
239  * @brief Cpu to tx ring map
240  */
241 static uint8_t dp_cpu_ring_map[DP_CPU_RING_MAP_MAX][WLAN_CFG_INT_NUM_CONTEXTS] = {
242 	{0x0, 0x1, 0x2, 0x0},
243 	{0x1, 0x2, 0x1, 0x2},
244 	{0x0, 0x2, 0x0, 0x2},
245 	{0x2, 0x2, 0x2, 0x2}
246 };
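/*
 * Each row above corresponds to one entry of enum dp_cpu_ring_map_types and
 * each column to one interrupt context (WLAN_CFG_INT_NUM_CONTEXTS). The value
 * is the Tx (TCL data) ring used by that context; e.g. with
 * DP_NSS_ALL_RADIO_OFFLOADED_MAP every context transmits on ring 2, the
 * remaining rings presumably being reserved for the NSS offloaded radios.
 */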
247 
248 /**
249  * @brief Select the type of statistics
250  */
251 enum dp_stats_type {
252 	STATS_FW = 0,
253 	STATS_HOST = 1,
254 	STATS_TYPE_MAX = 2,
255 };
256 
257 /**
258  * @brief General Firmware statistics options
259  *
260  */
261 enum dp_fw_stats {
262 	TXRX_FW_STATS_INVALID	= -1,
263 };
264 
265 /**
266  * dp_stats_mapping_table - Firmware and Host statistics
267  * currently supported
268  */
269 const int dp_stats_mapping_table[][STATS_TYPE_MAX] = {
270 	{HTT_DBG_EXT_STATS_RESET, TXRX_HOST_STATS_INVALID},
271 	{HTT_DBG_EXT_STATS_PDEV_TX, TXRX_HOST_STATS_INVALID},
272 	{HTT_DBG_EXT_STATS_PDEV_RX, TXRX_HOST_STATS_INVALID},
273 	{HTT_DBG_EXT_STATS_PDEV_TX_HWQ, TXRX_HOST_STATS_INVALID},
274 	{HTT_DBG_EXT_STATS_PDEV_TX_SCHED, TXRX_HOST_STATS_INVALID},
275 	{HTT_DBG_EXT_STATS_PDEV_ERROR, TXRX_HOST_STATS_INVALID},
276 	{HTT_DBG_EXT_STATS_PDEV_TQM, TXRX_HOST_STATS_INVALID},
277 	{HTT_DBG_EXT_STATS_TQM_CMDQ, TXRX_HOST_STATS_INVALID},
278 	{HTT_DBG_EXT_STATS_TX_DE_INFO, TXRX_HOST_STATS_INVALID},
279 	{HTT_DBG_EXT_STATS_PDEV_TX_RATE, TXRX_HOST_STATS_INVALID},
280 	{HTT_DBG_EXT_STATS_PDEV_RX_RATE, TXRX_HOST_STATS_INVALID},
281 	{TXRX_FW_STATS_INVALID, TXRX_HOST_STATS_INVALID},
282 	{HTT_DBG_EXT_STATS_TX_SELFGEN_INFO, TXRX_HOST_STATS_INVALID},
283 	{HTT_DBG_EXT_STATS_TX_MU_HWQ, TXRX_HOST_STATS_INVALID},
284 	{HTT_DBG_EXT_STATS_RING_IF_INFO, TXRX_HOST_STATS_INVALID},
285 	{HTT_DBG_EXT_STATS_SRNG_INFO, TXRX_HOST_STATS_INVALID},
286 	{HTT_DBG_EXT_STATS_SFM_INFO, TXRX_HOST_STATS_INVALID},
287 	{HTT_DBG_EXT_STATS_PDEV_TX_MU, TXRX_HOST_STATS_INVALID},
288 	{HTT_DBG_EXT_STATS_ACTIVE_PEERS_LIST, TXRX_HOST_STATS_INVALID},
289 	/* Last ENUM for HTT FW STATS */
290 	{DP_HTT_DBG_EXT_STATS_MAX, TXRX_HOST_STATS_INVALID},
291 	{TXRX_FW_STATS_INVALID, TXRX_CLEAR_STATS},
292 	{TXRX_FW_STATS_INVALID, TXRX_RX_RATE_STATS},
293 	{TXRX_FW_STATS_INVALID, TXRX_TX_RATE_STATS},
294 	{TXRX_FW_STATS_INVALID, TXRX_TX_HOST_STATS},
295 	{TXRX_FW_STATS_INVALID, TXRX_RX_HOST_STATS},
296 	{TXRX_FW_STATS_INVALID, TXRX_AST_STATS},
297 	{TXRX_FW_STATS_INVALID, TXRX_SRNG_PTR_STATS},
298 	{TXRX_FW_STATS_INVALID, TXRX_RX_MON_STATS},
299 	{TXRX_FW_STATS_INVALID, TXRX_REO_QUEUE_STATS},
300 };
301 
302 /* MCL specific functions */
303 #ifdef CONFIG_MCL
304 /**
305  * dp_soc_get_mon_mask_for_interrupt_mode() - get mon mode mask for intr mode
306  * @soc: pointer to dp_soc handle
307  * @intr_ctx_num: interrupt context number for which mon mask is needed
308  *
309  * For MCL, monitor mode rings are being processed in timer contexts (polled).
310  * This function is returning 0, since in interrupt mode(softirq based RX),
311  * we donot want to process monitor mode rings in a softirq.
312  *
313  * So, in case packet log is enabled for SAP/STA/P2P modes,
314  * regular interrupt processing will not process monitor mode rings. It would be
315  * done in a separate timer context.
316  *
317  * Return: 0
318  */
319 static inline
320 uint32_t dp_soc_get_mon_mask_for_interrupt_mode(struct dp_soc *soc, int intr_ctx_num)
321 {
322 	return 0;
323 }
324 
325 /*
326  * dp_service_mon_rings() - timer handler to reap the monitor rings,
327  * required because ppdu end interrupts are not available
328  * @arg: SoC Handle
329  *
330  * Return: none
331  *
332  */
333 static void dp_service_mon_rings(void *arg)
334 {
335 	struct dp_soc *soc = (struct dp_soc *)arg;
336 	int ring = 0, work_done, mac_id;
337 	struct dp_pdev *pdev = NULL;
338 
339 	for  (ring = 0 ; ring < MAX_PDEV_CNT; ring++) {
340 		pdev = soc->pdev_list[ring];
341 		if (!pdev)
342 			continue;
343 		for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
344 			int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id,
345 								pdev->pdev_id);
346 			work_done = dp_mon_process(soc, mac_for_pdev,
347 						   QCA_NAPI_BUDGET);
348 
349 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
350 				  FL("Reaped %d descs from Monitor rings"),
351 				  work_done);
352 		}
353 	}
354 
355 	qdf_timer_mod(&soc->mon_reap_timer, DP_INTR_POLL_TIMER_MS);
356 }
357 
358 #ifndef REMOVE_PKT_LOG
359 /**
360  * dp_pkt_log_init() - API to initialize packet log
361  * @ppdev: physical device handle
362  * @scn: HIF context
363  *
364  * Return: none
365  */
366 void dp_pkt_log_init(struct cdp_pdev *ppdev, void *scn)
367 {
368 	struct dp_pdev *handle = (struct dp_pdev *)ppdev;
369 
370 	if (handle->pkt_log_init) {
371 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
372 			  "%s: Packet log already initialized", __func__);
373 		return;
374 	}
375 
376 	pktlog_sethandle(&handle->pl_dev, scn);
377 	pktlog_set_callback_regtype(PKTLOG_LITE_CALLBACK_REGISTRATION);
378 
379 	if (pktlogmod_init(scn)) {
380 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
381 			  "%s: pktlogmod_init failed", __func__);
382 		handle->pkt_log_init = false;
383 	} else {
384 		handle->pkt_log_init = true;
385 	}
386 }
387 
388 /**
389  * dp_pkt_log_con_service() - connect packet log service
390  * @ppdev: physical device handle
391  * @scn: device context
392  *
393  * Return: none
394  */
395 static void dp_pkt_log_con_service(struct cdp_pdev *ppdev, void *scn)
396 {
397 	struct dp_pdev *pdev = (struct dp_pdev *)ppdev;
398 
399 	dp_pkt_log_init((struct cdp_pdev *)pdev, scn);
400 	pktlog_htc_attach();
401 }
402 
403 /**
404  * dp_pktlogmod_exit() - API to cleanup pktlog info
405  * @handle: Pdev handle
406  *
407  * Return: none
408  */
409 static void dp_pktlogmod_exit(struct dp_pdev *handle)
410 {
411 	void *scn = (void *)handle->soc->hif_handle;
412 
413 	if (!scn) {
414 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
415 			  "%s: Invalid hif(scn) handle", __func__);
416 		return;
417 	}
418 
419 	pktlogmod_exit(scn);
420 	handle->pkt_log_init = false;
421 }
422 #endif
423 #else
424 static void dp_pktlogmod_exit(struct dp_pdev *handle) { }
425 
426 /**
427  * dp_soc_get_mon_mask_for_interrupt_mode() - get mon mode mask for intr mode
428  * @soc: pointer to dp_soc handle
429  * @intr_ctx_num: interrupt context number for which mon mask is needed
430  *
431  * Return: mon mask value
432  */
433 static inline
434 uint32_t dp_soc_get_mon_mask_for_interrupt_mode(struct dp_soc *soc, int intr_ctx_num)
435 {
436 	return wlan_cfg_get_rx_mon_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
437 }
438 #endif
439 
440 static int dp_peer_add_ast_wifi3(struct cdp_soc_t *soc_hdl,
441 					struct cdp_peer *peer_hdl,
442 					uint8_t *mac_addr,
443 					enum cdp_txrx_ast_entry_type type,
444 					uint32_t flags)
445 {
446 
447 	return dp_peer_add_ast((struct dp_soc *)soc_hdl,
448 				(struct dp_peer *)peer_hdl,
449 				mac_addr,
450 				type,
451 				flags);
452 }
453 
454 static void dp_peer_del_ast_wifi3(struct cdp_soc_t *soc_hdl,
455 					 void *ast_entry_hdl)
456 {
457 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
458 	qdf_spin_lock_bh(&soc->ast_lock);
459 	dp_peer_del_ast((struct dp_soc *)soc_hdl,
460 			(struct dp_ast_entry *)ast_entry_hdl);
461 	qdf_spin_unlock_bh(&soc->ast_lock);
462 }
463 
464 
465 static int dp_peer_update_ast_wifi3(struct cdp_soc_t *soc_hdl,
466 						struct cdp_peer *peer_hdl,
467 						uint8_t *wds_macaddr,
468 						uint32_t flags)
469 {
470 	int status = -1;
471 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
472 	struct dp_ast_entry  *ast_entry = NULL;
473 
474 	qdf_spin_lock_bh(&soc->ast_lock);
475 	ast_entry = dp_peer_ast_hash_find(soc, wds_macaddr);
476 
477 	if (ast_entry) {
478 		status = dp_peer_update_ast(soc,
479 					    (struct dp_peer *)peer_hdl,
480 					   ast_entry, flags);
481 	}
482 
483 	qdf_spin_unlock_bh(&soc->ast_lock);
484 
485 	return status;
486 }
487 
488 /*
489  * dp_wds_reset_ast_wifi3() - Reset the is_active param for an ast entry
490  * @soc_hdl:		Datapath SOC handle
491  * @wds_macaddr:	WDS entry MAC Address
492  * Return: None
493  */
494 static void dp_wds_reset_ast_wifi3(struct cdp_soc_t *soc_hdl,
495 				   uint8_t *wds_macaddr, void *vdev_handle)
496 {
497 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
498 	struct dp_ast_entry *ast_entry = NULL;
499 
500 	qdf_spin_lock_bh(&soc->ast_lock);
501 	ast_entry = dp_peer_ast_hash_find(soc, wds_macaddr);
502 
503 	if (ast_entry) {
504 		if ((ast_entry->type != CDP_TXRX_AST_TYPE_STATIC) &&
505 		    (ast_entry->type != CDP_TXRX_AST_TYPE_SELF)) {
506 			ast_entry->is_active = TRUE;
507 		}
508 	}
509 
510 	qdf_spin_unlock_bh(&soc->ast_lock);
511 }
512 
513 /*
514  * dp_wds_reset_ast_table_wifi3() - Reset the is_active param for all ast entries
515  * @soc_hdl:		Datapath SOC handle
516  *
517  * Return: None
518  */
519 static void dp_wds_reset_ast_table_wifi3(struct cdp_soc_t  *soc_hdl,
520 					 void *vdev_hdl)
521 {
522 	struct dp_soc *soc = (struct dp_soc *) soc_hdl;
523 	struct dp_pdev *pdev;
524 	struct dp_vdev *vdev;
525 	struct dp_peer *peer;
526 	struct dp_ast_entry *ase, *temp_ase;
527 	int i;
528 
529 	qdf_spin_lock_bh(&soc->ast_lock);
530 
531 	for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) {
532 		pdev = soc->pdev_list[i];
533 		qdf_spin_lock_bh(&pdev->vdev_list_lock);
534 		DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
535 			DP_VDEV_ITERATE_PEER_LIST(vdev, peer) {
536 				DP_PEER_ITERATE_ASE_LIST(peer, ase, temp_ase) {
537 					if ((ase->type ==
538 					     CDP_TXRX_AST_TYPE_STATIC) ||
539 					    (ase->type ==
540 					     CDP_TXRX_AST_TYPE_SELF))
541 						continue;
542 					ase->is_active = TRUE;
543 				}
544 			}
545 		}
546 		qdf_spin_unlock_bh(&pdev->vdev_list_lock);
547 	}
548 
549 	qdf_spin_unlock_bh(&soc->ast_lock);
550 }
551 
552 /*
553  * dp_wds_flush_ast_table_wifi3() - Delete all wds and hmwds ast entries
554  * @soc_hdl:		Datapath SOC handle
555  *
556  * Return: None
557  */
558 static void dp_wds_flush_ast_table_wifi3(struct cdp_soc_t  *soc_hdl)
559 {
560 	struct dp_soc *soc = (struct dp_soc *) soc_hdl;
561 	struct dp_pdev *pdev;
562 	struct dp_vdev *vdev;
563 	struct dp_peer *peer;
564 	struct dp_ast_entry *ase, *temp_ase;
565 	int i;
566 
567 	qdf_spin_lock_bh(&soc->ast_lock);
568 
569 	for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) {
570 		pdev = soc->pdev_list[i];
571 		qdf_spin_lock_bh(&pdev->vdev_list_lock);
572 		DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
573 			DP_VDEV_ITERATE_PEER_LIST(vdev, peer) {
574 				DP_PEER_ITERATE_ASE_LIST(peer, ase, temp_ase) {
575 					if ((ase->type ==
576 					     CDP_TXRX_AST_TYPE_STATIC) ||
577 					    (ase->type ==
578 					     CDP_TXRX_AST_TYPE_SELF))
579 						continue;
580 					dp_peer_del_ast(soc, ase);
581 				}
582 			}
583 		}
584 		qdf_spin_unlock_bh(&pdev->vdev_list_lock);
585 	}
586 
587 	qdf_spin_unlock_bh(&soc->ast_lock);
588 }
589 
590 static void *dp_peer_ast_hash_find_wifi3(struct cdp_soc_t *soc_hdl,
591 						uint8_t *ast_mac_addr)
592 {
593 	struct dp_ast_entry *ast_entry;
594 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
595 	qdf_spin_lock_bh(&soc->ast_lock);
596 	ast_entry = dp_peer_ast_hash_find(soc, ast_mac_addr);
597 	qdf_spin_unlock_bh(&soc->ast_lock);
598 	return (void *)ast_entry;
599 }
600 
601 static uint8_t dp_peer_ast_get_pdev_id_wifi3(struct cdp_soc_t *soc_hdl,
602 							void *ast_entry_hdl)
603 {
604 	return dp_peer_ast_get_pdev_id((struct dp_soc *)soc_hdl,
605 					(struct dp_ast_entry *)ast_entry_hdl);
606 }
607 
608 static uint8_t dp_peer_ast_get_next_hop_wifi3(struct cdp_soc_t *soc_hdl,
609 							void *ast_entry_hdl)
610 {
611 	return dp_peer_ast_get_next_hop((struct dp_soc *)soc_hdl,
612 					(struct dp_ast_entry *)ast_entry_hdl);
613 }
614 
615 static void dp_peer_ast_set_type_wifi3(
616 					struct cdp_soc_t *soc_hdl,
617 					void *ast_entry_hdl,
618 					enum cdp_txrx_ast_entry_type type)
619 {
620 	dp_peer_ast_set_type((struct dp_soc *)soc_hdl,
621 				(struct dp_ast_entry *)ast_entry_hdl,
622 				type);
623 }
624 
625 static enum cdp_txrx_ast_entry_type dp_peer_ast_get_type_wifi3(
626 					struct cdp_soc_t *soc_hdl,
627 					void *ast_entry_hdl)
628 {
629 	return ((struct dp_ast_entry *)ast_entry_hdl)->type;
630 }
631 
632 /**
633  * dp_srng_find_ring_in_mask() - find which ext_group a ring belongs to
634  * @ring_num: ring num of the ring being queried
635  * @grp_mask: the grp_mask array for the ring type in question.
636  *
637  * The grp_mask array is indexed by group number and the bit fields correspond
638  * to ring numbers.  We are finding which interrupt group a ring belongs to.
639  *
640  * Return: the index in the grp_mask array that contains the ring number,
641  * or -QDF_STATUS_E_NOENT if no entry is found
642  */
643 static int dp_srng_find_ring_in_mask(int ring_num, int *grp_mask)
644 {
645 	int ext_group_num;
646 	int mask = 1 << ring_num;
647 
648 	for (ext_group_num = 0; ext_group_num < WLAN_CFG_INT_NUM_CONTEXTS;
649 	     ext_group_num++) {
650 		if (mask & grp_mask[ext_group_num])
651 			return ext_group_num;
652 	}
653 
654 	return -QDF_STATUS_E_NOENT;
655 }
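/*
 * Example: with grp_mask = {0x1, 0x6, 0x8, 0x0} and ring_num = 2, the mask
 * 1 << 2 = 0x4 matches grp_mask[1], so the ring is serviced by interrupt
 * context (ext_group) 1.
 */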
656 
657 static int dp_srng_calculate_msi_group(struct dp_soc *soc,
658 				       enum hal_ring_type ring_type,
659 				       int ring_num)
660 {
661 	int *grp_mask;
662 
663 	switch (ring_type) {
664 	case WBM2SW_RELEASE:
665 		/* dp_tx_comp_handler - soc->tx_comp_ring */
666 		if (ring_num < 3)
667 			grp_mask = &soc->wlan_cfg_ctx->int_tx_ring_mask[0];
668 
669 		/* dp_rx_wbm_err_process - soc->rx_rel_ring */
670 		else if (ring_num == 3) {
671 			/* sw treats this as a separate ring type */
672 			grp_mask = &soc->wlan_cfg_ctx->
673 				int_rx_wbm_rel_ring_mask[0];
674 			ring_num = 0;
675 		} else {
676 			qdf_assert(0);
677 			return -QDF_STATUS_E_NOENT;
678 		}
679 	break;
680 
681 	case REO_EXCEPTION:
682 		/* dp_rx_err_process - &soc->reo_exception_ring */
683 		grp_mask = &soc->wlan_cfg_ctx->int_rx_err_ring_mask[0];
684 	break;
685 
686 	case REO_DST:
687 		/* dp_rx_process - soc->reo_dest_ring */
688 		grp_mask = &soc->wlan_cfg_ctx->int_rx_ring_mask[0];
689 	break;
690 
691 	case REO_STATUS:
692 		/* dp_reo_status_ring_handler - soc->reo_status_ring */
693 		grp_mask = &soc->wlan_cfg_ctx->int_reo_status_ring_mask[0];
694 	break;
695 
696 	/* dp_rx_mon_status_srng_process - pdev->rxdma_mon_status_ring*/
697 	case RXDMA_MONITOR_STATUS:
698 	/* dp_rx_mon_dest_process - pdev->rxdma_mon_dst_ring */
699 	case RXDMA_MONITOR_DST:
700 		/* dp_mon_process */
701 		grp_mask = &soc->wlan_cfg_ctx->int_rx_mon_ring_mask[0];
702 	break;
703 	case RXDMA_DST:
704 		/* dp_rxdma_err_process */
705 		grp_mask = &soc->wlan_cfg_ctx->int_rxdma2host_ring_mask[0];
706 	break;
707 
708 	case RXDMA_BUF:
709 		grp_mask = &soc->wlan_cfg_ctx->int_host2rxdma_ring_mask[0];
710 	break;
711 
712 	case RXDMA_MONITOR_BUF:
713 		/* TODO: support low_thresh interrupt */
714 		return -QDF_STATUS_E_NOENT;
715 	break;
716 
717 	case TCL_DATA:
718 	case TCL_CMD:
719 	case REO_CMD:
720 	case SW2WBM_RELEASE:
721 	case WBM_IDLE_LINK:
722 		/* normally empty SW_TO_HW rings */
723 		return -QDF_STATUS_E_NOENT;
724 	break;
725 
726 	case TCL_STATUS:
727 	case REO_REINJECT:
728 		/* misc unused rings */
729 		return -QDF_STATUS_E_NOENT;
730 	break;
731 
732 	case CE_SRC:
733 	case CE_DST:
734 	case CE_DST_STATUS:
735 		/* CE_rings - currently handled by hif */
736 	default:
737 		return -QDF_STATUS_E_NOENT;
738 	break;
739 	}
740 
741 	return dp_srng_find_ring_in_mask(ring_num, grp_mask);
742 }
743 
744 static void dp_srng_msi_setup(struct dp_soc *soc, struct hal_srng_params
745 			      *ring_params, int ring_type, int ring_num)
746 {
747 	int msi_group_number;
748 	int msi_data_count;
749 	int ret;
750 	uint32_t msi_data_start, msi_irq_start, addr_low, addr_high;
751 
752 	ret = pld_get_user_msi_assignment(soc->osdev->dev, "DP",
753 					    &msi_data_count, &msi_data_start,
754 					    &msi_irq_start);
755 
756 	if (ret)
757 		return;
758 
759 	msi_group_number = dp_srng_calculate_msi_group(soc, ring_type,
760 						       ring_num);
761 	if (msi_group_number < 0) {
762 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW,
763 			FL("ring not part of an ext_group; ring_type: %d, ring_num %d"),
764 			ring_type, ring_num);
765 		ring_params->msi_addr = 0;
766 		ring_params->msi_data = 0;
767 		return;
768 	}
769 
770 	if (msi_group_number > msi_data_count) {
771 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_WARN,
772 			FL("2 msi_groups will share an msi; msi_group_num %d"),
773 			msi_group_number);
774 
775 		QDF_ASSERT(0);
776 	}
777 
778 	pld_get_msi_address(soc->osdev->dev, &addr_low, &addr_high);
779 
780 	ring_params->msi_addr = addr_low;
781 	ring_params->msi_addr |= (qdf_dma_addr_t)(((uint64_t)addr_high) << 32);
782 	ring_params->msi_data = (msi_group_number % msi_data_count)
783 		+ msi_data_start;
784 	ring_params->flags |= HAL_SRNG_MSI_INTR;
785 }
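/*
 * Note: msi_data is derived as (msi_group_number % msi_data_count) above, so
 * when there are more interrupt groups than MSI vectors the groups wrap
 * around and share vectors; the warning and assert flag that situation.
 */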
786 
787 /**
788  * dp_print_ast_stats() - Dump AST table contents
789  * @soc: Datapath soc handle
790  *
791  * Return: void
792  */
793 #ifdef FEATURE_AST
794 static void dp_print_ast_stats(struct dp_soc *soc)
795 {
796 	uint8_t i;
797 	uint8_t num_entries = 0;
798 	struct dp_vdev *vdev;
799 	struct dp_pdev *pdev;
800 	struct dp_peer *peer;
801 	struct dp_ast_entry *ase, *tmp_ase;
802 	char type[CDP_TXRX_AST_TYPE_MAX][10] = {
803 			"NONE", "STATIC", "SELF", "WDS", "MEC", "HMWDS"};
804 
805 	DP_PRINT_STATS("AST Stats:");
806 	DP_PRINT_STATS("	Entries Added   = %d", soc->stats.ast.added);
807 	DP_PRINT_STATS("	Entries Deleted = %d", soc->stats.ast.deleted);
808 	DP_PRINT_STATS("	Entries Agedout = %d", soc->stats.ast.aged_out);
809 	DP_PRINT_STATS("AST Table:");
810 	for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) {
811 		pdev = soc->pdev_list[i];
812 		qdf_spin_lock_bh(&pdev->vdev_list_lock);
813 		DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
814 			DP_VDEV_ITERATE_PEER_LIST(vdev, peer) {
815 				DP_PEER_ITERATE_ASE_LIST(peer, ase, tmp_ase) {
816 					DP_PRINT_STATS("%6d mac_addr = %pM"
817 							" peer_mac_addr = %pM"
818 							" type = %s"
819 							" next_hop = %d"
820 							" is_active = %d"
821 							" is_bss = %d"
822 							" ast_idx = %d"
823 							" pdev_id = %d"
824 							" vdev_id = %d",
825 							++num_entries,
826 							ase->mac_addr.raw,
827 							ase->peer->mac_addr.raw,
828 							type[ase->type],
829 							ase->next_hop,
830 							ase->is_active,
831 							ase->is_bss,
832 							ase->ast_idx,
833 							ase->pdev_id,
834 							ase->vdev_id);
835 				}
836 			}
837 		}
838 		qdf_spin_unlock_bh(&pdev->vdev_list_lock);
839 	}
840 }
841 #else
842 static void dp_print_ast_stats(struct dp_soc *soc)
843 {
844 	DP_PRINT_STATS("AST Stats not available. Enable FEATURE_AST");
845 	return;
846 }
847 #endif
848 
849 static void dp_print_peer_table(struct dp_vdev *vdev)
850 {
851 	struct dp_peer *peer = NULL;
852 
853 	DP_PRINT_STATS("Dumping Peer Table  Stats:");
854 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
855 		if (!peer) {
856 			DP_PRINT_STATS("Invalid Peer");
857 			return;
858 		}
859 		DP_PRINT_STATS("    peer_mac_addr = %pM"
860 			" nawds_enabled = %d"
861 			" bss_peer = %d"
862 			" wapi = %d"
863 			" wds_enabled = %d"
864 			" delete in progress = %d",
865 			peer->mac_addr.raw,
866 			peer->nawds_enabled,
867 			peer->bss_peer,
868 			peer->wapi,
869 			peer->wds_enabled,
870 			peer->delete_in_progress);
871 	}
872 }
873 
874 /*
875  * dp_srng_setup - Internal function to setup SRNG rings used by data path
876  */
877 static int dp_srng_setup(struct dp_soc *soc, struct dp_srng *srng,
878 	int ring_type, int ring_num, int mac_id, uint32_t num_entries)
879 {
880 	void *hal_soc = soc->hal_soc;
881 	uint32_t entry_size = hal_srng_get_entrysize(hal_soc, ring_type);
882 	/* TODO: See if we should get align size from hal */
883 	uint32_t ring_base_align = 8;
884 	struct hal_srng_params ring_params;
885 	uint32_t max_entries = hal_srng_max_entries(hal_soc, ring_type);
886 
887 	/* TODO: Currently hal layer takes care of endianness related settings.
888 	 * See if these settings need to passed from DP layer
889 	 */
890 	ring_params.flags = 0;
891 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW,
892 		  FL("Ring type: %d, num:%d"), ring_type, ring_num);
893 
894 	num_entries = (num_entries > max_entries) ? max_entries : num_entries;
895 	srng->hal_srng = NULL;
896 	srng->alloc_size = (num_entries * entry_size) + ring_base_align - 1;
897 	srng->num_entries = num_entries;
898 	srng->base_vaddr_unaligned = qdf_mem_alloc_consistent(
899 		soc->osdev, soc->osdev->dev, srng->alloc_size,
900 		&(srng->base_paddr_unaligned));
901 
902 	if (!srng->base_vaddr_unaligned) {
903 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
904 			FL("alloc failed - ring_type: %d, ring_num %d"),
905 			ring_type, ring_num);
906 		return QDF_STATUS_E_NOMEM;
907 	}
908 
909 	ring_params.ring_base_vaddr = srng->base_vaddr_unaligned +
910 		((unsigned long)srng->base_vaddr_unaligned % ring_base_align);
911 	ring_params.ring_base_paddr = srng->base_paddr_unaligned +
912 		((unsigned long)(ring_params.ring_base_vaddr) -
913 		(unsigned long)srng->base_vaddr_unaligned);
914 	ring_params.num_entries = num_entries;
915 
916 	if (soc->intr_mode == DP_INTR_MSI) {
917 		dp_srng_msi_setup(soc, &ring_params, ring_type, ring_num);
918 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
919 			  FL("Using MSI for ring_type: %d, ring_num %d"),
920 			  ring_type, ring_num);
921 
922 	} else {
923 		ring_params.msi_data = 0;
924 		ring_params.msi_addr = 0;
925 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
926 			  FL("Skipping MSI for ring_type: %d, ring_num %d"),
927 			  ring_type, ring_num);
928 	}
929 
930 	/*
931 	 * Setup interrupt timer and batch counter thresholds for
932 	 * interrupt mitigation based on ring type
933 	 */
934 	if (ring_type == REO_DST) {
935 		ring_params.intr_timer_thres_us =
936 			wlan_cfg_get_int_timer_threshold_rx(soc->wlan_cfg_ctx);
937 		ring_params.intr_batch_cntr_thres_entries =
938 			wlan_cfg_get_int_batch_threshold_rx(soc->wlan_cfg_ctx);
939 	} else if (ring_type == WBM2SW_RELEASE && (ring_num < 3)) {
940 		ring_params.intr_timer_thres_us =
941 			wlan_cfg_get_int_timer_threshold_tx(soc->wlan_cfg_ctx);
942 		ring_params.intr_batch_cntr_thres_entries =
943 			wlan_cfg_get_int_batch_threshold_tx(soc->wlan_cfg_ctx);
944 	} else {
945 		ring_params.intr_timer_thres_us =
946 			wlan_cfg_get_int_timer_threshold_other(soc->wlan_cfg_ctx);
947 		ring_params.intr_batch_cntr_thres_entries =
948 			wlan_cfg_get_int_batch_threshold_other(soc->wlan_cfg_ctx);
949 	}
950 
951 	/* Enable low threshold interrupts for rx buffer rings (regular and
952 	 * monitor buffer rings).
953 	 * TODO: See if this is required for any other ring
954 	 */
955 	if ((ring_type == RXDMA_BUF) || (ring_type == RXDMA_MONITOR_BUF) ||
956 		(ring_type == RXDMA_MONITOR_STATUS)) {
957 		/* TODO: Setting low threshold to 1/8th of ring size
958 		 * see if this needs to be configurable
959 		 */
960 		ring_params.low_threshold = num_entries >> 3;
961 		ring_params.flags |= HAL_SRNG_LOW_THRES_INTR_ENABLE;
962 		ring_params.intr_timer_thres_us =
963 			wlan_cfg_get_int_timer_threshold_rx(soc->wlan_cfg_ctx);
964 		ring_params.intr_batch_cntr_thres_entries = 0;
965 	}
966 
967 	srng->hal_srng = hal_srng_setup(hal_soc, ring_type, ring_num,
968 		mac_id, &ring_params);
969 
970 	if (!srng->hal_srng) {
971 		qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
972 				srng->alloc_size,
973 				srng->base_vaddr_unaligned,
974 				srng->base_paddr_unaligned, 0);
		/* report the failure so callers do not use a NULL hal_srng */
		return QDF_STATUS_E_FAILURE;
975 	}
976 
977 	return 0;
978 }
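/*
 * Callers of dp_srng_setup() (see the ring setup paths further below) treat
 * any nonzero return value as failure, so errors are reported as QDF error
 * codes and success as 0.
 */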
979 
980 /**
981  * dp_srng_cleanup - Internal function to cleanup SRNG rings used by data path
982  * Any buffers allocated and attached to ring entries are expected to be freed
983  * before calling this function.
984  */
985 static void dp_srng_cleanup(struct dp_soc *soc, struct dp_srng *srng,
986 	int ring_type, int ring_num)
987 {
988 	if (!srng->hal_srng) {
989 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
990 			FL("Ring type: %d, num:%d not setup"),
991 			ring_type, ring_num);
992 		return;
993 	}
994 
995 	hal_srng_cleanup(soc->hal_soc, srng->hal_srng);
996 
997 	qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
998 				srng->alloc_size,
999 				srng->base_vaddr_unaligned,
1000 				srng->base_paddr_unaligned, 0);
1001 	srng->hal_srng = NULL;
1002 }
1003 
1004 /* TODO: Need this interface from HIF */
1005 void *hif_get_hal_handle(void *hif_handle);
1006 
1007 /*
1008  * dp_service_srngs() - Top level interrupt handler for DP Ring interrupts
1009  * @dp_ctx: DP SOC handle
1010  * @budget: Number of frames/descriptors that can be processed in one shot
1011  *
1012  * Return: remaining budget/quota for the soc device
1013  */
1014 static uint32_t dp_service_srngs(void *dp_ctx, uint32_t dp_budget)
1015 {
1016 	struct dp_intr *int_ctx = (struct dp_intr *)dp_ctx;
1017 	struct dp_soc *soc = int_ctx->soc;
1018 	int ring = 0;
1019 	uint32_t work_done  = 0;
1020 	int budget = dp_budget;
1021 	uint8_t tx_mask = int_ctx->tx_ring_mask;
1022 	uint8_t rx_mask = int_ctx->rx_ring_mask;
1023 	uint8_t rx_err_mask = int_ctx->rx_err_ring_mask;
1024 	uint8_t rx_wbm_rel_mask = int_ctx->rx_wbm_rel_ring_mask;
1025 	uint8_t reo_status_mask = int_ctx->reo_status_ring_mask;
1026 	uint32_t remaining_quota = dp_budget;
1027 	struct dp_pdev *pdev = NULL;
1028 	int mac_id;
1029 
1030 	/* Process Tx completion interrupts first to return back buffers */
1031 	while (tx_mask) {
1032 		if (tx_mask & 0x1) {
1033 			work_done = dp_tx_comp_handler(soc,
1034 					soc->tx_comp_ring[ring].hal_srng,
1035 					remaining_quota);
1036 
1037 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
1038 				"tx mask 0x%x ring %d, budget %d, work_done %d",
1039 				tx_mask, ring, budget, work_done);
1040 
1041 			budget -= work_done;
1042 			if (budget <= 0)
1043 				goto budget_done;
1044 
1045 			remaining_quota = budget;
1046 		}
1047 		tx_mask = tx_mask >> 1;
1048 		ring++;
1049 	}
1050 
1051 
1052 	/* Process REO Exception ring interrupt */
1053 	if (rx_err_mask) {
1054 		work_done = dp_rx_err_process(soc,
1055 				soc->reo_exception_ring.hal_srng,
1056 				remaining_quota);
1057 
1058 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
1059 			"REO Exception Ring: work_done %d budget %d",
1060 			work_done, budget);
1061 
1062 		budget -=  work_done;
1063 		if (budget <= 0) {
1064 			goto budget_done;
1065 		}
1066 		remaining_quota = budget;
1067 	}
1068 
1069 	/* Process Rx WBM release ring interrupt */
1070 	if (rx_wbm_rel_mask) {
1071 		work_done = dp_rx_wbm_err_process(soc,
1072 				soc->rx_rel_ring.hal_srng, remaining_quota);
1073 
1074 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
1075 			"WBM Release Ring: work_done %d budget %d",
1076 			work_done, budget);
1077 
1078 		budget -=  work_done;
1079 		if (budget <= 0) {
1080 			goto budget_done;
1081 		}
1082 		remaining_quota = budget;
1083 	}
1084 
1085 	/* Process Rx interrupts */
1086 	if (rx_mask) {
1087 		for (ring = 0; ring < soc->num_reo_dest_rings; ring++) {
1088 			if (rx_mask & (1 << ring)) {
1089 				work_done = dp_rx_process(int_ctx,
1090 					    soc->reo_dest_ring[ring].hal_srng,
1091 					    remaining_quota);
1092 
1093 				QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
1094 					"rx mask 0x%x ring %d, work_done %d budget %d",
1095 					rx_mask, ring, work_done, budget);
1096 
1097 				budget -=  work_done;
1098 				if (budget <= 0)
1099 					goto budget_done;
1100 				remaining_quota = budget;
1101 			}
1102 		}
1103 		for (ring = 0; ring < MAX_RX_MAC_RINGS; ring++) {
1104 			work_done = dp_rxdma_err_process(soc, ring,
1105 						remaining_quota);
1106 			budget -= work_done;
1107 		}
1108 	}
1109 
1110 	if (reo_status_mask)
1111 		dp_reo_status_ring_handler(soc);
1112 
1113 	/* Process LMAC interrupts */
1114 	for  (ring = 0 ; ring < MAX_PDEV_CNT; ring++) {
1115 		pdev = soc->pdev_list[ring];
1116 		if (pdev == NULL)
1117 			continue;
1118 		for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
1119 			int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id,
1120 								pdev->pdev_id);
1121 
1122 			if (int_ctx->rx_mon_ring_mask & (1 << mac_for_pdev)) {
1123 				work_done = dp_mon_process(soc, mac_for_pdev,
1124 						remaining_quota);
1125 				budget -= work_done;
1126 				if (budget <= 0)
1127 					goto budget_done;
1128 				remaining_quota = budget;
1129 			}
1130 
1131 			if (int_ctx->rxdma2host_ring_mask &
1132 					(1 << mac_for_pdev)) {
1133 				work_done = dp_rxdma_err_process(soc,
1134 							mac_for_pdev,
1135 							remaining_quota);
1136 				budget -=  work_done;
1137 				if (budget <= 0)
1138 					goto budget_done;
1139 				remaining_quota = budget;
1140 			}
1141 
1142 			if (int_ctx->host2rxdma_ring_mask &
1143 						(1 << mac_for_pdev)) {
1144 				union dp_rx_desc_list_elem_t *desc_list = NULL;
1145 				union dp_rx_desc_list_elem_t *tail = NULL;
1146 				struct dp_srng *rx_refill_buf_ring =
1147 					&pdev->rx_refill_buf_ring;
1148 
1149 				DP_STATS_INC(pdev, replenish.low_thresh_intrs,
1150 						1);
1151 				dp_rx_buffers_replenish(soc, mac_for_pdev,
1152 					rx_refill_buf_ring,
1153 					&soc->rx_desc_buf[mac_for_pdev], 0,
1154 					&desc_list, &tail);
1155 			}
1156 		}
1157 	}
1158 
1159 	qdf_lro_flush(int_ctx->lro_ctx);
1160 
1161 budget_done:
1162 	return dp_budget - budget;
1163 }
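/*
 * dp_service_srngs() returns the total work done in this pass
 * (dp_budget - budget) across all serviced rings; the HIF/NAPI execution
 * context typically compares this against the budget to decide whether the
 * group should be polled again.
 */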
1164 
1165 #ifdef DP_INTR_POLL_BASED
1166 /* dp_interrupt_timer() - timer poll for interrupts
1167  *
1168  * @arg: SoC Handle
1169  *
1170  * Return: none
1171  *
1172  */
1173 static void dp_interrupt_timer(void *arg)
1174 {
1175 	struct dp_soc *soc = (struct dp_soc *) arg;
1176 	int i;
1177 
1178 	if (qdf_atomic_read(&soc->cmn_init_done)) {
1179 		for (i = 0;
1180 			i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++)
1181 			dp_service_srngs(&soc->intr_ctx[i], 0xffff);
1182 
1183 		qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
1184 	}
1185 }
1186 
1187 /*
1188  * dp_soc_attach_poll() - Register handlers for DP interrupts (poll mode)
1189  * @txrx_soc: DP SOC handle
1190  *
1191  * Host driver will register for "DP_NUM_INTERRUPT_CONTEXTS" number of NAPI
1192  * contexts. Each NAPI context will have a tx_ring_mask, rx_ring_mask, and
1193  * rx_monitor_ring mask to indicate the rings that are processed by the handler.
1194  *
1195  * Return: 0 for success. nonzero for failure.
1196  */
1197 static QDF_STATUS dp_soc_attach_poll(void *txrx_soc)
1198 {
1199 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
1200 	int i;
1201 
1202 	soc->intr_mode = DP_INTR_POLL;
1203 
1204 	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
1205 		soc->intr_ctx[i].dp_intr_id = i;
1206 		soc->intr_ctx[i].tx_ring_mask =
1207 			wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, i);
1208 		soc->intr_ctx[i].rx_ring_mask =
1209 			wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, i);
1210 		soc->intr_ctx[i].rx_mon_ring_mask =
1211 			wlan_cfg_get_rx_mon_ring_mask(soc->wlan_cfg_ctx, i);
1212 		soc->intr_ctx[i].rx_err_ring_mask =
1213 			wlan_cfg_get_rx_err_ring_mask(soc->wlan_cfg_ctx, i);
1214 		soc->intr_ctx[i].rx_wbm_rel_ring_mask =
1215 			wlan_cfg_get_rx_wbm_rel_ring_mask(soc->wlan_cfg_ctx, i);
1216 		soc->intr_ctx[i].reo_status_ring_mask =
1217 			wlan_cfg_get_reo_status_ring_mask(soc->wlan_cfg_ctx, i);
1218 		soc->intr_ctx[i].rxdma2host_ring_mask =
1219 			wlan_cfg_get_rxdma2host_ring_mask(soc->wlan_cfg_ctx, i);
1220 		soc->intr_ctx[i].soc = soc;
1221 		soc->intr_ctx[i].lro_ctx = qdf_lro_init();
1222 	}
1223 
1224 	qdf_timer_init(soc->osdev, &soc->int_timer,
1225 			dp_interrupt_timer, (void *)soc,
1226 			QDF_TIMER_TYPE_WAKE_APPS);
1227 
1228 	return QDF_STATUS_SUCCESS;
1229 }
1230 #else
1231 static QDF_STATUS dp_soc_attach_poll(void *txrx_soc)
1232 {
1233 	return -QDF_STATUS_E_NOSUPPORT;
1234 }
1235 #endif
1236 
1237 static QDF_STATUS dp_soc_interrupt_attach(void *txrx_soc);
1238 #if defined(CONFIG_MCL)
1239 extern int con_mode_monitor;
1240 /*
1241  * dp_soc_interrupt_attach_wrapper() - Register handlers for DP interrupts
1242  * @txrx_soc: DP SOC handle
1243  *
1244  * Call the appropriate attach function based on the mode of operation.
1245  * This is a WAR for enabling monitor mode.
1246  *
1247  * Return: 0 for success. nonzero for failure.
1248  */
1249 static QDF_STATUS dp_soc_interrupt_attach_wrapper(void *txrx_soc)
1250 {
1251 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
1252 
1253 	if (!(soc->wlan_cfg_ctx->napi_enabled) ||
1254 	     con_mode_monitor == QDF_GLOBAL_MONITOR_MODE) {
1255 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
1256 				  "%s: Poll mode", __func__);
1257 		return dp_soc_attach_poll(txrx_soc);
1258 	} else {
1259 
1260 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
1261 				  "%s: Interrupt mode", __func__);
1262 		return dp_soc_interrupt_attach(txrx_soc);
1263 	}
1264 }
1265 #else
1266 static QDF_STATUS dp_soc_interrupt_attach_wrapper(void *txrx_soc)
1267 {
1268 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
1269 
1270 	if (hif_is_polled_mode_enabled(soc->hif_handle))
1271 		return dp_soc_attach_poll(txrx_soc);
1272 	else
1273 		return dp_soc_interrupt_attach(txrx_soc);
1274 }
1275 #endif
1276 
1277 static void dp_soc_interrupt_map_calculate_integrated(struct dp_soc *soc,
1278 		int intr_ctx_num, int *irq_id_map, int *num_irq_r)
1279 {
1280 	int j;
1281 	int num_irq = 0;
1282 
1283 	int tx_mask =
1284 		wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
1285 	int rx_mask =
1286 		wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
1287 	int rx_mon_mask =
1288 		wlan_cfg_get_rx_mon_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
1289 	int rx_err_ring_mask = wlan_cfg_get_rx_err_ring_mask(
1290 					soc->wlan_cfg_ctx, intr_ctx_num);
1291 	int rx_wbm_rel_ring_mask = wlan_cfg_get_rx_wbm_rel_ring_mask(
1292 					soc->wlan_cfg_ctx, intr_ctx_num);
1293 	int reo_status_ring_mask = wlan_cfg_get_reo_status_ring_mask(
1294 					soc->wlan_cfg_ctx, intr_ctx_num);
1295 	int rxdma2host_ring_mask = wlan_cfg_get_rxdma2host_ring_mask(
1296 					soc->wlan_cfg_ctx, intr_ctx_num);
1297 	int host2rxdma_ring_mask = wlan_cfg_get_host2rxdma_ring_mask(
1298 					soc->wlan_cfg_ctx, intr_ctx_num);
1299 
1300 	for (j = 0; j < HIF_MAX_GRP_IRQ; j++) {
1301 
1302 		if (tx_mask & (1 << j)) {
1303 			irq_id_map[num_irq++] =
1304 				(wbm2host_tx_completions_ring1 - j);
1305 		}
1306 
1307 		if (rx_mask & (1 << j)) {
1308 			irq_id_map[num_irq++] =
1309 				(reo2host_destination_ring1 - j);
1310 		}
1311 
1312 		if (rxdma2host_ring_mask & (1 << j)) {
1313 			irq_id_map[num_irq++] =
1314 				rxdma2host_destination_ring_mac1 -
1315 				wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, j);
1316 		}
1317 
1318 		if (host2rxdma_ring_mask & (1 << j)) {
1319 			irq_id_map[num_irq++] =
1320 				host2rxdma_host_buf_ring_mac1 -
1321 				wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, j);
1322 		}
1323 
1324 		if (rx_mon_mask & (1 << j)) {
1325 			irq_id_map[num_irq++] =
1326 				ppdu_end_interrupts_mac1 -
1327 				wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, j);
1328 			irq_id_map[num_irq++] =
1329 				rxdma2host_monitor_status_ring_mac1 -
1330 				wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, j);
1331 		}
1332 
1333 		if (rx_wbm_rel_ring_mask & (1 << j))
1334 			irq_id_map[num_irq++] = wbm2host_rx_release;
1335 
1336 		if (rx_err_ring_mask & (1 << j))
1337 			irq_id_map[num_irq++] = reo2host_exception;
1338 
1339 		if (reo_status_ring_mask & (1 << j))
1340 			irq_id_map[num_irq++] = reo2host_status;
1341 
1342 	}
1343 	*num_irq_r = num_irq;
1344 }
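/*
 * In integrated (non-MSI) mode each mask bit j maps to a fixed hardware IRQ
 * id, obtained by offsetting a per-ring base id (e.g.
 * wbm2host_tx_completions_ring1) by the ring or mac index, so a single
 * interrupt context can end up with several IRQ lines in irq_id_map.
 */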
1345 
1346 static void dp_soc_interrupt_map_calculate_msi(struct dp_soc *soc,
1347 		int intr_ctx_num, int *irq_id_map, int *num_irq_r,
1348 		int msi_vector_count, int msi_vector_start)
1349 {
1350 	int tx_mask = wlan_cfg_get_tx_ring_mask(
1351 					soc->wlan_cfg_ctx, intr_ctx_num);
1352 	int rx_mask = wlan_cfg_get_rx_ring_mask(
1353 					soc->wlan_cfg_ctx, intr_ctx_num);
1354 	int rx_mon_mask = wlan_cfg_get_rx_mon_ring_mask(
1355 					soc->wlan_cfg_ctx, intr_ctx_num);
1356 	int rx_err_ring_mask = wlan_cfg_get_rx_err_ring_mask(
1357 					soc->wlan_cfg_ctx, intr_ctx_num);
1358 	int rx_wbm_rel_ring_mask = wlan_cfg_get_rx_wbm_rel_ring_mask(
1359 					soc->wlan_cfg_ctx, intr_ctx_num);
1360 	int reo_status_ring_mask = wlan_cfg_get_reo_status_ring_mask(
1361 					soc->wlan_cfg_ctx, intr_ctx_num);
1362 	int rxdma2host_ring_mask = wlan_cfg_get_rxdma2host_ring_mask(
1363 					soc->wlan_cfg_ctx, intr_ctx_num);
1364 
1365 	unsigned int vector =
1366 		(intr_ctx_num % msi_vector_count) + msi_vector_start;
1367 	int num_irq = 0;
1368 
1369 	soc->intr_mode = DP_INTR_MSI;
1370 
1371 	if (tx_mask | rx_mask | rx_mon_mask | rx_err_ring_mask |
1372 	    rx_wbm_rel_ring_mask | reo_status_ring_mask | rxdma2host_ring_mask)
1373 		irq_id_map[num_irq++] =
1374 			pld_get_msi_irq(soc->osdev->dev, vector);
1375 
1376 	*num_irq_r = num_irq;
1377 }
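/*
 * In MSI mode each interrupt context is assigned exactly one IRQ: the vector
 * is picked round-robin as (intr_ctx_num % msi_vector_count) +
 * msi_vector_start, so all rings grouped into that context share the vector.
 */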
1378 
1379 static void dp_soc_interrupt_map_calculate(struct dp_soc *soc, int intr_ctx_num,
1380 				    int *irq_id_map, int *num_irq)
1381 {
1382 	int msi_vector_count, ret;
1383 	uint32_t msi_base_data, msi_vector_start;
1384 
1385 	ret = pld_get_user_msi_assignment(soc->osdev->dev, "DP",
1386 					    &msi_vector_count,
1387 					    &msi_base_data,
1388 					    &msi_vector_start);
1389 	if (ret)
1390 		dp_soc_interrupt_map_calculate_integrated(soc,
1391 				intr_ctx_num, irq_id_map, num_irq);
1392 
1393 	else
1394 		dp_soc_interrupt_map_calculate_msi(soc,
1395 				intr_ctx_num, irq_id_map, num_irq,
1396 				msi_vector_count, msi_vector_start);
1397 }
1398 
1399 /*
1400  * dp_soc_interrupt_attach() - Register handlers for DP interrupts
1401  * @txrx_soc: DP SOC handle
1402  *
1403  * Host driver will register for "DP_NUM_INTERRUPT_CONTEXTS" number of NAPI
1404  * contexts. Each NAPI context will have a tx_ring_mask, rx_ring_mask, and
1405  * rx_monitor_ring mask to indicate the rings that are processed by the handler.
1406  *
1407  * Return: 0 for success. nonzero for failure.
1408  */
1409 static QDF_STATUS dp_soc_interrupt_attach(void *txrx_soc)
1410 {
1411 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
1412 
1413 	int i = 0;
1414 	int num_irq = 0;
1415 
1416 	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
1417 		int ret = 0;
1418 
1419 		/* Map of IRQ ids registered with one interrupt context */
1420 		int irq_id_map[HIF_MAX_GRP_IRQ];
1421 
1422 		int tx_mask =
1423 			wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, i);
1424 		int rx_mask =
1425 			wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, i);
1426 		int rx_mon_mask =
1427 			dp_soc_get_mon_mask_for_interrupt_mode(soc, i);
1428 		int rx_err_ring_mask =
1429 			wlan_cfg_get_rx_err_ring_mask(soc->wlan_cfg_ctx, i);
1430 		int rx_wbm_rel_ring_mask =
1431 			wlan_cfg_get_rx_wbm_rel_ring_mask(soc->wlan_cfg_ctx, i);
1432 		int reo_status_ring_mask =
1433 			wlan_cfg_get_reo_status_ring_mask(soc->wlan_cfg_ctx, i);
1434 		int rxdma2host_ring_mask =
1435 			wlan_cfg_get_rxdma2host_ring_mask(soc->wlan_cfg_ctx, i);
1436 		int host2rxdma_ring_mask =
1437 			wlan_cfg_get_host2rxdma_ring_mask(soc->wlan_cfg_ctx, i);
1438 
1439 
1440 		soc->intr_ctx[i].dp_intr_id = i;
1441 		soc->intr_ctx[i].tx_ring_mask = tx_mask;
1442 		soc->intr_ctx[i].rx_ring_mask = rx_mask;
1443 		soc->intr_ctx[i].rx_mon_ring_mask = rx_mon_mask;
1444 		soc->intr_ctx[i].rx_err_ring_mask = rx_err_ring_mask;
1445 		soc->intr_ctx[i].rxdma2host_ring_mask = rxdma2host_ring_mask;
1446 		soc->intr_ctx[i].host2rxdma_ring_mask = host2rxdma_ring_mask;
1447 		soc->intr_ctx[i].rx_wbm_rel_ring_mask = rx_wbm_rel_ring_mask;
1448 		soc->intr_ctx[i].reo_status_ring_mask = reo_status_ring_mask;
1449 
1450 		soc->intr_ctx[i].soc = soc;
1451 
1452 		num_irq = 0;
1453 
1454 		dp_soc_interrupt_map_calculate(soc, i, &irq_id_map[0],
1455 					       &num_irq);
1456 
1457 		ret = hif_register_ext_group(soc->hif_handle,
1458 				num_irq, irq_id_map, dp_service_srngs,
1459 				&soc->intr_ctx[i], "dp_intr",
1460 				HIF_EXEC_NAPI_TYPE, QCA_NAPI_DEF_SCALE_BIN_SHIFT);
1461 
1462 		if (ret) {
1463 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1464 			FL("failed, ret = %d"), ret);
1465 
1466 			return QDF_STATUS_E_FAILURE;
1467 		}
1468 		soc->intr_ctx[i].lro_ctx = qdf_lro_init();
1469 	}
1470 
1471 	hif_configure_ext_group_interrupts(soc->hif_handle);
1472 
1473 	return QDF_STATUS_SUCCESS;
1474 }
1475 
1476 /*
1477  * dp_soc_interrupt_detach() - Deregister any allocations done for interrupts
1478  * @txrx_soc: DP SOC handle
1479  *
1480  * Return: void
1481  */
1482 static void dp_soc_interrupt_detach(void *txrx_soc)
1483 {
1484 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
1485 	int i;
1486 
1487 	if (soc->intr_mode == DP_INTR_POLL) {
1488 		qdf_timer_stop(&soc->int_timer);
1489 		qdf_timer_free(&soc->int_timer);
1490 	} else {
1491 		hif_deregister_exec_group(soc->hif_handle, "dp_intr");
1492 	}
1493 
1494 	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
1495 		soc->intr_ctx[i].tx_ring_mask = 0;
1496 		soc->intr_ctx[i].rx_ring_mask = 0;
1497 		soc->intr_ctx[i].rx_mon_ring_mask = 0;
1498 		soc->intr_ctx[i].rx_err_ring_mask = 0;
1499 		soc->intr_ctx[i].rx_wbm_rel_ring_mask = 0;
1500 		soc->intr_ctx[i].reo_status_ring_mask = 0;
1501 		soc->intr_ctx[i].rxdma2host_ring_mask = 0;
1502 		soc->intr_ctx[i].host2rxdma_ring_mask = 0;
1503 
1504 		qdf_lro_deinit(soc->intr_ctx[i].lro_ctx);
1505 	}
1506 }
1507 
1508 #define AVG_MAX_MPDUS_PER_TID 128
1509 #define AVG_TIDS_PER_CLIENT 2
1510 #define AVG_FLOWS_PER_TID 2
1511 #define AVG_MSDUS_PER_FLOW 128
1512 #define AVG_MSDUS_PER_MPDU 4
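/*
 * These averages size the common link descriptor pool set up below. For
 * illustration only (the real inputs come from wlan_cfg and the
 * hal_num_*_per_link_desc() helpers): assuming 64 clients and 6 MPDUs per
 * link descriptor, num_mpdu_link_descs = (64 * 2 * 128) / 6 ~= 2730.
 */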
1513 
1514 /*
1515  * Allocate and setup link descriptor pool that will be used by HW for
1516  * various link and queue descriptors and managed by WBM
1517  */
1518 static int dp_hw_link_desc_pool_setup(struct dp_soc *soc)
1519 {
1520 	int link_desc_size = hal_get_link_desc_size(soc->hal_soc);
1521 	int link_desc_align = hal_get_link_desc_align(soc->hal_soc);
1522 	uint32_t max_clients = wlan_cfg_get_max_clients(soc->wlan_cfg_ctx);
1523 	uint32_t num_mpdus_per_link_desc =
1524 		hal_num_mpdus_per_link_desc(soc->hal_soc);
1525 	uint32_t num_msdus_per_link_desc =
1526 		hal_num_msdus_per_link_desc(soc->hal_soc);
1527 	uint32_t num_mpdu_links_per_queue_desc =
1528 		hal_num_mpdu_links_per_queue_desc(soc->hal_soc);
1529 	uint32_t max_alloc_size = wlan_cfg_max_alloc_size(soc->wlan_cfg_ctx);
1530 	uint32_t total_link_descs, total_mem_size;
1531 	uint32_t num_mpdu_link_descs, num_mpdu_queue_descs;
1532 	uint32_t num_tx_msdu_link_descs, num_rx_msdu_link_descs;
1533 	uint32_t num_link_desc_banks;
1534 	uint32_t last_bank_size = 0;
1535 	uint32_t entry_size, num_entries;
1536 	int i;
1537 	uint32_t desc_id = 0;
1538 
1539 	/* Only Tx queue descriptors are allocated from the common link descriptor
1540 	 * pool. Rx queue descriptors (REO queue extension descriptors) are not
1541 	 * included here, because they are expected to be allocated contiguously
1542 	 * with the REO queue descriptors.
1543 	 */
1544 	num_mpdu_link_descs = (max_clients * AVG_TIDS_PER_CLIENT *
1545 		AVG_MAX_MPDUS_PER_TID) / num_mpdus_per_link_desc;
1546 
1547 	num_mpdu_queue_descs = num_mpdu_link_descs /
1548 		num_mpdu_links_per_queue_desc;
1549 
1550 	num_tx_msdu_link_descs = (max_clients * AVG_TIDS_PER_CLIENT *
1551 		AVG_FLOWS_PER_TID * AVG_MSDUS_PER_FLOW) /
1552 		num_msdus_per_link_desc;
1553 
1554 	num_rx_msdu_link_descs = (max_clients * AVG_TIDS_PER_CLIENT *
1555 		AVG_MAX_MPDUS_PER_TID * AVG_MSDUS_PER_MPDU) / 6;
1556 
1557 	num_entries = num_mpdu_link_descs + num_mpdu_queue_descs +
1558 		num_tx_msdu_link_descs + num_rx_msdu_link_descs;
1559 
1560 	/* Round up to power of 2 */
1561 	total_link_descs = 1;
1562 	while (total_link_descs < num_entries)
1563 		total_link_descs <<= 1;
1564 
1565 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
1566 		FL("total_link_descs: %u, link_desc_size: %d"),
1567 		total_link_descs, link_desc_size);
1568 	total_mem_size =  total_link_descs * link_desc_size;
1569 
1570 	total_mem_size += link_desc_align;
1571 
1572 	if (total_mem_size <= max_alloc_size) {
1573 		num_link_desc_banks = 0;
1574 		last_bank_size = total_mem_size;
1575 	} else {
1576 		num_link_desc_banks = (total_mem_size) /
1577 			(max_alloc_size - link_desc_align);
1578 		last_bank_size = total_mem_size %
1579 			(max_alloc_size - link_desc_align);
1580 	}
1581 
1582 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
1583 		FL("total_mem_size: %d, num_link_desc_banks: %u"),
1584 		total_mem_size, num_link_desc_banks);
1585 
1586 	for (i = 0; i < num_link_desc_banks; i++) {
1587 		soc->link_desc_banks[i].base_vaddr_unaligned =
1588 			qdf_mem_alloc_consistent(soc->osdev, soc->osdev->dev,
1589 			max_alloc_size,
1590 			&(soc->link_desc_banks[i].base_paddr_unaligned));
1591 		soc->link_desc_banks[i].size = max_alloc_size;
1592 
1593 		soc->link_desc_banks[i].base_vaddr = (void *)((unsigned long)(
1594 			soc->link_desc_banks[i].base_vaddr_unaligned) +
1595 			((unsigned long)(
1596 			soc->link_desc_banks[i].base_vaddr_unaligned) %
1597 			link_desc_align));
1598 
1599 		soc->link_desc_banks[i].base_paddr = (unsigned long)(
1600 			soc->link_desc_banks[i].base_paddr_unaligned) +
1601 			((unsigned long)(soc->link_desc_banks[i].base_vaddr) -
1602 			(unsigned long)(
1603 			soc->link_desc_banks[i].base_vaddr_unaligned));
1604 
1605 		if (!soc->link_desc_banks[i].base_vaddr_unaligned) {
1606 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1607 				FL("Link descriptor memory alloc failed"));
1608 			goto fail;
1609 		}
1610 	}
1611 
1612 	if (last_bank_size) {
1613 	/* Allocate last bank in case total memory required is not an exact
1614 		 * multiple of max_alloc_size
1615 		 */
1616 		soc->link_desc_banks[i].base_vaddr_unaligned =
1617 			qdf_mem_alloc_consistent(soc->osdev, soc->osdev->dev,
1618 			last_bank_size,
1619 			&(soc->link_desc_banks[i].base_paddr_unaligned));
1620 		soc->link_desc_banks[i].size = last_bank_size;
1621 
1622 		soc->link_desc_banks[i].base_vaddr = (void *)((unsigned long)
1623 			(soc->link_desc_banks[i].base_vaddr_unaligned) +
1624 			((unsigned long)(
1625 			soc->link_desc_banks[i].base_vaddr_unaligned) %
1626 			link_desc_align));
1627 
1628 		soc->link_desc_banks[i].base_paddr =
1629 			(unsigned long)(
1630 			soc->link_desc_banks[i].base_paddr_unaligned) +
1631 			((unsigned long)(soc->link_desc_banks[i].base_vaddr) -
1632 			(unsigned long)(
1633 			soc->link_desc_banks[i].base_vaddr_unaligned));
1634 	}
1635 
1636 
1637 	/* Allocate and setup link descriptor idle list for HW internal use */
1638 	entry_size = hal_srng_get_entrysize(soc->hal_soc, WBM_IDLE_LINK);
1639 	total_mem_size = entry_size * total_link_descs;
1640 
1641 	if (total_mem_size <= max_alloc_size) {
1642 		void *desc;
1643 
1644 		if (dp_srng_setup(soc, &soc->wbm_idle_link_ring,
1645 			WBM_IDLE_LINK, 0, 0, total_link_descs)) {
1646 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1647 				FL("Link desc idle ring setup failed"));
1648 			goto fail;
1649 		}
1650 
1651 		hal_srng_access_start_unlocked(soc->hal_soc,
1652 			soc->wbm_idle_link_ring.hal_srng);
1653 
1654 		for (i = 0; i < MAX_LINK_DESC_BANKS &&
1655 			soc->link_desc_banks[i].base_paddr; i++) {
1656 			uint32_t num_entries = (soc->link_desc_banks[i].size -
1657 				((unsigned long)(
1658 				soc->link_desc_banks[i].base_vaddr) -
1659 				(unsigned long)(
1660 				soc->link_desc_banks[i].base_vaddr_unaligned)))
1661 				/ link_desc_size;
1662 			unsigned long paddr = (unsigned long)(
1663 				soc->link_desc_banks[i].base_paddr);
1664 
1665 			while (num_entries && (desc = hal_srng_src_get_next(
1666 				soc->hal_soc,
1667 				soc->wbm_idle_link_ring.hal_srng))) {
1668 				hal_set_link_desc_addr(desc,
1669 					LINK_DESC_COOKIE(desc_id, i), paddr);
1670 				num_entries--;
1671 				desc_id++;
1672 				paddr += link_desc_size;
1673 			}
1674 		}
1675 		hal_srng_access_end_unlocked(soc->hal_soc,
1676 			soc->wbm_idle_link_ring.hal_srng);
1677 	} else {
1678 		uint32_t num_scatter_bufs;
1679 		uint32_t num_entries_per_buf;
1680 		uint32_t rem_entries;
1681 		uint8_t *scatter_buf_ptr;
1682 		uint16_t scatter_buf_num;
1683 
1684 		soc->wbm_idle_scatter_buf_size =
1685 			hal_idle_list_scatter_buf_size(soc->hal_soc);
1686 		num_entries_per_buf = hal_idle_scatter_buf_num_entries(
1687 			soc->hal_soc, soc->wbm_idle_scatter_buf_size);
1688 		num_scatter_bufs = hal_idle_list_num_scatter_bufs(
1689 					soc->hal_soc, total_mem_size,
1690 					soc->wbm_idle_scatter_buf_size);
1691 
1692 		if (num_scatter_bufs > MAX_IDLE_SCATTER_BUFS) {
1693 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1694 					FL("scatter bufs size out of bounds"));
1695 			goto fail;
1696 		}
1697 
1698 		for (i = 0; i < num_scatter_bufs; i++) {
1699 			soc->wbm_idle_scatter_buf_base_vaddr[i] =
1700 				qdf_mem_alloc_consistent(soc->osdev,
1701 							soc->osdev->dev,
1702 				soc->wbm_idle_scatter_buf_size,
1703 				&(soc->wbm_idle_scatter_buf_base_paddr[i]));
1704 			if (soc->wbm_idle_scatter_buf_base_vaddr[i] == NULL) {
1705 				QDF_TRACE(QDF_MODULE_ID_DP,
1706 						QDF_TRACE_LEVEL_ERROR,
1707 					FL("Scatter list memory alloc failed"));
1708 				goto fail;
1709 			}
1710 		}
1711 
1712 		/* Populate idle list scatter buffers with link descriptor
1713 		 * pointers
1714 		 */
1715 		scatter_buf_num = 0;
1716 		scatter_buf_ptr = (uint8_t *)(
1717 			soc->wbm_idle_scatter_buf_base_vaddr[scatter_buf_num]);
1718 		rem_entries = num_entries_per_buf;
1719 
1720 		for (i = 0; i < MAX_LINK_DESC_BANKS &&
1721 			soc->link_desc_banks[i].base_paddr; i++) {
1722 			uint32_t num_link_descs =
1723 				(soc->link_desc_banks[i].size -
1724 				((unsigned long)(
1725 				soc->link_desc_banks[i].base_vaddr) -
1726 				(unsigned long)(
1727 				soc->link_desc_banks[i].base_vaddr_unaligned)))
1728 				/ link_desc_size;
1729 			unsigned long paddr = (unsigned long)(
1730 				soc->link_desc_banks[i].base_paddr);
1731 
1732 			while (num_link_descs) {
1733 				hal_set_link_desc_addr((void *)scatter_buf_ptr,
1734 					LINK_DESC_COOKIE(desc_id, i), paddr);
1735 				num_link_descs--;
1736 				desc_id++;
1737 				paddr += link_desc_size;
1738 				rem_entries--;
1739 				if (rem_entries) {
1740 					scatter_buf_ptr += entry_size;
1741 				} else {
1742 					rem_entries = num_entries_per_buf;
1743 					scatter_buf_num++;
1744 
1745 					if (scatter_buf_num >= num_scatter_bufs)
1746 						break;
1747 
1748 					scatter_buf_ptr = (uint8_t *)(
1749 						soc->wbm_idle_scatter_buf_base_vaddr[
1750 						scatter_buf_num]);
1751 				}
1752 			}
1753 		}
1754 		/* Setup link descriptor idle list in HW */
1755 		hal_setup_link_idle_list(soc->hal_soc,
1756 			soc->wbm_idle_scatter_buf_base_paddr,
1757 			soc->wbm_idle_scatter_buf_base_vaddr,
1758 			num_scatter_bufs, soc->wbm_idle_scatter_buf_size,
1759 			(uint32_t)(scatter_buf_ptr -
1760 			(uint8_t *)(soc->wbm_idle_scatter_buf_base_vaddr[
1761 			scatter_buf_num-1])), total_link_descs);
1762 	}
1763 	return 0;
1764 
1765 fail:
1766 	if (soc->wbm_idle_link_ring.hal_srng) {
1767 		dp_srng_cleanup(soc->hal_soc, &soc->wbm_idle_link_ring,
1768 		dp_srng_cleanup(soc, &soc->wbm_idle_link_ring,
1769 	}
1770 
1771 	for (i = 0; i < MAX_IDLE_SCATTER_BUFS; i++) {
1772 		if (soc->wbm_idle_scatter_buf_base_vaddr[i]) {
1773 			qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
1774 				soc->wbm_idle_scatter_buf_size,
1775 				soc->wbm_idle_scatter_buf_base_vaddr[i],
1776 				soc->wbm_idle_scatter_buf_base_paddr[i], 0);
1777 			soc->wbm_idle_scatter_buf_base_vaddr[i] = NULL;
1778 		}
1779 	}
1780 
1781 	for (i = 0; i < MAX_LINK_DESC_BANKS; i++) {
1782 		if (soc->link_desc_banks[i].base_vaddr_unaligned) {
1783 			qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
1784 				soc->link_desc_banks[i].size,
1785 				soc->link_desc_banks[i].base_vaddr_unaligned,
1786 				soc->link_desc_banks[i].base_paddr_unaligned,
1787 				0);
1788 			soc->link_desc_banks[i].base_vaddr_unaligned = NULL;
1789 		}
1790 	}
1791 	return QDF_STATUS_E_FAILURE;
1792 }
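
/*
 * Note on the population loops above (illustrative summary, not new logic):
 * the number of usable descriptors contributed by a bank is derived as
 *
 *	num = (bank_size - (base_vaddr - base_vaddr_unaligned)) / link_desc_size
 *
 * i.e. the bytes left after the alignment adjustment, divided by the size of
 * one link descriptor, and each descriptor is handed to HW as a
 * (LINK_DESC_COOKIE(desc_id, bank_id), paddr) pair.
 */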
1793 
1794 /*
1795  * Free link descriptor pool that was set up in HW
1796  */
1797 static void dp_hw_link_desc_pool_cleanup(struct dp_soc *soc)
1798 {
1799 	int i;
1800 
1801 	if (soc->wbm_idle_link_ring.hal_srng) {
1802 		dp_srng_cleanup(soc, &soc->wbm_idle_link_ring,
1803 			WBM_IDLE_LINK, 0);
1804 	}
1805 
1806 	for (i = 0; i < MAX_IDLE_SCATTER_BUFS; i++) {
1807 		if (soc->wbm_idle_scatter_buf_base_vaddr[i]) {
1808 			qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
1809 				soc->wbm_idle_scatter_buf_size,
1810 				soc->wbm_idle_scatter_buf_base_vaddr[i],
1811 				soc->wbm_idle_scatter_buf_base_paddr[i], 0);
1812 			soc->wbm_idle_scatter_buf_base_vaddr[i] = NULL;
1813 		}
1814 	}
1815 
1816 	for (i = 0; i < MAX_LINK_DESC_BANKS; i++) {
1817 		if (soc->link_desc_banks[i].base_vaddr_unaligned) {
1818 			qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
1819 				soc->link_desc_banks[i].size,
1820 				soc->link_desc_banks[i].base_vaddr_unaligned,
1821 				soc->link_desc_banks[i].base_paddr_unaligned,
1822 				0);
1823 			soc->link_desc_banks[i].base_vaddr_unaligned = NULL;
1824 		}
1825 	}
1826 }
1827 
1828 #define REO_DST_RING_SIZE_QCA6290 1024
1829 #define REO_DST_RING_SIZE_QCA8074 2048
1830 
1831 /*
1832  * dp_wds_aging_timer_fn() - Timer callback function for WDS aging
1833  * @soc: Datapath SOC handle
1834  *
1835  * This is a timer function used to age out stale AST nodes from
1836  * the AST table
1837  */
1838 #ifdef FEATURE_WDS
1839 static void dp_wds_aging_timer_fn(void *soc_hdl)
1840 {
1841 	struct dp_soc *soc = (struct dp_soc *) soc_hdl;
1842 	struct dp_pdev *pdev;
1843 	struct dp_vdev *vdev;
1844 	struct dp_peer *peer;
1845 	struct dp_ast_entry *ase, *temp_ase;
1846 	int i;
1847 
1848 	qdf_spin_lock_bh(&soc->ast_lock);
1849 
1850 	for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) {
1851 		pdev = soc->pdev_list[i];
1852 		qdf_spin_lock_bh(&pdev->vdev_list_lock);
1853 		DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
1854 			DP_VDEV_ITERATE_PEER_LIST(vdev, peer) {
1855 				DP_PEER_ITERATE_ASE_LIST(peer, ase, temp_ase) {
1856 					/*
1857 					 * Do not expire static ast entries
1858 					 * and HM WDS entries
1859 					 */
1860 					if (ase->type != CDP_TXRX_AST_TYPE_WDS)
1861 						continue;
1862 
1863 					if (ase->is_active) {
1864 						ase->is_active = FALSE;
1865 						continue;
1866 					}
1867 
1868 					DP_STATS_INC(soc, ast.aged_out, 1);
1869 					dp_peer_del_ast(soc, ase);
1870 				}
1871 			}
1872 		}
1873 		qdf_spin_unlock_bh(&pdev->vdev_list_lock);
1874 	}
1875 
1876 	qdf_spin_unlock_bh(&soc->ast_lock);
1877 
1878 	if (qdf_atomic_read(&soc->cmn_init_done))
1879 		qdf_timer_mod(&soc->wds_aging_timer, DP_WDS_AGING_TIMER_DEFAULT_MS);
1880 }
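
/*
 * Worked example of the aging above (descriptive only): a WDS AST entry that
 * stops being refreshed is seen with is_active set on the first timer expiry,
 * which only clears the flag; the entry is deleted on the following expiry.
 * With DP_WDS_AGING_TIMER_DEFAULT_MS of 120000, a stale entry therefore
 * survives roughly one to two aging periods (about 2-4 minutes) before it is
 * removed via dp_peer_del_ast().
 */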
1881 
1882 
1883 /*
1884  * dp_soc_wds_attach() - Setup WDS timer and AST table
1885  * @soc:		Datapath SOC handle
1886  *
1887  * Return: None
1888  */
1889 static void dp_soc_wds_attach(struct dp_soc *soc)
1890 {
1891 	qdf_timer_init(soc->osdev, &soc->wds_aging_timer,
1892 			dp_wds_aging_timer_fn, (void *)soc,
1893 			QDF_TIMER_TYPE_WAKE_APPS);
1894 
1895 	qdf_timer_mod(&soc->wds_aging_timer, DP_WDS_AGING_TIMER_DEFAULT_MS);
1896 }
1897 
1898 /*
1899  * dp_soc_wds_detach() - Detach WDS data structures and timers
1900  * @soc: Datapath SOC handle
1901  *
1902  * Return: None
1903  */
1904 static void dp_soc_wds_detach(struct dp_soc *soc)
1905 {
1906 	qdf_timer_stop(&soc->wds_aging_timer);
1907 	qdf_timer_free(&soc->wds_aging_timer);
1908 }
1909 #else
1910 static void dp_soc_wds_attach(struct dp_soc *soc)
1911 {
1912 }
1913 
1914 static void dp_soc_wds_detach(struct dp_soc *soc)
1915 {
1916 }
1917 #endif
1918 
1919 /*
1920  * dp_soc_reset_cpu_ring_map() - Reset cpu ring map
1921  * @soc: Datapath soc handle
1922  *
1923  * This API resets the default cpu ring map
1924  */
1925 
1926 static void dp_soc_reset_cpu_ring_map(struct dp_soc *soc)
1927 {
1928 	uint8_t i;
1929 	int nss_config = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
1930 
1931 	for (i = 0; i < WLAN_CFG_INT_NUM_CONTEXTS; i++) {
1932 		if (nss_config == 1) {
1933 			/*
1934 			 * Setting Tx ring map for one nss offloaded radio
1935 			 */
1936 			soc->tx_ring_map[i] = dp_cpu_ring_map[DP_NSS_FIRST_RADIO_OFFLOADED_MAP][i];
1937 		} else if (nss_config == 2) {
1938 			/*
1939 			 * Setting Tx ring for two nss offloaded radios
1940 			 */
1941 			soc->tx_ring_map[i] = dp_cpu_ring_map[DP_NSS_SECOND_RADIO_OFFLOADED_MAP][i];
1942 		} else {
1943 			/*
1944 			 * Setting Tx ring map for all nss offloaded radios
1945 			 */
1946 			soc->tx_ring_map[i] = dp_cpu_ring_map[DP_NSS_ALL_RADIO_OFFLOADED_MAP][i];
1947 		}
1948 	}
1949 }
1950 
1951 /*
1952  * dp_soc_ring_if_nss_offloaded() - find if ring is offloaded to NSS
1953  * @soc - DP soc handle
1954  * @ring_type - ring type
1955  * @ring_num - ring_num
1956  *
1957  * Return: non-zero if the ring is offloaded to NSS, 0 otherwise
1958  */
1959 static uint8_t dp_soc_ring_if_nss_offloaded(struct dp_soc *soc, enum hal_ring_type ring_type, int ring_num)
1960 {
1961 	uint8_t nss_config = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
1962 	uint8_t status = 0;
1963 
1964 	switch (ring_type) {
1965 	case WBM2SW_RELEASE:
1966 	case REO_DST:
1967 	case RXDMA_BUF:
1968 		status = ((nss_config) & (1 << ring_num));
1969 		break;
1970 	default:
1971 		break;
1972 	}
1973 
1974 	return status;
1975 }
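
/*
 * Worked example (descriptive only): with nss_config == 0x3 (both radios
 * offloaded, i.e. dp_nss_cfg_dbdc), REO_DST rings 0 and 1 both return
 * non-zero here (bits 0 and 1 set), so dp_soc_reset_intr_mask() below clears
 * their interrupt masks; with nss_config == 0x1 only ring 0 is treated as
 * offloaded.
 */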
1976 
1977 /*
1978  * dp_soc_reset_intr_mask() - reset interrupt mask
1979  * @dp_soc - DP Soc handle
1980  *
1981  * Return: Return void
1982  */
1983 static void dp_soc_reset_intr_mask(struct dp_soc *soc)
1984 {
1985 	uint8_t j;
1986 	int *grp_mask = NULL;
1987 	int group_number, mask, num_ring;
1988 
1989 	/* number of tx ring */
1990 	num_ring = wlan_cfg_num_tcl_data_rings(soc->wlan_cfg_ctx);
1991 
1992 	/*
1993 	 * group mask for tx completion  ring.
1994 	 */
1995 	grp_mask =  &soc->wlan_cfg_ctx->int_tx_ring_mask[0];
1996 
1997 	/* loop and reset the mask for only offloaded ring */
1998 	for (j = 0; j < num_ring; j++) {
1999 		if (!dp_soc_ring_if_nss_offloaded(soc, WBM2SW_RELEASE, j)) {
2000 			continue;
2001 		}
2002 
2003 		/*
2004 		 * Group number corresponding to tx offloaded ring.
2005 		 */
2006 		group_number = dp_srng_find_ring_in_mask(j, grp_mask);
2007 		if (group_number < 0) {
2008 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2009 					FL("ring not part of any group; ring_type: %d,ring_num %d"),
2010 					WBM2SW_RELEASE, j);
2011 			return;
2012 		}
2013 
2014 		/* reset the tx mask for offloaded ring */
2015 		mask = wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, group_number);
2016 		mask &= (~(1 << j));
2017 
2018 		/*
2019 		 * reset the interrupt mask for offloaded ring.
2020 		 */
2021 		wlan_cfg_set_tx_ring_mask(soc->wlan_cfg_ctx, group_number, mask);
2022 	}
2023 
2024 	/* number of rx rings */
2025 	num_ring = wlan_cfg_num_reo_dest_rings(soc->wlan_cfg_ctx);
2026 
2027 	/*
2028 	 * group mask for reo destination ring.
2029 	 */
2030 	grp_mask = &soc->wlan_cfg_ctx->int_rx_ring_mask[0];
2031 
2032 	/* loop and reset the mask for only offloaded ring */
2033 	for (j = 0; j < num_ring; j++) {
2034 		if (!dp_soc_ring_if_nss_offloaded(soc, REO_DST, j)) {
2035 			continue;
2036 		}
2037 
2038 		/*
2039 		 * Group number corresponding to rx offloaded ring.
2040 		 */
2041 		group_number = dp_srng_find_ring_in_mask(j, grp_mask);
2042 		if (group_number < 0) {
2043 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2044 					FL("ring not part of any group; ring_type: %d,ring_num %d"),
2045 					REO_DST, j);
2046 			return;
2047 		}
2048 
2049 		/* set the interrupt mask for offloaded ring */
2050 		mask =  wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, group_number);
2051 		mask &= (~(1 << j));
2052 
2053 		/*
2054 		 * set the interrupt mask to zero for rx offloaded radio.
2055 		 */
2056 		wlan_cfg_set_rx_ring_mask(soc->wlan_cfg_ctx, group_number, mask);
2057 	}
2058 
2059 	/*
2060 	 * group mask for Rx buffer refill ring
2061 	 */
2062 	grp_mask = &soc->wlan_cfg_ctx->int_host2rxdma_ring_mask[0];
2063 
2064 	/* loop and reset the mask for only offloaded ring */
2065 	for (j = 0; j < MAX_PDEV_CNT; j++) {
2066 		if (!dp_soc_ring_if_nss_offloaded(soc, RXDMA_BUF, j)) {
2067 			continue;
2068 		}
2069 
2070 		/*
2071 		 * Group number corresponding to rx offloaded ring.
2072 		 */
2073 		group_number = dp_srng_find_ring_in_mask(j, grp_mask);
2074 		if (group_number < 0) {
2075 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2076 					FL("ring not part of any group; ring_type: %d,ring_num %d"),
2077 					RXDMA_BUF, j);
2078 			return;
2079 		}
2080 
2081 		/* set the interrupt mask for offloaded ring */
2082 		mask =  wlan_cfg_get_host2rxdma_ring_mask(soc->wlan_cfg_ctx,
2083 				group_number);
2084 		mask &= (~(1 << j));
2085 
2086 		/*
2087 		 * set the interrupt mask to zero for rx offloaded radio.
2088 		 */
2089 		wlan_cfg_set_host2rxdma_ring_mask(soc->wlan_cfg_ctx,
2090 			group_number, mask);
2091 	}
2092 }
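
/*
 * Worked example of the mask arithmetic above (descriptive only): if a group
 * starts with tx ring mask 0xF and TCL data ring 1 is NSS offloaded, then
 *
 *	mask = 0xF & ~(1 << 1) = 0xD
 *
 * so the host stops servicing completion interrupts for that ring while
 * rings 0, 2 and 3 remain host driven.
 */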
2093 
2094 #ifdef IPA_OFFLOAD
2095 /**
2096  * dp_reo_remap_config() - configure reo remap register value based
2097  *                         on nss configuration.
2098  *		based on offload_radio value below remap configuration
2099  *		gets applied.
2100  *		0 - both Radios handled by host (remap rings 1, 2, 3 & 4)
2101  *		1 - 1st Radio handled by NSS (remap rings 2, 3 & 4)
2102  *		2 - 2nd Radio handled by NSS (remap rings 1, 2 & 4)
2103  *		3 - both Radios handled by NSS (remap not required)
2104  *		4 - IPA OFFLOAD enabled (remap rings 1,2 & 3)
2105  *
2106  * @remap1: output parameter indicates reo remap 1 register value
2107  * @remap2: output parameter indicates reo remap 2 register value
2108  * Return: bool type, true if remap is configured else false.
2109  */
2110 static bool dp_reo_remap_config(struct dp_soc *soc,
2111 				uint32_t *remap1,
2112 				uint32_t *remap2)
2113 {
2114 
2115 	*remap1 = ((0x1 << 0) | (0x2 << 3) | (0x3 << 6) | (0x1 << 9) |
2116 		(0x2 << 12) | (0x3 << 15) | (0x1 << 18) | (0x2 << 21)) << 8;
2117 
2118 	*remap2 = ((0x3 << 0) | (0x1 << 3) | (0x2 << 6) | (0x3 << 9) |
2119 		(0x1 << 12) | (0x2 << 15) | (0x3 << 18) | (0x1 << 21)) << 8;
2120 
2121 	return true;
2122 }
2123 #else
2124 static bool dp_reo_remap_config(struct dp_soc *soc,
2125 				uint32_t *remap1,
2126 				uint32_t *remap2)
2127 {
2128 	uint8_t offload_radio = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
2129 
2130 	switch (offload_radio) {
2131 	case 0:
2132 		*remap1 = ((0x1 << 0) | (0x2 << 3) | (0x3 << 6) |
2133 			(0x4 << 9) | (0x1 << 12) | (0x2 << 15) |
2134 			(0x3 << 18) | (0x4 << 21)) << 8;
2135 
2136 		*remap2 = ((0x1 << 0) | (0x2 << 3) | (0x3 << 6) |
2137 			(0x4 << 9) | (0x1 << 12) | (0x2 << 15) |
2138 			(0x3 << 18) | (0x4 << 21)) << 8;
2139 		break;
2140 
2141 	case 1:
2142 		*remap1 = ((0x2 << 0) | (0x3 << 3) | (0x4 << 6) |
2143 			(0x2 << 9) | (0x3 << 12) | (0x4 << 15) |
2144 			(0x2 << 18) | (0x3 << 21)) << 8;
2145 
2146 		*remap2 = ((0x4 << 0) | (0x2 << 3) | (0x3 << 6) |
2147 			(0x4 << 9) | (0x2 << 12) | (0x3 << 15) |
2148 			(0x4 << 18) | (0x2 << 21)) << 8;
2149 		break;
2150 
2151 	case 2:
2152 		*remap1 = ((0x1 << 0) | (0x3 << 3) | (0x4 << 6) |
2153 			(0x1 << 9) | (0x3 << 12) | (0x4 << 15) |
2154 			(0x1 << 18) | (0x3 << 21)) << 8;
2155 
2156 		*remap2 = ((0x4 << 0) | (0x1 << 3) | (0x3 << 6) |
2157 			(0x4 << 9) | (0x1 << 12) | (0x3 << 15) |
2158 			(0x4 << 18) | (0x1 << 21)) << 8;
2159 		break;
2160 
2161 	case 3:
2162 		/* return false if both radios are offloaded to NSS */
2163 		return false;
2164 	}
2165 	return true;
2166 }
2167 #endif
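
/*
 * Minimal sketch of the packing used by both dp_reo_remap_config() variants
 * above. dp_reo_remap_pack_example() is illustrative only (the 3-bit field
 * layout is inferred from the constants above, not taken from the HAL
 * definition) and is not called anywhere in the datapath.
 */
static inline uint32_t dp_reo_remap_pack_example(const uint8_t ring[8])
{
	uint32_t val = 0;
	int i;

	/* eight destination ring indices, 3 bits each, shifted up by 8 */
	for (i = 0; i < 8; i++)
		val |= (uint32_t)(ring[i] & 0x7) << (3 * i);

	return val << 8;
}
/*
 * e.g. ring[] = {1, 2, 3, 4, 1, 2, 3, 4} reproduces the "case 0" value
 * written to *remap1 and *remap2 in the non-IPA variant above.
 */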
2168 
2169 /*
2170  * dp_reo_frag_dst_set() - configure reo register to set the
2171  *                        fragment destination ring
2172  * @soc : Datapath soc
2173  * @frag_dst_ring : output parameter to set fragment destination ring
2174  *
2175  * Based on offload_radio below, the fragment destination ring is selected:
2176  * 0 - TCL
2177  * 1 - SW1
2178  * 2 - SW2
2179  * 3 - SW3
2180  * 4 - SW4
2181  * 5 - Release
2182  * 6 - FW
2183  * 7 - alternate select
2184  *
2185  * return: void
2186  */
2187 static void dp_reo_frag_dst_set(struct dp_soc *soc, uint8_t *frag_dst_ring)
2188 {
2189 	uint8_t offload_radio = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
2190 
2191 	switch (offload_radio) {
2192 	case 0:
2193 		*frag_dst_ring = HAL_SRNG_REO_EXCEPTION;
2194 		break;
2195 	case 3:
2196 		*frag_dst_ring = HAL_SRNG_REO_ALTERNATE_SELECT;
2197 		break;
2198 	default:
2199 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2200 				FL("dp_reo_frag_dst_set invalid offload radio config"));
2201 		break;
2202 	}
2203 }
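
/*
 * For reference (descriptive only): with the encoding listed above, a fully
 * host-driven SOC (offload_radio == 0) steers fragments to the REO exception
 * path, while a fully NSS-offloaded SOC (offload_radio == 3, dp_nss_cfg_dbdc)
 * selects the alternate-select option (encoding 7) instead.
 */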
2204 
2205 /*
2206  * dp_soc_cmn_setup() - Common SoC level initialization
2207  * @soc:		Datapath SOC handle
2208  *
2209  * This is an internal function used to setup common SOC data structures,
2210  * to be called from PDEV attach after receiving HW mode capabilities from FW
2211  */
2212 static int dp_soc_cmn_setup(struct dp_soc *soc)
2213 {
2214 	int i;
2215 	struct hal_reo_params reo_params;
2216 	int tx_ring_size;
2217 	int tx_comp_ring_size;
2218 	int reo_dst_ring_size;
2219 	uint32_t entries;
2220 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
2221 
2222 	if (qdf_atomic_read(&soc->cmn_init_done))
2223 		return 0;
2224 
2225 	if (dp_hw_link_desc_pool_setup(soc))
2226 		goto fail1;
2227 
2228 	soc_cfg_ctx = soc->wlan_cfg_ctx;
2229 	/* Setup SRNG rings */
2230 	/* Common rings */
2231 	if (dp_srng_setup(soc, &soc->wbm_desc_rel_ring, SW2WBM_RELEASE, 0, 0,
2232 		wlan_cfg_get_dp_soc_wbm_release_ring_size(soc_cfg_ctx))) {
2233 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2234 			FL("dp_srng_setup failed for wbm_desc_rel_ring"));
2235 		goto fail1;
2236 	}
2237 
2238 
2239 	soc->num_tcl_data_rings = 0;
2240 	/* Tx data rings */
2241 	if (!wlan_cfg_per_pdev_tx_ring(soc_cfg_ctx)) {
2242 		soc->num_tcl_data_rings =
2243 			wlan_cfg_num_tcl_data_rings(soc_cfg_ctx);
2244 		tx_comp_ring_size =
2245 			wlan_cfg_tx_comp_ring_size(soc_cfg_ctx);
2246 		tx_ring_size =
2247 			wlan_cfg_tx_ring_size(soc_cfg_ctx);
2248 		for (i = 0; i < soc->num_tcl_data_rings; i++) {
2249 			if (dp_srng_setup(soc, &soc->tcl_data_ring[i],
2250 				TCL_DATA, i, 0, tx_ring_size)) {
2251 				QDF_TRACE(QDF_MODULE_ID_DP,
2252 					QDF_TRACE_LEVEL_ERROR,
2253 					FL("dp_srng_setup failed for tcl_data_ring[%d]"), i);
2254 				goto fail1;
2255 			}
2256 			/*
2257 			 * TBD: Set IPA WBM ring size with ini IPA UC tx buffer
2258 			 * count
2259 			 */
2260 			if (dp_srng_setup(soc, &soc->tx_comp_ring[i],
2261 				WBM2SW_RELEASE, i, 0, tx_comp_ring_size)) {
2262 				QDF_TRACE(QDF_MODULE_ID_DP,
2263 					QDF_TRACE_LEVEL_ERROR,
2264 					FL("dp_srng_setup failed for tx_comp_ring[%d]"), i);
2265 				goto fail1;
2266 			}
2267 		}
2268 	} else {
2269 		/* This will be incremented during per pdev ring setup */
2270 		soc->num_tcl_data_rings = 0;
2271 	}
2272 
2273 	if (dp_tx_soc_attach(soc)) {
2274 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2275 				FL("dp_tx_soc_attach failed"));
2276 		goto fail1;
2277 	}
2278 
2279 	entries = wlan_cfg_get_dp_soc_tcl_cmd_ring_size(soc_cfg_ctx);
2280 	/* TCL command and status rings */
2281 	if (dp_srng_setup(soc, &soc->tcl_cmd_ring, TCL_CMD, 0, 0,
2282 			  entries)) {
2283 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2284 			FL("dp_srng_setup failed for tcl_cmd_ring"));
2285 		goto fail1;
2286 	}
2287 
2288 	entries = wlan_cfg_get_dp_soc_tcl_status_ring_size(soc_cfg_ctx);
2289 	if (dp_srng_setup(soc, &soc->tcl_status_ring, TCL_STATUS, 0, 0,
2290 			  entries)) {
2291 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2292 			FL("dp_srng_setup failed for tcl_status_ring"));
2293 		goto fail1;
2294 	}
2295 
2296 	reo_dst_ring_size = wlan_cfg_get_reo_dst_ring_size(soc->wlan_cfg_ctx);
2297 
2298 	/* TBD: call dp_tx_init to setup Tx SW descriptors and MSDU extension
2299 	 * descriptors
2300 	 */
2301 
2302 	/* Rx data rings */
2303 	if (!wlan_cfg_per_pdev_rx_ring(soc_cfg_ctx)) {
2304 		soc->num_reo_dest_rings =
2305 			wlan_cfg_num_reo_dest_rings(soc_cfg_ctx);
2306 		QDF_TRACE(QDF_MODULE_ID_DP,
2307 			QDF_TRACE_LEVEL_INFO,
2308 			FL("num_reo_dest_rings %d"), soc->num_reo_dest_rings);
2309 		for (i = 0; i < soc->num_reo_dest_rings; i++) {
2310 			if (dp_srng_setup(soc, &soc->reo_dest_ring[i], REO_DST,
2311 				i, 0, reo_dst_ring_size)) {
2312 				QDF_TRACE(QDF_MODULE_ID_DP,
2313 					  QDF_TRACE_LEVEL_ERROR,
2314 					  FL(RNG_ERR "reo_dest_ring [%d]"), i);
2315 				goto fail1;
2316 			}
2317 		}
2318 	} else {
2319 		/* This will be incremented during per pdev ring setup */
2320 		soc->num_reo_dest_rings = 0;
2321 	}
2322 
2323 	entries = wlan_cfg_get_dp_soc_rxdma_err_dst_ring_size(soc_cfg_ctx);
2324 	/* LMAC RxDMA to SW Rings configuration */
2325 	if (!wlan_cfg_per_pdev_lmac_ring(soc_cfg_ctx)) {
2326 		/* Only valid for MCL */
2327 		struct dp_pdev *pdev = soc->pdev_list[0];
2328 
2329 		for (i = 0; i < MAX_RX_MAC_RINGS; i++) {
2330 			if (dp_srng_setup(soc, &pdev->rxdma_err_dst_ring[i],
2331 					  RXDMA_DST, 0, i,
2332 					  entries)) {
2333 				QDF_TRACE(QDF_MODULE_ID_DP,
2334 					  QDF_TRACE_LEVEL_ERROR,
2335 					  FL(RNG_ERR "rxdma_err_dst_ring"));
2336 				goto fail1;
2337 			}
2338 		}
2339 	}
2340 	/* TBD: call dp_rx_init to setup Rx SW descriptors */
2341 
2342 	/* REO reinjection ring */
2343 	entries = wlan_cfg_get_dp_soc_reo_reinject_ring_size(soc_cfg_ctx);
2344 	if (dp_srng_setup(soc, &soc->reo_reinject_ring, REO_REINJECT, 0, 0,
2345 			  entries)) {
2346 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2347 			  FL("dp_srng_setup failed for reo_reinject_ring"));
2348 		goto fail1;
2349 	}
2350 
2351 
2352 	/* Rx release ring */
2353 	if (dp_srng_setup(soc, &soc->rx_rel_ring, WBM2SW_RELEASE, 3, 0,
2354 		wlan_cfg_get_dp_soc_rx_release_ring_size(soc_cfg_ctx))) {
2355 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2356 			  FL("dp_srng_setup failed for rx_rel_ring"));
2357 		goto fail1;
2358 	}
2359 
2360 
2361 	/* Rx exception ring */
2362 	entries = wlan_cfg_get_dp_soc_reo_exception_ring_size(soc_cfg_ctx);
2363 	if (dp_srng_setup(soc, &soc->reo_exception_ring,
2364 			  REO_EXCEPTION, 0, MAX_REO_DEST_RINGS, entries)) {
2365 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2366 			  FL("dp_srng_setup failed for reo_exception_ring"));
2367 		goto fail1;
2368 	}
2369 
2370 
2371 	/* REO command and status rings */
2372 	if (dp_srng_setup(soc, &soc->reo_cmd_ring, REO_CMD, 0, 0,
2373 		wlan_cfg_get_dp_soc_reo_cmd_ring_size(soc_cfg_ctx))) {
2374 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2375 			FL("dp_srng_setup failed for reo_cmd_ring"));
2376 		goto fail1;
2377 	}
2378 
2379 	hal_reo_init_cmd_ring(soc->hal_soc, soc->reo_cmd_ring.hal_srng);
2380 	TAILQ_INIT(&soc->rx.reo_cmd_list);
2381 	qdf_spinlock_create(&soc->rx.reo_cmd_lock);
2382 
2383 	if (dp_srng_setup(soc, &soc->reo_status_ring, REO_STATUS, 0, 0,
2384 		wlan_cfg_get_dp_soc_reo_status_ring_size(soc_cfg_ctx))) {
2385 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2386 			FL("dp_srng_setup failed for reo_status_ring"));
2387 		goto fail1;
2388 	}
2389 
2390 	qdf_spinlock_create(&soc->ast_lock);
2391 	dp_soc_wds_attach(soc);
2392 
2393 	/* Reset the cpu ring map if radio is NSS offloaded */
2394 	if (wlan_cfg_get_dp_soc_nss_cfg(soc_cfg_ctx)) {
2395 		dp_soc_reset_cpu_ring_map(soc);
2396 		dp_soc_reset_intr_mask(soc);
2397 	}
2398 
2399 	/* Setup HW REO */
2400 	qdf_mem_zero(&reo_params, sizeof(reo_params));
2401 
2402 	if (wlan_cfg_is_rx_hash_enabled(soc_cfg_ctx)) {
2403 
2404 		/*
2405 		 * Reo ring remap is not required if both radios
2406 		 * are offloaded to NSS
2407 		 */
2408 		if (!dp_reo_remap_config(soc,
2409 					&reo_params.remap1,
2410 					&reo_params.remap2))
2411 			goto out;
2412 
2413 		reo_params.rx_hash_enabled = true;
2414 	}
2415 
2416 	/* setup the global rx defrag waitlist */
2417 	TAILQ_INIT(&soc->rx.defrag.waitlist);
2418 	soc->rx.defrag.timeout_ms =
2419 		wlan_cfg_get_rx_defrag_min_timeout(soc_cfg_ctx);
2420 	soc->rx.flags.defrag_timeout_check =
2421 		wlan_cfg_get_defrag_timeout_check(soc_cfg_ctx);
2422 	qdf_spinlock_create(&soc->rx.defrag.defrag_lock);
2423 
2424 out:
2425 	/*
2426 	 * set the fragment destination ring
2427 	 */
2428 	dp_reo_frag_dst_set(soc, &reo_params.frag_dst_ring);
2429 
2430 	hal_reo_setup(soc->hal_soc, &reo_params);
2431 
2432 	qdf_atomic_set(&soc->cmn_init_done, 1);
2433 	qdf_nbuf_queue_init(&soc->htt_stats.msg);
2434 	return 0;
2435 fail1:
2436 	/*
2437 	 * Cleanup will be done as part of soc_detach, which will
2438 	 * be called on pdev attach failure
2439 	 */
2440 	return QDF_STATUS_E_FAILURE;
2441 }
2442 
2443 static void dp_pdev_detach_wifi3(struct cdp_pdev *txrx_pdev, int force);
2444 
2445 static void dp_lro_hash_setup(struct dp_soc *soc)
2446 {
2447 	struct cdp_lro_hash_config lro_hash;
2448 
2449 	if (!wlan_cfg_is_lro_enabled(soc->wlan_cfg_ctx) &&
2450 		!wlan_cfg_is_rx_hash_enabled(soc->wlan_cfg_ctx)) {
2451 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2452 			 FL("LRO and RX hash are both disabled"));
2453 		return;
2454 	}
2455 
2456 	qdf_mem_zero(&lro_hash, sizeof(lro_hash));
2457 
2458 	if (wlan_cfg_is_lro_enabled(soc->wlan_cfg_ctx)) {
2459 		lro_hash.lro_enable = 1;
2460 		lro_hash.tcp_flag = QDF_TCPHDR_ACK;
2461 		lro_hash.tcp_flag_mask = QDF_TCPHDR_FIN | QDF_TCPHDR_SYN |
2462 			 QDF_TCPHDR_RST | QDF_TCPHDR_ACK | QDF_TCPHDR_URG |
2463 			 QDF_TCPHDR_ECE | QDF_TCPHDR_CWR;
2464 	}
2465 
2466 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW, FL("enabled"));
2467 	qdf_get_random_bytes(lro_hash.toeplitz_hash_ipv4,
2468 		 (sizeof(lro_hash.toeplitz_hash_ipv4[0]) *
2469 		 LRO_IPV4_SEED_ARR_SZ));
2470 	qdf_get_random_bytes(lro_hash.toeplitz_hash_ipv6,
2471 		 (sizeof(lro_hash.toeplitz_hash_ipv6[0]) *
2472 		 LRO_IPV6_SEED_ARR_SZ));
2473 
2474 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW,
2475 		 "lro_hash: lro_enable: 0x%x tcp_flag 0x%x tcp_flag_mask 0x%x",
2476 		 lro_hash.lro_enable, lro_hash.tcp_flag,
2477 		 lro_hash.tcp_flag_mask);
2478 
2479 	qdf_trace_hex_dump(QDF_MODULE_ID_DP,
2480 		 QDF_TRACE_LEVEL_ERROR,
2481 		 (void *)lro_hash.toeplitz_hash_ipv4,
2482 		 (sizeof(lro_hash.toeplitz_hash_ipv4[0]) *
2483 		 LRO_IPV4_SEED_ARR_SZ));
2484 
2485 	qdf_trace_hex_dump(QDF_MODULE_ID_DP,
2486 		 QDF_TRACE_LEVEL_ERROR,
2487 		 (void *)lro_hash.toeplitz_hash_ipv6,
2488 		 (sizeof(lro_hash.toeplitz_hash_ipv6[0]) *
2489 		 LRO_IPV6_SEED_ARR_SZ));
2490 
2491 	qdf_assert(soc->cdp_soc.ol_ops->lro_hash_config);
2492 
2493 	if (soc->cdp_soc.ol_ops->lro_hash_config)
2494 		(void)soc->cdp_soc.ol_ops->lro_hash_config
2495 			(soc->ctrl_psoc, &lro_hash);
2496 }
2497 
2498 /*
2499 * dp_rxdma_ring_setup() - configure the RX DMA rings
2500 * @soc: data path SoC handle
2501 * @pdev: Physical device handle
2502 *
2503 * Return: 0 - success, > 0 - failure
2504 */
2505 #ifdef QCA_HOST2FW_RXBUF_RING
2506 static int dp_rxdma_ring_setup(struct dp_soc *soc,
2507 	 struct dp_pdev *pdev)
2508 {
2509 	struct wlan_cfg_dp_pdev_ctxt *pdev_cfg_ctx;
2510 	int max_mac_rings;
2511 	int i;
2512 
2513 	pdev_cfg_ctx = pdev->wlan_cfg_ctx;
2514 	max_mac_rings = wlan_cfg_get_num_mac_rings(pdev_cfg_ctx);
2515 
2516 	for (i = 0; i < max_mac_rings; i++) {
2517 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
2518 			 "%s: pdev_id %d mac_id %d",
2519 			 __func__, pdev->pdev_id, i);
2520 		if (dp_srng_setup(soc, &pdev->rx_mac_buf_ring[i],
2521 			RXDMA_BUF, 1, i,
2522 			wlan_cfg_get_rx_dma_buf_ring_size(pdev_cfg_ctx))) {
2523 			QDF_TRACE(QDF_MODULE_ID_DP,
2524 				 QDF_TRACE_LEVEL_ERROR,
2525 				 FL("failed rx mac ring setup"));
2526 			return QDF_STATUS_E_FAILURE;
2527 		}
2528 	}
2529 	return QDF_STATUS_SUCCESS;
2530 }
2531 #else
2532 static int dp_rxdma_ring_setup(struct dp_soc *soc,
2533 	 struct dp_pdev *pdev)
2534 {
2535 	return QDF_STATUS_SUCCESS;
2536 }
2537 #endif
2538 
2539 /**
2540  * dp_dscp_tid_map_setup(): Initialize the dscp-tid maps
2541  * @pdev - DP_PDEV handle
2542  *
2543  * Return: void
2544  */
2545 static inline void
2546 dp_dscp_tid_map_setup(struct dp_pdev *pdev)
2547 {
2548 	uint8_t map_id;
2549 	for (map_id = 0; map_id < DP_MAX_TID_MAPS; map_id++) {
2550 		qdf_mem_copy(pdev->dscp_tid_map[map_id], default_dscp_tid_map,
2551 				sizeof(default_dscp_tid_map));
2552 	}
2553 	for (map_id = 0; map_id < HAL_MAX_HW_DSCP_TID_MAPS; map_id++) {
2554 		hal_tx_set_dscp_tid_map(pdev->soc->hal_soc,
2555 				pdev->dscp_tid_map[map_id],
2556 				map_id);
2557 	}
2558 }
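
/*
 * Minimal sketch of how the tables initialized above would be consulted on
 * the host side. dp_dscp_to_tid_example() is illustrative only (it assumes
 * each per-pdev map is a flat array indexed by the DSCP value, as implied by
 * the qdf_mem_copy() above) and is not called anywhere in the datapath.
 */
static inline uint8_t dp_dscp_to_tid_example(struct dp_pdev *pdev,
					     uint8_t map_id, uint8_t dscp)
{
	/* map_id selects one of DP_MAX_TID_MAPS tables, dscp indexes into it */
	return pdev->dscp_tid_map[map_id][dscp];
}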
2559 
2560 #ifdef QCA_SUPPORT_SON
2561 /**
2562  * dp_mark_peer_inact(): Update peer inactivity status
2563  * @peer_handle - datapath peer handle
2564  *
2565  * Return: void
2566  */
2567 void dp_mark_peer_inact(void *peer_handle, bool inactive)
2568 {
2569 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
2570 	struct dp_pdev *pdev;
2571 	struct dp_soc *soc;
2572 	bool inactive_old;
2573 
2574 	if (!peer)
2575 		return;
2576 
2577 	pdev = peer->vdev->pdev;
2578 	soc = pdev->soc;
2579 
2580 	inactive_old = peer->peer_bs_inact_flag == 1;
2581 	if (!inactive)
2582 		peer->peer_bs_inact = soc->pdev_bs_inact_reload;
2583 	peer->peer_bs_inact_flag = inactive ? 1 : 0;
2584 
2585 	if (inactive_old != inactive) {
2586 		/**
2587 		 * Note: a node lookup can happen in RX datapath context
2588 		 * when a node changes from inactive to active (at most once
2589 		 * per inactivity timeout threshold)
2590 		 */
2591 		if (soc->cdp_soc.ol_ops->record_act_change) {
2592 			soc->cdp_soc.ol_ops->record_act_change(
2593 					(void *)pdev->ctrl_pdev,
2594 					peer->mac_addr.raw, !inactive);
2595 		}
2596 	}
2597 }
2598 
2599 /**
2600  * dp_txrx_peer_find_inact_timeout_handler(): Inactivity timeout function
2601  *
2602  * Periodically checks the inactivity status
2603  */
2604 static os_timer_func(dp_txrx_peer_find_inact_timeout_handler)
2605 {
2606 	struct dp_pdev *pdev;
2607 	struct dp_vdev *vdev;
2608 	struct dp_peer *peer;
2609 	struct dp_soc *soc;
2610 	int i;
2611 
2612 	OS_GET_TIMER_ARG(soc, struct dp_soc *);
2613 
2614 	qdf_spin_lock(&soc->peer_ref_mutex);
2615 
2616 	for (i = 0; i < soc->pdev_count; i++) {
2617 	pdev = soc->pdev_list[i];
2618 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
2619 	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
2620 		if (vdev->opmode != wlan_op_mode_ap)
2621 			continue;
2622 
2623 		TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
2624 			if (!peer->authorize) {
2625 				/**
2626 				 * Inactivity check only interested in
2627 				 * connected node
2628 				 */
2629 				continue;
2630 			}
2631 			if (peer->peer_bs_inact > soc->pdev_bs_inact_reload) {
2632 				/**
2633 				 * This check ensures we do not wait extra long
2634 				 * due to the potential race condition
2635 				 */
2636 				peer->peer_bs_inact = soc->pdev_bs_inact_reload;
2637 			}
2638 			if (peer->peer_bs_inact > 0) {
2639 				/* Do not let it wrap around */
2640 				peer->peer_bs_inact--;
2641 			}
2642 			if (peer->peer_bs_inact == 0)
2643 				dp_mark_peer_inact(peer, true);
2644 		}
2645 	}
2646 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
2647 	}
2648 
2649 	qdf_spin_unlock(&soc->peer_ref_mutex);
2650 	qdf_timer_mod(&soc->pdev_bs_inact_timer,
2651 		      soc->pdev_bs_inact_interval * 1000);
2652 }
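
/*
 * Descriptive example of the countdown above: peer_bs_inact is reloaded to
 * pdev_bs_inact_reload whenever the peer is marked active and decremented on
 * every timer pass; with a reload value of N and a timer interval of
 * pdev_bs_inact_interval seconds, an authorized peer on an AP vdev is marked
 * inactive roughly N * pdev_bs_inact_interval seconds after it was last
 * marked active.
 */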
2653 
2654 
2655 /**
2656  * dp_free_inact_timer(): free inact timer
2657  * @soc - datapath soc handle
2658  *
2659  * Return: void
2660  */
2661 void dp_free_inact_timer(struct dp_soc *soc)
2662 {
2663 	qdf_timer_free(&soc->pdev_bs_inact_timer);
2664 }
2665 #else
2666 
2667 void dp_mark_peer_inact(void *peer, bool inactive)
2668 {
2669 	return;
2670 }
2671 
2672 void dp_free_inact_timer(struct dp_soc *soc)
2673 {
2674 	return;
2675 }
2676 
2677 #endif
2678 
2679 #ifdef IPA_OFFLOAD
2680 /**
2681  * dp_setup_ipa_rx_refill_buf_ring - Setup second Rx refill buffer ring
2682  * @soc: data path instance
2683  * @pdev: core txrx pdev context
2684  *
2685  * Return: QDF_STATUS_SUCCESS: success
2686  *         QDF_STATUS_E_RESOURCES: Error return
2687  *         QDF_STATUS_E_FAILURE: Error return
2688 static int dp_setup_ipa_rx_refill_buf_ring(struct dp_soc *soc,
2689 					   struct dp_pdev *pdev)
2690 {
2691 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
2692 	int entries;
2693 
2694 	soc_cfg_ctx = soc->wlan_cfg_ctx;
2695 	entries = wlan_cfg_get_dp_soc_rxdma_refill_ring_size(soc_cfg_ctx);
2696 
2697 	/* Setup second Rx refill buffer ring */
2698 	if (dp_srng_setup(soc, &pdev->rx_refill_buf_ring2, RXDMA_BUF,
2699 			  IPA_RX_REFILL_BUF_RING_IDX,
2700 			  pdev->pdev_id,
2701 			  entries)) {
2702 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2703 			FL("dp_srng_setup failed second rx refill ring"));
2704 		return QDF_STATUS_E_FAILURE;
2705 	}
2706 	return QDF_STATUS_SUCCESS;
2707 }
2708 
2709 /**
2710  * dp_cleanup_ipa_rx_refill_buf_ring - Cleanup second Rx refill buffer ring
2711  * @soc: data path instance
2712  * @pdev: core txrx pdev context
2713  *
2714  * Return: void
2715  */
2716 static void dp_cleanup_ipa_rx_refill_buf_ring(struct dp_soc *soc,
2717 					      struct dp_pdev *pdev)
2718 {
2719 	dp_srng_cleanup(soc, &pdev->rx_refill_buf_ring2, RXDMA_BUF,
2720 			IPA_RX_REFILL_BUF_RING_IDX);
2721 }
2722 
2723 #else
2724 static int dp_setup_ipa_rx_refill_buf_ring(struct dp_soc *soc,
2725 					   struct dp_pdev *pdev)
2726 {
2727 	return QDF_STATUS_SUCCESS;
2728 }
2729 
2730 static void dp_cleanup_ipa_rx_refill_buf_ring(struct dp_soc *soc,
2731 					      struct dp_pdev *pdev)
2732 {
2733 }
2734 #endif
2735 
2736 #ifndef QCA_WIFI_QCA6390
2737 static
2738 QDF_STATUS dp_mon_rings_setup(struct dp_soc *soc, struct dp_pdev *pdev)
2739 {
2740 	int mac_id = 0;
2741 	int pdev_id = pdev->pdev_id;
2742 	int entries;
2743 	struct wlan_cfg_dp_pdev_ctxt *pdev_cfg_ctx;
2744 
2745 	pdev_cfg_ctx = pdev->wlan_cfg_ctx;
2746 
2747 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
2748 		int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, pdev_id);
2749 
2750 		entries = wlan_cfg_get_dma_mon_buf_ring_size(pdev_cfg_ctx);
2751 		if (dp_srng_setup(soc, &pdev->rxdma_mon_buf_ring[mac_id],
2752 				  RXDMA_MONITOR_BUF, 0, mac_for_pdev,
2753 				  entries)) {
2754 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2755 				  FL(RNG_ERR "rxdma_mon_buf_ring "));
2756 			return QDF_STATUS_E_NOMEM;
2757 		}
2758 
2759 		entries = wlan_cfg_get_dma_mon_dest_ring_size(pdev_cfg_ctx);
2760 		if (dp_srng_setup(soc, &pdev->rxdma_mon_dst_ring[mac_id],
2761 				  RXDMA_MONITOR_DST, 0, mac_for_pdev,
2762 				  entries)) {
2763 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2764 				  FL(RNG_ERR "rxdma_mon_dst_ring"));
2765 			return QDF_STATUS_E_NOMEM;
2766 		}
2767 
2768 		entries = wlan_cfg_get_dma_mon_stat_ring_size(pdev_cfg_ctx);
2769 		if (dp_srng_setup(soc, &pdev->rxdma_mon_status_ring[mac_id],
2770 				  RXDMA_MONITOR_STATUS, 0, mac_for_pdev,
2771 				  entries)) {
2772 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2773 				  FL(RNG_ERR "rxdma_mon_status_ring"));
2774 			return QDF_STATUS_E_NOMEM;
2775 		}
2776 
2777 		entries = wlan_cfg_get_dma_mon_desc_ring_size(pdev_cfg_ctx);
2778 		if (dp_srng_setup(soc, &pdev->rxdma_mon_desc_ring[mac_id],
2779 				  RXDMA_MONITOR_DESC, 0, mac_for_pdev,
2780 				  entries)) {
2781 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2782 				  FL(RNG_ERR "rxdma_mon_desc_ring"));
2783 			return QDF_STATUS_E_NOMEM;
2784 		}
2785 	}
2786 	return QDF_STATUS_SUCCESS;
2787 }
2788 #else
2789 static QDF_STATUS dp_mon_rings_setup(struct dp_soc *soc, struct dp_pdev *pdev)
2790 {
2791 	return QDF_STATUS_SUCCESS;
2792 }
2793 #endif
2794 
2795 /*
2796 * dp_pdev_attach_wifi3() - attach txrx pdev
2797 * @txrx_soc: Datapath SOC handle
2798 * @ctrl_pdev: Opaque PDEV object
2799 * @htc_handle: HTC handle for host-target interface
2800 * @qdf_osdev: QDF OS device
2801 * @pdev_id: PDEV ID
2802 *
2803 * Return: DP PDEV handle on success, NULL on failure
2804 */
2805 static struct cdp_pdev *dp_pdev_attach_wifi3(struct cdp_soc_t *txrx_soc,
2806 	struct cdp_ctrl_objmgr_pdev *ctrl_pdev,
2807 	HTC_HANDLE htc_handle, qdf_device_t qdf_osdev, uint8_t pdev_id)
2808 {
2809 	int tx_ring_size;
2810 	int tx_comp_ring_size;
2811 	int reo_dst_ring_size;
2812 	int entries;
2813 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
2814 	int nss_cfg;
2815 
2816 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
2817 	struct dp_pdev *pdev = qdf_mem_malloc(sizeof(*pdev));
2818 
2819 	if (!pdev) {
2820 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2821 			FL("DP PDEV memory allocation failed"));
2822 		goto fail0;
2823 	}
2824 
2825 	soc_cfg_ctx = soc->wlan_cfg_ctx;
2826 	pdev->wlan_cfg_ctx = wlan_cfg_pdev_attach(soc->ctrl_psoc);
2827 
2828 	if (!pdev->wlan_cfg_ctx) {
2829 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2830 			FL("pdev cfg_attach failed"));
2831 
2832 		qdf_mem_free(pdev);
2833 		goto fail0;
2834 	}
2835 
2836 	/*
2837 	 * set nss pdev config based on soc config
2838 	 */
2839 	nss_cfg = wlan_cfg_get_dp_soc_nss_cfg(soc_cfg_ctx);
2840 	wlan_cfg_set_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx,
2841 			(nss_cfg & (1 << pdev_id)));
2842 
2843 	pdev->soc = soc;
2844 	pdev->ctrl_pdev = ctrl_pdev;
2845 	pdev->pdev_id = pdev_id;
2846 	soc->pdev_list[pdev_id] = pdev;
2847 	soc->pdev_count++;
2848 
2849 	TAILQ_INIT(&pdev->vdev_list);
2850 	qdf_spinlock_create(&pdev->vdev_list_lock);
2851 	pdev->vdev_count = 0;
2852 
2853 	qdf_spinlock_create(&pdev->tx_mutex);
2854 	qdf_spinlock_create(&pdev->neighbour_peer_mutex);
2855 	TAILQ_INIT(&pdev->neighbour_peers_list);
2856 	pdev->neighbour_peers_added = false;
2857 
2858 	if (dp_soc_cmn_setup(soc)) {
2859 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2860 			FL("dp_soc_cmn_setup failed"));
2861 		goto fail1;
2862 	}
2863 
2864 	/* Setup per PDEV TCL rings if configured */
2865 	if (wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
2866 		tx_ring_size =
2867 			wlan_cfg_tx_ring_size(soc_cfg_ctx);
2868 		tx_comp_ring_size =
2869 			wlan_cfg_tx_comp_ring_size(soc_cfg_ctx);
2870 
2871 		if (dp_srng_setup(soc, &soc->tcl_data_ring[pdev_id], TCL_DATA,
2872 			pdev_id, pdev_id, tx_ring_size)) {
2873 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2874 				FL("dp_srng_setup failed for tcl_data_ring"));
2875 			goto fail1;
2876 		}
2877 		if (dp_srng_setup(soc, &soc->tx_comp_ring[pdev_id],
2878 			WBM2SW_RELEASE, pdev_id, pdev_id, tx_comp_ring_size)) {
2879 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2880 				FL("dp_srng_setup failed for tx_comp_ring"));
2881 			goto fail1;
2882 		}
2883 		soc->num_tcl_data_rings++;
2884 	}
2885 
2886 	/* Tx specific init */
2887 	if (dp_tx_pdev_attach(pdev)) {
2888 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2889 			FL("dp_tx_pdev_attach failed"));
2890 		goto fail1;
2891 	}
2892 
2893 	reo_dst_ring_size = wlan_cfg_get_reo_dst_ring_size(soc->wlan_cfg_ctx);
2894 	/* Setup per PDEV REO rings if configured */
2895 	if (wlan_cfg_per_pdev_rx_ring(soc_cfg_ctx)) {
2896 		if (dp_srng_setup(soc, &soc->reo_dest_ring[pdev_id], REO_DST,
2897 			pdev_id, pdev_id, reo_dst_ring_size)) {
2898 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2899 				FL("dp_srng_setup failed for reo_dest_ring"));
2900 			goto fail1;
2901 		}
2902 		soc->num_reo_dest_rings++;
2903 
2904 	}
2905 	if (dp_srng_setup(soc, &pdev->rx_refill_buf_ring, RXDMA_BUF, 0, pdev_id,
2906 		wlan_cfg_get_dp_soc_rxdma_refill_ring_size(soc_cfg_ctx))) {
2907 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2908 			 FL("dp_srng_setup failed rx refill ring"));
2909 		goto fail1;
2910 	}
2911 
2912 	if (dp_rxdma_ring_setup(soc, pdev)) {
2913 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2914 			 FL("RXDMA ring config failed"));
2915 		goto fail1;
2916 	}
2917 
2918 	if (dp_mon_rings_setup(soc, pdev)) {
2919 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2920 			  FL("MONITOR rings setup failed"));
2921 		goto fail1;
2922 	}
2923 
2924 	entries = wlan_cfg_get_dp_soc_rxdma_err_dst_ring_size(soc_cfg_ctx);
2925 	if (wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx)) {
2926 		if (dp_srng_setup(soc, &pdev->rxdma_err_dst_ring[0], RXDMA_DST,
2927 				  0, pdev_id,
2928 				  entries)) {
2929 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2930 				  FL(RNG_ERR "rxdma_err_dst_ring"));
2931 			goto fail1;
2932 		}
2933 	}
2934 
2935 	if (dp_setup_ipa_rx_refill_buf_ring(soc, pdev))
2936 		goto fail1;
2937 
2938 	if (dp_ipa_ring_resource_setup(soc, pdev))
2939 		goto fail1;
2940 
2941 	if (dp_ipa_uc_attach(soc, pdev) != QDF_STATUS_SUCCESS) {
2942 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2943 			FL("dp_ipa_uc_attach failed"));
2944 		goto fail1;
2945 	}
2946 
2947 	/* Rx specific init */
2948 	if (dp_rx_pdev_attach(pdev)) {
2949 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2950 			FL("dp_rx_pdev_attach failed"));
2951 		goto fail1;
2952 	}
2953 	DP_STATS_INIT(pdev);
2954 
2955 	/* Monitor filter init */
2956 	pdev->mon_filter_mode = MON_FILTER_ALL;
2957 	pdev->fp_mgmt_filter = FILTER_MGMT_ALL;
2958 	pdev->fp_ctrl_filter = FILTER_CTRL_ALL;
2959 	pdev->fp_data_filter = FILTER_DATA_ALL;
2960 	pdev->mo_mgmt_filter = FILTER_MGMT_ALL;
2961 	pdev->mo_ctrl_filter = FILTER_CTRL_ALL;
2962 	pdev->mo_data_filter = FILTER_DATA_ALL;
2963 
2964 	dp_local_peer_id_pool_init(pdev);
2965 
2966 	dp_dscp_tid_map_setup(pdev);
2967 
2968 	/* Rx monitor mode specific init */
2969 	if (dp_rx_pdev_mon_attach(pdev)) {
2970 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
2971 				"dp_rx_pdev_mon_attach failed");
2972 		goto fail1;
2973 	}
2974 
2975 	if (dp_wdi_event_attach(pdev)) {
2976 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
2977 				"dp_wdi_event_attach failed");
2978 		goto fail1;
2979 	}
2980 
2981 	/* set the reo destination during initialization */
2982 	pdev->reo_dest = pdev->pdev_id + 1;
2983 
2984 	/*
2985 	 * initialize ppdu tlv list
2986 	 */
2987 	TAILQ_INIT(&pdev->ppdu_info_list);
2988 	pdev->tlv_count = 0;
2989 	pdev->list_depth = 0;
2990 
2991 	return (struct cdp_pdev *)pdev;
2992 
2993 fail1:
2994 	dp_pdev_detach_wifi3((struct cdp_pdev *)pdev, 0);
2995 
2996 fail0:
2997 	return NULL;
2998 }
2999 
3000 /*
3001 * dp_rxdma_ring_cleanup() - clean up the RX DMA rings
3002 * @soc: data path SoC handle
3003 * @pdev: Physical device handle
3004 *
3005 * Return: void
3006 */
3007 #ifdef QCA_HOST2FW_RXBUF_RING
3008 static void dp_rxdma_ring_cleanup(struct dp_soc *soc,
3009 	 struct dp_pdev *pdev)
3010 {
3011 	int max_mac_rings =
3012 		 wlan_cfg_get_num_mac_rings(pdev->wlan_cfg_ctx);
3013 	int i;
3014 
3015 	max_mac_rings = max_mac_rings < MAX_RX_MAC_RINGS ?
3016 				max_mac_rings : MAX_RX_MAC_RINGS;
3017 	for (i = 0; i < MAX_RX_MAC_RINGS; i++)
3018 		dp_srng_cleanup(soc, &pdev->rx_mac_buf_ring[i],
3019 			 RXDMA_BUF, 1);
3020 
3021 	qdf_timer_free(&soc->mon_reap_timer);
3022 }
3023 #else
3024 static void dp_rxdma_ring_cleanup(struct dp_soc *soc,
3025 	 struct dp_pdev *pdev)
3026 {
3027 }
3028 #endif
3029 
3030 /*
3031  * dp_neighbour_peers_detach() - Detach neighbour peers(nac clients)
3032  * @pdev: device object
3033  *
3034  * Return: void
3035  */
3036 static void dp_neighbour_peers_detach(struct dp_pdev *pdev)
3037 {
3038 	struct dp_neighbour_peer *peer = NULL;
3039 	struct dp_neighbour_peer *temp_peer = NULL;
3040 
3041 	TAILQ_FOREACH_SAFE(peer, &pdev->neighbour_peers_list,
3042 			neighbour_peer_list_elem, temp_peer) {
3043 		/* delete this peer from the list */
3044 		TAILQ_REMOVE(&pdev->neighbour_peers_list,
3045 				peer, neighbour_peer_list_elem);
3046 		qdf_mem_free(peer);
3047 	}
3048 
3049 	qdf_spinlock_destroy(&pdev->neighbour_peer_mutex);
3050 }
3051 
3052 /**
3053 * dp_htt_ppdu_stats_detach() - detach stats resources
3054 * @pdev: Datapath PDEV handle
3055 *
3056 * Return: void
3057 */
3058 static void dp_htt_ppdu_stats_detach(struct dp_pdev *pdev)
3059 {
3060 	struct ppdu_info *ppdu_info, *ppdu_info_next;
3061 
3062 	TAILQ_FOREACH_SAFE(ppdu_info, &pdev->ppdu_info_list,
3063 			ppdu_info_list_elem, ppdu_info_next) {
3064 		if (!ppdu_info)
3065 			break;
3066 		qdf_assert_always(ppdu_info->nbuf);
3067 		qdf_nbuf_free(ppdu_info->nbuf);
3068 		qdf_mem_free(ppdu_info);
3069 	}
3070 }
3071 
3072 #ifndef QCA_WIFI_QCA6390
3073 static
3074 void dp_mon_ring_deinit(struct dp_soc *soc, struct dp_pdev *pdev,
3075 			int mac_id)
3076 {
3077 		dp_srng_cleanup(soc, &pdev->rxdma_mon_buf_ring[mac_id],
3078 				RXDMA_MONITOR_BUF, 0);
3079 		dp_srng_cleanup(soc, &pdev->rxdma_mon_dst_ring[mac_id],
3080 				RXDMA_MONITOR_DST, 0);
3081 
3082 		dp_srng_cleanup(soc, &pdev->rxdma_mon_status_ring[mac_id],
3083 				RXDMA_MONITOR_STATUS, 0);
3084 
3085 		dp_srng_cleanup(soc, &pdev->rxdma_mon_desc_ring[mac_id],
3086 				RXDMA_MONITOR_DESC, 0);
3087 		dp_srng_cleanup(soc, &pdev->rxdma_err_dst_ring[mac_id],
3088 				RXDMA_DST, 0);
3089 }
3090 #else
3091 static void dp_mon_ring_deinit(struct dp_soc *soc, struct dp_pdev *pdev,
3092 			       int mac_id)
3093 {
3094 }
3095 #endif
3096 
3097 /*
3098 * dp_pdev_detach_wifi3() - detach txrx pdev
3099 * @txrx_pdev: Datapath PDEV handle
3100 * @force: Force detach
3101 *
3102 */
3103 static void dp_pdev_detach_wifi3(struct cdp_pdev *txrx_pdev, int force)
3104 {
3105 	struct dp_pdev *pdev = (struct dp_pdev *)txrx_pdev;
3106 	struct dp_soc *soc = pdev->soc;
3107 	qdf_nbuf_t curr_nbuf, next_nbuf;
3108 	int mac_id;
3109 
3110 	dp_wdi_event_detach(pdev);
3111 
3112 	dp_tx_pdev_detach(pdev);
3113 
3114 	if (wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
3115 		dp_srng_cleanup(soc, &soc->tcl_data_ring[pdev->pdev_id],
3116 			TCL_DATA, pdev->pdev_id);
3117 		dp_srng_cleanup(soc, &soc->tx_comp_ring[pdev->pdev_id],
3118 			WBM2SW_RELEASE, pdev->pdev_id);
3119 	}
3120 
3121 	dp_pktlogmod_exit(pdev);
3122 
3123 	dp_rx_pdev_detach(pdev);
3124 	dp_rx_pdev_mon_detach(pdev);
3125 	dp_neighbour_peers_detach(pdev);
3126 	qdf_spinlock_destroy(&pdev->tx_mutex);
3127 	qdf_spinlock_destroy(&pdev->vdev_list_lock);
3128 
3129 	dp_ipa_uc_detach(soc, pdev);
3130 
3131 	dp_cleanup_ipa_rx_refill_buf_ring(soc, pdev);
3132 
3133 	/* Cleanup per PDEV REO rings if configured */
3134 	if (wlan_cfg_per_pdev_rx_ring(soc->wlan_cfg_ctx)) {
3135 		dp_srng_cleanup(soc, &soc->reo_dest_ring[pdev->pdev_id],
3136 			REO_DST, pdev->pdev_id);
3137 	}
3138 
3139 	dp_srng_cleanup(soc, &pdev->rx_refill_buf_ring, RXDMA_BUF, 0);
3140 
3141 	dp_rxdma_ring_cleanup(soc, pdev);
3142 
3143 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
3144 		dp_mon_ring_deinit(soc, pdev, mac_id);
3145 		dp_srng_cleanup(soc, &pdev->rxdma_err_dst_ring[mac_id],
3146 			RXDMA_DST, 0);
3147 	}
3148 
3149 	curr_nbuf = pdev->invalid_peer_head_msdu;
3150 	while (curr_nbuf) {
3151 		next_nbuf = qdf_nbuf_next(curr_nbuf);
3152 		qdf_nbuf_free(curr_nbuf);
3153 		curr_nbuf = next_nbuf;
3154 	}
3155 
3156 	dp_htt_ppdu_stats_detach(pdev);
3157 
3158 	soc->pdev_list[pdev->pdev_id] = NULL;
3159 	soc->pdev_count--;
3160 	wlan_cfg_pdev_detach(pdev->wlan_cfg_ctx);
3161 	qdf_mem_free(pdev->dp_txrx_handle);
3162 	qdf_mem_free(pdev);
3163 }
3164 
3165 /*
3166  * dp_reo_desc_freelist_destroy() - Flush REO descriptors from deferred freelist
3167  * @soc: DP SOC handle
3168  */
3169 static inline void dp_reo_desc_freelist_destroy(struct dp_soc *soc)
3170 {
3171 	struct reo_desc_list_node *desc;
3172 	struct dp_rx_tid *rx_tid;
3173 
3174 	qdf_spin_lock_bh(&soc->reo_desc_freelist_lock);
3175 	while (qdf_list_remove_front(&soc->reo_desc_freelist,
3176 		(qdf_list_node_t **)&desc) == QDF_STATUS_SUCCESS) {
3177 		rx_tid = &desc->rx_tid;
3178 		qdf_mem_unmap_nbytes_single(soc->osdev,
3179 			rx_tid->hw_qdesc_paddr,
3180 			QDF_DMA_BIDIRECTIONAL,
3181 			rx_tid->hw_qdesc_alloc_size);
3182 		qdf_mem_free(rx_tid->hw_qdesc_vaddr_unaligned);
3183 		qdf_mem_free(desc);
3184 	}
3185 	qdf_spin_unlock_bh(&soc->reo_desc_freelist_lock);
3186 	qdf_list_destroy(&soc->reo_desc_freelist);
3187 	qdf_spinlock_destroy(&soc->reo_desc_freelist_lock);
3188 }
3189 
3190 /*
3191  * dp_soc_detach_wifi3() - Detach txrx SOC
3192  * @txrx_soc: DP SOC handle, struct cdp_soc_t is first element of struct dp_soc.
3193  */
3194 static void dp_soc_detach_wifi3(void *txrx_soc)
3195 {
3196 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
3197 	int i;
3198 
3199 	qdf_atomic_set(&soc->cmn_init_done, 0);
3200 
3201 	qdf_flush_work(&soc->htt_stats.work);
3202 	qdf_disable_work(&soc->htt_stats.work);
3203 
3204 	/* Free pending htt stats messages */
3205 	qdf_nbuf_queue_free(&soc->htt_stats.msg);
3206 
3207 	dp_free_inact_timer(soc);
3208 
3209 	for (i = 0; i < MAX_PDEV_CNT; i++) {
3210 		if (soc->pdev_list[i])
3211 			dp_pdev_detach_wifi3(
3212 				(struct cdp_pdev *)soc->pdev_list[i], 1);
3213 	}
3214 
3215 	dp_peer_find_detach(soc);
3216 
3217 	/* TBD: Call Tx and Rx cleanup functions to free buffers and
3218 	 * SW descriptors
3219 	 */
3220 
3221 	/* Free the ring memories */
3222 	/* Common rings */
3223 	dp_srng_cleanup(soc, &soc->wbm_desc_rel_ring, SW2WBM_RELEASE, 0);
3224 
3225 	dp_tx_soc_detach(soc);
3226 	/* Tx data rings */
3227 	if (!wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
3228 		for (i = 0; i < soc->num_tcl_data_rings; i++) {
3229 			dp_srng_cleanup(soc, &soc->tcl_data_ring[i],
3230 				TCL_DATA, i);
3231 			dp_srng_cleanup(soc, &soc->tx_comp_ring[i],
3232 				WBM2SW_RELEASE, i);
3233 		}
3234 	}
3235 
3236 	/* TCL command and status rings */
3237 	dp_srng_cleanup(soc, &soc->tcl_cmd_ring, TCL_CMD, 0);
3238 	dp_srng_cleanup(soc, &soc->tcl_status_ring, TCL_STATUS, 0);
3239 
3240 	/* Rx data rings */
3241 	if (!wlan_cfg_per_pdev_rx_ring(soc->wlan_cfg_ctx)) {
3242 		soc->num_reo_dest_rings =
3243 			wlan_cfg_num_reo_dest_rings(soc->wlan_cfg_ctx);
3244 		for (i = 0; i < soc->num_reo_dest_rings; i++) {
3245 			/* TODO: Get number of rings and ring sizes
3246 			 * from wlan_cfg
3247 			 */
3248 			dp_srng_cleanup(soc, &soc->reo_dest_ring[i],
3249 				REO_DST, i);
3250 		}
3251 	}
3252 	/* REO reinjection ring */
3253 	dp_srng_cleanup(soc, &soc->reo_reinject_ring, REO_REINJECT, 0);
3254 
3255 	/* Rx release ring */
3256 	dp_srng_cleanup(soc, &soc->rx_rel_ring, WBM2SW_RELEASE, 0);
3257 
3258 	/* Rx exception ring */
3259 	/* TODO: Better to store ring_type and ring_num in
3260 	 * dp_srng during setup
3261 	 */
3262 	dp_srng_cleanup(soc, &soc->reo_exception_ring, REO_EXCEPTION, 0);
3263 
3264 	/* REO command and status rings */
3265 	dp_srng_cleanup(soc, &soc->reo_cmd_ring, REO_CMD, 0);
3266 	dp_srng_cleanup(soc, &soc->reo_status_ring, REO_STATUS, 0);
3267 	dp_hw_link_desc_pool_cleanup(soc);
3268 
3269 	qdf_spinlock_destroy(&soc->peer_ref_mutex);
3270 	qdf_spinlock_destroy(&soc->htt_stats.lock);
3271 
3272 	htt_soc_detach(soc->htt_handle);
3273 
3274 	qdf_spinlock_destroy(&soc->rx.defrag.defrag_lock);
3275 
3276 	dp_reo_cmdlist_destroy(soc);
3277 	qdf_spinlock_destroy(&soc->rx.reo_cmd_lock);
3278 	dp_reo_desc_freelist_destroy(soc);
3279 
3280 	wlan_cfg_soc_detach(soc->wlan_cfg_ctx);
3281 
3282 	dp_soc_wds_detach(soc);
3283 	qdf_spinlock_destroy(&soc->ast_lock);
3284 
3285 	qdf_mem_free(soc);
3286 }
3287 
3288 #ifndef QCA_WIFI_QCA6390
3289 static void dp_mon_htt_srng_setup(struct dp_soc *soc,
3290 				  struct dp_pdev *pdev,
3291 				  int mac_id,
3292 				  int mac_for_pdev)
3293 {
3294 	htt_srng_setup(soc->htt_handle, mac_for_pdev,
3295 		       pdev->rxdma_mon_buf_ring[mac_id].hal_srng,
3296 		       RXDMA_MONITOR_BUF);
3297 
3298 	htt_srng_setup(soc->htt_handle, mac_for_pdev,
3299 		       pdev->rxdma_mon_dst_ring[mac_id].hal_srng,
3300 		       RXDMA_MONITOR_DST);
3301 
3302 	htt_srng_setup(soc->htt_handle, mac_for_pdev,
3303 		       pdev->rxdma_mon_status_ring[mac_id].hal_srng,
3304 		       RXDMA_MONITOR_STATUS);
3305 
3306 	htt_srng_setup(soc->htt_handle, mac_for_pdev,
3307 		       pdev->rxdma_mon_desc_ring[mac_id].hal_srng,
3308 		       RXDMA_MONITOR_DESC);
3309 }
3310 #else
3311 static void dp_mon_htt_srng_setup(struct dp_soc *soc,
3312 				  struct dp_pdev *pdev,
3313 				  int mac_id,
3314 				  int mac_for_pdev)
3315 {
3316 }
3317 #endif
3318 /*
3319  * dp_rxdma_ring_config() - configure the RX DMA rings
3320  *
3321  * This function is used to configure the MAC rings.
3322  * On MCL host provides buffers in Host2FW ring
3323  * On MCL, the host provides buffers in the Host2FW ring;
3324  * FW refills (copies) the buffers to the ring and updates
3325  * ring_idx in the register
3326  * @soc: data path SoC handle
3327  *
3328  * Return: void
3329  */
3330 #ifdef QCA_HOST2FW_RXBUF_RING
3331 static void dp_rxdma_ring_config(struct dp_soc *soc)
3332 {
3333 	int i;
3334 
3335 	for (i = 0; i < MAX_PDEV_CNT; i++) {
3336 		struct dp_pdev *pdev = soc->pdev_list[i];
3337 
3338 		if (pdev) {
3339 			int mac_id;
3340 			bool dbs_enable = 0;
3341 			int max_mac_rings =
3342 				 wlan_cfg_get_num_mac_rings
3343 				(pdev->wlan_cfg_ctx);
3344 
3345 			htt_srng_setup(soc->htt_handle, 0,
3346 				 pdev->rx_refill_buf_ring.hal_srng,
3347 				 RXDMA_BUF);
3348 
3349 			if (pdev->rx_refill_buf_ring2.hal_srng)
3350 				htt_srng_setup(soc->htt_handle, 0,
3351 					pdev->rx_refill_buf_ring2.hal_srng,
3352 					RXDMA_BUF);
3353 
3354 			if (soc->cdp_soc.ol_ops->
3355 				is_hw_dbs_2x2_capable) {
3356 				dbs_enable = soc->cdp_soc.ol_ops->
3357 					is_hw_dbs_2x2_capable(soc->ctrl_psoc);
3358 			}
3359 
3360 			if (dbs_enable) {
3361 				QDF_TRACE(QDF_MODULE_ID_TXRX,
3362 				QDF_TRACE_LEVEL_ERROR,
3363 				FL("DBS enabled max_mac_rings %d"),
3364 					 max_mac_rings);
3365 			} else {
3366 				max_mac_rings = 1;
3367 				QDF_TRACE(QDF_MODULE_ID_TXRX,
3368 					 QDF_TRACE_LEVEL_ERROR,
3369 					 FL("DBS disabled, max_mac_rings %d"),
3370 					 max_mac_rings);
3371 			}
3372 
3373 			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
3374 					 FL("pdev_id %d max_mac_rings %d"),
3375 					 pdev->pdev_id, max_mac_rings);
3376 
3377 			for (mac_id = 0; mac_id < max_mac_rings; mac_id++) {
3378 				int mac_for_pdev = dp_get_mac_id_for_pdev(
3379 							mac_id, pdev->pdev_id);
3380 
3381 				QDF_TRACE(QDF_MODULE_ID_TXRX,
3382 					 QDF_TRACE_LEVEL_ERROR,
3383 					 FL("mac_id %d"), mac_for_pdev);
3384 				htt_srng_setup(soc->htt_handle, mac_for_pdev,
3385 					 pdev->rx_mac_buf_ring[mac_id]
3386 						.hal_srng,
3387 					 RXDMA_BUF);
3388 				htt_srng_setup(soc->htt_handle, mac_for_pdev,
3389 					pdev->rxdma_err_dst_ring[mac_id]
3390 						.hal_srng,
3391 					RXDMA_DST);
3392 
3393 				/* Configure monitor mode rings */
3394 				dp_mon_htt_srng_setup(soc, pdev, mac_id,
3395 						      mac_for_pdev);
3396 
3397 			}
3398 		}
3399 	}
3400 
3401 	/*
3402 	 * Timer to reap rxdma status rings.
3403 	 * Needed until we enable ppdu end interrupts
3404 	 */
3405 	qdf_timer_init(soc->osdev, &soc->mon_reap_timer,
3406 			dp_service_mon_rings, (void *)soc,
3407 			QDF_TIMER_TYPE_WAKE_APPS);
3408 	soc->reap_timer_init = 1;
3409 }
3410 #else
3411 /* This is only for WIN */
3412 static void dp_rxdma_ring_config(struct dp_soc *soc)
3413 {
3414 	int i;
3415 	int mac_id;
3416 
3417 	for (i = 0; i < MAX_PDEV_CNT; i++) {
3418 		struct dp_pdev *pdev = soc->pdev_list[i];
3419 
3420 		if (pdev == NULL)
3421 			continue;
3422 
3423 		for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
3424 			int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, i);
3425 
3426 			htt_srng_setup(soc->htt_handle, mac_for_pdev,
3427 				pdev->rx_refill_buf_ring.hal_srng, RXDMA_BUF);
3428 
3429 			htt_srng_setup(soc->htt_handle, mac_for_pdev,
3430 				pdev->rxdma_mon_buf_ring[mac_id].hal_srng,
3431 				RXDMA_MONITOR_BUF);
3432 			htt_srng_setup(soc->htt_handle, mac_for_pdev,
3433 				pdev->rxdma_mon_dst_ring[mac_id].hal_srng,
3434 				RXDMA_MONITOR_DST);
3435 			htt_srng_setup(soc->htt_handle, mac_for_pdev,
3436 				pdev->rxdma_mon_status_ring[mac_id].hal_srng,
3437 				RXDMA_MONITOR_STATUS);
3438 			htt_srng_setup(soc->htt_handle, mac_for_pdev,
3439 				pdev->rxdma_mon_desc_ring[mac_id].hal_srng,
3440 				RXDMA_MONITOR_DESC);
3441 			htt_srng_setup(soc->htt_handle, mac_for_pdev,
3442 				pdev->rxdma_err_dst_ring[mac_id].hal_srng,
3443 				RXDMA_DST);
3444 		}
3445 	}
3446 }
3447 #endif
3448 
3449 /*
3450  * dp_soc_attach_target_wifi3() - SOC initialization in the target
3451  * @txrx_soc: Datapath SOC handle
3452  */
3453 static int dp_soc_attach_target_wifi3(struct cdp_soc_t *cdp_soc)
3454 {
3455 	struct dp_soc *soc = (struct dp_soc *)cdp_soc;
3456 
3457 	htt_soc_attach_target(soc->htt_handle);
3458 
3459 	dp_rxdma_ring_config(soc);
3460 
3461 	DP_STATS_INIT(soc);
3462 
3463 	/* initialize work queue for stats processing */
3464 	qdf_create_work(0, &soc->htt_stats.work, htt_t2h_stats_handler, soc);
3465 
3466 	return 0;
3467 }
3468 
3469 /*
3470  * dp_soc_get_nss_cfg_wifi3() - SOC get nss config
3471  * @txrx_soc: Datapath SOC handle
3472  */
3473 static int dp_soc_get_nss_cfg_wifi3(struct cdp_soc_t *cdp_soc)
3474 {
3475 	struct dp_soc *dsoc = (struct dp_soc *)cdp_soc;
3476 	return wlan_cfg_get_dp_soc_nss_cfg(dsoc->wlan_cfg_ctx);
3477 }
3478 /*
3479  * dp_soc_set_nss_cfg_wifi3() - SOC set nss config
3480  * @txrx_soc: Datapath SOC handle
3481  * @nss_cfg: nss config
3482  */
3483 static void dp_soc_set_nss_cfg_wifi3(struct cdp_soc_t *cdp_soc, int config)
3484 {
3485 	struct dp_soc *dsoc = (struct dp_soc *)cdp_soc;
3486 	struct wlan_cfg_dp_soc_ctxt *wlan_cfg_ctx = dsoc->wlan_cfg_ctx;
3487 
3488 	wlan_cfg_set_dp_soc_nss_cfg(wlan_cfg_ctx, config);
3489 
3490 	/*
3491 	 * TODO: masked out based on the per offloaded radio
3492 	 */
3493 	if (config == dp_nss_cfg_dbdc) {
3494 		wlan_cfg_set_num_tx_desc_pool(wlan_cfg_ctx, 0);
3495 		wlan_cfg_set_num_tx_ext_desc_pool(wlan_cfg_ctx, 0);
3496 		wlan_cfg_set_num_tx_desc(wlan_cfg_ctx, 0);
3497 		wlan_cfg_set_num_tx_ext_desc(wlan_cfg_ctx, 0);
3498 	}
3499 
3500 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
3501 		  FL("nss-wifi<0> nss config is enabled"));
3502 }
3503 /*
3504 * dp_vdev_attach_wifi3() - attach txrx vdev
3505 * @txrx_pdev: Datapath PDEV handle
3506 * @vdev_mac_addr: MAC address of the virtual interface
3507 * @vdev_id: VDEV Id
3508 * @wlan_op_mode: VDEV operating mode
3509 *
3510 * Return: DP VDEV handle on success, NULL on failure
3511 */
3512 static struct cdp_vdev *dp_vdev_attach_wifi3(struct cdp_pdev *txrx_pdev,
3513 	uint8_t *vdev_mac_addr, uint8_t vdev_id, enum wlan_op_mode op_mode)
3514 {
3515 	struct dp_pdev *pdev = (struct dp_pdev *)txrx_pdev;
3516 	struct dp_soc *soc = pdev->soc;
3517 	struct dp_vdev *vdev = qdf_mem_malloc(sizeof(*vdev));
3518 
3519 	if (!vdev) {
3520 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3521 			FL("DP VDEV memory allocation failed"));
3522 		goto fail0;
3523 	}
3524 
3525 	vdev->pdev = pdev;
3526 	vdev->vdev_id = vdev_id;
3527 	vdev->opmode = op_mode;
3528 	vdev->osdev = soc->osdev;
3529 
3530 	vdev->osif_rx = NULL;
3531 	vdev->osif_rsim_rx_decap = NULL;
3532 	vdev->osif_get_key = NULL;
3533 	vdev->osif_rx_mon = NULL;
3534 	vdev->osif_tx_free_ext = NULL;
3535 	vdev->osif_vdev = NULL;
3536 
3537 	vdev->delete.pending = 0;
3538 	vdev->safemode = 0;
3539 	vdev->drop_unenc = 1;
3540 	vdev->sec_type = cdp_sec_type_none;
3541 #ifdef notyet
3542 	vdev->filters_num = 0;
3543 #endif
3544 
3545 	qdf_mem_copy(
3546 		&vdev->mac_addr.raw[0], vdev_mac_addr, OL_TXRX_MAC_ADDR_LEN);
3547 
3548 	vdev->tx_encap_type = wlan_cfg_pkt_type(soc->wlan_cfg_ctx);
3549 	vdev->rx_decap_type = wlan_cfg_pkt_type(soc->wlan_cfg_ctx);
3550 	vdev->dscp_tid_map_id = 0;
3551 	vdev->mcast_enhancement_en = 0;
3552 
3553 	/* TODO: Initialize default HTT meta data that will be used in
3554 	 * TCL descriptors for packets transmitted from this VDEV
3555 	 */
3556 
3557 	TAILQ_INIT(&vdev->peer_list);
3558 
3559 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
3560 	/* add this vdev into the pdev's list */
3561 	TAILQ_INSERT_TAIL(&pdev->vdev_list, vdev, vdev_list_elem);
3562 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
3563 	pdev->vdev_count++;
3564 
3565 	dp_tx_vdev_attach(vdev);
3566 
3567 
3568 	if ((soc->intr_mode == DP_INTR_POLL) &&
3569 			wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx) != 0) {
3570 		if (pdev->vdev_count == 1)
3571 			qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
3572 	}
3573 
3574 	dp_lro_hash_setup(soc);
3575 
3576 	/* LRO */
3577 	if (wlan_cfg_is_lro_enabled(soc->wlan_cfg_ctx) &&
3578 		wlan_op_mode_sta == vdev->opmode)
3579 		vdev->lro_enable = true;
3580 
3581 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
3582 		 "LRO: vdev_id %d lro_enable %d", vdev_id, vdev->lro_enable);
3583 
3584 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3585 		"Created vdev %pK (%pM)", vdev, vdev->mac_addr.raw);
3586 	DP_STATS_INIT(vdev);
3587 
3588 	if (wlan_op_mode_sta == vdev->opmode)
3589 		dp_peer_create_wifi3((struct cdp_vdev *)vdev,
3590 							vdev->mac_addr.raw,
3591 							NULL);
3592 
3593 	return (struct cdp_vdev *)vdev;
3594 
3595 fail0:
3596 	return NULL;
3597 }
3598 
3599 /**
3600  * dp_vdev_register_wifi3() - Register VDEV operations from osif layer
3601  * @vdev: Datapath VDEV handle
3602  * @osif_vdev: OSIF vdev handle
3603  * @ctrl_vdev: UMAC vdev handle
3604  * @txrx_ops: Tx and Rx operations
3605  *
3606  * Return: void
3607  */
3608 static void dp_vdev_register_wifi3(struct cdp_vdev *vdev_handle,
3609 	void *osif_vdev, struct cdp_ctrl_objmgr_vdev *ctrl_vdev,
3610 	struct ol_txrx_ops *txrx_ops)
3611 {
3612 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
3613 	vdev->osif_vdev = osif_vdev;
3614 	vdev->ctrl_vdev = ctrl_vdev;
3615 	vdev->osif_rx = txrx_ops->rx.rx;
3616 	vdev->osif_rsim_rx_decap = txrx_ops->rx.rsim_rx_decap;
3617 	vdev->osif_get_key = txrx_ops->get_key;
3618 	vdev->osif_rx_mon = txrx_ops->rx.mon;
3619 	vdev->osif_tx_free_ext = txrx_ops->tx.tx_free_ext;
3620 #ifdef notyet
3621 #if ATH_SUPPORT_WAPI
3622 	vdev->osif_check_wai = txrx_ops->rx.wai_check;
3623 #endif
3624 #endif
3625 #ifdef UMAC_SUPPORT_PROXY_ARP
3626 	vdev->osif_proxy_arp = txrx_ops->proxy_arp;
3627 #endif
3628 	vdev->me_convert = txrx_ops->me_convert;
3629 
3630 	/* TODO: Enable the following once Tx code is integrated */
3631 	if (vdev->mesh_vdev)
3632 		txrx_ops->tx.tx = dp_tx_send_mesh;
3633 	else
3634 		txrx_ops->tx.tx = dp_tx_send;
3635 
3636 	txrx_ops->tx.tx_exception = dp_tx_send_exception;
3637 
3638 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW,
3639 		"DP Vdev Register success");
3640 }
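
/*
 * Illustrative attach/register pairing as seen from an OS shim (a sketch,
 * not driver code; a real caller goes through the cdp ops table rather
 * than these static symbols, and osif_dev, ctrl_vdev, vdev_mac and
 * osif_rx_deliver are hypothetical placeholders):
 *
 *	struct ol_txrx_ops ops = { 0 };
 *	struct cdp_vdev *vdev;
 *
 *	vdev = dp_vdev_attach_wifi3(txrx_pdev, vdev_mac, vdev_id,
 *				    wlan_op_mode_sta);
 *	if (!vdev)
 *		return QDF_STATUS_E_FAILURE;
 *
 *	ops.rx.rx = osif_rx_deliver;
 *	dp_vdev_register_wifi3(vdev, osif_dev, ctrl_vdev, &ops);
 *	// on return ops.tx.tx points at dp_tx_send (dp_tx_send_mesh for
 *	// mesh vdevs) and ops.tx.tx_exception at dp_tx_send_exception
 */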
3641 
3642 /**
3643  * dp_vdev_flush_peers() - Forcibly flush peers of vdev
3644  * @vdev: Datapath VDEV handle
3645  *
3646  * Return: void
3647  */
3648 static void dp_vdev_flush_peers(struct dp_vdev *vdev)
3649 {
3650 	struct dp_pdev *pdev = vdev->pdev;
3651 	struct dp_soc *soc = pdev->soc;
3652 	struct dp_peer *peer;
3653 	uint16_t *peer_ids;
3654 	uint8_t i = 0, j = 0;
3655 
3656 	peer_ids = qdf_mem_malloc(soc->max_peers * sizeof(peer_ids[0]));
3657 	if (!peer_ids) {
3658 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3659 			"DP alloc failure - unable to flush peers");
3660 		return;
3661 	}
3662 
3663 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
3664 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
3665 		for (i = 0; i < MAX_NUM_PEER_ID_PER_PEER; i++)
3666 			if (peer->peer_ids[i] != HTT_INVALID_PEER)
3667 				if (j < soc->max_peers)
3668 					peer_ids[j++] = peer->peer_ids[i];
3669 	}
3670 	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
3671 
3672 	for (i = 0; i < j ; i++)
3673 		dp_rx_peer_unmap_handler(soc, peer_ids[i]);
3674 
3675 	qdf_mem_free(peer_ids);
3676 
3677 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
3678 		FL("Flushed peers for vdev object %pK "), vdev);
3679 }
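
/*
 * The flush above snapshots peer IDs into a scratch array while holding
 * peer_ref_mutex and only invokes the unmap handler after dropping the
 * lock, since the unmap path can end up in dp_peer_unref_delete(), which
 * takes peer_ref_mutex again.  Generic shape of the pattern (a sketch
 * with simplified names; list_lock, node and handle_id() are hypothetical):
 *
 *	uint16_t *ids = qdf_mem_malloc(max * sizeof(*ids));
 *	int i, n = 0;
 *
 *	qdf_spin_lock_bh(&list_lock);
 *	TAILQ_FOREACH(node, &list, elem)
 *		if (n < max)
 *			ids[n++] = node->id;
 *	qdf_spin_unlock_bh(&list_lock);
 *
 *	for (i = 0; i < n; i++)
 *		handle_id(ids[i]);	// no lock held here
 *	qdf_mem_free(ids);
 */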
3680 
3681 /*
3682  * dp_vdev_detach_wifi3() - Detach txrx vdev
3683  * @vdev_handle:	Datapath VDEV handle
3684  * @callback:		Callback OL_IF on completion of detach
3685  * @cb_context:	Callback context
3686  *
3687  */
3688 static void dp_vdev_detach_wifi3(struct cdp_vdev *vdev_handle,
3689 	ol_txrx_vdev_delete_cb callback, void *cb_context)
3690 {
3691 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
3692 	struct dp_pdev *pdev = vdev->pdev;
3693 	struct dp_soc *soc = pdev->soc;
3694 	struct dp_neighbour_peer *peer = NULL;
3695 
3696 	/* preconditions */
3697 	qdf_assert(vdev);
3698 
3699 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
3700 	/* remove the vdev from its parent pdev's list */
3701 	TAILQ_REMOVE(&pdev->vdev_list, vdev, vdev_list_elem);
3702 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
3703 
3704 	if (wlan_op_mode_sta == vdev->opmode)
3705 		dp_peer_delete_wifi3(vdev->vap_bss_peer, 0);
3706 
3707 	/*
3708 	 * If Target is hung, flush all peers before detaching vdev
3709 	 * this will free all references held due to missing
3710 	 * unmap commands from Target
3711 	 */
3712 	if (hif_get_target_status(soc->hif_handle) == TARGET_STATUS_RESET)
3713 		dp_vdev_flush_peers(vdev);
3714 
3715 	/*
3716 	 * Use peer_ref_mutex while accessing peer_list, in case
3717 	 * a peer is in the process of being removed from the list.
3718 	 */
3719 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
3720 	/* check that the vdev has no peers allocated */
3721 	if (!TAILQ_EMPTY(&vdev->peer_list)) {
3722 		/* debug print - will be removed later */
3723 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_WARN,
3724 			FL("not deleting vdev object %pK (%pM) "
3725 			"until deletion finishes for all its peers"),
3726 			vdev, vdev->mac_addr.raw);
3727 		/* indicate that the vdev needs to be deleted */
3728 		vdev->delete.pending = 1;
3729 		vdev->delete.callback = callback;
3730 		vdev->delete.context = cb_context;
3731 		qdf_spin_unlock_bh(&soc->peer_ref_mutex);
3732 		return;
3733 	}
3734 	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
3735 
3736 	qdf_spin_lock_bh(&pdev->neighbour_peer_mutex);
3737 	TAILQ_FOREACH(peer, &pdev->neighbour_peers_list,
3738 		      neighbour_peer_list_elem) {
3739 		QDF_ASSERT(peer->vdev != vdev);
3740 	}
3741 	qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex);
3742 
3743 	dp_tx_vdev_detach(vdev);
3744 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
3745 		FL("deleting vdev object %pK (%pM)"), vdev, vdev->mac_addr.raw);
3746 
3747 	qdf_mem_free(vdev);
3748 
3749 	if (callback)
3750 		callback(cb_context);
3751 }
3752 
3753 /*
3754  * dp_peer_delete_ast_entries(): Delete all AST entries for a peer
3755  * @soc - datapath soc handle
3756  * @peer - datapath peer handle
3757  *
3758  * Delete the AST entries belonging to a peer
3759  */
3760 #ifdef FEATURE_AST
3761 static inline void dp_peer_delete_ast_entries(struct dp_soc *soc,
3762 					      struct dp_peer *peer)
3763 {
3764 	struct dp_ast_entry *ast_entry, *temp_ast_entry;
3765 
3766 	qdf_spin_lock_bh(&soc->ast_lock);
3767 	DP_PEER_ITERATE_ASE_LIST(peer, ast_entry, temp_ast_entry)
3768 		dp_peer_del_ast(soc, ast_entry);
3769 
3770 	peer->self_ast_entry = NULL;
3771 	TAILQ_INIT(&peer->ast_entry_list);
3772 	qdf_spin_unlock_bh(&soc->ast_lock);
3773 }
3774 #else
3775 static inline void dp_peer_delete_ast_entries(struct dp_soc *soc,
3776 					      struct dp_peer *peer)
3777 {
3778 }
3779 #endif
3780 
3781 #if ATH_SUPPORT_WRAP
3782 static inline struct dp_peer *dp_peer_can_reuse(struct dp_vdev *vdev,
3783 						uint8_t *peer_mac_addr)
3784 {
3785 	struct dp_peer *peer;
3786 
3787 	peer = dp_peer_find_hash_find(vdev->pdev->soc, peer_mac_addr,
3788 				      0, vdev->vdev_id);
3789 	if (!peer)
3790 		return NULL;
3791 
3792 	if (peer->bss_peer)
3793 		return peer;
3794 
3795 	qdf_atomic_dec(&peer->ref_cnt);
3796 	return NULL;
3797 }
3798 #else
3799 static inline struct dp_peer *dp_peer_can_reuse(struct dp_vdev *vdev,
3800 						uint8_t *peer_mac_addr)
3801 {
3802 	struct dp_peer *peer;
3803 
3804 	peer = dp_peer_find_hash_find(vdev->pdev->soc, peer_mac_addr,
3805 				      0, vdev->vdev_id);
3806 	if (!peer)
3807 		return NULL;
3808 
3809 	if (peer->bss_peer && (peer->vdev->vdev_id == vdev->vdev_id))
3810 		return peer;
3811 
3812 	qdf_atomic_dec(&peer->ref_cnt);
3813 	return NULL;
3814 }
3815 #endif
3816 
3817 /*
3818  * dp_peer_create_wifi3() - attach txrx peer
3819  * @txrx_vdev: Datapath VDEV handle
3820  * @peer_mac_addr: Peer MAC address
3821  *
3822  * Return: DP peeer handle on success, NULL on failure
3823  */
3824 static void *dp_peer_create_wifi3(struct cdp_vdev *vdev_handle,
3825 		uint8_t *peer_mac_addr, struct cdp_ctrl_objmgr_peer *ctrl_peer)
3826 {
3827 	struct dp_peer *peer;
3828 	int i;
3829 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
3830 	struct dp_pdev *pdev;
3831 	struct dp_soc *soc;
3832 	struct dp_ast_entry *ast_entry;
3833 	enum cdp_txrx_ast_entry_type ast_type = CDP_TXRX_AST_TYPE_STATIC;
3834 
3835 	/* preconditions */
3836 	qdf_assert(vdev);
3837 	qdf_assert(peer_mac_addr);
3838 
3839 	pdev = vdev->pdev;
3840 	soc = pdev->soc;
3841 
3842 	/*
3843 	 * If a peer entry with given MAC address already exists,
3844 	 * reuse the peer and reset the state of peer.
3845 	 */
3846 	peer = dp_peer_can_reuse(vdev, peer_mac_addr);
3847 
3848 	if (peer) {
3849 		peer->delete_in_progress = false;
3850 
3851 		dp_peer_delete_ast_entries(soc, peer);
3852 
3853 		if ((vdev->opmode == wlan_op_mode_sta) &&
3854 		    !qdf_mem_cmp(peer_mac_addr, &vdev->mac_addr.raw[0],
3855 		     DP_MAC_ADDR_LEN)) {
3856 			ast_type = CDP_TXRX_AST_TYPE_SELF;
3857 		}
3858 
3859 		dp_peer_add_ast(soc, peer, peer_mac_addr, ast_type, 0);
3860 
3861 		/*
3862 		* Control path maintains a node count which is incremented
3863 		* for every new peer create command. Since a new peer is not
3864 		* being created here and the earlier reference is reused, a
3865 		* peer_unref_delete event is sent to the control path to
3866 		* bring that count back in line.
3867 		*/
3868 		if (soc->cdp_soc.ol_ops->peer_unref_delete) {
3869 			soc->cdp_soc.ol_ops->peer_unref_delete(pdev->ctrl_pdev,
3870 				vdev->vdev_id, peer->mac_addr.raw);
3871 		}
3872 		peer->ctrl_peer = ctrl_peer;
3873 
3874 		dp_local_peer_id_alloc(pdev, peer);
3875 		DP_STATS_INIT(peer);
3876 
3877 		return (void *)peer;
3878 	} else {
3879 		/*
3880 		 * When a STA roams from RPTR AP to ROOT AP and vice versa, we
3881 		 * need to remove the AST entry which was earlier added as a WDS
3882 		 * entry.
3883 		 * If an AST entry exists but no peer entry exists with the given
3884 		 * MAC address, it can be deduced to be a WDS entry.
3885 		 */
3886 		ast_entry = dp_peer_ast_hash_find(soc, peer_mac_addr);
3887 		if (ast_entry)
3888 			dp_peer_del_ast(soc, ast_entry);
3889 	}
3890 
3891 #ifdef notyet
3892 	peer = (struct dp_peer *)qdf_mempool_alloc(soc->osdev,
3893 		soc->mempool_ol_ath_peer);
3894 #else
3895 	peer = (struct dp_peer *)qdf_mem_malloc(sizeof(*peer));
3896 #endif
3897 
3898 	if (!peer)
3899 		return NULL; /* failure */
3900 
3901 	qdf_mem_zero(peer, sizeof(struct dp_peer));
3902 
3903 	TAILQ_INIT(&peer->ast_entry_list);
3904 
3905 	/* store provided params */
3906 	peer->vdev = vdev;
3907 	peer->ctrl_peer = ctrl_peer;
3908 
3909 	if ((vdev->opmode == wlan_op_mode_sta) &&
3910 	    !qdf_mem_cmp(peer_mac_addr, &vdev->mac_addr.raw[0],
3911 			 DP_MAC_ADDR_LEN)) {
3912 		ast_type = CDP_TXRX_AST_TYPE_SELF;
3913 	}
3914 
3915 	dp_peer_add_ast(soc, peer, peer_mac_addr, ast_type, 0);
3916 
3917 	qdf_spinlock_create(&peer->peer_info_lock);
3918 
3919 	qdf_mem_copy(
3920 		&peer->mac_addr.raw[0], peer_mac_addr, OL_TXRX_MAC_ADDR_LEN);
3921 
3922 	/* TODO: See if rx_opt_proc is really required */
3923 	peer->rx_opt_proc = soc->rx_opt_proc;
3924 
3925 	/* initialize the peer_id */
3926 	for (i = 0; i < MAX_NUM_PEER_ID_PER_PEER; i++)
3927 		peer->peer_ids[i] = HTT_INVALID_PEER;
3928 
3929 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
3930 
3931 	qdf_atomic_init(&peer->ref_cnt);
3932 
3933 	/* keep one reference for attach */
3934 	qdf_atomic_inc(&peer->ref_cnt);
3935 
3936 	/* add this peer into the vdev's list */
3937 	if (wlan_op_mode_sta == vdev->opmode)
3938 		TAILQ_INSERT_HEAD(&vdev->peer_list, peer, peer_list_elem);
3939 	else
3940 		TAILQ_INSERT_TAIL(&vdev->peer_list, peer, peer_list_elem);
3941 
3942 	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
3943 
3944 	/* TODO: See if hash based search is required */
3945 	dp_peer_find_hash_add(soc, peer);
3946 
3947 	/* Initialize the peer state */
3948 	peer->state = OL_TXRX_PEER_STATE_DISC;
3949 
3950 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
3951 		"vdev %pK created peer %pK (%pM) ref_cnt: %d",
3952 		vdev, peer, peer->mac_addr.raw,
3953 		qdf_atomic_read(&peer->ref_cnt));
3954 	/*
3955 	 * For every peer MAP message, search and set if bss_peer
3956 	 */
3957 	if (memcmp(peer->mac_addr.raw, vdev->mac_addr.raw, 6) == 0) {
3958 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
3959 			"vdev bss_peer!!!!");
3960 		peer->bss_peer = 1;
3961 		vdev->vap_bss_peer = peer;
3962 	}
3963 	for (i = 0; i < DP_MAX_TIDS; i++)
3964 		qdf_spinlock_create(&peer->rx_tid[i].tid_lock);
3965 
3966 	dp_local_peer_id_alloc(pdev, peer);
3967 	DP_STATS_INIT(peer);
3968 	return (void *)peer;
3969 }
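
/*
 * Peer reference lifecycle implied by the create path above: the create
 * keeps one reference for attach, the HTT PEER_MAP message adds a second,
 * and they are dropped by dp_peer_delete_wifi3() and the PEER_UNMAP
 * handler respectively.  A transient user that already knows the peer is
 * alive (e.g. while walking a peer list under peer_ref_mutex) pins it the
 * same way dp_aggregate_pdev_ctrl_frames_stats() does below; use_peer()
 * is a hypothetical consumer:
 *
 *	qdf_atomic_inc(&peer->ref_cnt);	// pin the peer
 *	use_peer(peer);
 *	dp_peer_unref_delete(peer);	// drop; frees on the last reference
 */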
3970 
3971 /*
3972  * dp_peer_setup_wifi3() - initialize the peer
3973  * @vdev_hdl: virtual device object
3974  * @peer: Peer object
3975  *
3976  * Return: void
3977  */
3978 static void dp_peer_setup_wifi3(struct cdp_vdev *vdev_hdl, void *peer_hdl)
3979 {
3980 	struct dp_peer *peer = (struct dp_peer *)peer_hdl;
3981 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_hdl;
3982 	struct dp_pdev *pdev;
3983 	struct dp_soc *soc;
3984 	bool hash_based = 0;
3985 	enum cdp_host_reo_dest_ring reo_dest;
3986 
3987 	/* preconditions */
3988 	qdf_assert(vdev);
3989 	qdf_assert(peer);
3990 
3991 	pdev = vdev->pdev;
3992 	soc = pdev->soc;
3993 
3994 	peer->last_assoc_rcvd = 0;
3995 	peer->last_disassoc_rcvd = 0;
3996 	peer->last_deauth_rcvd = 0;
3997 
3998 	/*
3999 	 * hash based steering is disabled for Radios which are offloaded
4000 	 * to NSS
4001 	 */
4002 	if (!wlan_cfg_get_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx))
4003 		hash_based = wlan_cfg_is_rx_hash_enabled(soc->wlan_cfg_ctx);
4004 
4005 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
4006 		FL("hash based steering for pdev: %d is %d"),
4007 		pdev->pdev_id, hash_based);
4008 
4009 	/*
4010 	 * Below line of code will ensure the proper reo_dest ring is chosen
4011 	 * for cases where toeplitz hash cannot be generated (ex: non TCP/UDP)
4012 	 */
4013 	reo_dest = pdev->reo_dest;
4014 
4015 	if (soc->cdp_soc.ol_ops->peer_set_default_routing) {
4016 		/* TODO: Check the destination ring number to be passed to FW */
4017 		soc->cdp_soc.ol_ops->peer_set_default_routing(
4018 				pdev->ctrl_pdev, peer->mac_addr.raw,
4019 				peer->vdev->vdev_id, hash_based, reo_dest);
4020 	}
4021 
4022 	dp_peer_rx_init(pdev, peer);
4023 	return;
4024 }
4025 
4026 /*
4027  * dp_set_vdev_tx_encap_type() - set the encap type of the vdev
4028  * @vdev_handle: virtual device object
4029  * @htt_pkt_type: type of pkt
4030  *
4031  * Return: void
4032  */
4033 static void dp_set_vdev_tx_encap_type(struct cdp_vdev *vdev_handle,
4034 	 enum htt_cmn_pkt_type val)
4035 {
4036 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
4037 	vdev->tx_encap_type = val;
4038 }
4039 
4040 /*
4041  * dp_set_vdev_rx_decap_type() - set the decap type of the vdev
4042  * @vdev_handle: virtual device object
4043  * @htt_pkt_type: type of pkt
4044  *
4045  * Return: void
4046  */
4047 static void dp_set_vdev_rx_decap_type(struct cdp_vdev *vdev_handle,
4048 	 enum htt_cmn_pkt_type val)
4049 {
4050 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
4051 	vdev->rx_decap_type = val;
4052 }
4053 
4054 /*
4055  * dp_set_pdev_reo_dest() - set the reo destination ring for this pdev
4056  * @pdev_handle: physical device object
4057  * @val: reo destination ring index (1 - 4)
4058  *
4059  * Return: void
4060  */
4061 static void dp_set_pdev_reo_dest(struct cdp_pdev *pdev_handle,
4062 	 enum cdp_host_reo_dest_ring val)
4063 {
4064 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
4065 
4066 	if (pdev)
4067 		pdev->reo_dest = val;
4068 }
4069 
4070 /*
4071  * dp_get_pdev_reo_dest() - get the reo destination for this pdev
4072  * @pdev_handle: physical device object
4073  *
4074  * Return: reo destination ring index
4075  */
4076 static enum cdp_host_reo_dest_ring
4077 dp_get_pdev_reo_dest(struct cdp_pdev *pdev_handle)
4078 {
4079 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
4080 
4081 	if (pdev)
4082 		return pdev->reo_dest;
4083 	else
4084 		return cdp_host_reo_dest_ring_unknown;
4085 }
4086 
4087 #ifdef QCA_SUPPORT_SON
4088 static void dp_son_peer_authorize(struct dp_peer *peer)
4089 {
4090 	struct dp_soc *soc;
4091 	soc = peer->vdev->pdev->soc;
4092 	peer->peer_bs_inact_flag = 0;
4093 	peer->peer_bs_inact = soc->pdev_bs_inact_reload;
4094 	return;
4095 }
4096 #else
4097 static void dp_son_peer_authorize(struct dp_peer *peer)
4098 {
4099 	return;
4100 }
4101 #endif
4102 /*
4103  * dp_set_filter_neighbour_peers() - set filter neighbour peers for smart mesh
4104  * @pdev_handle: device object
4105  * @val: value to be set
4106  *
4107  * Return: 0 on success
4108  */
4109 static int dp_set_filter_neighbour_peers(struct cdp_pdev *pdev_handle,
4110 	 uint32_t val)
4111 {
4112 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
4113 
4114 	/* Enable/Disable smart mesh filtering. This flag will be checked
4115 	 * during rx processing to check if packets are from NAC clients.
4116 	 */
4117 	pdev->filter_neighbour_peers = val;
4118 	return 0;
4119 }
4120 
4121 /*
4122  * dp_update_filter_neighbour_peers() - set a neighbour peer (NAC client)
4123  * address for smart mesh filtering
4124  * @vdev_handle: virtual device object
4125  * @cmd: Add/Del command
4126  * @macaddr: nac client mac address
4127  *
4128  * Return: 1 on success, 0 on failure
4129  */
4130 static int dp_update_filter_neighbour_peers(struct cdp_vdev *vdev_handle,
4131 					    uint32_t cmd, uint8_t *macaddr)
4132 {
4133 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
4134 	struct dp_pdev *pdev = vdev->pdev;
4135 	struct dp_neighbour_peer *peer = NULL;
4136 
4137 	if (!macaddr)
4138 		goto fail0;
4139 
4140 	/* Store address of NAC (neighbour peer) which will be checked
4141 	 * against TA of received packets.
4142 	 */
4143 	if (cmd == DP_NAC_PARAM_ADD) {
4144 		peer = (struct dp_neighbour_peer *) qdf_mem_malloc(
4145 				sizeof(*peer));
4146 
4147 		if (!peer) {
4148 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
4149 				FL("DP neighbour peer node memory allocation failed"));
4150 			goto fail0;
4151 		}
4152 
4153 		qdf_mem_copy(&peer->neighbour_peers_macaddr.raw[0],
4154 			macaddr, DP_MAC_ADDR_LEN);
4155 		peer->vdev = vdev;
4156 
4157 		qdf_spin_lock_bh(&pdev->neighbour_peer_mutex);
4158 
4159 		/* add this neighbour peer into the list */
4160 		TAILQ_INSERT_TAIL(&pdev->neighbour_peers_list, peer,
4161 				neighbour_peer_list_elem);
4162 		qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex);
4163 
4164 		/* first neighbour */
4165 		if (!pdev->neighbour_peers_added) {
4166 			if (!pdev->mcopy_mode && !pdev->enhanced_stats_en)
4167 				dp_ppdu_ring_cfg(pdev);
4168 			pdev->neighbour_peers_added = true;
4169 		}
4170 		return 1;
4171 
4172 	} else if (cmd == DP_NAC_PARAM_DEL) {
4173 		qdf_spin_lock_bh(&pdev->neighbour_peer_mutex);
4174 		TAILQ_FOREACH(peer, &pdev->neighbour_peers_list,
4175 				neighbour_peer_list_elem) {
4176 			if (!qdf_mem_cmp(&peer->neighbour_peers_macaddr.raw[0],
4177 				macaddr, DP_MAC_ADDR_LEN)) {
4178 				/* delete this peer from the list */
4179 				TAILQ_REMOVE(&pdev->neighbour_peers_list,
4180 					peer, neighbour_peer_list_elem);
4181 				qdf_mem_free(peer);
4182 				break;
4183 			}
4184 		}
4185 		/* last neighbour deleted */
4186 		if (TAILQ_EMPTY(&pdev->neighbour_peers_list))
4187 			pdev->neighbour_peers_added = false;
4188 
4189 		qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex);
4190 
4191 		if (!pdev->mcopy_mode && !pdev->neighbour_peers_added &&
4192 		    !pdev->enhanced_stats_en)
4193 			dp_ppdu_ring_reset(pdev);
4194 		return 1;
4195 
4196 	}
4197 
4198 fail0:
4199 	return 0;
4200 }
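
/*
 * Illustrative NAC (neighbour) filter call sequence, assuming the caller
 * already holds the pdev/vdev handles; nac_mac is a hypothetical 6-byte
 * client address.  dp_update_filter_neighbour_peers() returns 1 on
 * success and 0 on failure:
 *
 *	dp_set_filter_neighbour_peers(pdev_handle, 1);
 *	if (!dp_update_filter_neighbour_peers(vdev_handle,
 *					      DP_NAC_PARAM_ADD, nac_mac))
 *		return QDF_STATUS_E_FAILURE;
 *	// ... receive and account NAC client traffic ...
 *	dp_update_filter_neighbour_peers(vdev_handle,
 *					 DP_NAC_PARAM_DEL, nac_mac);
 *	dp_set_filter_neighbour_peers(pdev_handle, 0);
 */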
4201 
4202 /*
4203  * dp_get_sec_type() - Get the security type
4204  * @peer:		Datapath peer handle
4205  * @sec_idx:    Security id (mcast, ucast)
4206  *
4207  * return sec_type: Security type
4208  */
4209 static int dp_get_sec_type(struct cdp_peer *peer, uint8_t sec_idx)
4210 {
4211 	struct dp_peer *dpeer = (struct dp_peer *)peer;
4212 
4213 	return dpeer->security[sec_idx].sec_type;
4214 }
4215 
4216 /*
4217  * dp_peer_authorize() - authorize txrx peer
4218  * @peer_handle:		Datapath peer handle
4219  * @authorize: authorization flag (non-zero to authorize the peer)
4220  *
4221  */
4222 static void dp_peer_authorize(struct cdp_peer *peer_handle, uint32_t authorize)
4223 {
4224 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
4225 	struct dp_soc *soc;
4226 
4227 	if (peer != NULL) {
4228 		soc = peer->vdev->pdev->soc;
4229 		qdf_spin_lock_bh(&soc->peer_ref_mutex);
4230 		dp_son_peer_authorize(peer);
4231 		peer->authorize = authorize ? 1 : 0;
4232 		qdf_spin_unlock_bh(&soc->peer_ref_mutex);
4233 	}
4234 }
4235 
4236 #ifdef QCA_SUPPORT_SON
4237 /*
4238  * dp_txrx_update_inact_threshold() - Update inact timer threshold
4239  * @pdev_handle: Device handle
4240  * @new_threshold : updated threshold value
4241  *
4242  */
4243 static void
4244 dp_txrx_update_inact_threshold(struct cdp_pdev *pdev_handle,
4245 			       u_int16_t new_threshold)
4246 {
4247 	struct dp_vdev *vdev;
4248 	struct dp_peer *peer;
4249 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
4250 	struct dp_soc *soc = pdev->soc;
4251 	u_int16_t old_threshold = soc->pdev_bs_inact_reload;
4252 
4253 	if (old_threshold == new_threshold)
4254 		return;
4255 
4256 	soc->pdev_bs_inact_reload = new_threshold;
4257 
4258 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
4259 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
4260 	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
4261 		if (vdev->opmode != wlan_op_mode_ap)
4262 			continue;
4263 
4264 		TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
4265 			if (!peer->authorize)
4266 				continue;
4267 
4268 			if (old_threshold - peer->peer_bs_inact >=
4269 					new_threshold) {
4270 				dp_mark_peer_inact((void *)peer, true);
4271 				peer->peer_bs_inact = 0;
4272 			} else {
4273 				peer->peer_bs_inact = new_threshold -
4274 					(old_threshold - peer->peer_bs_inact);
4275 			}
4276 		}
4277 	}
4278 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
4279 	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
4280 }
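
/*
 * peer_bs_inact counts down from the reload value, so the ticks already
 * elapsed for a peer are (old_threshold - peer_bs_inact).  Worked example
 * with old_threshold = 10 and new_threshold = 6:
 *
 *	peer_bs_inact = 9: elapsed 1 < 6,  rescaled to 6 - 1 = 5
 *	peer_bs_inact = 3: elapsed 7 >= 6, peer marked inactive, count = 0
 */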
4281 
4282 /**
4283  * dp_txrx_reset_inact_count(): Reset inact count
4284  * @pdev_handle - device handle
4285  *
4286  * Return: void
4287  */
4288 static void
4289 dp_txrx_reset_inact_count(struct cdp_pdev *pdev_handle)
4290 {
4291 	struct dp_vdev *vdev = NULL;
4292 	struct dp_peer *peer = NULL;
4293 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
4294 	struct dp_soc *soc = pdev->soc;
4295 
4296 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
4297 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
4298 	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
4299 		if (vdev->opmode != wlan_op_mode_ap)
4300 			continue;
4301 
4302 		TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
4303 			if (!peer->authorize)
4304 				continue;
4305 
4306 			peer->peer_bs_inact = soc->pdev_bs_inact_reload;
4307 		}
4308 	}
4309 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
4310 	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
4311 }
4312 
4313 /**
4314  * dp_set_inact_params(): set inactivity params
4315  * @pdev_handle - device handle
4316  * @inact_check_interval - inactivity interval
4317  * @inact_normal - Inactivity normal
4318  * @inact_overload - Inactivity overload
4319  *
4320  * Return: bool
4321  */
4322 bool dp_set_inact_params(struct cdp_pdev *pdev_handle,
4323 			 u_int16_t inact_check_interval,
4324 			 u_int16_t inact_normal, u_int16_t inact_overload)
4325 {
4326 	struct dp_soc *soc;
4327 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
4328 
4329 	if (!pdev)
4330 		return false;
4331 
4332 	soc = pdev->soc;
4333 	if (!soc)
4334 		return false;
4335 
4336 	soc->pdev_bs_inact_interval = inact_check_interval;
4337 	soc->pdev_bs_inact_normal = inact_normal;
4338 	soc->pdev_bs_inact_overload = inact_overload;
4339 
4340 	dp_txrx_update_inact_threshold((struct cdp_pdev *)pdev,
4341 					soc->pdev_bs_inact_normal);
4342 
4343 	return true;
4344 }
4345 
4346 /**
4347  * dp_start_inact_timer(): Inactivity timer start
4348  * @pdev_handle - device handle
4349  * @enable - Inactivity timer start/stop
4350  *
4351  * Return: bool
4352  */
4353 bool dp_start_inact_timer(struct cdp_pdev *pdev_handle, bool enable)
4354 {
4355 	struct dp_soc *soc;
4356 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
4357 
4358 	if (!pdev)
4359 		return false;
4360 
4361 	soc = pdev->soc;
4362 	if (!soc)
4363 		return false;
4364 
4365 	if (enable) {
4366 		dp_txrx_reset_inact_count((struct cdp_pdev *)pdev);
4367 		qdf_timer_mod(&soc->pdev_bs_inact_timer,
4368 			      soc->pdev_bs_inact_interval * 1000);
4369 	} else {
4370 		qdf_timer_stop(&soc->pdev_bs_inact_timer);
4371 	}
4372 
4373 	return true;
4374 }
4375 
4376 /**
4377  * dp_set_overload(): Set inactivity overload
4378  * @pdev_handle - device handle
4379  * @overload - overload status
4380  *
4381  * Return: void
4382  */
4383 void dp_set_overload(struct cdp_pdev *pdev_handle, bool overload)
4384 {
4385 	struct dp_soc *soc;
4386 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
4387 
4388 	if (!pdev)
4389 		return;
4390 
4391 	soc = pdev->soc;
4392 	if (!soc)
4393 		return;
4394 
4395 	dp_txrx_update_inact_threshold((struct cdp_pdev *)pdev,
4396 			overload ? soc->pdev_bs_inact_overload :
4397 			soc->pdev_bs_inact_normal);
4398 }
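
/*
 * Typical control-path sequence for the inactivity hooks above (the
 * numeric values are hypothetical).  qdf_timer_mod() takes milliseconds,
 * so the "* 1000" in dp_start_inact_timer() implies inact_check_interval
 * is given in seconds:
 *
 *	dp_set_inact_params(pdev_handle, 10, 15, 5); // interval/normal/overload
 *	dp_start_inact_timer(pdev_handle, true);
 *	...
 *	dp_set_overload(pdev_handle, true);  // rescale peers to overload value
 *	dp_start_inact_timer(pdev_handle, false);
 */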
4399 
4400 /**
4401  * dp_peer_is_inact(): check whether peer is inactive
4402  * @peer_handle - datapath peer handle
4403  *
4404  * Return: bool
4405  */
4406 bool dp_peer_is_inact(void *peer_handle)
4407 {
4408 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
4409 
4410 	if (!peer)
4411 		return false;
4412 
4413 	return peer->peer_bs_inact_flag == 1;
4414 }
4415 
4416 /**
4417  * dp_init_inact_timer: initialize the inact timer
4418  * @soc - SOC handle
4419  *
4420  * Return: void
4421  */
4422 void dp_init_inact_timer(struct dp_soc *soc)
4423 {
4424 	qdf_timer_init(soc->osdev, &soc->pdev_bs_inact_timer,
4425 		dp_txrx_peer_find_inact_timeout_handler,
4426 		(void *)soc, QDF_TIMER_TYPE_WAKE_APPS);
4427 }
4428 
4429 #else
4430 
4431 bool dp_set_inact_params(struct cdp_pdev *pdev, u_int16_t inact_check_interval,
4432 			 u_int16_t inact_normal, u_int16_t inact_overload)
4433 {
4434 	return false;
4435 }
4436 
4437 bool dp_start_inact_timer(struct cdp_pdev *pdev, bool enable)
4438 {
4439 	return false;
4440 }
4441 
4442 void dp_set_overload(struct cdp_pdev *pdev, bool overload)
4443 {
4444 	return;
4445 }
4446 
4447 void dp_init_inact_timer(struct dp_soc *soc)
4448 {
4449 	return;
4450 }
4451 
4452 bool dp_peer_is_inact(void *peer)
4453 {
4454 	return false;
4455 }
4456 #endif
4457 
4458 /*
4459  * dp_peer_unref_delete() - unref and delete peer
4460  * @peer_handle:		Datapath peer handle
4461  *
4462  */
4463 void dp_peer_unref_delete(void *peer_handle)
4464 {
4465 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
4466 	struct dp_peer *bss_peer = NULL;
4467 	struct dp_vdev *vdev = peer->vdev;
4468 	struct dp_pdev *pdev = vdev->pdev;
4469 	struct dp_soc *soc = pdev->soc;
4470 	struct dp_peer *tmppeer;
4471 	int found = 0;
4472 	uint16_t peer_id;
4473 	uint16_t vdev_id;
4474 
4475 	/*
4476 	 * Hold the lock all the way from checking if the peer ref count
4477 	 * is zero until the peer references are removed from the hash
4478 	 * table and vdev list (if the peer ref count is zero).
4479 	 * This protects against a new HL tx operation starting to use the
4480 	 * peer object just after this function concludes it's done being used.
4481 	 * Furthermore, the lock needs to be held while checking whether the
4482 	 * vdev's list of peers is empty, to make sure that list is not modified
4483 	 * concurrently with the empty check.
4484 	 */
4485 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
4486 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
4487 		  "%s: peer %pK ref_cnt(before decrement): %d", __func__,
4488 		  peer, qdf_atomic_read(&peer->ref_cnt));
4489 	if (qdf_atomic_dec_and_test(&peer->ref_cnt)) {
4490 		peer_id = peer->peer_ids[0];
4491 		vdev_id = vdev->vdev_id;
4492 
4493 		/*
4494 		 * Make sure that the reference to the peer in
4495 		 * peer object map is removed
4496 		 */
4497 		if (peer_id != HTT_INVALID_PEER)
4498 			soc->peer_id_to_obj_map[peer_id] = NULL;
4499 
4500 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
4501 			"Deleting peer %pK (%pM)", peer, peer->mac_addr.raw);
4502 
4503 		/* remove the reference to the peer from the hash table */
4504 		dp_peer_find_hash_remove(soc, peer);
4505 
4506 		qdf_spin_lock_bh(&soc->ast_lock);
4507 		if (peer->self_ast_entry) {
4508 			dp_peer_del_ast(soc, peer->self_ast_entry);
4509 			peer->self_ast_entry = NULL;
4510 		}
4511 		qdf_spin_unlock_bh(&soc->ast_lock);
4512 
4513 		TAILQ_FOREACH(tmppeer, &peer->vdev->peer_list, peer_list_elem) {
4514 			if (tmppeer == peer) {
4515 				found = 1;
4516 				break;
4517 			}
4518 		}
4519 		if (found) {
4520 			TAILQ_REMOVE(&peer->vdev->peer_list, peer,
4521 				peer_list_elem);
4522 		} else {
4523 			/*Ignoring the remove operation as peer not found*/
4524 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_WARN,
4525 				"peer %pK not found in vdev (%pK)->peer_list:%pK",
4526 				peer, vdev, &peer->vdev->peer_list);
4527 		}
4528 
4529 		/* cleanup the peer data */
4530 		dp_peer_cleanup(vdev, peer);
4531 
4532 		/* check whether the parent vdev has no peers left */
4533 		if (TAILQ_EMPTY(&vdev->peer_list)) {
4534 			/*
4535 			 * Now that there are no references to the peer, we can
4536 			 * release the peer reference lock.
4537 			 */
4538 			qdf_spin_unlock_bh(&soc->peer_ref_mutex);
4539 			/*
4540 			 * Check if the parent vdev was waiting for its peers
4541 			 * to be deleted, in order for it to be deleted too.
4542 			 */
4543 			if (vdev->delete.pending) {
4544 				ol_txrx_vdev_delete_cb vdev_delete_cb =
4545 					vdev->delete.callback;
4546 				void *vdev_delete_context =
4547 					vdev->delete.context;
4548 
4549 				QDF_TRACE(QDF_MODULE_ID_DP,
4550 					QDF_TRACE_LEVEL_INFO_HIGH,
4551 					FL("deleting vdev object %pK (%pM)"
4552 					" - its last peer is done"),
4553 					vdev, vdev->mac_addr.raw);
4554 				/* all peers are gone, go ahead and delete it */
4555 				dp_tx_flow_pool_unmap_handler(pdev, vdev_id,
4556 								FLOW_TYPE_VDEV,
4557 								vdev_id);
4558 				dp_tx_vdev_detach(vdev);
4559 				QDF_TRACE(QDF_MODULE_ID_DP,
4560 					QDF_TRACE_LEVEL_INFO_HIGH,
4561 					FL("deleting vdev object %pK (%pM)"),
4562 					vdev, vdev->mac_addr.raw);
4563 
4564 				qdf_mem_free(vdev);
4565 				vdev = NULL;
4566 				if (vdev_delete_cb)
4567 					vdev_delete_cb(vdev_delete_context);
4568 			}
4569 		} else {
4570 			qdf_spin_unlock_bh(&soc->peer_ref_mutex);
4571 		}
4572 
4573 		if (vdev) {
4574 			if (vdev->vap_bss_peer == peer) {
4575 				vdev->vap_bss_peer = NULL;
4576 			}
4577 		}
4578 
4579 		if (soc->cdp_soc.ol_ops->peer_unref_delete) {
4580 			soc->cdp_soc.ol_ops->peer_unref_delete(pdev->ctrl_pdev,
4581 					vdev_id, peer->mac_addr.raw);
4582 		}
4583 
4584 		if (!vdev || !vdev->vap_bss_peer) {
4585 			goto free_peer;
4586 		}
4587 
4588 #ifdef notyet
4589 		qdf_mempool_free(soc->osdev, soc->mempool_ol_ath_peer, peer);
4590 #else
4591 		bss_peer = vdev->vap_bss_peer;
4592 		DP_UPDATE_STATS(vdev, peer);
4593 
4594 free_peer:
4595 		qdf_mem_free(peer);
4596 
4597 #endif
4598 	} else {
4599 		qdf_spin_unlock_bh(&soc->peer_ref_mutex);
4600 	}
4601 }
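
/*
 * Minimal model of the teardown ordering above (a sketch with simplified
 * names; remove_from_lookup() and free_obj() are hypothetical).  The lock
 * spans both the dec-and-test and the removal from the lookup structures,
 * so no new reference can be taken on an object whose count just hit zero:
 *
 *	qdf_spin_lock_bh(&ref_lock);
 *	if (qdf_atomic_dec_and_test(&obj->ref_cnt)) {
 *		remove_from_lookup(obj);
 *		qdf_spin_unlock_bh(&ref_lock);
 *		free_obj(obj);		// nothing can find it any more
 *	} else {
 *		qdf_spin_unlock_bh(&ref_lock);
 *	}
 */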
4602 
4603 /*
4604  * dp_peer_delete_wifi3() - Detach txrx peer
4605  * @peer_handle: Datapath peer handle
4606  * @bitmap: bitmap indicating special handling of request.
4607  *
4608  */
4609 static void dp_peer_delete_wifi3(void *peer_handle, uint32_t bitmap)
4610 {
4611 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
4612 
4613 	/* redirect the peer's rx delivery function to point to a
4614 	 * discard func
4615 	 */
4616 
4617 	peer->rx_opt_proc = dp_rx_discard;
4618 	peer->ctrl_peer = NULL;
4619 
4620 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
4621 		FL("peer %pK (%pM)"),  peer, peer->mac_addr.raw);
4622 
4623 	dp_local_peer_id_free(peer->vdev->pdev, peer);
4624 	qdf_spinlock_destroy(&peer->peer_info_lock);
4625 
4626 	/*
4627 	 * Remove the reference added during peer_attach.
4628 	 * The peer will still be left allocated until the
4629 	 * PEER_UNMAP message arrives to remove the other
4630 	 * reference, added by the PEER_MAP message.
4631 	 */
4632 	dp_peer_unref_delete(peer_handle);
4633 }
4634 
4635 /*
4636  * dp_get_vdev_mac_addr_wifi3() - Get vdev MAC address
4637  * @pvdev: Datapath VDEV handle
4638  * Return: pointer to the vdev MAC address
4639  */
4640 static uint8 *dp_get_vdev_mac_addr_wifi3(struct cdp_vdev *pvdev)
4641 {
4642 	struct dp_vdev *vdev = (struct dp_vdev *)pvdev;
4643 	return vdev->mac_addr.raw;
4644 }
4645 
4646 /*
4647  * dp_vdev_set_wds() - Enable/disable WDS on the vdev
4648  * @vdev_handle: DP VDEV handle
4649  * @val: value
4650  *
4651  * Return: 0 on success
4652  */
4653 static int dp_vdev_set_wds(void *vdev_handle, uint32_t val)
4654 {
4655 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
4656 
4657 	vdev->wds_enabled = val;
4658 	return 0;
4659 }
4660 
4661 /*
4662  * dp_get_vdev_from_vdev_id_wifi3() - Get vdev handle from vdev_id
4663  * @dev: Datapath PDEV handle
4664  * @vdev_id: vdev id
4665  */
4666 static struct cdp_vdev *dp_get_vdev_from_vdev_id_wifi3(struct cdp_pdev *dev,
4667 						uint8_t vdev_id)
4668 {
4669 	struct dp_pdev *pdev = (struct dp_pdev *)dev;
4670 	struct dp_vdev *vdev = NULL;
4671 
4672 	if (qdf_unlikely(!pdev))
4673 		return NULL;
4674 
4675 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
4676 	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
4677 		if (vdev->vdev_id == vdev_id)
4678 			break;
4679 	}
4680 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
4681 
4682 	return (struct cdp_vdev *)vdev;
4683 }
4684 
4685 static int dp_get_opmode(struct cdp_vdev *vdev_handle)
4686 {
4687 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
4688 
4689 	return vdev->opmode;
4690 }
4691 
4692 static struct cdp_cfg *dp_get_ctrl_pdev_from_vdev_wifi3(struct cdp_vdev *pvdev)
4693 {
4694 	struct dp_vdev *vdev = (struct dp_vdev *)pvdev;
4695 	struct dp_pdev *pdev = vdev->pdev;
4696 
4697 	return (struct cdp_cfg *)pdev->wlan_cfg_ctx;
4698 }
4699 
4700 /**
4701  * dp_reset_monitor_mode() - Disable monitor mode
4702  * @pdev_handle: Datapath PDEV handle
4703  *
4704  * Return: 0 on success, not 0 on failure
4705  */
4706 static int dp_reset_monitor_mode(struct cdp_pdev *pdev_handle)
4707 {
4708 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
4709 	struct htt_rx_ring_tlv_filter htt_tlv_filter;
4710 	struct dp_soc *soc = pdev->soc;
4711 	uint8_t pdev_id;
4712 	int mac_id;
4713 
4714 	pdev_id = pdev->pdev_id;
4715 	soc = pdev->soc;
4716 
4717 	qdf_spin_lock_bh(&pdev->mon_lock);
4718 
4719 	qdf_mem_set(&(htt_tlv_filter), sizeof(htt_tlv_filter), 0x0);
4720 
4721 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
4722 		int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, pdev_id);
4723 
4724 		htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
4725 			pdev->rxdma_mon_buf_ring[mac_id].hal_srng,
4726 			RXDMA_MONITOR_BUF, RX_BUFFER_SIZE, &htt_tlv_filter);
4727 
4728 		htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
4729 			pdev->rxdma_mon_status_ring[mac_id].hal_srng,
4730 			RXDMA_MONITOR_STATUS, RX_BUFFER_SIZE, &htt_tlv_filter);
4731 	}
4732 
4733 	pdev->monitor_vdev = NULL;
4734 
4735 	qdf_spin_unlock_bh(&pdev->mon_lock);
4736 
4737 	return 0;
4738 }
4739 
4740 /**
4741  * dp_set_nac() - set peer_nac
4742  * @peer_handle: Datapath PEER handle
4743  *
4744  * Return: void
4745  */
4746 static void dp_set_nac(struct cdp_peer *peer_handle)
4747 {
4748 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
4749 
4750 	peer->nac = 1;
4751 }
4752 
4753 /**
4754  * dp_get_tx_pending() - read pending tx
4755  * @pdev_handle: Datapath PDEV handle
4756  *
4757  * Return: outstanding tx
4758  */
4759 static int dp_get_tx_pending(struct cdp_pdev *pdev_handle)
4760 {
4761 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
4762 
4763 	return qdf_atomic_read(&pdev->num_tx_outstanding);
4764 }
4765 
4766 /**
4767  * dp_get_peer_mac_from_peer_id() - get peer mac
4768  * @pdev_handle: Datapath PDEV handle
4769  * @peer_id: Peer ID
4770  * @peer_mac: MAC addr of PEER
4771  *
4772  * Return: void
4773  */
4774 static void dp_get_peer_mac_from_peer_id(struct cdp_pdev *pdev_handle,
4775 	uint32_t peer_id, uint8_t *peer_mac)
4776 {
4777 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
4778 	struct dp_peer *peer;
4779 
4780 	if (pdev && peer_mac) {
4781 		peer = dp_peer_find_by_id(pdev->soc, (uint16_t)peer_id);
4782 		if (peer && peer->mac_addr.raw) {
4783 			qdf_mem_copy(peer_mac, peer->mac_addr.raw,
4784 					DP_MAC_ADDR_LEN);
4785 		}
4786 	}
4787 }
4788 
4789 /**
4790  * dp_vdev_set_monitor_mode() - Set DP VDEV to monitor mode
4791  * @vdev_handle: Datapath VDEV handle
4792  * @smart_monitor: Flag to denote if it is smart monitor mode
4793  *
4794  * Return: 0 on success, not 0 on failure
4795  */
4796 static int dp_vdev_set_monitor_mode(struct cdp_vdev *vdev_handle,
4797 		uint8_t smart_monitor)
4798 {
4799 	/* Many monitor VAPs can exist in a system but only one can be up at
4800 	 * any time
4801 	 */
4802 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
4803 	struct dp_pdev *pdev;
4804 	struct htt_rx_ring_tlv_filter htt_tlv_filter;
4805 	struct dp_soc *soc;
4806 	uint8_t pdev_id;
4807 	int mac_id;
4808 
4809 	qdf_assert(vdev);
4810 
4811 	pdev = vdev->pdev;
4812 	pdev_id = pdev->pdev_id;
4813 	soc = pdev->soc;
4814 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_WARN,
4815 		"pdev=%pK, pdev_id=%d, soc=%pK vdev=%pK",
4816 		pdev, pdev_id, soc, vdev);
4817 
4818 	/*Check if current pdev's monitor_vdev exists */
4819 	if (pdev->monitor_vdev) {
4820 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
4821 			"vdev=%pK", vdev);
4822 		qdf_assert(vdev);
4823 	}
4824 
4825 	pdev->monitor_vdev = vdev;
4826 
4827 	/* If smart monitor mode, do not configure monitor ring */
4828 	if (smart_monitor)
4829 		return QDF_STATUS_SUCCESS;
4830 
4831 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
4832 		"MODE[%x] FP[%02x|%02x|%02x] MO[%02x|%02x|%02x]",
4833 		pdev->mon_filter_mode, pdev->fp_mgmt_filter,
4834 		pdev->fp_ctrl_filter, pdev->fp_data_filter,
4835 		pdev->mo_mgmt_filter, pdev->mo_ctrl_filter,
4836 		pdev->mo_data_filter);
4837 
4838 	qdf_mem_set(&(htt_tlv_filter), sizeof(htt_tlv_filter), 0x0);
4839 
4840 	htt_tlv_filter.mpdu_start = 1;
4841 	htt_tlv_filter.msdu_start = 1;
4842 	htt_tlv_filter.packet = 1;
4843 	htt_tlv_filter.msdu_end = 1;
4844 	htt_tlv_filter.mpdu_end = 1;
4845 	htt_tlv_filter.packet_header = 1;
4846 	htt_tlv_filter.attention = 1;
4847 	htt_tlv_filter.ppdu_start = 0;
4848 	htt_tlv_filter.ppdu_end = 0;
4849 	htt_tlv_filter.ppdu_end_user_stats = 0;
4850 	htt_tlv_filter.ppdu_end_user_stats_ext = 0;
4851 	htt_tlv_filter.ppdu_end_status_done = 0;
4852 	htt_tlv_filter.header_per_msdu = 1;
4853 	htt_tlv_filter.enable_fp =
4854 		(pdev->mon_filter_mode & MON_FILTER_PASS) ? 1 : 0;
4855 	htt_tlv_filter.enable_md = 0;
4856 	htt_tlv_filter.enable_mo =
4857 		(pdev->mon_filter_mode & MON_FILTER_OTHER) ? 1 : 0;
4858 	htt_tlv_filter.fp_mgmt_filter = pdev->fp_mgmt_filter;
4859 	htt_tlv_filter.fp_ctrl_filter = pdev->fp_ctrl_filter;
4860 	htt_tlv_filter.fp_data_filter = pdev->fp_data_filter;
4861 	htt_tlv_filter.mo_mgmt_filter = pdev->mo_mgmt_filter;
4862 	htt_tlv_filter.mo_ctrl_filter = pdev->mo_ctrl_filter;
4863 	htt_tlv_filter.mo_data_filter = pdev->mo_data_filter;
4864 
4865 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
4866 		int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, pdev_id);
4867 
4868 		htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
4869 			pdev->rxdma_mon_buf_ring[mac_id].hal_srng,
4870 			RXDMA_MONITOR_BUF, RX_BUFFER_SIZE, &htt_tlv_filter);
4871 	}
4872 
4873 	qdf_mem_set(&(htt_tlv_filter), sizeof(htt_tlv_filter), 0x0);
4874 
4875 	htt_tlv_filter.mpdu_start = 1;
4876 	htt_tlv_filter.msdu_start = 0;
4877 	htt_tlv_filter.packet = 0;
4878 	htt_tlv_filter.msdu_end = 0;
4879 	htt_tlv_filter.mpdu_end = 0;
4880 	htt_tlv_filter.attention = 0;
4881 	htt_tlv_filter.ppdu_start = 1;
4882 	htt_tlv_filter.ppdu_end = 1;
4883 	htt_tlv_filter.ppdu_end_user_stats = 1;
4884 	htt_tlv_filter.ppdu_end_user_stats_ext = 1;
4885 	htt_tlv_filter.ppdu_end_status_done = 1;
4886 	htt_tlv_filter.enable_fp = 1;
4887 	htt_tlv_filter.enable_md = 0;
4888 	htt_tlv_filter.enable_mo = 1;
4889 	if (pdev->mcopy_mode) {
4890 		htt_tlv_filter.packet_header = 1;
4891 	}
4892 	htt_tlv_filter.fp_mgmt_filter = FILTER_MGMT_ALL;
4893 	htt_tlv_filter.fp_ctrl_filter = FILTER_CTRL_ALL;
4894 	htt_tlv_filter.fp_data_filter = FILTER_DATA_ALL;
4895 	htt_tlv_filter.mo_mgmt_filter = FILTER_MGMT_ALL;
4896 	htt_tlv_filter.mo_ctrl_filter = FILTER_CTRL_ALL;
4897 	htt_tlv_filter.mo_data_filter = FILTER_DATA_ALL;
4898 
4899 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
4900 		int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id,
4901 						pdev->pdev_id);
4902 
4903 		htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
4904 			pdev->rxdma_mon_status_ring[mac_id].hal_srng,
4905 			RXDMA_MONITOR_STATUS, RX_BUFFER_SIZE, &htt_tlv_filter);
4906 	}
4907 
4908 	return QDF_STATUS_SUCCESS;
4909 }
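
/*
 * The two htt_h2t_rx_ring_cfg() passes above split the TLV load: the
 * monitor buffer ring is programmed for per-MPDU/MSDU TLVs plus packet
 * payload, while the status ring only carries PPDU start/end TLVs for
 * PPDU-level accounting.  Illustrative enable/disable sequence from a
 * caller that owns the handles (smart_monitor = 0 so the rings are
 * actually programmed):
 *
 *	if (dp_vdev_set_monitor_mode(vdev_handle, 0) != QDF_STATUS_SUCCESS)
 *		return QDF_STATUS_E_FAILURE;
 *	// ... capture ...
 *	dp_reset_monitor_mode(pdev_handle); // zeroed filters, monitor_vdev = NULL
 */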
4910 
4911 /**
4912  * dp_pdev_set_advance_monitor_filter() - Set DP PDEV monitor filter
4913  * @pdev_handle: Datapath PDEV handle
4914  * @filter_val: Flag to select Filter for monitor mode
4915  * Return: 0 on success, not 0 on failure
4916  */
4917 static int dp_pdev_set_advance_monitor_filter(struct cdp_pdev *pdev_handle,
4918 	struct cdp_monitor_filter *filter_val)
4919 {
4920 	/* Many monitor VAPs can exist in a system but only one can be up at
4921 	 * any time
4922 	 */
4923 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
4924 	struct dp_vdev *vdev = pdev->monitor_vdev;
4925 	struct htt_rx_ring_tlv_filter htt_tlv_filter;
4926 	struct dp_soc *soc;
4927 	uint8_t pdev_id;
4928 	int mac_id;
4929 
4930 	pdev_id = pdev->pdev_id;
4931 	soc = pdev->soc;
4932 
4933 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_WARN,
4934 		"pdev=%pK, pdev_id=%d, soc=%pK vdev=%pK",
4935 		pdev, pdev_id, soc, vdev);
4936 
4937 	/*Check if current pdev's monitor_vdev exists */
4938 	if (!pdev->monitor_vdev) {
4939 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
4940 			"vdev=%pK", vdev);
4941 		qdf_assert(vdev);
4942 	}
4943 
4944 	/* update filter mode, type in pdev structure */
4945 	pdev->mon_filter_mode = filter_val->mode;
4946 	pdev->fp_mgmt_filter = filter_val->fp_mgmt;
4947 	pdev->fp_ctrl_filter = filter_val->fp_ctrl;
4948 	pdev->fp_data_filter = filter_val->fp_data;
4949 	pdev->mo_mgmt_filter = filter_val->mo_mgmt;
4950 	pdev->mo_ctrl_filter = filter_val->mo_ctrl;
4951 	pdev->mo_data_filter = filter_val->mo_data;
4952 
4953 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
4954 		"MODE[%x] FP[%02x|%02x|%02x] MO[%02x|%02x|%02x]",
4955 		pdev->mon_filter_mode, pdev->fp_mgmt_filter,
4956 		pdev->fp_ctrl_filter, pdev->fp_data_filter,
4957 		pdev->mo_mgmt_filter, pdev->mo_ctrl_filter,
4958 		pdev->mo_data_filter);
4959 
4960 	qdf_mem_set(&(htt_tlv_filter), sizeof(htt_tlv_filter), 0x0);
4961 
4962 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
4963 		int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, pdev_id);
4964 
4965 		htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
4966 			pdev->rxdma_mon_buf_ring[mac_id].hal_srng,
4967 			RXDMA_MONITOR_BUF, RX_BUFFER_SIZE, &htt_tlv_filter);
4968 
4969 		htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
4970 			pdev->rxdma_mon_status_ring[mac_id].hal_srng,
4971 			RXDMA_MONITOR_STATUS, RX_BUFFER_SIZE, &htt_tlv_filter);
4972 	}
4973 
4974 	htt_tlv_filter.mpdu_start = 1;
4975 	htt_tlv_filter.msdu_start = 1;
4976 	htt_tlv_filter.packet = 1;
4977 	htt_tlv_filter.msdu_end = 1;
4978 	htt_tlv_filter.mpdu_end = 1;
4979 	htt_tlv_filter.packet_header = 1;
4980 	htt_tlv_filter.attention = 1;
4981 	htt_tlv_filter.ppdu_start = 0;
4982 	htt_tlv_filter.ppdu_end = 0;
4983 	htt_tlv_filter.ppdu_end_user_stats = 0;
4984 	htt_tlv_filter.ppdu_end_user_stats_ext = 0;
4985 	htt_tlv_filter.ppdu_end_status_done = 0;
4986 	htt_tlv_filter.header_per_msdu = 1;
4987 	htt_tlv_filter.enable_fp =
4988 		(pdev->mon_filter_mode & MON_FILTER_PASS) ? 1 : 0;
4989 	htt_tlv_filter.enable_md = 0;
4990 	htt_tlv_filter.enable_mo =
4991 		(pdev->mon_filter_mode & MON_FILTER_OTHER) ? 1 : 0;
4992 	htt_tlv_filter.fp_mgmt_filter = pdev->fp_mgmt_filter;
4993 	htt_tlv_filter.fp_ctrl_filter = pdev->fp_ctrl_filter;
4994 	htt_tlv_filter.fp_data_filter = pdev->fp_data_filter;
4995 	htt_tlv_filter.mo_mgmt_filter = pdev->mo_mgmt_filter;
4996 	htt_tlv_filter.mo_ctrl_filter = pdev->mo_ctrl_filter;
4997 	htt_tlv_filter.mo_data_filter = pdev->mo_data_filter;
4998 
4999 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
5000 		int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, pdev_id);
5001 
5002 		htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
5003 			pdev->rxdma_mon_buf_ring[mac_id].hal_srng,
5004 			RXDMA_MONITOR_BUF, RX_BUFFER_SIZE, &htt_tlv_filter);
5005 	}
5006 
5007 	qdf_mem_set(&(htt_tlv_filter), sizeof(htt_tlv_filter), 0x0);
5008 
5009 	htt_tlv_filter.mpdu_start = 1;
5010 	htt_tlv_filter.msdu_start = 0;
5011 	htt_tlv_filter.packet = 0;
5012 	htt_tlv_filter.msdu_end = 0;
5013 	htt_tlv_filter.mpdu_end = 0;
5014 	htt_tlv_filter.attention = 0;
5015 	htt_tlv_filter.ppdu_start = 1;
5016 	htt_tlv_filter.ppdu_end = 1;
5017 	htt_tlv_filter.ppdu_end_user_stats = 1;
5018 	htt_tlv_filter.ppdu_end_user_stats_ext = 1;
5019 	htt_tlv_filter.ppdu_end_status_done = 1;
5020 	htt_tlv_filter.enable_fp = 1;
5021 	htt_tlv_filter.enable_md = 0;
5022 	htt_tlv_filter.enable_mo = 1;
5023 	if (pdev->mcopy_mode) {
5024 		htt_tlv_filter.packet_header = 1;
5025 	}
5026 	htt_tlv_filter.fp_mgmt_filter = FILTER_MGMT_ALL;
5027 	htt_tlv_filter.fp_ctrl_filter = FILTER_CTRL_ALL;
5028 	htt_tlv_filter.fp_data_filter = FILTER_DATA_ALL;
5029 	htt_tlv_filter.mo_mgmt_filter = FILTER_MGMT_ALL;
5030 	htt_tlv_filter.mo_ctrl_filter = FILTER_CTRL_ALL;
5031 	htt_tlv_filter.mo_data_filter = FILTER_DATA_ALL;
5032 
5033 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
5034 		int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id,
5035 						pdev->pdev_id);
5036 
5037 		htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
5038 			pdev->rxdma_mon_status_ring[mac_id].hal_srng,
5039 			RXDMA_MONITOR_STATUS, RX_BUFFER_SIZE, &htt_tlv_filter);
5040 	}
5041 
5042 	return QDF_STATUS_SUCCESS;
5043 }
5044 
5045 /**
5046  * dp_get_pdev_id_frm_pdev() - get pdev_id
5047  * @pdev_handle: Datapath PDEV handle
5048  *
5049  * Return: pdev_id
5050  */
5051 static
5052 uint8_t dp_get_pdev_id_frm_pdev(struct cdp_pdev *pdev_handle)
5053 {
5054 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
5055 
5056 	return pdev->pdev_id;
5057 }
5058 
5059 /**
5060  * dp_pdev_set_chan_noise_floor() - set channel noise floor
5061  * @pdev_handle: Datapath PDEV handle
5062  * @chan_noise_floor: Channel Noise Floor
5063  *
5064  * Return: void
5065  */
5066 static
5067 void dp_pdev_set_chan_noise_floor(struct cdp_pdev *pdev_handle,
5068 				  int16_t chan_noise_floor)
5069 {
5070 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
5071 
5072 	pdev->chan_noise_floor = chan_noise_floor;
5073 }
5074 
5075 /**
5076  * dp_vdev_get_filter_ucast_data() - get DP VDEV monitor ucast filter
5077  * @vdev_handle: Datapath VDEV handle
5078  * Return: true on ucast filter flag set
5079  */
5080 static bool dp_vdev_get_filter_ucast_data(struct cdp_vdev *vdev_handle)
5081 {
5082 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
5083 	struct dp_pdev *pdev;
5084 
5085 	pdev = vdev->pdev;
5086 
5087 	if ((pdev->fp_data_filter & FILTER_DATA_UCAST) ||
5088 	    (pdev->mo_data_filter & FILTER_DATA_UCAST))
5089 		return true;
5090 
5091 	return false;
5092 }
5093 
5094 /**
5095  * dp_vdev_get_filter_mcast_data() - get DP VDEV monitor mcast filter
5096  * @vdev_handle: Datapath VDEV handle
5097  * Return: true on mcast filter flag set
5098  */
5099 static bool dp_vdev_get_filter_mcast_data(struct cdp_vdev *vdev_handle)
5100 {
5101 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
5102 	struct dp_pdev *pdev;
5103 
5104 	pdev = vdev->pdev;
5105 
5106 	if ((pdev->fp_data_filter & FILTER_DATA_MCAST) ||
5107 	    (pdev->mo_data_filter & FILTER_DATA_MCAST))
5108 		return true;
5109 
5110 	return false;
5111 }
5112 
5113 /**
5114  * dp_vdev_get_filter_non_data() - get DP VDEV monitor non_data filter
5115  * @vdev_handle: Datapath VDEV handle
5116  * Return: true on non data filter flag set
5117  */
5118 static bool dp_vdev_get_filter_non_data(struct cdp_vdev *vdev_handle)
5119 {
5120 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
5121 	struct dp_pdev *pdev;
5122 
5123 	pdev = vdev->pdev;
5124 
5125 	if ((pdev->fp_mgmt_filter & FILTER_MGMT_ALL) ||
5126 	    (pdev->mo_mgmt_filter & FILTER_MGMT_ALL)) {
5127 		if ((pdev->fp_ctrl_filter & FILTER_CTRL_ALL) ||
5128 		    (pdev->mo_ctrl_filter & FILTER_CTRL_ALL)) {
5129 			return true;
5130 		}
5131 	}
5132 
5133 	return false;
5134 }
5135 
5136 #ifdef MESH_MODE_SUPPORT
5137 void dp_peer_set_mesh_mode(struct cdp_vdev *vdev_hdl, uint32_t val)
5138 {
5139 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_hdl;
5140 
5141 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
5142 		FL("val %d"), val);
5143 	vdev->mesh_vdev = val;
5144 }
5145 
5146 /*
5147  * dp_peer_set_mesh_rx_filter() - to set the mesh rx filter
5148  * @vdev_hdl: virtual device object
5149  * @val: value to be set
5150  *
5151  * Return: void
5152  */
5153 void dp_peer_set_mesh_rx_filter(struct cdp_vdev *vdev_hdl, uint32_t val)
5154 {
5155 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_hdl;
5156 
5157 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
5158 		FL("val %d"), val);
5159 	vdev->mesh_rx_filter = val;
5160 }
5161 #endif
5162 
5163 /*
5164  * dp_aggregate_pdev_ctrl_frames_stats() - function to aggregate peer stats
5165  * Current scope is the BAR received count
5166  *
5167  * @pdev_handle: DP_PDEV handle
5168  *
5169  * Return: void
5170  */
5171 #define STATS_PROC_TIMEOUT        (HZ/1000)
5172 
5173 static void
5174 dp_aggregate_pdev_ctrl_frames_stats(struct dp_pdev *pdev)
5175 {
5176 	struct dp_vdev *vdev;
5177 	struct dp_peer *peer;
5178 	uint32_t waitcnt;
5179 
5180 	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
5181 		TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
5182 			if (!peer) {
5183 				QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
5184 					FL("DP Invalid Peer reference"));
5185 				return;
5186 			}
5187 
5188 			if (peer->delete_in_progress) {
5189 				QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
5190 					FL("DP Peer deletion in progress"));
5191 				continue;
5192 			}
5193 
5194 			qdf_atomic_inc(&peer->ref_cnt);
5195 			waitcnt = 0;
5196 			dp_peer_rxtid_stats(peer, dp_rx_bar_stats_cb, pdev);
5197 			while (!(qdf_atomic_read(&(pdev->stats_cmd_complete)))
5198 				&& waitcnt < 10) {
5199 				schedule_timeout_interruptible(
5200 						STATS_PROC_TIMEOUT);
5201 				waitcnt++;
5202 			}
5203 			qdf_atomic_set(&(pdev->stats_cmd_complete), 0);
5204 			dp_peer_unref_delete(peer);
5205 		}
5206 	}
5207 }
5208 
5209 /**
5210  * dp_rx_bar_stats_cb(): BAR received stats callback
5211  * @soc: SOC handle
5212  * @cb_ctxt: Call back context
5213  * @reo_status: Reo status
5214  *
5215  * return: void
5216  */
5217 void dp_rx_bar_stats_cb(struct dp_soc *soc, void *cb_ctxt,
5218 	union hal_reo_status *reo_status)
5219 {
5220 	struct dp_pdev *pdev = (struct dp_pdev *)cb_ctxt;
5221 	struct hal_reo_queue_status *queue_status = &(reo_status->queue_status);
5222 
5223 	if (queue_status->header.status != HAL_REO_CMD_SUCCESS) {
5224 		DP_TRACE_STATS(FATAL, "REO stats failure %d \n",
5225 			queue_status->header.status);
5226 		qdf_atomic_set(&(pdev->stats_cmd_complete), 1);
5227 		return;
5228 	}
5229 
5230 	pdev->stats.rx.bar_recv_cnt += queue_status->bar_rcvd_cnt;
5231 	qdf_atomic_set(&(pdev->stats_cmd_complete), 1);
5232 
5233 }
5234 
5235 /**
5236  * dp_aggregate_vdev_stats(): Consolidate stats at VDEV level
5237  * @vdev: DP VDEV handle
5238  *
5239  * return: void
5240  */
5241 void dp_aggregate_vdev_stats(struct dp_vdev *vdev,
5242 			     struct cdp_vdev_stats *vdev_stats)
5243 {
5244 	struct dp_peer *peer = NULL;
5245 	struct dp_soc *soc = vdev->pdev->soc;
5246 
5247 	qdf_mem_copy(vdev_stats, &vdev->stats, sizeof(vdev->stats));
5248 
5249 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem)
5250 		dp_update_vdev_stats(vdev_stats, peer);
5251 
5252 	if (soc->cdp_soc.ol_ops->update_dp_stats)
5253 		soc->cdp_soc.ol_ops->update_dp_stats(vdev->pdev->ctrl_pdev,
5254 			&vdev->stats, (uint16_t) vdev->vdev_id,
5255 			UPDATE_VDEV_STATS);
5256 
5257 }
5258 
5259 /**
5260  * dp_aggregate_pdev_stats(): Consolidate stats at PDEV level
5261  * @pdev: DP PDEV handle
5262  *
5263  * return: void
5264  */
5265 static inline void dp_aggregate_pdev_stats(struct dp_pdev *pdev)
5266 {
5267 	struct dp_vdev *vdev = NULL;
5268 	struct dp_soc *soc = pdev->soc;
5269 	struct cdp_vdev_stats *vdev_stats =
5270 			qdf_mem_malloc(sizeof(struct cdp_vdev_stats));
5271 
5272 	if (!vdev_stats) {
5273 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
5274 			  "DP alloc failure - unable to allocate vdev stats");
5275 		return;
5276 	}
5277 
5278 	qdf_mem_set(&(pdev->stats.tx), sizeof(pdev->stats.tx), 0x0);
5279 	qdf_mem_set(&(pdev->stats.rx), sizeof(pdev->stats.rx), 0x0);
5280 	qdf_mem_set(&(pdev->stats.tx_i), sizeof(pdev->stats.tx_i), 0x0);
5281 
5282 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
5283 	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
5284 
5285 		dp_aggregate_vdev_stats(vdev, vdev_stats);
5286 		dp_update_pdev_stats(pdev, vdev_stats);
5287 
5288 		DP_STATS_AGGR_PKT(pdev, vdev, tx_i.nawds_mcast);
5289 
5290 		DP_STATS_AGGR_PKT(pdev, vdev, tx_i.rcvd);
5291 		DP_STATS_AGGR_PKT(pdev, vdev, tx_i.processed);
5292 		DP_STATS_AGGR_PKT(pdev, vdev, tx_i.reinject_pkts);
5293 		DP_STATS_AGGR_PKT(pdev, vdev, tx_i.inspect_pkts);
5294 		DP_STATS_AGGR_PKT(pdev, vdev, tx_i.raw.raw_pkt);
5295 		DP_STATS_AGGR(pdev, vdev, tx_i.raw.dma_map_error);
5296 		DP_STATS_AGGR_PKT(pdev, vdev, tx_i.tso.tso_pkt);
5297 		DP_STATS_AGGR(pdev, vdev, tx_i.tso.dropped_host.num);
5298 		DP_STATS_AGGR(pdev, vdev, tx_i.tso.dropped_target);
5299 		DP_STATS_AGGR(pdev, vdev, tx_i.sg.dropped_host.num);
5300 		DP_STATS_AGGR(pdev, vdev, tx_i.sg.dropped_target);
5301 		DP_STATS_AGGR_PKT(pdev, vdev, tx_i.sg.sg_pkt);
5302 		DP_STATS_AGGR_PKT(pdev, vdev, tx_i.mcast_en.mcast_pkt);
5303 		DP_STATS_AGGR(pdev, vdev,
5304 				tx_i.mcast_en.dropped_map_error);
5305 		DP_STATS_AGGR(pdev, vdev,
5306 				tx_i.mcast_en.dropped_self_mac);
5307 		DP_STATS_AGGR(pdev, vdev,
5308 				tx_i.mcast_en.dropped_send_fail);
5309 		DP_STATS_AGGR(pdev, vdev, tx_i.mcast_en.ucast);
5310 		DP_STATS_AGGR(pdev, vdev, tx_i.dropped.dma_error);
5311 		DP_STATS_AGGR(pdev, vdev, tx_i.dropped.ring_full);
5312 		DP_STATS_AGGR(pdev, vdev, tx_i.dropped.enqueue_fail);
5313 		DP_STATS_AGGR(pdev, vdev, tx_i.dropped.desc_na.num);
5314 		DP_STATS_AGGR(pdev, vdev, tx_i.dropped.res_full);
5315 		DP_STATS_AGGR(pdev, vdev, tx_i.cce_classified);
5316 		DP_STATS_AGGR(pdev, vdev, tx_i.cce_classified_raw);
5317 		DP_STATS_AGGR(pdev, vdev, tx_i.mesh.exception_fw);
5318 		DP_STATS_AGGR(pdev, vdev, tx_i.mesh.completion_fw);
5319 
5320 		pdev->stats.tx_i.dropped.dropped_pkt.num =
5321 			pdev->stats.tx_i.dropped.dma_error +
5322 			pdev->stats.tx_i.dropped.ring_full +
5323 			pdev->stats.tx_i.dropped.enqueue_fail +
5324 			pdev->stats.tx_i.dropped.desc_na.num +
5325 			pdev->stats.tx_i.dropped.res_full;
5326 
5327 		pdev->stats.tx.last_ack_rssi =
5328 			vdev->stats.tx.last_ack_rssi;
5329 		pdev->stats.tx_i.tso.num_seg =
5330 			vdev->stats.tx_i.tso.num_seg;
5331 	}
5332 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
5333 	qdf_mem_free(vdev_stats);
5334 
5335 	if (soc->cdp_soc.ol_ops->update_dp_stats)
5336 		soc->cdp_soc.ol_ops->update_dp_stats(pdev->ctrl_pdev,
5337 				&pdev->stats, pdev->pdev_id, UPDATE_PDEV_STATS);
5338 
5339 }
5340 
5341 /**
5342  * dp_vdev_getstats() - get vdev packet level stats
5343  * @vdev_handle: Datapath VDEV handle
5344  * @stats: cdp network device stats structure
5345  *
5346  * Return: void
5347  */
5348 static void dp_vdev_getstats(void *vdev_handle,
5349 		struct cdp_dev_stats *stats)
5350 {
5351 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
5352 	struct cdp_vdev_stats *vdev_stats =
5353 			qdf_mem_malloc(sizeof(struct cdp_vdev_stats));
5354 
5355 	if (!vdev_stats) {
5356 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
5357 			  "DP alloc failure - unable to allocate vdev stats");
5358 		return;
5359 	}
5360 
5361 	dp_aggregate_vdev_stats(vdev, vdev_stats);
5362 
5363 	stats->tx_packets = vdev_stats->tx_i.rcvd.num;
5364 	stats->tx_bytes = vdev_stats->tx_i.rcvd.bytes;
5365 
5366 	stats->tx_errors = vdev_stats->tx.tx_failed +
5367 		vdev_stats->tx_i.dropped.dropped_pkt.num;
5368 	stats->tx_dropped = stats->tx_errors;
5369 
5370 	stats->rx_packets = vdev_stats->rx.unicast.num +
5371 		vdev_stats->rx.multicast.num +
5372 		vdev_stats->rx.bcast.num;
5373 	stats->rx_bytes = vdev_stats->rx.unicast.bytes +
5374 		vdev_stats->rx.multicast.bytes +
5375 		vdev_stats->rx.bcast.bytes;
5376 
5377 }
5378 
5379 
5380 /**
5381  * dp_pdev_getstats() - get pdev packet level stats
5382  * @pdev_handle: Datapath PDEV handle
5383  * @stats: cdp network device stats structure
5384  *
5385  * Return: void
5386  */
5387 static void dp_pdev_getstats(void *pdev_handle,
5388 		struct cdp_dev_stats *stats)
5389 {
5390 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
5391 
5392 	dp_aggregate_pdev_stats(pdev);
5393 
5394 	stats->tx_packets = pdev->stats.tx_i.rcvd.num;
5395 	stats->tx_bytes = pdev->stats.tx_i.rcvd.bytes;
5396 
5397 	stats->tx_errors = pdev->stats.tx.tx_failed +
5398 		pdev->stats.tx_i.dropped.dropped_pkt.num;
5399 	stats->tx_dropped = stats->tx_errors;
5400 
5401 	stats->rx_packets = pdev->stats.rx.unicast.num +
5402 		pdev->stats.rx.multicast.num +
5403 		pdev->stats.rx.bcast.num;
5404 	stats->rx_bytes = pdev->stats.rx.unicast.bytes +
5405 		pdev->stats.rx.multicast.bytes +
5406 		pdev->stats.rx.bcast.bytes;
5407 }
5408 
5409 /**
5410  * dp_get_device_stats() - get interface level packet stats
5411  * @handle: device handle
5412  * @stats: cdp network device stats structure
5413  * @type: device type pdev/vdev
5414  *
5415  * Return: void
5416  */
5417 static void dp_get_device_stats(void *handle,
5418 		struct cdp_dev_stats *stats, uint8_t type)
5419 {
5420 	switch (type) {
5421 	case UPDATE_VDEV_STATS:
5422 		dp_vdev_getstats(handle, stats);
5423 		break;
5424 	case UPDATE_PDEV_STATS:
5425 		dp_pdev_getstats(handle, stats);
5426 		break;
5427 	default:
5428 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
5429 			"apstats cannot be updated for this input "
5430 			"type %d", type);
5431 		break;
5432 	}
5433 
5434 }
5435 
5436 
5437 /**
5438  * dp_print_pdev_tx_stats(): Print Pdev level TX stats
5439  * @pdev: DP_PDEV Handle
5440  *
5441  * Return:void
5442  */
5443 static inline void
5444 dp_print_pdev_tx_stats(struct dp_pdev *pdev)
5445 {
5446 	uint8_t index = 0;
5447 	DP_PRINT_STATS("PDEV Tx Stats:\n");
5448 	DP_PRINT_STATS("Received From Stack:");
5449 	DP_PRINT_STATS("	Packets = %d",
5450 			pdev->stats.tx_i.rcvd.num);
5451 	DP_PRINT_STATS("	Bytes = %llu",
5452 			pdev->stats.tx_i.rcvd.bytes);
5453 	DP_PRINT_STATS("Processed:");
5454 	DP_PRINT_STATS("	Packets = %d",
5455 			pdev->stats.tx_i.processed.num);
5456 	DP_PRINT_STATS("	Bytes = %llu",
5457 			pdev->stats.tx_i.processed.bytes);
5458 	DP_PRINT_STATS("Total Completions:");
5459 	DP_PRINT_STATS("	Packets = %u",
5460 			pdev->stats.tx.comp_pkt.num);
5461 	DP_PRINT_STATS("	Bytes = %llu",
5462 			pdev->stats.tx.comp_pkt.bytes);
5463 	DP_PRINT_STATS("Successful Completions:");
5464 	DP_PRINT_STATS("	Packets = %u",
5465 			pdev->stats.tx.tx_success.num);
5466 	DP_PRINT_STATS("	Bytes = %llu",
5467 			pdev->stats.tx.tx_success.bytes);
5468 	DP_PRINT_STATS("Dropped:");
5469 	DP_PRINT_STATS("	Total = %d",
5470 			pdev->stats.tx_i.dropped.dropped_pkt.num);
5471 	DP_PRINT_STATS("	Dma_map_error = %d",
5472 			pdev->stats.tx_i.dropped.dma_error);
5473 	DP_PRINT_STATS("	Ring Full = %d",
5474 			pdev->stats.tx_i.dropped.ring_full);
5475 	DP_PRINT_STATS("	Descriptor Not available = %d",
5476 			pdev->stats.tx_i.dropped.desc_na.num);
5477 	DP_PRINT_STATS("	HW enqueue failed= %d",
5478 			pdev->stats.tx_i.dropped.enqueue_fail);
5479 	DP_PRINT_STATS("	Resources Full = %d",
5480 			pdev->stats.tx_i.dropped.res_full);
5481 	DP_PRINT_STATS("	FW removed = %d",
5482 			pdev->stats.tx.dropped.fw_rem);
5483 	DP_PRINT_STATS("	FW removed transmitted = %d",
5484 			pdev->stats.tx.dropped.fw_rem_tx);
5485 	DP_PRINT_STATS("	FW removed untransmitted = %d",
5486 			pdev->stats.tx.dropped.fw_rem_notx);
5487 	DP_PRINT_STATS("	FW removed untransmitted fw_reason1 = %d",
5488 			pdev->stats.tx.dropped.fw_reason1);
5489 	DP_PRINT_STATS("	FW removed untransmitted fw_reason2 = %d",
5490 			pdev->stats.tx.dropped.fw_reason2);
5491 	DP_PRINT_STATS("	FW removed untransmitted fw_reason3 = %d",
5492 			pdev->stats.tx.dropped.fw_reason3);
5493 	DP_PRINT_STATS("	Aged Out from msdu/mpdu queues = %d",
5494 			pdev->stats.tx.dropped.age_out);
5495 	DP_PRINT_STATS("	Multicast:");
5496 	DP_PRINT_STATS("	Packets: %u",
5497 		       pdev->stats.tx.mcast.num);
5498 	DP_PRINT_STATS("	Bytes: %llu",
5499 		       pdev->stats.tx.mcast.bytes);
5500 	DP_PRINT_STATS("Scatter Gather:");
5501 	DP_PRINT_STATS("	Packets = %d",
5502 			pdev->stats.tx_i.sg.sg_pkt.num);
5503 	DP_PRINT_STATS("	Bytes = %llu",
5504 			pdev->stats.tx_i.sg.sg_pkt.bytes);
5505 	DP_PRINT_STATS("	Dropped By Host = %d",
5506 			pdev->stats.tx_i.sg.dropped_host.num);
5507 	DP_PRINT_STATS("	Dropped By Target = %d",
5508 			pdev->stats.tx_i.sg.dropped_target);
5509 	DP_PRINT_STATS("TSO:");
5510 	DP_PRINT_STATS("	Number of Segments = %d",
5511 			pdev->stats.tx_i.tso.num_seg);
5512 	DP_PRINT_STATS("	Packets = %d",
5513 			pdev->stats.tx_i.tso.tso_pkt.num);
5514 	DP_PRINT_STATS("	Bytes = %llu",
5515 			pdev->stats.tx_i.tso.tso_pkt.bytes);
5516 	DP_PRINT_STATS("	Dropped By Host = %d",
5517 			pdev->stats.tx_i.tso.dropped_host.num);
5518 	DP_PRINT_STATS("Mcast Enhancement:");
5519 	DP_PRINT_STATS("	Packets = %d",
5520 			pdev->stats.tx_i.mcast_en.mcast_pkt.num);
5521 	DP_PRINT_STATS("	Bytes = %llu",
5522 			pdev->stats.tx_i.mcast_en.mcast_pkt.bytes);
5523 	DP_PRINT_STATS("	Dropped: Map Errors = %d",
5524 			pdev->stats.tx_i.mcast_en.dropped_map_error);
5525 	DP_PRINT_STATS("	Dropped: Self Mac = %d",
5526 			pdev->stats.tx_i.mcast_en.dropped_self_mac);
5527 	DP_PRINT_STATS("	Dropped: Send Fail = %d",
5528 			pdev->stats.tx_i.mcast_en.dropped_send_fail);
5529 	DP_PRINT_STATS("	Unicast sent = %d",
5530 			pdev->stats.tx_i.mcast_en.ucast);
5531 	DP_PRINT_STATS("Raw:");
5532 	DP_PRINT_STATS("	Packets = %d",
5533 			pdev->stats.tx_i.raw.raw_pkt.num);
5534 	DP_PRINT_STATS("	Bytes = %llu",
5535 			pdev->stats.tx_i.raw.raw_pkt.bytes);
5536 	DP_PRINT_STATS("	DMA map error = %d",
5537 			pdev->stats.tx_i.raw.dma_map_error);
5538 	DP_PRINT_STATS("Reinjected:");
5539 	DP_PRINT_STATS("	Packets = %d",
5540 			pdev->stats.tx_i.reinject_pkts.num);
5541 	DP_PRINT_STATS("	Bytes = %llu\n",
5542 			pdev->stats.tx_i.reinject_pkts.bytes);
5543 	DP_PRINT_STATS("Inspected:");
5544 	DP_PRINT_STATS("	Packets = %d",
5545 			pdev->stats.tx_i.inspect_pkts.num);
5546 	DP_PRINT_STATS("	Bytes = %llu",
5547 			pdev->stats.tx_i.inspect_pkts.bytes);
5548 	DP_PRINT_STATS("Nawds Multicast:");
5549 	DP_PRINT_STATS("	Packets = %d",
5550 			pdev->stats.tx_i.nawds_mcast.num);
5551 	DP_PRINT_STATS("	Bytes = %llu",
5552 			pdev->stats.tx_i.nawds_mcast.bytes);
5553 	DP_PRINT_STATS("CCE Classified:");
5554 	DP_PRINT_STATS("	CCE Classified Packets: %u",
5555 			pdev->stats.tx_i.cce_classified);
5556 	DP_PRINT_STATS("	RAW CCE Classified Packets: %u",
5557 			pdev->stats.tx_i.cce_classified_raw);
5558 	DP_PRINT_STATS("Mesh stats:");
5559 	DP_PRINT_STATS("	frames to firmware: %u",
5560 			pdev->stats.tx_i.mesh.exception_fw);
5561 	DP_PRINT_STATS("	completions from fw: %u",
5562 			pdev->stats.tx_i.mesh.completion_fw);
5563 	DP_PRINT_STATS("PPDU stats counter");
5564 	for (index = 0; index < CDP_PPDU_STATS_MAX_TAG; index++) {
5565 		DP_PRINT_STATS("	Tag[%d] = %llu", index,
5566 				pdev->stats.ppdu_stats_counter[index]);
5567 	}
5568 }
5569 
5570 /**
5571  * dp_print_pdev_rx_stats(): Print Pdev level RX stats
5572  * @pdev: DP_PDEV Handle
5573  *
5574  * Return: void
5575  */
5576 static inline void
5577 dp_print_pdev_rx_stats(struct dp_pdev *pdev)
5578 {
5579 	DP_PRINT_STATS("PDEV Rx Stats:\n");
5580 	DP_PRINT_STATS("Received From HW (Per Rx Ring):");
5581 	DP_PRINT_STATS("	Packets = %d %d %d %d",
5582 			pdev->stats.rx.rcvd_reo[0].num,
5583 			pdev->stats.rx.rcvd_reo[1].num,
5584 			pdev->stats.rx.rcvd_reo[2].num,
5585 			pdev->stats.rx.rcvd_reo[3].num);
5586 	DP_PRINT_STATS("	Bytes = %llu %llu %llu %llu",
5587 			pdev->stats.rx.rcvd_reo[0].bytes,
5588 			pdev->stats.rx.rcvd_reo[1].bytes,
5589 			pdev->stats.rx.rcvd_reo[2].bytes,
5590 			pdev->stats.rx.rcvd_reo[3].bytes);
5591 	DP_PRINT_STATS("Replenished:");
5592 	DP_PRINT_STATS("	Packets = %d",
5593 			pdev->stats.replenish.pkts.num);
5594 	DP_PRINT_STATS("	Bytes = %llu",
5595 			pdev->stats.replenish.pkts.bytes);
5596 	DP_PRINT_STATS("	Buffers Added To Freelist = %d",
5597 			pdev->stats.buf_freelist);
5598 	DP_PRINT_STATS("	Low threshold intr = %d",
5599 			pdev->stats.replenish.low_thresh_intrs);
5600 	DP_PRINT_STATS("Dropped:");
5601 	DP_PRINT_STATS("	msdu_not_done = %d",
5602 			pdev->stats.dropped.msdu_not_done);
5603 	DP_PRINT_STATS("        mon_rx_drop = %d",
5604 			pdev->stats.dropped.mon_rx_drop);
5605 	DP_PRINT_STATS("Sent To Stack:");
5606 	DP_PRINT_STATS("	Packets = %d",
5607 			pdev->stats.rx.to_stack.num);
5608 	DP_PRINT_STATS("	Bytes = %llu",
5609 			pdev->stats.rx.to_stack.bytes);
5610 	DP_PRINT_STATS("Multicast/Broadcast:");
5611 	DP_PRINT_STATS("	Packets = %d",
5612 			(pdev->stats.rx.multicast.num +
5613 			pdev->stats.rx.bcast.num));
5614 	DP_PRINT_STATS("	Bytes = %llu",
5615 			(pdev->stats.rx.multicast.bytes +
5616 			pdev->stats.rx.bcast.bytes));
5617 	DP_PRINT_STATS("Errors:");
5618 	DP_PRINT_STATS("	Rxdma Ring Un-initialized = %d",
5619 			pdev->stats.replenish.rxdma_err);
5620 	DP_PRINT_STATS("	Desc Alloc Failed: = %d",
5621 			pdev->stats.err.desc_alloc_fail);
5622 	DP_PRINT_STATS("	IP checksum error = %d",
5623 		       pdev->stats.err.ip_csum_err);
5624 	DP_PRINT_STATS("	TCP/UDP checksum error = %d",
5625 		       pdev->stats.err.tcp_udp_csum_err);
5626 
5627 	/* Get bar_recv_cnt */
5628 	dp_aggregate_pdev_ctrl_frames_stats(pdev);
5629 	DP_PRINT_STATS("BAR Received Count: = %d",
5630 			pdev->stats.rx.bar_recv_cnt);
5631 
5632 }
5633 
5634 /**
5635  * dp_print_pdev_rx_mon_stats(): Print Pdev level RX monitor stats
5636  * @pdev: DP_PDEV Handle
5637  *
5638  * Return: void
5639  */
5640 static inline void
5641 dp_print_pdev_rx_mon_stats(struct dp_pdev *pdev)
5642 {
5643 	struct cdp_pdev_mon_stats *rx_mon_stats;
5644 
5645 	rx_mon_stats = &pdev->rx_mon_stats;
5646 
5647 	DP_PRINT_STATS("PDEV Rx Monitor Stats:\n");
5648 
5649 	dp_rx_mon_print_dbg_ppdu_stats(rx_mon_stats);
5650 
5651 	DP_PRINT_STATS("status_ppdu_done_cnt = %d",
5652 		       rx_mon_stats->status_ppdu_done);
5653 	DP_PRINT_STATS("dest_ppdu_done_cnt = %d",
5654 		       rx_mon_stats->dest_ppdu_done);
5655 	DP_PRINT_STATS("dest_mpdu_done_cnt = %d",
5656 		       rx_mon_stats->dest_mpdu_done);
5657 	DP_PRINT_STATS("dest_mpdu_drop_cnt = %d",
5658 		       rx_mon_stats->dest_mpdu_drop);
5659 }
5660 
5661 /**
5662  * dp_print_soc_tx_stats(): Print SOC level Tx stats
5663  * @soc: DP_SOC Handle
5664  *
5665  * Return: void
5666  */
5667 static inline void
5668 dp_print_soc_tx_stats(struct dp_soc *soc)
5669 {
5670 	uint8_t desc_pool_id;
5671 	soc->stats.tx.desc_in_use = 0;
5672 
5673 	DP_PRINT_STATS("SOC Tx Stats:\n");
5674 
5675 	for (desc_pool_id = 0;
5676 	     desc_pool_id < wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
5677 	     desc_pool_id++)
5678 		soc->stats.tx.desc_in_use +=
5679 			soc->tx_desc[desc_pool_id].num_allocated;
5680 
5681 	DP_PRINT_STATS("Tx Descriptors In Use = %d",
5682 			soc->stats.tx.desc_in_use);
5683 	DP_PRINT_STATS("Invalid peer:");
5684 	DP_PRINT_STATS("	Packets = %d",
5685 			soc->stats.tx.tx_invalid_peer.num);
5686 	DP_PRINT_STATS("	Bytes = %llu",
5687 			soc->stats.tx.tx_invalid_peer.bytes);
5688 	DP_PRINT_STATS("Packets dropped due to TCL ring full = %d %d %d",
5689 			soc->stats.tx.tcl_ring_full[0],
5690 			soc->stats.tx.tcl_ring_full[1],
5691 			soc->stats.tx.tcl_ring_full[2]);
5692 
5693 }
5694 /**
5695  * dp_print_soc_rx_stats: Print SOC level Rx stats
5696  * @soc: DP_SOC Handle
5697  *
5698  * Return:void
5699  */
5700 static inline void
5701 dp_print_soc_rx_stats(struct dp_soc *soc)
5702 {
5703 	uint32_t i;
5704 	char reo_error[DP_REO_ERR_LENGTH];
5705 	char rxdma_error[DP_RXDMA_ERR_LENGTH];
5706 	uint8_t index = 0;
5707 
5708 	DP_PRINT_STATS("SOC Rx Stats:\n");
5709 	DP_PRINT_STATS("Errors:\n");
5710 	DP_PRINT_STATS("Rx Decrypt Errors = %d",
5711 			(soc->stats.rx.err.rxdma_error[HAL_RXDMA_ERR_DECRYPT] +
5712 			soc->stats.rx.err.rxdma_error[HAL_RXDMA_ERR_TKIP_MIC]));
5713 	DP_PRINT_STATS("Invalid RBM = %d",
5714 			soc->stats.rx.err.invalid_rbm);
5715 	DP_PRINT_STATS("Invalid Vdev = %d",
5716 			soc->stats.rx.err.invalid_vdev);
5717 	DP_PRINT_STATS("Invalid Pdev = %d",
5718 			soc->stats.rx.err.invalid_pdev);
5719 	DP_PRINT_STATS("Invalid Peer = %d",
5720 			soc->stats.rx.err.rx_invalid_peer.num);
5721 	DP_PRINT_STATS("HAL Ring Access Fail = %d",
5722 			soc->stats.rx.err.hal_ring_access_fail);
5723 
5724 	for (i = 0; i < HAL_RXDMA_ERR_MAX; i++) {
5725 		index += qdf_snprint(&rxdma_error[index],
5726 				DP_RXDMA_ERR_LENGTH - index,
5727 				" %d", soc->stats.rx.err.rxdma_error[i]);
5728 	}
5729 	DP_PRINT_STATS("RXDMA Error (0-31):%s",
5730 			rxdma_error);
5731 
5732 	index = 0;
5733 	for (i = 0; i < HAL_REO_ERR_MAX; i++) {
5734 		index += qdf_snprint(&reo_error[index],
5735 				DP_REO_ERR_LENGTH - index,
5736 				" %d", soc->stats.rx.err.reo_error[i]);
5737 	}
5738 	DP_PRINT_STATS("REO Error (0-14):%s",
5739 			reo_error);
5740 }
5741 
5742 
5743 /**
5744  * dp_print_ring_stat_from_hal(): Print hal level ring stats
5745  * @soc: DP_SOC handle
5746  * @srng: DP_SRNG handle
5747  * @ring_name: SRNG name
5748  *
5749  * Return: void
5750  */
5751 static inline void
5752 dp_print_ring_stat_from_hal(struct dp_soc *soc,  struct dp_srng *srng,
5753 	char *ring_name)
5754 {
5755 	uint32_t tailp;
5756 	uint32_t headp;
5757 
5758 	if (srng->hal_srng != NULL) {
5759 		hal_api_get_tphp(soc->hal_soc, srng->hal_srng, &tailp, &headp);
5760 		DP_PRINT_STATS("%s : Head pointer = %d  Tail Pointer = %d\n",
5761 				ring_name, headp, tailp);
5762 	}
5763 }
5764 
5765 /**
5766  * dp_print_ring_stats(): Print tail and head pointer
5767  * @pdev: DP_PDEV handle
5768  *
5769  * Return:void
5770  */
5771 static inline void
5772 dp_print_ring_stats(struct dp_pdev *pdev)
5773 {
5774 	uint32_t i;
5775 	char ring_name[STR_MAXLEN + 1];
5776 	int mac_id;
5777 
5778 	dp_print_ring_stat_from_hal(pdev->soc,
5779 			&pdev->soc->reo_exception_ring,
5780 			"Reo Exception Ring");
5781 	dp_print_ring_stat_from_hal(pdev->soc,
5782 			&pdev->soc->reo_reinject_ring,
5783 			"Reo Inject Ring");
5784 	dp_print_ring_stat_from_hal(pdev->soc,
5785 			&pdev->soc->reo_cmd_ring,
5786 			"Reo Command Ring");
5787 	dp_print_ring_stat_from_hal(pdev->soc,
5788 			&pdev->soc->reo_status_ring,
5789 			"Reo Status Ring");
5790 	dp_print_ring_stat_from_hal(pdev->soc,
5791 			&pdev->soc->rx_rel_ring,
5792 			"Rx Release ring");
5793 	dp_print_ring_stat_from_hal(pdev->soc,
5794 			&pdev->soc->tcl_cmd_ring,
5795 			"Tcl command Ring");
5796 	dp_print_ring_stat_from_hal(pdev->soc,
5797 			&pdev->soc->tcl_status_ring,
5798 			"Tcl Status Ring");
5799 	dp_print_ring_stat_from_hal(pdev->soc,
5800 			&pdev->soc->wbm_desc_rel_ring,
5801 			"Wbm Desc Rel Ring");
5802 	for (i = 0; i < MAX_REO_DEST_RINGS; i++) {
5803 		snprintf(ring_name, STR_MAXLEN, "Reo Dest Ring %d", i);
5804 		dp_print_ring_stat_from_hal(pdev->soc,
5805 				&pdev->soc->reo_dest_ring[i],
5806 				ring_name);
5807 	}
5808 	for (i = 0; i < pdev->soc->num_tcl_data_rings; i++) {
5809 		snprintf(ring_name, STR_MAXLEN, "Tcl Data Ring %d", i);
5810 		dp_print_ring_stat_from_hal(pdev->soc,
5811 				&pdev->soc->tcl_data_ring[i],
5812 				ring_name);
5813 	}
5814 	for (i = 0; i < MAX_TCL_DATA_RINGS; i++) {
5815 		snprintf(ring_name, STR_MAXLEN, "Tx Comp Ring %d", i);
5816 		dp_print_ring_stat_from_hal(pdev->soc,
5817 				&pdev->soc->tx_comp_ring[i],
5818 				ring_name);
5819 	}
5820 	dp_print_ring_stat_from_hal(pdev->soc,
5821 			&pdev->rx_refill_buf_ring,
5822 			"Rx Refill Buf Ring");
5823 
5824 	dp_print_ring_stat_from_hal(pdev->soc,
5825 			&pdev->rx_refill_buf_ring2,
5826 			"Second Rx Refill Buf Ring");
5827 
5828 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
5829 		dp_print_ring_stat_from_hal(pdev->soc,
5830 				&pdev->rxdma_mon_buf_ring[mac_id],
5831 				"Rxdma Mon Buf Ring");
5832 		dp_print_ring_stat_from_hal(pdev->soc,
5833 				&pdev->rxdma_mon_dst_ring[mac_id],
5834 				"Rxdma Mon Dst Ring");
5835 		dp_print_ring_stat_from_hal(pdev->soc,
5836 				&pdev->rxdma_mon_status_ring[mac_id],
5837 				"Rxdma Mon Status Ring");
5838 		dp_print_ring_stat_from_hal(pdev->soc,
5839 				&pdev->rxdma_mon_desc_ring[mac_id],
5840 				"Rxdma mon desc Ring");
5841 	}
5842 
5843 	for (i = 0; i < NUM_RXDMA_RINGS_PER_PDEV; i++) {
5844 		snprintf(ring_name, STR_MAXLEN, "Rxdma err dst ring %d", i);
5845 		dp_print_ring_stat_from_hal(pdev->soc,
5846 			&pdev->rxdma_err_dst_ring[i],
5847 			ring_name);
5848 	}
5849 
5850 	for (i = 0; i < MAX_RX_MAC_RINGS; i++) {
5851 		snprintf(ring_name, STR_MAXLEN, "Rx mac buf ring %d", i);
5852 		dp_print_ring_stat_from_hal(pdev->soc,
5853 				&pdev->rx_mac_buf_ring[i],
5854 				ring_name);
5855 	}
5856 }
5857 
5858 /**
5859  * dp_txrx_host_stats_clr(): Reinitialize the txrx stats
5860  * @vdev: DP_VDEV handle
5861  *
5862  * Return:void
5863  */
5864 static inline void
5865 dp_txrx_host_stats_clr(struct dp_vdev *vdev)
5866 {
5867 	struct dp_peer *peer = NULL;
5868 	struct dp_soc *soc = (struct dp_soc *)vdev->pdev->soc;
5869 
5870 	DP_STATS_CLR(vdev->pdev);
5871 	DP_STATS_CLR(vdev->pdev->soc);
5872 	DP_STATS_CLR(vdev);
5873 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
5874 		if (!peer)
5875 			return;
5876 		DP_STATS_CLR(peer);
5877 
5878 		if (soc->cdp_soc.ol_ops->update_dp_stats) {
5879 			soc->cdp_soc.ol_ops->update_dp_stats(
5880 					vdev->pdev->ctrl_pdev,
5881 					&peer->stats,
5882 					peer->peer_ids[0],
5883 					UPDATE_PEER_STATS);
5884 		}
5885 
5886 	}
5887 
5888 	if (soc->cdp_soc.ol_ops->update_dp_stats)
5889 		soc->cdp_soc.ol_ops->update_dp_stats(vdev->pdev->ctrl_pdev,
5890 				&vdev->stats, (uint16_t)vdev->vdev_id,
5891 				UPDATE_VDEV_STATS);
5892 }
5893 
5894 /**
5895  * dp_print_rx_rates(): Print Rx rate stats
5896  * @vdev: DP_VDEV handle
5897  *
5898  * Return:void
5899  */
5900 static inline void
5901 dp_print_rx_rates(struct dp_vdev *vdev)
5902 {
5903 	struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;
5904 	uint8_t i, mcs, pkt_type;
5905 	uint8_t index = 0;
5906 	char nss[DP_NSS_LENGTH];
5907 
5908 	DP_PRINT_STATS("Rx Rate Info:\n");
5909 
5910 	for (pkt_type = 0; pkt_type < DOT11_MAX; pkt_type++) {
5911 		index = 0;
5912 		for (mcs = 0; mcs < MAX_MCS; mcs++) {
5913 			if (!dp_rate_string[pkt_type][mcs].valid)
5914 				continue;
5915 
5916 			DP_PRINT_STATS("	%s = %d",
5917 					dp_rate_string[pkt_type][mcs].mcs_type,
5918 					pdev->stats.rx.pkt_type[pkt_type].
5919 					mcs_count[mcs]);
5920 		}
5921 
5922 		DP_PRINT_STATS("\n");
5923 	}
5924 
5925 	index = 0;
5926 	for (i = 0; i < SS_COUNT; i++) {
5927 		index += qdf_snprint(&nss[index], DP_NSS_LENGTH - index,
5928 				" %d", pdev->stats.rx.nss[i]);
5929 	}
5930 	DP_PRINT_STATS("NSS(1-8) = %s",
5931 			nss);
5932 
5933 	DP_PRINT_STATS("SGI ="
5934 			" 0.8us %d,"
5935 			" 0.4us %d,"
5936 			" 1.6us %d,"
5937 			" 3.2us %d,",
5938 			pdev->stats.rx.sgi_count[0],
5939 			pdev->stats.rx.sgi_count[1],
5940 			pdev->stats.rx.sgi_count[2],
5941 			pdev->stats.rx.sgi_count[3]);
5942 	DP_PRINT_STATS("BW Counts = 20MHZ %d, 40MHZ %d, 80MHZ %d, 160MHZ %d",
5943 			pdev->stats.rx.bw[0], pdev->stats.rx.bw[1],
5944 			pdev->stats.rx.bw[2], pdev->stats.rx.bw[3]);
5945 	DP_PRINT_STATS("Reception Type ="
5946 			" SU: %d,"
5947 			" MU_MIMO:%d,"
5948 			" MU_OFDMA:%d,"
5949 			" MU_OFDMA_MIMO:%d\n",
5950 			pdev->stats.rx.reception_type[0],
5951 			pdev->stats.rx.reception_type[1],
5952 			pdev->stats.rx.reception_type[2],
5953 			pdev->stats.rx.reception_type[3]);
5954 	DP_PRINT_STATS("Aggregation:\n");
5955 	DP_PRINT_STATS("Number of Msdu's Part of Ampdus = %d",
5956 			pdev->stats.rx.ampdu_cnt);
5957 	DP_PRINT_STATS("Number of Msdu's With No Mpdu Level Aggregation : %d",
5958 			pdev->stats.rx.non_ampdu_cnt);
5959 	DP_PRINT_STATS("Number of Msdu's Part of Amsdu: %d",
5960 			pdev->stats.rx.amsdu_cnt);
5961 	DP_PRINT_STATS("Number of Msdu's With No Msdu Level Aggregation: %d",
5962 			pdev->stats.rx.non_amsdu_cnt);
5963 }
5964 
5965 /**
5966  * dp_print_tx_rates(): Print tx rates
5967  * @vdev: DP_VDEV handle
5968  *
5969  * Return:void
5970  */
5971 static inline void
5972 dp_print_tx_rates(struct dp_vdev *vdev)
5973 {
5974 	struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;
5975 	uint8_t mcs, pkt_type;
5976 	uint8_t index;
5977 	char nss[DP_NSS_LENGTH];
5978 	int nss_index;
5979 
5980 	DP_PRINT_STATS("Tx Rate Info:\n");
5981 
5982 	for (pkt_type = 0; pkt_type < DOT11_MAX; pkt_type++) {
5983 		index = 0;
5984 		for (mcs = 0; mcs < MAX_MCS; mcs++) {
5985 			if (!dp_rate_string[pkt_type][mcs].valid)
5986 				continue;
5987 
5988 			DP_PRINT_STATS("	%s = %d",
5989 					dp_rate_string[pkt_type][mcs].mcs_type,
5990 					pdev->stats.tx.pkt_type[pkt_type].
5991 					mcs_count[mcs]);
5992 		}
5993 
5994 		DP_PRINT_STATS("\n");
5995 	}
5996 
5997 	DP_PRINT_STATS("SGI ="
5998 			" 0.8us %d"
5999 			" 0.4us %d"
6000 			" 1.6us %d"
6001 			" 3.2us %d",
6002 			pdev->stats.tx.sgi_count[0],
6003 			pdev->stats.tx.sgi_count[1],
6004 			pdev->stats.tx.sgi_count[2],
6005 			pdev->stats.tx.sgi_count[3]);
6006 
6007 	DP_PRINT_STATS("BW Counts = 20MHZ %d, 40MHZ %d, 80MHZ %d, 160MHZ %d",
6008 			pdev->stats.tx.bw[0], pdev->stats.tx.bw[1],
6009 			pdev->stats.tx.bw[2], pdev->stats.tx.bw[3]);
6010 
6011 	index = 0;
6012 	for (nss_index = 0; nss_index < SS_COUNT; nss_index++) {
6013 		index += qdf_snprint(&nss[index], DP_NSS_LENGTH - index,
6014 				" %d", pdev->stats.tx.nss[nss_index]);
6015 	}
6016 
6017 	DP_PRINT_STATS("NSS(1-8) = %s", nss);
6018 	DP_PRINT_STATS("OFDMA = %d", pdev->stats.tx.ofdma);
6019 	DP_PRINT_STATS("STBC = %d", pdev->stats.tx.stbc);
6020 	DP_PRINT_STATS("LDPC = %d", pdev->stats.tx.ldpc);
6021 	DP_PRINT_STATS("Retries = %d", pdev->stats.tx.retries);
6022 	DP_PRINT_STATS("Last ack rssi = %d\n", pdev->stats.tx.last_ack_rssi);
6023 
6024 	DP_PRINT_STATS("Aggregation:\n");
6025 	DP_PRINT_STATS("Number of Msdu's Part of Amsdu = %d",
6026 			pdev->stats.tx.amsdu_cnt);
6027 	DP_PRINT_STATS("Number of Msdu's With No Msdu Level Aggregation = %d",
6028 			pdev->stats.tx.non_amsdu_cnt);
6029 }
6030 
6031 /**
6032  * dp_print_peer_stats(): print peer stats
6033  * @peer: DP_PEER handle
6034  *
6035  * return: void
6036  */
6037 static inline void dp_print_peer_stats(struct dp_peer *peer)
6038 {
6039 	uint8_t i, mcs, pkt_type;
6040 	uint32_t index;
6041 	char nss[DP_NSS_LENGTH];
6042 	DP_PRINT_STATS("Node Tx Stats:\n");
6043 	DP_PRINT_STATS("Total Packet Completions = %d",
6044 			peer->stats.tx.comp_pkt.num);
6045 	DP_PRINT_STATS("Total Bytes Completions = %llu",
6046 			peer->stats.tx.comp_pkt.bytes);
6047 	DP_PRINT_STATS("Success Packets = %d",
6048 			peer->stats.tx.tx_success.num);
6049 	DP_PRINT_STATS("Success Bytes = %llu",
6050 			peer->stats.tx.tx_success.bytes);
6051 	DP_PRINT_STATS("Unicast Success Packets = %d",
6052 			peer->stats.tx.ucast.num);
6053 	DP_PRINT_STATS("Unicast Success Bytes = %llu",
6054 			peer->stats.tx.ucast.bytes);
6055 	DP_PRINT_STATS("Multicast Success Packets = %d",
6056 			peer->stats.tx.mcast.num);
6057 	DP_PRINT_STATS("Multicast Success Bytes = %llu",
6058 			peer->stats.tx.mcast.bytes);
6059 	DP_PRINT_STATS("Broadcast Success Packets = %d",
6060 			peer->stats.tx.bcast.num);
6061 	DP_PRINT_STATS("Broadcast Success Bytes = %llu",
6062 			peer->stats.tx.bcast.bytes);
6063 	DP_PRINT_STATS("Packets Failed = %d",
6064 			peer->stats.tx.tx_failed);
6065 	DP_PRINT_STATS("Packets In OFDMA = %d",
6066 			peer->stats.tx.ofdma);
6067 	DP_PRINT_STATS("Packets In STBC = %d",
6068 			peer->stats.tx.stbc);
6069 	DP_PRINT_STATS("Packets In LDPC = %d",
6070 			peer->stats.tx.ldpc);
6071 	DP_PRINT_STATS("Packet Retries = %d",
6072 			peer->stats.tx.retries);
6073 	DP_PRINT_STATS("MSDU's Part of AMSDU = %d",
6074 			peer->stats.tx.amsdu_cnt);
6075 	DP_PRINT_STATS("Last Packet RSSI = %d",
6076 			peer->stats.tx.last_ack_rssi);
6077 	DP_PRINT_STATS("Dropped At FW: Removed = %d",
6078 			peer->stats.tx.dropped.fw_rem);
6079 	DP_PRINT_STATS("Dropped At FW: Removed transmitted = %d",
6080 			peer->stats.tx.dropped.fw_rem_tx);
6081 	DP_PRINT_STATS("Dropped At FW: Removed Untransmitted = %d",
6082 			peer->stats.tx.dropped.fw_rem_notx);
6083 	DP_PRINT_STATS("Dropped : Age Out = %d",
6084 			peer->stats.tx.dropped.age_out);
6085 	DP_PRINT_STATS("NAWDS : ");
6086 	DP_PRINT_STATS("	Nawds multicast Drop Tx Packet = %d",
6087 			peer->stats.tx.nawds_mcast_drop);
6088 	DP_PRINT_STATS("	Nawds multicast  Tx Packet Count = %d",
6089 			peer->stats.tx.nawds_mcast.num);
6090 	DP_PRINT_STATS("	Nawds multicast  Tx Packet Bytes = %llu",
6091 			peer->stats.tx.nawds_mcast.bytes);
6092 
6093 	DP_PRINT_STATS("Rate Info:");
6094 
6095 	for (pkt_type = 0; pkt_type < DOT11_MAX; pkt_type++) {
6096 		index = 0;
6097 		for (mcs = 0; mcs < MAX_MCS; mcs++) {
6098 			if (!dp_rate_string[pkt_type][mcs].valid)
6099 				continue;
6100 
6101 			DP_PRINT_STATS("	%s = %d",
6102 					dp_rate_string[pkt_type][mcs].mcs_type,
6103 					peer->stats.tx.pkt_type[pkt_type].
6104 					mcs_count[mcs]);
6105 		}
6106 
6107 		DP_PRINT_STATS("\n");
6108 	}
6109 
6110 	DP_PRINT_STATS("SGI = "
6111 			" 0.8us %d"
6112 			" 0.4us %d"
6113 			" 1.6us %d"
6114 			" 3.2us %d",
6115 			peer->stats.tx.sgi_count[0],
6116 			peer->stats.tx.sgi_count[1],
6117 			peer->stats.tx.sgi_count[2],
6118 			peer->stats.tx.sgi_count[3]);
6119 	DP_PRINT_STATS("Excess Retries per AC ");
6120 	DP_PRINT_STATS("	 Best effort = %d",
6121 			peer->stats.tx.excess_retries_per_ac[0]);
6122 	DP_PRINT_STATS("	 Background= %d",
6123 			peer->stats.tx.excess_retries_per_ac[1]);
6124 	DP_PRINT_STATS("	 Video = %d",
6125 			peer->stats.tx.excess_retries_per_ac[2]);
6126 	DP_PRINT_STATS("	 Voice = %d",
6127 			peer->stats.tx.excess_retries_per_ac[3]);
6128 	DP_PRINT_STATS("BW Counts = 20MHZ %d 40MHZ %d 80MHZ %d 160MHZ %d\n",
6129 			peer->stats.tx.bw[2], peer->stats.tx.bw[3],
6130 			peer->stats.tx.bw[4], peer->stats.tx.bw[5]);
6131 
6132 	index = 0;
6133 	for (i = 0; i < SS_COUNT; i++) {
6134 		index += qdf_snprint(&nss[index], DP_NSS_LENGTH - index,
6135 				" %d", peer->stats.tx.nss[i]);
6136 	}
6137 	DP_PRINT_STATS("NSS(1-8) = %s",
6138 			nss);
6139 
6140 	DP_PRINT_STATS("Aggregation:");
6141 	DP_PRINT_STATS("	Number of Msdu's Part of Amsdu = %d",
6142 			peer->stats.tx.amsdu_cnt);
6143 	DP_PRINT_STATS("	Number of Msdu's With No Msdu Level Aggregation = %d\n",
6144 			peer->stats.tx.non_amsdu_cnt);
6145 
6146 	DP_PRINT_STATS("Node Rx Stats:");
6147 	DP_PRINT_STATS("Packets Sent To Stack = %d",
6148 			peer->stats.rx.to_stack.num);
6149 	DP_PRINT_STATS("Bytes Sent To Stack = %llu",
6150 			peer->stats.rx.to_stack.bytes);
6151 	for (i = 0; i <  CDP_MAX_RX_RINGS; i++) {
6152 		DP_PRINT_STATS("Ring Id = %d", i);
6153 		DP_PRINT_STATS("	Packets Received = %d",
6154 				peer->stats.rx.rcvd_reo[i].num);
6155 		DP_PRINT_STATS("	Bytes Received = %llu",
6156 				peer->stats.rx.rcvd_reo[i].bytes);
6157 	}
6158 	DP_PRINT_STATS("Multicast Packets Received = %d",
6159 			peer->stats.rx.multicast.num);
6160 	DP_PRINT_STATS("Multicast Bytes Received = %llu",
6161 			peer->stats.rx.multicast.bytes);
6162 	DP_PRINT_STATS("Broadcast Packets Received = %d",
6163 			peer->stats.rx.bcast.num);
6164 	DP_PRINT_STATS("Broadcast Bytes Received = %llu",
6165 			peer->stats.rx.bcast.bytes);
6166 	DP_PRINT_STATS("Intra BSS Packets Received = %d",
6167 			peer->stats.rx.intra_bss.pkts.num);
6168 	DP_PRINT_STATS("Intra BSS Bytes Received = %llu",
6169 			peer->stats.rx.intra_bss.pkts.bytes);
6170 	DP_PRINT_STATS("Raw Packets Received = %d",
6171 			peer->stats.rx.raw.num);
6172 	DP_PRINT_STATS("Raw Bytes Received = %llu",
6173 			peer->stats.rx.raw.bytes);
6174 	DP_PRINT_STATS("Errors: MIC Errors = %d",
6175 			peer->stats.rx.err.mic_err);
6176 	DP_PRINT_STATS("Errors: Decryption Errors = %d",
6177 			peer->stats.rx.err.decrypt_err);
6178 	DP_PRINT_STATS("Msdu's Received With No Mpdu Level Aggregation = %d",
6179 			peer->stats.rx.non_ampdu_cnt);
6180 	DP_PRINT_STATS("Msdu's Received As Part of Ampdu = %d",
6181 			peer->stats.rx.ampdu_cnt);
6182 	DP_PRINT_STATS("Msdu's Received Not Part of Amsdu's = %d",
6183 			peer->stats.rx.non_amsdu_cnt);
6184 	DP_PRINT_STATS("MSDUs Received As Part of Amsdu = %d",
6185 			peer->stats.rx.amsdu_cnt);
6186 	DP_PRINT_STATS("NAWDS : ");
6187 	DP_PRINT_STATS("	Nawds multicast Drop Rx Packet = %d",
6188 			peer->stats.rx.nawds_mcast_drop);
6189 	DP_PRINT_STATS("SGI ="
6190 			" 0.8us %d"
6191 			" 0.4us %d"
6192 			" 1.6us %d"
6193 			" 3.2us %d",
6194 			peer->stats.rx.sgi_count[0],
6195 			peer->stats.rx.sgi_count[1],
6196 			peer->stats.rx.sgi_count[2],
6197 			peer->stats.rx.sgi_count[3]);
6198 	DP_PRINT_STATS("BW Counts = 20MHZ %d 40MHZ %d 80MHZ %d 160MHZ %d",
6199 			peer->stats.rx.bw[0], peer->stats.rx.bw[1],
6200 			peer->stats.rx.bw[2], peer->stats.rx.bw[3]);
6201 	DP_PRINT_STATS("Reception Type ="
6202 			" SU %d,"
6203 			" MU_MIMO %d,"
6204 			" MU_OFDMA %d,"
6205 			" MU_OFDMA_MIMO %d",
6206 			peer->stats.rx.reception_type[0],
6207 			peer->stats.rx.reception_type[1],
6208 			peer->stats.rx.reception_type[2],
6209 			peer->stats.rx.reception_type[3]);
6210 
6211 
6212 	for (pkt_type = 0; pkt_type < DOT11_MAX; pkt_type++) {
6213 		index = 0;
6214 		for (mcs = 0; mcs < MAX_MCS; mcs++) {
6215 			if (!dp_rate_string[pkt_type][mcs].valid)
6216 				continue;
6217 
6218 			DP_PRINT_STATS("	%s = %d",
6219 					dp_rate_string[pkt_type][mcs].mcs_type,
6220 					peer->stats.rx.pkt_type[pkt_type].
6221 					mcs_count[mcs]);
6222 		}
6223 
6224 		DP_PRINT_STATS("\n");
6225 	}
6226 
6227 	index = 0;
6228 	for (i = 0; i < SS_COUNT; i++) {
6229 		index += qdf_snprint(&nss[index], DP_NSS_LENGTH - index,
6230 				" %d", peer->stats.rx.nss[i]);
6231 	}
6232 	DP_PRINT_STATS("NSS(1-8) = %s",
6233 			nss);
6234 
6235 	DP_PRINT_STATS("Aggregation:");
6236 	DP_PRINT_STATS("	Msdu's Part of Ampdu = %d",
6237 			peer->stats.rx.ampdu_cnt);
6238 	DP_PRINT_STATS("	Msdu's With No Mpdu Level Aggregation = %d",
6239 			peer->stats.rx.non_ampdu_cnt);
6240 	DP_PRINT_STATS("	Msdu's Part of Amsdu = %d",
6241 			peer->stats.rx.amsdu_cnt);
6242 	DP_PRINT_STATS("	Msdu's With No Msdu Level Aggregation = %d",
6243 			peer->stats.rx.non_amsdu_cnt);
6244 }
6245 
6246 /*
6247  * dp_get_host_peer_stats()- function to print peer stats
6248  * @pdev_handle: DP_PDEV handle
6249  * @mac_addr: mac address of the peer
6250  *
6251  * Return: void
6252  */
6253 static void
6254 dp_get_host_peer_stats(struct cdp_pdev *pdev_handle, char *mac_addr)
6255 {
6256 	struct dp_peer *peer;
6257 	uint8_t local_id;
6258 
6259 	peer = (struct dp_peer *)dp_find_peer_by_addr(pdev_handle, mac_addr,
6260 			&local_id);
6261 
6262 	if (!peer) {
6263 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
6264 			  "%s: Invalid peer\n", __func__);
6265 		return;
6266 	}
6267 
6268 	dp_print_peer_stats(peer);
6269 	dp_peer_rxtid_stats(peer, dp_rx_tid_stats_cb, NULL);
6270 }
6271 
6272 /**
6273  * dp_print_host_stats()- Function to print the stats aggregated at host
6274  * @vdev_handle: DP_VDEV handle
6275  * @req: stats request containing the host stats type
6276  *
6277  * Available Stat types
6278  * TXRX_CLEAR_STATS  : Clear the stats
6279  * TXRX_RX_RATE_STATS: Print Rx Rate Info
6280  * TXRX_TX_RATE_STATS: Print Tx Rate Info
6281  * TXRX_TX_HOST_STATS: Print Tx Stats
6282  * TXRX_RX_HOST_STATS: Print Rx Stats
6283  * TXRX_AST_STATS: Print AST Stats
6284  * TXRX_SRNG_PTR_STATS: Print SRNG ring pointer stats
 * TXRX_RX_MON_STATS: Print Rx monitor mode stats
 * TXRX_REO_QUEUE_STATS: Print REO queue stats of the given peer
6285  *
6286  * Return: 0 on success, print error message in case of failure
6287  * Return: 0 on success; an error is traced for an unsupported stats type
6288 static int
6289 dp_print_host_stats(struct cdp_vdev *vdev_handle,
6290 		    struct cdp_txrx_stats_req *req)
6291 {
6292 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
6293 	struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;
6294 	enum cdp_host_txrx_stats type =
6295 			dp_stats_mapping_table[req->stats][STATS_HOST];
6296 
6297 	dp_aggregate_pdev_stats(pdev);
6298 
6299 	switch (type) {
6300 	case TXRX_CLEAR_STATS:
6301 		dp_txrx_host_stats_clr(vdev);
6302 		break;
6303 	case TXRX_RX_RATE_STATS:
6304 		dp_print_rx_rates(vdev);
6305 		break;
6306 	case TXRX_TX_RATE_STATS:
6307 		dp_print_tx_rates(vdev);
6308 		break;
6309 	case TXRX_TX_HOST_STATS:
6310 		dp_print_pdev_tx_stats(pdev);
6311 		dp_print_soc_tx_stats(pdev->soc);
6312 		break;
6313 	case TXRX_RX_HOST_STATS:
6314 		dp_print_pdev_rx_stats(pdev);
6315 		dp_print_soc_rx_stats(pdev->soc);
6316 		break;
6317 	case TXRX_AST_STATS:
6318 		dp_print_ast_stats(pdev->soc);
6319 		dp_print_peer_table(vdev);
6320 		break;
6321 	case TXRX_SRNG_PTR_STATS:
6322 		dp_print_ring_stats(pdev);
6323 		break;
6324 	case TXRX_RX_MON_STATS:
6325 		dp_print_pdev_rx_mon_stats(pdev);
6326 		break;
6327 	case TXRX_REO_QUEUE_STATS:
6328 		dp_get_host_peer_stats((struct cdp_pdev *)pdev, req->peer_addr);
6329 		break;
6330 	default:
6331 		DP_TRACE(FATAL, "Wrong Input For TxRx Host Stats");
6332 		break;
6333 	}
6334 	return 0;
6335 }
6336 
6337 /*
6338  * dp_ppdu_ring_reset()- Reset PPDU Stats ring
6339  * @pdev: DP_PDEV handle
6340  *
6341  * Return: void
6342  */
6343 static void
6344 dp_ppdu_ring_reset(struct dp_pdev *pdev)
6345 {
6346 	struct htt_rx_ring_tlv_filter htt_tlv_filter;
6347 	int mac_id;
6348 
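	/*
	 * An all-zero TLV filter programmed on the monitor status ring
	 * effectively disables PPDU stats TLV delivery from the target.
	 */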
6349 	qdf_mem_set(&(htt_tlv_filter), sizeof(htt_tlv_filter), 0x0);
6350 
6351 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
6352 		int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id,
6353 							pdev->pdev_id);
6354 
6355 		htt_h2t_rx_ring_cfg(pdev->soc->htt_handle, mac_for_pdev,
6356 			pdev->rxdma_mon_status_ring[mac_id].hal_srng,
6357 			RXDMA_MONITOR_STATUS, RX_BUFFER_SIZE, &htt_tlv_filter);
6358 	}
6359 }
6360 
6361 /*
6362  * dp_ppdu_ring_cfg()- Configure PPDU Stats ring
6363  * @pdev: DP_PDEV handle
6364  *
6365  * Return: void
6366  */
6367 static void
6368 dp_ppdu_ring_cfg(struct dp_pdev *pdev)
6369 {
6370 	struct htt_rx_ring_tlv_filter htt_tlv_filter = {0};
6371 	int mac_id;
6372 
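	/*
	 * Enable only the TLVs needed for PPDU level stats (MPDU start plus
	 * the PPDU start/end and per-user stats TLVs); full packet headers
	 * are added only for M-copy mode below.
	 */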
6373 	htt_tlv_filter.mpdu_start = 1;
6374 	htt_tlv_filter.msdu_start = 0;
6375 	htt_tlv_filter.packet = 0;
6376 	htt_tlv_filter.msdu_end = 0;
6377 	htt_tlv_filter.mpdu_end = 0;
6378 	htt_tlv_filter.attention = 0;
6379 	htt_tlv_filter.ppdu_start = 1;
6380 	htt_tlv_filter.ppdu_end = 1;
6381 	htt_tlv_filter.ppdu_end_user_stats = 1;
6382 	htt_tlv_filter.ppdu_end_user_stats_ext = 1;
6383 	htt_tlv_filter.ppdu_end_status_done = 1;
6384 	htt_tlv_filter.enable_fp = 1;
6385 	htt_tlv_filter.enable_md = 0;
6386 	if (pdev->mcopy_mode) {
6387 		htt_tlv_filter.packet_header = 1;
6388 		htt_tlv_filter.enable_mo = 1;
6389 	}
6390 	htt_tlv_filter.fp_mgmt_filter = FILTER_MGMT_ALL;
6391 	htt_tlv_filter.fp_ctrl_filter = FILTER_CTRL_ALL;
6392 	htt_tlv_filter.fp_data_filter = FILTER_DATA_ALL;
6393 	htt_tlv_filter.mo_mgmt_filter = FILTER_MGMT_ALL;
6394 	htt_tlv_filter.mo_ctrl_filter = FILTER_CTRL_ALL;
6395 	htt_tlv_filter.mo_data_filter = FILTER_DATA_ALL;
6396 
6397 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
6398 		int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id,
6399 						pdev->pdev_id);
6400 
6401 		htt_h2t_rx_ring_cfg(pdev->soc->htt_handle, mac_for_pdev,
6402 			pdev->rxdma_mon_status_ring[mac_id].hal_srng,
6403 			RXDMA_MONITOR_STATUS, RX_BUFFER_SIZE, &htt_tlv_filter);
6404 	}
6405 }
6406 
6407 /*
6408  * is_ppdu_txrx_capture_enabled() - check that none of the PPDU capture modes
6409  *                              (pktlog PPDU stats, tx sniffer, M-copy) is enabled.
6410  * @dp_pdev: dp pdev handle.
6411  *
6412  * Return: bool
6413  */
6414 static inline bool is_ppdu_txrx_capture_enabled(struct dp_pdev *pdev)
6415 {
6416 	if (!pdev->pktlog_ppdu_stats && !pdev->tx_sniffer_enable &&
6417 	    !pdev->mcopy_mode)
6418 		return true;
6419 	else
6420 		return false;
6421 }
6422 
6423 /*
6424  * dp_set_bpr_enable() - API to enable/disable bpr feature
6425  * @pdev_handle: DP_PDEV handle.
6426  * @val: Provided value.
6427  *
6428  * Return: void
6429  */
6430 static void
6431 dp_set_bpr_enable(struct cdp_pdev *pdev_handle, int val)
6432 {
6433 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
6434 
6435 	switch (val) {
6436 	case CDP_BPR_DISABLE:
6437 		pdev->bpr_enable = CDP_BPR_DISABLE;
6438 		if (!pdev->pktlog_ppdu_stats && !pdev->enhanced_stats_en &&
6439 		    !pdev->tx_sniffer_enable && !pdev->mcopy_mode) {
6440 			dp_h2t_cfg_stats_msg_send(pdev, 0, pdev->pdev_id);
6441 		} else if (pdev->enhanced_stats_en &&
6442 			   !pdev->tx_sniffer_enable && !pdev->mcopy_mode &&
6443 			   !pdev->pktlog_ppdu_stats) {
6444 			dp_h2t_cfg_stats_msg_send(pdev,
6445 						  DP_PPDU_STATS_CFG_ENH_STATS,
6446 						  pdev->pdev_id);
6447 		}
6448 		break;
6449 	case CDP_BPR_ENABLE:
6450 		pdev->bpr_enable = CDP_BPR_ENABLE;
6451 		if (!pdev->enhanced_stats_en && !pdev->tx_sniffer_enable &&
6452 		    !pdev->mcopy_mode && !pdev->pktlog_ppdu_stats) {
6453 			dp_h2t_cfg_stats_msg_send(pdev,
6454 						  DP_PPDU_STATS_CFG_BPR,
6455 						  pdev->pdev_id);
6456 		} else if (pdev->enhanced_stats_en &&
6457 			   !pdev->tx_sniffer_enable && !pdev->mcopy_mode &&
6458 			   !pdev->pktlog_ppdu_stats) {
6459 			dp_h2t_cfg_stats_msg_send(pdev,
6460 						  DP_PPDU_STATS_CFG_BPR_ENH,
6461 						  pdev->pdev_id);
6462 		} else if (pdev->pktlog_ppdu_stats) {
6463 			dp_h2t_cfg_stats_msg_send(pdev,
6464 						  DP_PPDU_STATS_CFG_BPR_PKTLOG,
6465 						  pdev->pdev_id);
6466 		}
6467 		break;
6468 	default:
6469 		break;
6470 	}
6471 }
6472 
6473 /*
6474  * dp_config_debug_sniffer()- API to enable/disable debug sniffer
6475  * @pdev_handle: DP_PDEV handle
6476  * @val: user provided value
6477  *
6478  * Return: void
6479  */
6480 static void
6481 dp_config_debug_sniffer(struct cdp_pdev *pdev_handle, int val)
6482 {
6483 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
6484 
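	/*
	 * The PPDU stats mask sent to FW is shared by pktlog, enhanced stats,
	 * BPR and the sniffer/M-copy modes; when one mode changes, re-send the
	 * mask that keeps the remaining enabled consumers working.
	 */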
6485 	switch (val) {
6486 	case 0:
6487 		pdev->tx_sniffer_enable = 0;
6488 		pdev->mcopy_mode = 0;
6489 
6490 		if (!pdev->pktlog_ppdu_stats && !pdev->enhanced_stats_en &&
6491 		    !pdev->bpr_enable) {
6492 			dp_h2t_cfg_stats_msg_send(pdev, 0, pdev->pdev_id);
6493 			dp_ppdu_ring_reset(pdev);
6494 		} else if (pdev->enhanced_stats_en && !pdev->bpr_enable) {
6495 			dp_h2t_cfg_stats_msg_send(pdev,
6496 				DP_PPDU_STATS_CFG_ENH_STATS, pdev->pdev_id);
6497 		} else if (!pdev->enhanced_stats_en && pdev->bpr_enable) {
6498 			dp_h2t_cfg_stats_msg_send(pdev,
6499 						  DP_PPDU_STATS_CFG_BPR_ENH,
6500 						  pdev->pdev_id);
6501 		} else {
6502 			dp_h2t_cfg_stats_msg_send(pdev,
6503 						  DP_PPDU_STATS_CFG_BPR,
6504 						  pdev->pdev_id);
6505 		}
6506 		break;
6507 
6508 	case 1:
6509 		pdev->tx_sniffer_enable = 1;
6510 		pdev->mcopy_mode = 0;
6511 
6512 		if (!pdev->pktlog_ppdu_stats)
6513 			dp_h2t_cfg_stats_msg_send(pdev,
6514 				DP_PPDU_STATS_CFG_SNIFFER, pdev->pdev_id);
6515 		break;
6516 	case 2:
6517 		pdev->mcopy_mode = 1;
6518 		pdev->tx_sniffer_enable = 0;
6519 		dp_ppdu_ring_cfg(pdev);
6520 
6521 		if (!pdev->pktlog_ppdu_stats)
6522 			dp_h2t_cfg_stats_msg_send(pdev,
6523 				DP_PPDU_STATS_CFG_SNIFFER, pdev->pdev_id);
6524 		break;
6525 	default:
6526 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
6527 			"Invalid value");
6528 		break;
6529 	}
6530 }
6531 
6532 /*
6533  * dp_enable_enhanced_stats()- API to enable enhanced statistics
6534  * @pdev_handle: DP_PDEV handle
6535  *
6536  * Return: void
6537  */
6538 static void
6539 dp_enable_enhanced_stats(struct cdp_pdev *pdev_handle)
6540 {
6541 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
6542 	pdev->enhanced_stats_en = 1;
6543 
6544 	if (!pdev->mcopy_mode && !pdev->neighbour_peers_added)
6545 		dp_ppdu_ring_cfg(pdev);
6546 
6547 	if (is_ppdu_txrx_capture_enabled(pdev) && !pdev->bpr_enable) {
6548 		dp_h2t_cfg_stats_msg_send(pdev, DP_PPDU_STATS_CFG_ENH_STATS, pdev->pdev_id);
6549 	} else if (is_ppdu_txrx_capture_enabled(pdev) && pdev->bpr_enable) {
6550 		dp_h2t_cfg_stats_msg_send(pdev,
6551 					  DP_PPDU_STATS_CFG_BPR_ENH,
6552 					  pdev->pdev_id);
6553 	}
6554 }
6555 
6556 /*
6557  * dp_disable_enhanced_stats()- API to disable enhanced statistics
6558  * @pdev_handle: DP_PDEV handle
6559  *
6560  * Return: void
6561  */
6562 static void
6563 dp_disable_enhanced_stats(struct cdp_pdev *pdev_handle)
6564 {
6565 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
6566 
6567 	pdev->enhanced_stats_en = 0;
6568 
6569 	if (is_ppdu_txrx_capture_enabled(pdev) && !pdev->bpr_enable) {
6570 		dp_h2t_cfg_stats_msg_send(pdev, 0, pdev->pdev_id);
6571 	} else if (is_ppdu_txrx_capture_enabled(pdev) && pdev->bpr_enable) {
6572 		dp_h2t_cfg_stats_msg_send(pdev,
6573 					  DP_PPDU_STATS_CFG_BPR,
6574 					  pdev->pdev_id);
6575 	}
6576 
6577 	if (!pdev->mcopy_mode && !pdev->neighbour_peers_added)
6578 		dp_ppdu_ring_reset(pdev);
6579 }
6580 
6581 /*
6582  * dp_get_fw_peer_stats()- function to request peer stats from firmware
6583  * @pdev_handle: DP_PDEV handle
6584  * @mac_addr: mac address of the peer
6585  * @cap: Type of htt stats requested
6586  *
6587  * Currently supporting only MAC address based requests:
6588  *	1: HTT_PEER_STATS_REQ_MODE_NO_QUERY
6589  *	2: HTT_PEER_STATS_REQ_MODE_QUERY_TQM
6590  *	3: HTT_PEER_STATS_REQ_MODE_FLUSH_TQM
6591  *
6592  * Return: void
6593  */
6594 static void
6595 dp_get_fw_peer_stats(struct cdp_pdev *pdev_handle, uint8_t *mac_addr,
6596 		uint32_t cap)
6597 {
6598 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
6599 	int i;
6600 	uint32_t config_param0 = 0;
6601 	uint32_t config_param1 = 0;
6602 	uint32_t config_param2 = 0;
6603 	uint32_t config_param3 = 0;
6604 
6605 	HTT_DBG_EXT_STATS_PEER_INFO_IS_MAC_ADDR_SET(config_param0, 1);
6606 	config_param0 |= (1 << (cap + 1));
6607 
6608 	for (i = 0; i < HTT_PEER_STATS_MAX_TLV; i++) {
6609 		config_param1 |= (1 << i);
6610 	}
6611 
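	/*
	 * Pack the peer MAC address byte-wise into the two 32-bit config
	 * words: bytes 0-3 into config_param2 and bytes 4-5 into the lower
	 * half of config_param3.
	 */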
6612 	config_param2 |= (mac_addr[0] & 0x000000ff);
6613 	config_param2 |= ((mac_addr[1] << 8) & 0x0000ff00);
6614 	config_param2 |= ((mac_addr[2] << 16) & 0x00ff0000);
6615 	config_param2 |= ((mac_addr[3] << 24) & 0xff000000);
6616 
6617 	config_param3 |= (mac_addr[4] & 0x000000ff);
6618 	config_param3 |= ((mac_addr[5] << 8) & 0x0000ff00);
6619 
6620 	dp_h2t_ext_stats_msg_send(pdev, HTT_DBG_EXT_STATS_PEER_INFO,
6621 			config_param0, config_param1, config_param2,
6622 			config_param3, 0, 0, 0);
6623 
6624 }
6625 
6626 /* This struct definition will be removed from here
6627  * once it gets added in FW headers */
6628 struct httstats_cmd_req {
6629 	uint32_t	config_param0;
6630 	uint32_t	config_param1;
6631 	uint32_t	config_param2;
6632 	uint32_t	config_param3;
6633 	int		cookie;
6634 	u_int8_t	stats_id;
6635 };
6636 
6637 /*
6638  * dp_get_htt_stats: function to process the HTT stats request
6639  * @pdev_handle: DP pdev handle
6640  * @data: pointer to request data
6641  * @data_len: length of request data
6642  *
6643  * return: void
6644  */
6645 static void
6646 dp_get_htt_stats(struct cdp_pdev *pdev_handle, void *data, uint32_t data_len)
6647 {
6648 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
6649 	struct httstats_cmd_req *req = (struct httstats_cmd_req *)data;
6650 
6651 	QDF_ASSERT(data_len == sizeof(struct httstats_cmd_req));
6652 	dp_h2t_ext_stats_msg_send(pdev, req->stats_id,
6653 				req->config_param0, req->config_param1,
6654 				req->config_param2, req->config_param3,
6655 				req->cookie, 0, 0);
6656 }
6657 
6658 /*
6659  * dp_set_pdev_param: function to set parameters in pdev
6660  * @pdev_handle: DP pdev handle
6661  * @param: parameter type to be set
6662  * @val: value of parameter to be set
6663  *
6664  * return: void
6665  */
6666 static void dp_set_pdev_param(struct cdp_pdev *pdev_handle,
6667 		enum cdp_pdev_param_type param, uint8_t val)
6668 {
6669 	switch (param) {
6670 	case CDP_CONFIG_DEBUG_SNIFFER:
6671 		dp_config_debug_sniffer(pdev_handle, val);
6672 		break;
6673 	case CDP_CONFIG_BPR_ENABLE:
6674 		dp_set_bpr_enable(pdev_handle, val);
6675 		break;
6676 	default:
6677 		break;
6678 	}
6679 }
6680 
6681 /*
6682  * dp_set_vdev_param: function to set parameters in vdev
 * @vdev_handle: DP_VDEV handle
6683  * @param: parameter type to be set
6684  * @val: value of parameter to be set
6685  *
6686  * return: void
6687  */
6688 static void dp_set_vdev_param(struct cdp_vdev *vdev_handle,
6689 		enum cdp_vdev_param_type param, uint32_t val)
6690 {
6691 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
6692 	switch (param) {
6693 	case CDP_ENABLE_WDS:
6694 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
6695 			  "wds_enable %d for vdev(%p) id(%d)\n",
6696 			  val, vdev, vdev->vdev_id);
6697 		vdev->wds_enabled = val;
6698 		break;
6699 	case CDP_ENABLE_NAWDS:
6700 		vdev->nawds_enabled = val;
6701 		break;
6702 	case CDP_ENABLE_MCAST_EN:
6703 		vdev->mcast_enhancement_en = val;
6704 		break;
6705 	case CDP_ENABLE_PROXYSTA:
6706 		vdev->proxysta_vdev = val;
6707 		break;
6708 	case CDP_UPDATE_TDLS_FLAGS:
6709 		vdev->tdls_link_connected = val;
6710 		break;
6711 	case CDP_CFG_WDS_AGING_TIMER:
6712 		if (val == 0)
6713 			qdf_timer_stop(&vdev->pdev->soc->wds_aging_timer);
6714 		else if (val != vdev->wds_aging_timer_val)
6715 			qdf_timer_mod(&vdev->pdev->soc->wds_aging_timer, val);
6716 
6717 		vdev->wds_aging_timer_val = val;
6718 		break;
6719 	case CDP_ENABLE_AP_BRIDGE:
6720 		if (wlan_op_mode_sta != vdev->opmode)
6721 			vdev->ap_bridge_enabled = val;
6722 		else
6723 			vdev->ap_bridge_enabled = false;
6724 		break;
6725 	case CDP_ENABLE_CIPHER:
6726 		vdev->sec_type = val;
6727 		break;
6728 	case CDP_ENABLE_QWRAP_ISOLATION:
6729 		vdev->isolation_vdev = val;
6730 		break;
6731 	default:
6732 		break;
6733 	}
6734 
6735 	dp_tx_vdev_update_search_flags(vdev);
6736 }
6737 
6738 /**
6739  * dp_peer_set_nawds: set nawds bit in peer
6740  * @peer_handle: pointer to peer
6741  * @value: enable/disable nawds
6742  *
6743  * return: void
6744  */
6745 static void dp_peer_set_nawds(struct cdp_peer *peer_handle, uint8_t value)
6746 {
6747 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
6748 	peer->nawds_enabled = value;
6749 }
6750 
6751 /*
6752  * dp_set_vdev_dscp_tid_map_wifi3(): Update Map ID selected for particular vdev
6753  * @vdev_handle: DP_VDEV handle
6754  * @map_id:ID of map that needs to be updated
6755  *
6756  * Return: void
6757  */
6758 static void dp_set_vdev_dscp_tid_map_wifi3(struct cdp_vdev *vdev_handle,
6759 		uint8_t map_id)
6760 {
6761 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
6762 	vdev->dscp_tid_map_id = map_id;
6763 	return;
6764 }
6765 
6766 /* dp_txrx_get_peer_stats - will return cdp_peer_stats
6767  * @peer_handle: DP_PEER handle
6768  *
6769  * return : cdp_peer_stats pointer
6770  */
6771 static struct cdp_peer_stats*
6772 		dp_txrx_get_peer_stats(struct cdp_peer *peer_handle)
6773 {
6774 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
6775 
6776 	qdf_assert(peer);
6777 
6778 	return &peer->stats;
6779 }
6780 
6781 /* dp_txrx_reset_peer_stats - reset cdp_peer_stats for particular peer
6782  * @peer_handle: DP_PEER handle
6783  *
6784  * return : void
6785  */
6786 static void dp_txrx_reset_peer_stats(struct cdp_peer *peer_handle)
6787 {
6788 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
6789 
6790 	qdf_assert(peer);
6791 
6792 	qdf_mem_set(&peer->stats, sizeof(peer->stats), 0);
6793 }
6794 
6795 /* dp_txrx_get_vdev_stats - Update buffer with cdp_vdev_stats
6796  * @vdev_handle: DP_VDEV handle
6797  * @buf: buffer for vdev stats
 * @is_aggregate: if true, aggregate fresh peer stats; else copy the cached vdev stats
6798  *
6799  * return : int
6800  */
6801 static int  dp_txrx_get_vdev_stats(struct cdp_vdev *vdev_handle, void *buf,
6802 				   bool is_aggregate)
6803 {
6804 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
6805 	struct cdp_vdev_stats *vdev_stats = (struct cdp_vdev_stats *)buf;
6806 
6807 	if (is_aggregate)
6808 		dp_aggregate_vdev_stats(vdev, buf);
6809 	else
6810 		qdf_mem_copy(vdev_stats, &vdev->stats, sizeof(vdev->stats));
6811 
6812 	return 0;
6813 }
6814 
6815 /*
6816  * dp_txrx_stats_publish(): publish pdev stats into a buffer
6817  * @pdev_handle: DP_PDEV handle
6818  * @buf: to hold pdev_stats
6819  *
6820  * Return: int
6821  */
6822 static int
6823 dp_txrx_stats_publish(struct cdp_pdev *pdev_handle, void *buf)
6824 {
6825 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
6826 	struct cdp_pdev_stats *buffer = (struct cdp_pdev_stats *) buf;
6827 	struct cdp_txrx_stats_req req = {0,};
6828 
6829 	dp_aggregate_pdev_stats(pdev);
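	/*
	 * Trigger the FW PDEV TX and RX ext-stats requests and sleep between
	 * them so the target has time to respond before the aggregated pdev
	 * stats snapshot is copied into the caller's buffer.
	 */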
6830 	req.stats = HTT_DBG_EXT_STATS_PDEV_TX;
6831 	req.cookie_val = 1;
6832 	dp_h2t_ext_stats_msg_send(pdev, req.stats, req.param0,
6833 				req.param1, req.param2, req.param3, 0,
6834 				req.cookie_val, 0);
6835 
6836 	msleep(DP_MAX_SLEEP_TIME);
6837 
6838 	req.stats = HTT_DBG_EXT_STATS_PDEV_RX;
6839 	req.cookie_val = 1;
6840 	dp_h2t_ext_stats_msg_send(pdev, req.stats, req.param0,
6841 				req.param1, req.param2, req.param3, 0,
6842 				req.cookie_val, 0);
6843 
6844 	msleep(DP_MAX_SLEEP_TIME);
6845 	qdf_mem_copy(buffer, &pdev->stats, sizeof(pdev->stats));
6846 
6847 	return TXRX_STATS_LEVEL;
6848 }
6849 
6850 /**
6851  * dp_set_pdev_dscp_tid_map_wifi3(): update dscp tid map in pdev
6852  * @pdev_handle: DP_PDEV handle
6853  * @map_id: ID of map that needs to be updated
6854  * @tos: index value in map
6855  * @tid: tid value passed by the user
6856  *
6857  * Return: void
6858  */
6859 static void dp_set_pdev_dscp_tid_map_wifi3(struct cdp_pdev *pdev_handle,
6860 		uint8_t map_id, uint8_t tos, uint8_t tid)
6861 {
6862 	uint8_t dscp;
6863 	struct dp_pdev *pdev = (struct dp_pdev *) pdev_handle;
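	/*
	 * Extract the DSCP code point from the ToS byte and use it to index
	 * the per-map table; the HW DSCP-TID map registers are only updated
	 * for map IDs that are backed by hardware.
	 */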
6864 	dscp = (tos >> DP_IP_DSCP_SHIFT) & DP_IP_DSCP_MASK;
6865 	pdev->dscp_tid_map[map_id][dscp] = tid;
6866 	if (map_id < HAL_MAX_HW_DSCP_TID_MAPS)
6867 		hal_tx_update_dscp_tid(pdev->soc->hal_soc, tid,
6868 			map_id, dscp);
6869 	return;
6870 }
6871 
6872 /**
6873  * dp_fw_stats_process(): Process TxRX FW stats request
6874  * @vdev_handle: DP VDEV handle
6875  * @req: stats request
6876  *
6877  * return: int
6878  */
6879 static int dp_fw_stats_process(struct cdp_vdev *vdev_handle,
6880 		struct cdp_txrx_stats_req *req)
6881 {
6882 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
6883 	struct dp_pdev *pdev = NULL;
6884 	uint32_t stats = req->stats;
6885 	uint8_t mac_id = req->mac_id;
6886 
6887 	if (!vdev) {
6888 		DP_TRACE(NONE, "VDEV not found");
6889 		return 1;
6890 	}
6891 	pdev = vdev->pdev;
6892 
6893 	/*
6894 	 * For the HTT_DBG_EXT_STATS_RESET command, the FW expects
6895 	 * config_param0 to config_param3 to be set as per the rule below:
6896 	 *
6897 	 * PARAM:
6898 	 *   - config_param0 : start_offset (stats type)
6899 	 *   - config_param1 : stats bmask from start offset
6900 	 *   - config_param2 : stats bmask from start offset + 32
6901 	 *   - config_param3 : stats bmask from start offset + 64
6902 	 */
6903 	if (req->stats == CDP_TXRX_STATS_0) {
6904 		req->param0 = HTT_DBG_EXT_STATS_PDEV_TX;
6905 		req->param1 = 0xFFFFFFFF;
6906 		req->param2 = 0xFFFFFFFF;
6907 		req->param3 = 0xFFFFFFFF;
6908 	}
6909 
6910 	return dp_h2t_ext_stats_msg_send(pdev, stats, req->param0,
6911 				req->param1, req->param2, req->param3,
6912 				0, 0, mac_id);
6913 }
6914 
6915 /**
6916  * dp_txrx_stats_request - map a stats request to FW and/or host stats
6917  * @vdev: virtual handle
6918  * @req: stats request
6919  *
6920  * Return: integer
6921  */
6922 static int dp_txrx_stats_request(struct cdp_vdev *vdev,
6923 		struct cdp_txrx_stats_req *req)
6924 {
6925 	int host_stats;
6926 	int fw_stats;
6927 	enum cdp_stats stats;
6928 
6929 	if (!vdev || !req) {
6930 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
6931 				"Invalid vdev/req instance");
6932 		return 0;
6933 	}
6934 
6935 	stats = req->stats;
6936 	if (stats >= CDP_TXRX_MAX_STATS)
6937 		return 0;
6938 
6939 	/*
6940 	 * DP_CURR_FW_STATS_AVAIL: no of FW stats currently available
6941 	 *			has to be updated if new FW HTT stats added
6942 	 */
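	/*
	 * Requests in the raw HTT stats ID space are folded back into the
	 * dp_stats_mapping_table index range before the FW/host lookup below.
	 */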
6943 	if (stats > CDP_TXRX_STATS_HTT_MAX)
6944 		stats = stats + DP_CURR_FW_STATS_AVAIL - DP_HTT_DBG_EXT_STATS_MAX;
6945 	fw_stats = dp_stats_mapping_table[stats][STATS_FW];
6946 	host_stats = dp_stats_mapping_table[stats][STATS_HOST];
6947 
6948 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
6949 		 "stats: %u fw_stats_type: %d host_stats_type: %d",
6950 		  stats, fw_stats, host_stats);
6951 
6952 	if (fw_stats != TXRX_FW_STATS_INVALID) {
6953 		/* update request with FW stats type */
6954 		req->stats = fw_stats;
6955 		return dp_fw_stats_process(vdev, req);
6956 	}
6957 
6958 	if ((host_stats != TXRX_HOST_STATS_INVALID) &&
6959 			(host_stats <= TXRX_HOST_STATS_MAX))
6960 		return dp_print_host_stats(vdev, req);
6961 	else
6962 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
6963 				"Wrong Input for TxRx Stats");
6964 
6965 	return 0;
6966 }
6967 
6968 /*
6969  * dp_print_napi_stats(): NAPI stats
6970  * @soc - soc handle
6971  */
6972 static void dp_print_napi_stats(struct dp_soc *soc)
6973 {
6974 	hif_print_napi_stats(soc->hif_handle);
6975 }
6976 
6977 /*
6978  * dp_print_per_ring_stats(): Packet count per ring
6979  * @soc - soc handle
6980  */
6981 static void dp_print_per_ring_stats(struct dp_soc *soc)
6982 {
6983 	uint8_t ring;
6984 	uint16_t core;
6985 	uint64_t total_packets;
6986 
6987 	DP_TRACE(FATAL, "Reo packets per ring:");
6988 	for (ring = 0; ring < MAX_REO_DEST_RINGS; ring++) {
6989 		total_packets = 0;
6990 		DP_TRACE(FATAL, "Packets on ring %u:", ring);
6991 		for (core = 0; core < NR_CPUS; core++) {
6992 			DP_TRACE(FATAL, "Packets arriving on core %u: %llu",
6993 				core, soc->stats.rx.ring_packets[core][ring]);
6994 			total_packets += soc->stats.rx.ring_packets[core][ring];
6995 		}
6996 		DP_TRACE(FATAL, "Total packets on ring %u: %llu",
6997 			ring, total_packets);
6998 	}
6999 }
7000 
7001 /*
7002  * dp_txrx_path_stats() - Function to display dump stats
7003  * @soc - soc handle
7004  *
7005  * return: none
7006  */
7007 static void dp_txrx_path_stats(struct dp_soc *soc)
7008 {
7009 	uint8_t error_code;
7010 	uint8_t loop_pdev;
7011 	struct dp_pdev *pdev;
7012 	uint8_t i;
7013 
7014 	for (loop_pdev = 0; loop_pdev < soc->pdev_count; loop_pdev++) {
7015 
7016 		pdev = soc->pdev_list[loop_pdev];
7017 		dp_aggregate_pdev_stats(pdev);
7018 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
7019 			"Tx path Statistics:");
7020 
7021 		DP_TRACE(FATAL, "from stack: %u msdus (%llu bytes)",
7022 			pdev->stats.tx_i.rcvd.num,
7023 			pdev->stats.tx_i.rcvd.bytes);
7024 		DP_TRACE(FATAL, "processed from host: %u msdus (%llu bytes)",
7025 			pdev->stats.tx_i.processed.num,
7026 			pdev->stats.tx_i.processed.bytes);
7027 		DP_TRACE(FATAL, "successfully transmitted: %u msdus (%llu bytes)",
7028 			pdev->stats.tx.tx_success.num,
7029 			pdev->stats.tx.tx_success.bytes);
7030 
7031 		DP_TRACE(FATAL, "Dropped in host:");
7032 		DP_TRACE(FATAL, "Total packets dropped: %u,",
7033 			pdev->stats.tx_i.dropped.dropped_pkt.num);
7034 		DP_TRACE(FATAL, "Descriptor not available: %u",
7035 			pdev->stats.tx_i.dropped.desc_na.num);
7036 		DP_TRACE(FATAL, "Ring full: %u",
7037 			pdev->stats.tx_i.dropped.ring_full);
7038 		DP_TRACE(FATAL, "Enqueue fail: %u",
7039 			pdev->stats.tx_i.dropped.enqueue_fail);
7040 		DP_TRACE(FATAL, "DMA Error: %u",
7041 			pdev->stats.tx_i.dropped.dma_error);
7042 
7043 		DP_TRACE(FATAL, "Dropped in hardware:");
7044 		DP_TRACE(FATAL, "total packets dropped: %u",
7045 			pdev->stats.tx.tx_failed);
7046 		DP_TRACE(FATAL, "mpdu age out: %u",
7047 			pdev->stats.tx.dropped.age_out);
7048 		DP_TRACE(FATAL, "firmware removed: %u",
7049 			pdev->stats.tx.dropped.fw_rem);
7050 		DP_TRACE(FATAL, "firmware removed tx: %u",
7051 			pdev->stats.tx.dropped.fw_rem_tx);
7052 		DP_TRACE(FATAL, "firmware removed notx %u",
7053 			pdev->stats.tx.dropped.fw_rem_notx);
7054 		DP_TRACE(FATAL, "peer_invalid: %u",
7055 			pdev->soc->stats.tx.tx_invalid_peer.num);
7056 
7057 
7058 		DP_TRACE(FATAL, "Tx packets sent per interrupt:");
7059 		DP_TRACE(FATAL, "Single Packet: %u",
7060 			pdev->stats.tx_comp_histogram.pkts_1);
7061 		DP_TRACE(FATAL, "2-20 Packets:  %u",
7062 			pdev->stats.tx_comp_histogram.pkts_2_20);
7063 		DP_TRACE(FATAL, "21-40 Packets: %u",
7064 			pdev->stats.tx_comp_histogram.pkts_21_40);
7065 		DP_TRACE(FATAL, "41-60 Packets: %u",
7066 			pdev->stats.tx_comp_histogram.pkts_41_60);
7067 		DP_TRACE(FATAL, "61-80 Packets: %u",
7068 			pdev->stats.tx_comp_histogram.pkts_61_80);
7069 		DP_TRACE(FATAL, "81-100 Packets: %u",
7070 			pdev->stats.tx_comp_histogram.pkts_81_100);
7071 		DP_TRACE(FATAL, "101-200 Packets: %u",
7072 			pdev->stats.tx_comp_histogram.pkts_101_200);
7073 		DP_TRACE(FATAL, "   201+ Packets: %u",
7074 			pdev->stats.tx_comp_histogram.pkts_201_plus);
7075 
7076 		DP_TRACE(FATAL, "Rx path statistics");
7077 
7078 		DP_TRACE(FATAL, "delivered %u msdus ( %llu bytes),",
7079 			pdev->stats.rx.to_stack.num,
7080 			pdev->stats.rx.to_stack.bytes);
7081 		for (i = 0; i <  CDP_MAX_RX_RINGS; i++)
7082 			DP_TRACE(FATAL, "received on reo[%d] %u msdus ( %llu bytes),",
7083 					i, pdev->stats.rx.rcvd_reo[i].num,
7084 					pdev->stats.rx.rcvd_reo[i].bytes);
7085 		DP_TRACE(FATAL, "intra-bss packets %u msdus ( %llu bytes),",
7086 			pdev->stats.rx.intra_bss.pkts.num,
7087 			pdev->stats.rx.intra_bss.pkts.bytes);
7088 		DP_TRACE(FATAL, "intra-bss fails %u msdus ( %llu bytes),",
7089 			pdev->stats.rx.intra_bss.fail.num,
7090 			pdev->stats.rx.intra_bss.fail.bytes);
7091 		DP_TRACE(FATAL, "raw packets %u msdus ( %llu bytes),",
7092 			pdev->stats.rx.raw.num,
7093 			pdev->stats.rx.raw.bytes);
7094 		DP_TRACE(FATAL, "dropped: error %u msdus",
7095 			pdev->stats.rx.err.mic_err);
7096 		DP_TRACE(FATAL, "peer invalid %u",
7097 			pdev->soc->stats.rx.err.rx_invalid_peer.num);
7098 
7099 		DP_TRACE(FATAL, "Reo Statistics");
7100 		DP_TRACE(FATAL, "rbm error: %u msdus",
7101 			pdev->soc->stats.rx.err.invalid_rbm);
7102 		DP_TRACE(FATAL, "hal ring access fail: %u msdus",
7103 			pdev->soc->stats.rx.err.hal_ring_access_fail);
7104 
7105 		for (error_code = 0; error_code < HAL_REO_ERR_MAX;
7106 				error_code++) {
7107 			if (!pdev->soc->stats.rx.err.reo_error[error_code])
7108 				continue;
7109 			DP_TRACE(FATAL, "Reo error number (%u): %u msdus",
7110 				error_code,
7111 				pdev->soc->stats.rx.err.reo_error[error_code]);
7112 		}
7113 
7114 		for (error_code = 0; error_code < HAL_RXDMA_ERR_MAX;
7115 				error_code++) {
7116 			if (!pdev->soc->stats.rx.err.rxdma_error[error_code])
7117 				continue;
7118 			DP_TRACE(FATAL, "Rxdma error number (%u): %u msdus",
7119 				error_code,
7120 				pdev->soc->stats.rx.err
7121 				.rxdma_error[error_code]);
7122 		}
7123 
7124 		DP_TRACE(FATAL, "Rx packets reaped per interrupt:");
7125 		DP_TRACE(FATAL, "Single Packet: %u",
7126 			 pdev->stats.rx_ind_histogram.pkts_1);
7127 		DP_TRACE(FATAL, "2-20 Packets:  %u",
7128 			 pdev->stats.rx_ind_histogram.pkts_2_20);
7129 		DP_TRACE(FATAL, "21-40 Packets: %u",
7130 			 pdev->stats.rx_ind_histogram.pkts_21_40);
7131 		DP_TRACE(FATAL, "41-60 Packets: %u",
7132 			 pdev->stats.rx_ind_histogram.pkts_41_60);
7133 		DP_TRACE(FATAL, "61-80 Packets: %u",
7134 			 pdev->stats.rx_ind_histogram.pkts_61_80);
7135 		DP_TRACE(FATAL, "81-100 Packets: %u",
7136 			 pdev->stats.rx_ind_histogram.pkts_81_100);
7137 		DP_TRACE(FATAL, "101-200 Packets: %u",
7138 			 pdev->stats.rx_ind_histogram.pkts_101_200);
7139 		DP_TRACE(FATAL, "   201+ Packets: %u",
7140 			 pdev->stats.rx_ind_histogram.pkts_201_plus);
7141 
7142 		DP_TRACE_STATS(ERROR, "%s: tso_enable: %u lro_enable: %u rx_hash: %u napi_enable: %u",
7143 			__func__,
7144 			pdev->soc->wlan_cfg_ctx->tso_enabled,
7145 			pdev->soc->wlan_cfg_ctx->lro_enabled,
7146 			pdev->soc->wlan_cfg_ctx->rx_hash,
7147 			pdev->soc->wlan_cfg_ctx->napi_enabled);
7148 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
7149 		DP_TRACE_STATS(ERROR, "%s: Tx flow stop queue: %u tx flow start queue offset: %u",
7150 			__func__,
7151 			pdev->soc->wlan_cfg_ctx->tx_flow_stop_queue_threshold,
7152 			pdev->soc->wlan_cfg_ctx->tx_flow_start_queue_offset);
7153 #endif
7154 	}
7155 }
7156 
/*
 * dp_txrx_dump_stats() - dump the requested statistics
 * @psoc - soc handle
 * @value - statistics option
 * @level - verbosity level for the dump
 *
 * Return: QDF_STATUS
 */
7161 static QDF_STATUS dp_txrx_dump_stats(void *psoc, uint16_t value,
7162 				     enum qdf_stats_verbosity_level level)
7163 {
7164 	struct dp_soc *soc =
7165 		(struct dp_soc *)psoc;
7166 	QDF_STATUS status = QDF_STATUS_SUCCESS;
7167 
7168 	if (!soc) {
7169 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
7170 			"%s: soc is NULL", __func__);
7171 		return QDF_STATUS_E_INVAL;
7172 	}
7173 
7174 	switch (value) {
7175 	case CDP_TXRX_PATH_STATS:
7176 		dp_txrx_path_stats(soc);
7177 		break;
7178 
7179 	case CDP_RX_RING_STATS:
7180 		dp_print_per_ring_stats(soc);
7181 		break;
7182 
7183 	case CDP_TXRX_TSO_STATS:
7184 		/* TODO: NOT IMPLEMENTED */
7185 		break;
7186 
7187 	case CDP_DUMP_TX_FLOW_POOL_INFO:
7188 		cdp_dump_flow_pool_info((struct cdp_soc_t *)soc);
7189 		break;
7190 
7191 	case CDP_DP_NAPI_STATS:
7192 		dp_print_napi_stats(soc);
7193 		break;
7194 
7195 	case CDP_TXRX_DESC_STATS:
7196 		/* TODO: NOT IMPLEMENTED */
7197 		break;
7198 
7199 	default:
7200 		status = QDF_STATUS_E_INVAL;
7201 		break;
7202 	}
7203 
7204 	return status;
7205 
7206 }
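
/*
 * Illustrative sketch (hypothetical caller): the dump above is normally
 * driven through the .display_stats cdp op registered below, e.g.
 *
 *	dp_txrx_dump_stats(soc, CDP_TXRX_PATH_STATS,
 *			   QDF_STATS_VERBOSITY_LEVEL_HIGH);
 *
 * The verbosity value is assumed here only for illustration; it is not
 * interpreted by this function.
 */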
7207 
7208 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
/**
 * dp_update_flow_control_parameters() - API to store datapath
 *                            flow control config parameters
 * @soc: soc handle
 * @params: ini parameter handle
 *
 * Return: void
 */
7217 static inline
7218 void dp_update_flow_control_parameters(struct dp_soc *soc,
7219 				struct cdp_config_params *params)
7220 {
7221 	soc->wlan_cfg_ctx->tx_flow_stop_queue_threshold =
7222 					params->tx_flow_stop_queue_threshold;
7223 	soc->wlan_cfg_ctx->tx_flow_start_queue_offset =
7224 					params->tx_flow_start_queue_offset;
7225 }
7226 #else
7227 static inline
7228 void dp_update_flow_control_parameters(struct dp_soc *soc,
7229 				struct cdp_config_params *params)
7230 {
7231 }
7232 #endif
7233 
/**
 * dp_update_config_parameters() - API to store datapath
 *                            config parameters
 * @psoc: soc handle
 * @params: ini parameter handle
 *
 * Return: QDF_STATUS
 */
7242 static
7243 QDF_STATUS dp_update_config_parameters(struct cdp_soc *psoc,
7244 				struct cdp_config_params *params)
7245 {
7246 	struct dp_soc *soc = (struct dp_soc *)psoc;
7247 
7248 	if (!(soc)) {
7249 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
7250 				"%s: Invalid handle", __func__);
7251 		return QDF_STATUS_E_INVAL;
7252 	}
7253 
7254 	soc->wlan_cfg_ctx->tso_enabled = params->tso_enable;
7255 	soc->wlan_cfg_ctx->lro_enabled = params->lro_enable;
7256 	soc->wlan_cfg_ctx->rx_hash = params->flow_steering_enable;
7257 	soc->wlan_cfg_ctx->tcp_udp_checksumoffload =
7258 				params->tcp_udp_checksumoffload;
7259 	soc->wlan_cfg_ctx->napi_enabled = params->napi_enable;
7260 	dp_update_flow_control_parameters(soc, params);
7261 
7262 	return QDF_STATUS_SUCCESS;
7263 }
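
/*
 * Illustrative sketch (hypothetical caller): the fields consumed above are
 * filled by the control path before invoking the .update_config_parameters
 * cdp op, roughly as follows.
 *
 *	struct cdp_config_params params = {0};
 *
 *	params.tso_enable = 1;
 *	params.lro_enable = 1;
 *	params.flow_steering_enable = 1;
 *	params.tcp_udp_checksumoffload = 1;
 *	params.napi_enable = 1;
 *	dp_update_config_parameters((struct cdp_soc *)soc, &params);
 */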
7264 
/**
 * dp_txrx_set_wds_rx_policy() - API to set the WDS rx policy on a vdev
 * @vdev_handle - datapath vdev handle
 * @val: WDS rx policy bitmap (WDS_POLICY_RX_UCAST_4ADDR,
 *	 WDS_POLICY_RX_MCAST_4ADDR)
 *
 * Return: void
 */
7273 #ifdef WDS_VENDOR_EXTENSION
7274 void
7275 dp_txrx_set_wds_rx_policy(
7276 		struct cdp_vdev *vdev_handle,
7277 		u_int32_t val)
7278 {
7279 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
7280 	struct dp_peer *peer;
7281 	if (vdev->opmode == wlan_op_mode_ap) {
7282 		/* for ap, set it on bss_peer */
7283 		TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
7284 			if (peer->bss_peer) {
7285 				peer->wds_ecm.wds_rx_filter = 1;
				peer->wds_ecm.wds_rx_ucast_4addr =
					(val & WDS_POLICY_RX_UCAST_4ADDR) ? 1:0;
				peer->wds_ecm.wds_rx_mcast_4addr =
					(val & WDS_POLICY_RX_MCAST_4ADDR) ? 1:0;
7288 				break;
7289 			}
7290 		}
	} else if (vdev->opmode == wlan_op_mode_sta) {
		/* for sta, set it on the connected bss peer */
		peer = TAILQ_FIRST(&vdev->peer_list);
		if (!peer)
			return;
		peer->wds_ecm.wds_rx_filter = 1;
		peer->wds_ecm.wds_rx_ucast_4addr =
			(val & WDS_POLICY_RX_UCAST_4ADDR) ? 1:0;
		peer->wds_ecm.wds_rx_mcast_4addr =
			(val & WDS_POLICY_RX_MCAST_4ADDR) ? 1:0;
7296 	}
7297 }
7298 
/**
 * dp_txrx_peer_wds_tx_policy_update() - API to set tx wds policy
 *
 * @peer_handle: datapath peer handle
 * @wds_tx_ucast: policy for unicast transmission
 * @wds_tx_mcast: policy for multicast transmission
 *
 * Return: void
 */
7308 void
7309 dp_txrx_peer_wds_tx_policy_update(struct cdp_peer *peer_handle,
7310 		int wds_tx_ucast, int wds_tx_mcast)
7311 {
7312 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
7313 	if (wds_tx_ucast || wds_tx_mcast) {
7314 		peer->wds_enabled = 1;
7315 		peer->wds_ecm.wds_tx_ucast_4addr = wds_tx_ucast;
7316 		peer->wds_ecm.wds_tx_mcast_4addr = wds_tx_mcast;
7317 	} else {
7318 		peer->wds_enabled = 0;
7319 		peer->wds_ecm.wds_tx_ucast_4addr = 0;
7320 		peer->wds_ecm.wds_tx_mcast_4addr = 0;
7321 	}
7322 
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
		  FL("Policy Update set to: peer->wds_enabled %d "
		     "peer->wds_ecm.wds_tx_ucast_4addr %d "
		     "peer->wds_ecm.wds_tx_mcast_4addr %d"),
		  peer->wds_enabled, peer->wds_ecm.wds_tx_ucast_4addr,
		  peer->wds_ecm.wds_tx_mcast_4addr);
7330 	return;
7331 }
7332 #endif
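
/*
 * Illustrative sketch (hypothetical caller): the WDS rx policy value passed
 * to dp_txrx_set_wds_rx_policy() is a bitmap built from the policy flags
 * checked above, e.g.
 *
 *	u_int32_t val = WDS_POLICY_RX_UCAST_4ADDR | WDS_POLICY_RX_MCAST_4ADDR;
 *
 *	dp_txrx_set_wds_rx_policy(vdev_handle, val);
 */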
7333 
7334 static struct cdp_wds_ops dp_ops_wds = {
7335 	.vdev_set_wds = dp_vdev_set_wds,
7336 #ifdef WDS_VENDOR_EXTENSION
7337 	.txrx_set_wds_rx_policy = dp_txrx_set_wds_rx_policy,
7338 	.txrx_wds_peer_tx_policy_update = dp_txrx_peer_wds_tx_policy_update,
7339 #endif
7340 };
7341 
/*
 * dp_txrx_data_tx_cb_set(): set the callback for non standard tx
 * @vdev_handle - datapath vdev handle
 * @callback - callback function
 * @ctxt - callback context
 *
 * Return: void
 */
7349 static void
7350 dp_txrx_data_tx_cb_set(struct cdp_vdev *vdev_handle,
7351 		       ol_txrx_data_tx_cb callback, void *ctxt)
7352 {
7353 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
7354 
7355 	vdev->tx_non_std_data_callback.func = callback;
7356 	vdev->tx_non_std_data_callback.ctxt = ctxt;
7357 }
7358 
7359 /**
7360  * dp_pdev_get_dp_txrx_handle() - get dp handle from pdev
7361  * @pdev_hdl: datapath pdev handle
7362  *
7363  * Return: opaque pointer to dp txrx handle
7364  */
7365 static void *dp_pdev_get_dp_txrx_handle(struct cdp_pdev *pdev_hdl)
7366 {
7367 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_hdl;
7368 
7369 	return pdev->dp_txrx_handle;
7370 }
7371 
7372 /**
7373  * dp_pdev_set_dp_txrx_handle() - set dp handle in pdev
7374  * @pdev_hdl: datapath pdev handle
7375  * @dp_txrx_hdl: opaque pointer for dp_txrx_handle
7376  *
7377  * Return: void
7378  */
7379 static void
7380 dp_pdev_set_dp_txrx_handle(struct cdp_pdev *pdev_hdl, void *dp_txrx_hdl)
7381 {
7382 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_hdl;
7383 
7384 	pdev->dp_txrx_handle = dp_txrx_hdl;
7385 }
7386 
7387 /**
7388  * dp_soc_get_dp_txrx_handle() - get context for external-dp from dp soc
7389  * @soc_handle: datapath soc handle
7390  *
7391  * Return: opaque pointer to external dp (non-core DP)
7392  */
7393 static void *dp_soc_get_dp_txrx_handle(struct cdp_soc *soc_handle)
7394 {
7395 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
7396 
7397 	return soc->external_txrx_handle;
7398 }
7399 
7400 /**
7401  * dp_soc_set_dp_txrx_handle() - set external dp handle in soc
7402  * @soc_handle: datapath soc handle
7403  * @txrx_handle: opaque pointer to external dp (non-core DP)
7404  *
7405  * Return: void
7406  */
7407 static void
7408 dp_soc_set_dp_txrx_handle(struct cdp_soc *soc_handle, void *txrx_handle)
7409 {
7410 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
7411 
7412 	soc->external_txrx_handle = txrx_handle;
7413 }
7414 
7415 #ifdef FEATURE_AST
7416 static void dp_peer_teardown_wifi3(struct cdp_vdev *vdev_hdl, void *peer_hdl)
7417 {
7418 	struct dp_vdev *vdev = (struct dp_vdev *) vdev_hdl;
7419 	struct dp_peer *peer = (struct dp_peer *) peer_hdl;
7420 	struct dp_soc *soc = (struct dp_soc *) vdev->pdev->soc;
7421 
	/*
	 * For the BSS peer, a new peer is not created on alloc_node if a
	 * peer with the same address already exists; instead the refcnt of
	 * the existing peer is incremented. Correspondingly, in the delete
	 * path only the refcnt is decremented and the peer is deleted only
	 * when all references are gone. So delete_in_progress should not be
	 * set for the bss_peer unless only 2 references remain (the peer map
	 * reference and the peer hash table reference).
	 */
7431 	if (peer->bss_peer && (qdf_atomic_read(&peer->ref_cnt) > 2)) {
7432 		return;
7433 	}
7434 
7435 	peer->delete_in_progress = true;
7436 	dp_peer_delete_ast_entries(soc, peer);
7437 }
7438 #endif
7439 
7440 #ifdef ATH_SUPPORT_NAC_RSSI
/**
 * dp_vdev_get_neighbour_rssi(): Get RSSI for a configured NAC (neighbour peer)
 * @vdev_hdl: DP vdev handle
 * @mac_addr: neighbour peer mac address
 * @rssi: pointer filled with the stored rssi value
 *
 * Return: QDF_STATUS_SUCCESS if the peer was found, error status otherwise.
 */
7448 QDF_STATUS  dp_vdev_get_neighbour_rssi(struct cdp_vdev *vdev_hdl,
7449 				       char *mac_addr,
7450 				       uint8_t *rssi)
7451 {
7452 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_hdl;
7453 	struct dp_pdev *pdev = vdev->pdev;
7454 	struct dp_neighbour_peer *peer = NULL;
7455 	QDF_STATUS status = QDF_STATUS_E_FAILURE;
7456 
7457 	*rssi = 0;
7458 	qdf_spin_lock_bh(&pdev->neighbour_peer_mutex);
7459 	TAILQ_FOREACH(peer, &pdev->neighbour_peers_list,
7460 		      neighbour_peer_list_elem) {
7461 		if (qdf_mem_cmp(&peer->neighbour_peers_macaddr.raw[0],
7462 				mac_addr, DP_MAC_ADDR_LEN) == 0) {
7463 			*rssi = peer->rssi;
7464 			status = QDF_STATUS_SUCCESS;
7465 			break;
7466 		}
7467 	}
7468 	qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex);
7469 	return status;
7470 }
7471 
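/**
 * dp_config_for_nac_rssi() - configure NAC RSSI measurement for a client
 * @vdev_handle: DP vdev handle
 * @cmd: CDP_NAC_PARAM_ADD / CDP_NAC_PARAM_DEL
 * @bssid: BSSID to be configured in FW for NAC RSSI measurement
 * @client_macaddr: mac address of the NAC (neighbour) client
 * @chan_num: channel number
 *
 * Return: QDF_STATUS
 */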
7472 static QDF_STATUS dp_config_for_nac_rssi(struct cdp_vdev *vdev_handle,
7473 		enum cdp_nac_param_cmd cmd, char *bssid, char *client_macaddr,
7474 		uint8_t chan_num)
7475 {
7476 
7477 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
7478 	struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;
7479 	struct dp_soc *soc = (struct dp_soc *) vdev->pdev->soc;
7480 
7481 	pdev->nac_rssi_filtering = 1;
7482 	/* Store address of NAC (neighbour peer) which will be checked
7483 	 * against TA of received packets.
7484 	 */
7485 
7486 	if (cmd == CDP_NAC_PARAM_ADD) {
7487 		dp_update_filter_neighbour_peers(vdev_handle, DP_NAC_PARAM_ADD,
7488 						 client_macaddr);
7489 	} else if (cmd == CDP_NAC_PARAM_DEL) {
7490 		dp_update_filter_neighbour_peers(vdev_handle,
7491 						 DP_NAC_PARAM_DEL,
7492 						 client_macaddr);
7493 	}
7494 
7495 	if (soc->cdp_soc.ol_ops->config_bssid_in_fw_for_nac_rssi)
7496 		soc->cdp_soc.ol_ops->config_bssid_in_fw_for_nac_rssi
7497 			((void *)vdev->pdev->ctrl_pdev,
7498 			 vdev->vdev_id, cmd, bssid);
7499 
7500 	return QDF_STATUS_SUCCESS;
7501 }
7502 #endif
7503 
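/**
 * dp_peer_map_attach_wifi3() - set max peer count and attach peer find module
 * @soc_hdl: datapath soc handle
 * @max_peers: maximum number of peers supported
 *
 * Return: QDF_STATUS
 */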
7504 static QDF_STATUS dp_peer_map_attach_wifi3(struct cdp_soc_t  *soc_hdl,
7505 		uint32_t max_peers)
7506 {
7507 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
7508 
7509 	soc->max_peers = max_peers;
7510 
	qdf_print("%s max_peers %u\n", __func__, max_peers);
7512 
7513 	if (dp_peer_find_attach(soc))
7514 		return QDF_STATUS_E_FAILURE;
7515 
7516 	return QDF_STATUS_SUCCESS;
7517 }
7518 
7519 /**
7520  * dp_pdev_set_ctrl_pdev() - set ctrl pdev handle in dp pdev
7521  * @dp_pdev: dp pdev handle
7522  * @ctrl_pdev: UMAC ctrl pdev handle
7523  *
7524  * Return: void
7525  */
7526 static void dp_pdev_set_ctrl_pdev(struct cdp_pdev *dp_pdev,
7527 				  struct cdp_ctrl_objmgr_pdev *ctrl_pdev)
7528 {
7529 	struct dp_pdev *pdev = (struct dp_pdev *)dp_pdev;
7530 
7531 	pdev->ctrl_pdev = ctrl_pdev;
7532 }
7533 
7534 static struct cdp_cmn_ops dp_ops_cmn = {
7535 	.txrx_soc_attach_target = dp_soc_attach_target_wifi3,
7536 	.txrx_vdev_attach = dp_vdev_attach_wifi3,
7537 	.txrx_vdev_detach = dp_vdev_detach_wifi3,
7538 	.txrx_pdev_attach = dp_pdev_attach_wifi3,
7539 	.txrx_pdev_detach = dp_pdev_detach_wifi3,
7540 	.txrx_peer_create = dp_peer_create_wifi3,
7541 	.txrx_peer_setup = dp_peer_setup_wifi3,
7542 #ifdef FEATURE_AST
7543 	.txrx_peer_teardown = dp_peer_teardown_wifi3,
7544 #else
7545 	.txrx_peer_teardown = NULL,
7546 #endif
7547 	.txrx_peer_add_ast = dp_peer_add_ast_wifi3,
7548 	.txrx_peer_del_ast = dp_peer_del_ast_wifi3,
7549 	.txrx_peer_update_ast = dp_peer_update_ast_wifi3,
7550 	.txrx_peer_ast_hash_find = dp_peer_ast_hash_find_wifi3,
7551 	.txrx_peer_ast_get_pdev_id = dp_peer_ast_get_pdev_id_wifi3,
7552 	.txrx_peer_ast_get_next_hop = dp_peer_ast_get_next_hop_wifi3,
7553 	.txrx_peer_ast_set_type = dp_peer_ast_set_type_wifi3,
7554 	.txrx_peer_ast_get_type = dp_peer_ast_get_type_wifi3,
7555 	.txrx_peer_delete = dp_peer_delete_wifi3,
7556 	.txrx_vdev_register = dp_vdev_register_wifi3,
7557 	.txrx_soc_detach = dp_soc_detach_wifi3,
7558 	.txrx_get_vdev_mac_addr = dp_get_vdev_mac_addr_wifi3,
7559 	.txrx_get_vdev_from_vdev_id = dp_get_vdev_from_vdev_id_wifi3,
7560 	.txrx_get_ctrl_pdev_from_vdev = dp_get_ctrl_pdev_from_vdev_wifi3,
7561 	.txrx_ath_getstats = dp_get_device_stats,
7562 	.addba_requestprocess = dp_addba_requestprocess_wifi3,
7563 	.addba_responsesetup = dp_addba_responsesetup_wifi3,
7564 	.addba_resp_tx_completion = dp_addba_resp_tx_completion_wifi3,
7565 	.delba_process = dp_delba_process_wifi3,
7566 	.set_addba_response = dp_set_addba_response,
7567 	.get_peer_mac_addr_frm_id = dp_get_peer_mac_addr_frm_id,
7568 	.flush_cache_rx_queue = NULL,
	/* TODO: APIs to get the dscp-tid map need to be added */
7570 	.set_vdev_dscp_tid_map = dp_set_vdev_dscp_tid_map_wifi3,
7571 	.set_pdev_dscp_tid_map = dp_set_pdev_dscp_tid_map_wifi3,
7572 	.txrx_stats_request = dp_txrx_stats_request,
7573 	.txrx_set_monitor_mode = dp_vdev_set_monitor_mode,
7574 	.txrx_get_pdev_id_frm_pdev = dp_get_pdev_id_frm_pdev,
7575 	.txrx_pdev_set_chan_noise_floor = dp_pdev_set_chan_noise_floor,
7576 	.txrx_set_nac = dp_set_nac,
7577 	.txrx_get_tx_pending = dp_get_tx_pending,
7578 	.txrx_set_pdev_tx_capture = dp_config_debug_sniffer,
7579 	.txrx_get_peer_mac_from_peer_id = dp_get_peer_mac_from_peer_id,
7580 	.display_stats = dp_txrx_dump_stats,
7581 	.txrx_soc_set_nss_cfg = dp_soc_set_nss_cfg_wifi3,
7582 	.txrx_soc_get_nss_cfg = dp_soc_get_nss_cfg_wifi3,
7583 	.txrx_intr_attach = dp_soc_interrupt_attach_wrapper,
7584 	.txrx_intr_detach = dp_soc_interrupt_detach,
7585 	.set_pn_check = dp_set_pn_check_wifi3,
7586 	.update_config_parameters = dp_update_config_parameters,
7587 	/* TODO: Add other functions */
7588 	.txrx_data_tx_cb_set = dp_txrx_data_tx_cb_set,
7589 	.get_dp_txrx_handle = dp_pdev_get_dp_txrx_handle,
7590 	.set_dp_txrx_handle = dp_pdev_set_dp_txrx_handle,
7591 	.get_soc_dp_txrx_handle = dp_soc_get_dp_txrx_handle,
7592 	.set_soc_dp_txrx_handle = dp_soc_set_dp_txrx_handle,
7593 	.tx_send = dp_tx_send,
7594 	.txrx_peer_reset_ast = dp_wds_reset_ast_wifi3,
7595 	.txrx_peer_reset_ast_table = dp_wds_reset_ast_table_wifi3,
7596 	.txrx_peer_flush_ast_table = dp_wds_flush_ast_table_wifi3,
7597 	.txrx_peer_map_attach = dp_peer_map_attach_wifi3,
7598 	.txrx_pdev_set_ctrl_pdev = dp_pdev_set_ctrl_pdev,
7599 };
7600 
7601 static struct cdp_ctrl_ops dp_ops_ctrl = {
7602 	.txrx_peer_authorize = dp_peer_authorize,
7603 #ifdef QCA_SUPPORT_SON
7604 	.txrx_set_inact_params = dp_set_inact_params,
7605 	.txrx_start_inact_timer = dp_start_inact_timer,
7606 	.txrx_set_overload = dp_set_overload,
7607 	.txrx_peer_is_inact = dp_peer_is_inact,
7608 	.txrx_mark_peer_inact = dp_mark_peer_inact,
7609 #endif
7610 	.txrx_set_vdev_rx_decap_type = dp_set_vdev_rx_decap_type,
7611 	.txrx_set_tx_encap_type = dp_set_vdev_tx_encap_type,
7612 #ifdef MESH_MODE_SUPPORT
7613 	.txrx_set_mesh_mode  = dp_peer_set_mesh_mode,
7614 	.txrx_set_mesh_rx_filter = dp_peer_set_mesh_rx_filter,
7615 #endif
7616 	.txrx_set_vdev_param = dp_set_vdev_param,
7617 	.txrx_peer_set_nawds = dp_peer_set_nawds,
7618 	.txrx_set_pdev_reo_dest = dp_set_pdev_reo_dest,
7619 	.txrx_get_pdev_reo_dest = dp_get_pdev_reo_dest,
7620 	.txrx_set_filter_neighbour_peers = dp_set_filter_neighbour_peers,
7621 	.txrx_update_filter_neighbour_peers =
7622 		dp_update_filter_neighbour_peers,
7623 	.txrx_get_sec_type = dp_get_sec_type,
7624 	/* TODO: Add other functions */
7625 	.txrx_wdi_event_sub = dp_wdi_event_sub,
7626 	.txrx_wdi_event_unsub = dp_wdi_event_unsub,
7627 #ifdef WDI_EVENT_ENABLE
7628 	.txrx_get_pldev = dp_get_pldev,
7629 #endif
7630 	.txrx_set_pdev_param = dp_set_pdev_param,
7631 #ifdef ATH_SUPPORT_NAC_RSSI
7632 	.txrx_vdev_config_for_nac_rssi = dp_config_for_nac_rssi,
7633 	.txrx_vdev_get_neighbour_rssi = dp_vdev_get_neighbour_rssi,
7634 #endif
7635 	.set_key = dp_set_michael_key,
7636 };
7637 
7638 static struct cdp_me_ops dp_ops_me = {
7639 #ifdef ATH_SUPPORT_IQUE
7640 	.tx_me_alloc_descriptor = dp_tx_me_alloc_descriptor,
7641 	.tx_me_free_descriptor = dp_tx_me_free_descriptor,
7642 	.tx_me_convert_ucast = dp_tx_me_send_convert_ucast,
7643 #endif
7644 };
7645 
7646 static struct cdp_mon_ops dp_ops_mon = {
7647 	.txrx_monitor_set_filter_ucast_data = NULL,
7648 	.txrx_monitor_set_filter_mcast_data = NULL,
7649 	.txrx_monitor_set_filter_non_data = NULL,
7650 	.txrx_monitor_get_filter_ucast_data = dp_vdev_get_filter_ucast_data,
7651 	.txrx_monitor_get_filter_mcast_data = dp_vdev_get_filter_mcast_data,
7652 	.txrx_monitor_get_filter_non_data = dp_vdev_get_filter_non_data,
7653 	.txrx_reset_monitor_mode = dp_reset_monitor_mode,
7654 	/* Added support for HK advance filter */
7655 	.txrx_set_advance_monitor_filter = dp_pdev_set_advance_monitor_filter,
7656 };
7657 
7658 static struct cdp_host_stats_ops dp_ops_host_stats = {
7659 	.txrx_per_peer_stats = dp_get_host_peer_stats,
7660 	.get_fw_peer_stats = dp_get_fw_peer_stats,
7661 	.get_htt_stats = dp_get_htt_stats,
7662 	.txrx_enable_enhanced_stats = dp_enable_enhanced_stats,
7663 	.txrx_disable_enhanced_stats = dp_disable_enhanced_stats,
7664 	.txrx_stats_publish = dp_txrx_stats_publish,
7665 	.txrx_get_vdev_stats  = dp_txrx_get_vdev_stats,
7666 	.txrx_get_peer_stats = dp_txrx_get_peer_stats,
7667 	.txrx_reset_peer_stats = dp_txrx_reset_peer_stats,
7668 	/* TODO */
7669 };
7670 
7671 static struct cdp_raw_ops dp_ops_raw = {
7672 	/* TODO */
7673 };
7674 
7675 #ifdef CONFIG_WIN
7676 static struct cdp_pflow_ops dp_ops_pflow = {
7677 	/* TODO */
7678 };
7679 #endif /* CONFIG_WIN */
7680 
7681 #ifdef FEATURE_RUNTIME_PM
7682 /**
7683  * dp_runtime_suspend() - ensure DP is ready to runtime suspend
7684  * @opaque_pdev: DP pdev context
7685  *
7686  * DP is ready to runtime suspend if there are no pending TX packets.
7687  *
7688  * Return: QDF_STATUS
7689  */
7690 static QDF_STATUS dp_runtime_suspend(struct cdp_pdev *opaque_pdev)
7691 {
7692 	struct dp_pdev *pdev = (struct dp_pdev *)opaque_pdev;
7693 	struct dp_soc *soc = pdev->soc;
7694 
	/* Call DP TX flow control API to check if there are any
	 * pending packets
	 */
7697 
7698 	if (soc->intr_mode == DP_INTR_POLL)
7699 		qdf_timer_stop(&soc->int_timer);
7700 
7701 	return QDF_STATUS_SUCCESS;
7702 }
7703 
7704 /**
7705  * dp_runtime_resume() - ensure DP is ready to runtime resume
7706  * @opaque_pdev: DP pdev context
7707  *
7708  * Resume DP for runtime PM.
7709  *
7710  * Return: QDF_STATUS
7711  */
7712 static QDF_STATUS dp_runtime_resume(struct cdp_pdev *opaque_pdev)
7713 {
7714 	struct dp_pdev *pdev = (struct dp_pdev *)opaque_pdev;
7715 	struct dp_soc *soc = pdev->soc;
7716 	void *hal_srng;
7717 	int i;
7718 
7719 	if (soc->intr_mode == DP_INTR_POLL)
7720 		qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
7721 
7722 	for (i = 0; i < MAX_TCL_DATA_RINGS; i++) {
7723 		hal_srng = soc->tcl_data_ring[i].hal_srng;
7724 		if (hal_srng) {
			/* hal_srng_access_start() is only needed here to
			 * take the ring lock
			 */
			hal_srng_access_start(soc->hal_soc, hal_srng);
			/* hal_srng_access_end() updates the SRC ring head
			 * pointer so HW can send all pending packets
			 */
			hal_srng_access_end(soc->hal_soc, hal_srng);
7730 		}
7731 	}
7732 
7733 	return QDF_STATUS_SUCCESS;
7734 }
7735 #endif /* FEATURE_RUNTIME_PM */
7736 
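/**
 * dp_bus_suspend() - stop the interrupt poll timer (in polling mode) before
 *		      bus suspend
 * @opaque_pdev: DP pdev context
 *
 * Return: QDF_STATUS
 */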
7737 static QDF_STATUS dp_bus_suspend(struct cdp_pdev *opaque_pdev)
7738 {
7739 	struct dp_pdev *pdev = (struct dp_pdev *)opaque_pdev;
7740 	struct dp_soc *soc = pdev->soc;
7741 
7742 	if (soc->intr_mode == DP_INTR_POLL)
7743 		qdf_timer_stop(&soc->int_timer);
7744 
7745 	return QDF_STATUS_SUCCESS;
7746 }
7747 
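/**
 * dp_bus_resume() - restart the interrupt poll timer (in polling mode) after
 *		     bus resume
 * @opaque_pdev: DP pdev context
 *
 * Return: QDF_STATUS
 */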
7748 static QDF_STATUS dp_bus_resume(struct cdp_pdev *opaque_pdev)
7749 {
7750 	struct dp_pdev *pdev = (struct dp_pdev *)opaque_pdev;
7751 	struct dp_soc *soc = pdev->soc;
7752 
7753 	if (soc->intr_mode == DP_INTR_POLL)
7754 		qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
7755 
7756 	return QDF_STATUS_SUCCESS;
7757 }
7758 
7759 #ifndef CONFIG_WIN
7760 static struct cdp_misc_ops dp_ops_misc = {
7761 	.tx_non_std = dp_tx_non_std,
7762 	.get_opmode = dp_get_opmode,
7763 #ifdef FEATURE_RUNTIME_PM
7764 	.runtime_suspend = dp_runtime_suspend,
7765 	.runtime_resume = dp_runtime_resume,
7766 #endif /* FEATURE_RUNTIME_PM */
7767 	.pkt_log_init = dp_pkt_log_init,
7768 	.pkt_log_con_service = dp_pkt_log_con_service,
7769 };
7770 
7771 static struct cdp_flowctl_ops dp_ops_flowctl = {
7772 	/* WIFI 3.0 DP implement as required. */
7773 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
7774 	.flow_pool_map_handler = dp_tx_flow_pool_map,
7775 	.flow_pool_unmap_handler = dp_tx_flow_pool_unmap,
7776 	.register_pause_cb = dp_txrx_register_pause_cb,
7777 	.dump_flow_pool_info = dp_tx_dump_flow_pool_info,
7778 #endif /* QCA_LL_TX_FLOW_CONTROL_V2 */
7779 };
7780 
7781 static struct cdp_lflowctl_ops dp_ops_l_flowctl = {
7782 	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
7783 };
7784 
7785 #ifdef IPA_OFFLOAD
7786 static struct cdp_ipa_ops dp_ops_ipa = {
7787 	.ipa_get_resource = dp_ipa_get_resource,
7788 	.ipa_set_doorbell_paddr = dp_ipa_set_doorbell_paddr,
7789 	.ipa_op_response = dp_ipa_op_response,
7790 	.ipa_register_op_cb = dp_ipa_register_op_cb,
7791 	.ipa_get_stat = dp_ipa_get_stat,
7792 	.ipa_tx_data_frame = dp_tx_send_ipa_data_frame,
7793 	.ipa_enable_autonomy = dp_ipa_enable_autonomy,
7794 	.ipa_disable_autonomy = dp_ipa_disable_autonomy,
7795 	.ipa_setup = dp_ipa_setup,
7796 	.ipa_cleanup = dp_ipa_cleanup,
7797 	.ipa_setup_iface = dp_ipa_setup_iface,
7798 	.ipa_cleanup_iface = dp_ipa_cleanup_iface,
7799 	.ipa_enable_pipes = dp_ipa_enable_pipes,
7800 	.ipa_disable_pipes = dp_ipa_disable_pipes,
7801 	.ipa_set_perf_level = dp_ipa_set_perf_level
7802 };
7803 #endif
7804 
7805 static struct cdp_bus_ops dp_ops_bus = {
7806 	.bus_suspend = dp_bus_suspend,
7807 	.bus_resume = dp_bus_resume
7808 };
7809 
7810 static struct cdp_ocb_ops dp_ops_ocb = {
7811 	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
7812 };
7813 
7814 
7815 static struct cdp_throttle_ops dp_ops_throttle = {
7816 	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
7817 };
7818 
7819 static struct cdp_mob_stats_ops dp_ops_mob_stats = {
7820 	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
7821 };
7822 
7823 static struct cdp_cfg_ops dp_ops_cfg = {
7824 	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
7825 };
7826 
/*
 * dp_wrapper_peer_get_ref_by_addr - wrapper function to get a reference
 *				     to a peer by mac address
 * @dev: physical device instance
 * @peer_mac_addr: peer mac address
 * @local_id: local id for the peer
 * @debug_id: to track enum peer access
 *
 * Return: peer instance pointer
 */
7836 static inline void *
7837 dp_wrapper_peer_get_ref_by_addr(struct cdp_pdev *dev, u8 *peer_mac_addr,
7838 				u8 *local_id,
7839 				enum peer_debug_id_type debug_id)
7840 {
7841 	/*
7842 	 * Currently this function does not implement the "get ref"
7843 	 * functionality and is mapped to dp_find_peer_by_addr which does not
7844 	 * increment the peer ref count. So the peer state is uncertain after
7845 	 * calling this API. The functionality needs to be implemented.
7846 	 * Accordingly the corresponding release_ref function is NULL.
7847 	 */
7848 	return dp_find_peer_by_addr(dev, peer_mac_addr, local_id);
7849 }
7850 
7851 static struct cdp_peer_ops dp_ops_peer = {
7852 	.register_peer = dp_register_peer,
7853 	.clear_peer = dp_clear_peer,
7854 	.find_peer_by_addr = dp_find_peer_by_addr,
7855 	.find_peer_by_addr_and_vdev = dp_find_peer_by_addr_and_vdev,
7856 	.peer_get_ref_by_addr = dp_wrapper_peer_get_ref_by_addr,
7857 	.peer_release_ref = NULL,
7858 	.local_peer_id = dp_local_peer_id,
7859 	.peer_find_by_local_id = dp_peer_find_by_local_id,
7860 	.peer_state_update = dp_peer_state_update,
7861 	.get_vdevid = dp_get_vdevid,
7862 	.get_vdev_by_sta_id = dp_get_vdev_by_sta_id,
7863 	.peer_get_peer_mac_addr = dp_peer_get_peer_mac_addr,
7864 	.get_vdev_for_peer = dp_get_vdev_for_peer,
7865 	.get_peer_state = dp_get_peer_state,
7866 	.get_last_mgmt_timestamp = dp_get_last_mgmt_timestamp,
7867 	.update_last_mgmt_timestamp = dp_update_last_mgmt_timestamp,
7868 };
7869 #endif
7870 
7871 static struct cdp_ops dp_txrx_ops = {
7872 	.cmn_drv_ops = &dp_ops_cmn,
7873 	.ctrl_ops = &dp_ops_ctrl,
7874 	.me_ops = &dp_ops_me,
7875 	.mon_ops = &dp_ops_mon,
7876 	.host_stats_ops = &dp_ops_host_stats,
7877 	.wds_ops = &dp_ops_wds,
7878 	.raw_ops = &dp_ops_raw,
7879 #ifdef CONFIG_WIN
7880 	.pflow_ops = &dp_ops_pflow,
7881 #endif /* CONFIG_WIN */
7882 #ifndef CONFIG_WIN
7883 	.misc_ops = &dp_ops_misc,
7884 	.cfg_ops = &dp_ops_cfg,
7885 	.flowctl_ops = &dp_ops_flowctl,
7886 	.l_flowctl_ops = &dp_ops_l_flowctl,
7887 #ifdef IPA_OFFLOAD
7888 	.ipa_ops = &dp_ops_ipa,
7889 #endif
7890 	.bus_ops = &dp_ops_bus,
7891 	.ocb_ops = &dp_ops_ocb,
7892 	.peer_ops = &dp_ops_peer,
7893 	.throttle_ops = &dp_ops_throttle,
7894 	.mob_stats_ops = &dp_ops_mob_stats,
7895 #endif
7896 };
7897 
/*
 * dp_soc_set_txrx_ring_map() - fill the default tx cpu/ring map
 * @soc: DP handler for soc
 *
 * Return: Void
 */
7904 static void dp_soc_set_txrx_ring_map(struct dp_soc *soc)
7905 {
7906 	uint32_t i;
7907 	for (i = 0; i < WLAN_CFG_INT_NUM_CONTEXTS; i++) {
7908 		soc->tx_ring_map[i] = dp_cpu_ring_map[DP_DEFAULT_MAP][i];
7909 	}
7910 }
7911 
7912 #ifdef QCA_WIFI_QCA8074
7913 /**
7914  * dp_soc_attach_wifi3() - Attach txrx SOC
7915  * @ctrl_psoc:	Opaque SOC handle from control plane
7916  * @htc_handle:	Opaque HTC handle
7917  * @hif_handle:	Opaque HIF handle
7918  * @qdf_osdev:	QDF device
7919  * @ol_ops:	Offload Operations
7920  * @device_id:	Device ID
7921  *
7922  * Return: DP SOC handle on success, NULL on failure
7923  */
7924 void *dp_soc_attach_wifi3(void *ctrl_psoc, void *hif_handle,
7925 			  HTC_HANDLE htc_handle, qdf_device_t qdf_osdev,
7926 			  struct ol_if_ops *ol_ops, uint16_t device_id)
7927 {
7928 	struct dp_soc *soc = qdf_mem_malloc(sizeof(*soc));
7929 	int target_type;
7930 
7931 	if (!soc) {
7932 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
7933 			FL("DP SOC memory allocation failed"));
7934 		goto fail0;
7935 	}
7936 
7937 	soc->device_id = device_id;
7938 	soc->cdp_soc.ops = &dp_txrx_ops;
7939 	soc->cdp_soc.ol_ops = ol_ops;
7940 	soc->ctrl_psoc = ctrl_psoc;
7941 	soc->osdev = qdf_osdev;
7942 	soc->hif_handle = hif_handle;
7943 
7944 	soc->hal_soc = hif_get_hal_handle(hif_handle);
7945 	soc->htt_handle = htt_soc_attach(soc, ctrl_psoc, htc_handle,
7946 		soc->hal_soc, qdf_osdev);
7947 	if (!soc->htt_handle) {
7948 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
7949 			FL("HTT attach failed"));
7950 		goto fail1;
7951 	}
7952 
7953 	soc->wlan_cfg_ctx = wlan_cfg_soc_attach(soc->ctrl_psoc);
7954 	if (!soc->wlan_cfg_ctx) {
7955 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
7956 			FL("wlan_cfg_soc_attach failed"));
7957 		goto fail2;
7958 	}
7959 	target_type = hal_get_target_type(soc->hal_soc);
7960 	switch (target_type) {
7961 	case TARGET_TYPE_QCA6290:
7962 #ifdef QCA_WIFI_QCA6390
7963 	case TARGET_TYPE_QCA6390:
7964 #endif
7965 		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
7966 					       REO_DST_RING_SIZE_QCA6290);
7967 		break;
7968 	case TARGET_TYPE_QCA8074:
7969 	case TARGET_TYPE_QCA8074V2:
7970 		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
7971 					       REO_DST_RING_SIZE_QCA8074);
7972 		break;
7973 	default:
7974 		qdf_print("%s: Unknown tgt type %d\n", __func__, target_type);
7975 		qdf_assert_always(0);
7976 		break;
7977 	}
7978 
7979 	wlan_cfg_set_rx_hash(soc->wlan_cfg_ctx,
7980 			     cfg_get(ctrl_psoc, CFG_DP_RX_HASH));
7981 	soc->cce_disable = false;
7982 
7983 	if (soc->cdp_soc.ol_ops->get_dp_cfg_param) {
7984 		int ret = soc->cdp_soc.ol_ops->get_dp_cfg_param(soc->ctrl_psoc,
7985 				CDP_CFG_MAX_PEER_ID);
7986 
7987 		if (ret != -EINVAL) {
7988 			wlan_cfg_set_max_peer_id(soc->wlan_cfg_ctx, ret);
7989 		}
7990 
7991 		ret = soc->cdp_soc.ol_ops->get_dp_cfg_param(soc->ctrl_psoc,
7992 				CDP_CFG_CCE_DISABLE);
7993 		if (ret == 1)
7994 			soc->cce_disable = true;
7995 	}
7996 
7997 	qdf_spinlock_create(&soc->peer_ref_mutex);
7998 
7999 	qdf_spinlock_create(&soc->reo_desc_freelist_lock);
8000 	qdf_list_create(&soc->reo_desc_freelist, REO_DESC_FREELIST_SIZE);
8001 
8002 	/* fill the tx/rx cpu ring map*/
8003 	dp_soc_set_txrx_ring_map(soc);
8004 
8005 	qdf_spinlock_create(&soc->htt_stats.lock);
8006 	/* initialize work queue for stats processing */
8007 	qdf_create_work(0, &soc->htt_stats.work, htt_t2h_stats_handler, soc);
8008 
8009 	/*Initialize inactivity timer for wifison */
8010 	dp_init_inact_timer(soc);
8011 
8012 	return (void *)soc;
8013 
8014 fail2:
8015 	htt_soc_detach(soc->htt_handle);
8016 fail1:
8017 	qdf_mem_free(soc);
8018 fail0:
8019 	return NULL;
8020 }
8021 #endif
8022 
/*
 * dp_get_pdev_for_mac_id() - Return pdev for mac_id
 *
 * @soc: handle to DP soc
 * @mac_id: MAC id
 *
 * Return: pdev corresponding to MAC id
 */
8031 void *dp_get_pdev_for_mac_id(struct dp_soc *soc, uint32_t mac_id)
8032 {
8033 	if (wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx))
8034 		return soc->pdev_list[mac_id];
8035 
	/* Typically for MCL, as there is only 1 PDEV */
8037 	return soc->pdev_list[0];
8038 }
8039 
/*
 * dp_is_hw_dbs_enable() - Procedure to check if DBS is supported
 * @soc:		DP SoC context
 * @max_mac_rings:	No of MAC rings; forced to 1 if DBS is not supported
 *
 * Return: None
 */
8047 static
8048 void dp_is_hw_dbs_enable(struct dp_soc *soc,
8049 				int *max_mac_rings)
8050 {
8051 	bool dbs_enable = false;
8052 	if (soc->cdp_soc.ol_ops->is_hw_dbs_2x2_capable)
8053 		dbs_enable = soc->cdp_soc.ol_ops->
8054 		is_hw_dbs_2x2_capable(soc->ctrl_psoc);
8055 
8056 	*max_mac_rings = (dbs_enable)?(*max_mac_rings):1;
8057 }
8058 
/*
 * dp_set_pktlog_wifi3() - configure pktlog for a WDI event
 * @pdev: Datapath PDEV handle
 * @event: which event's notifications are being subscribed to
 * @enable: WDI event subscribe or not. (True or False)
 *
 * Return: 0 on success
 */
8067 #ifdef WDI_EVENT_ENABLE
8068 int dp_set_pktlog_wifi3(struct dp_pdev *pdev, uint32_t event,
8069 	bool enable)
8070 {
8071 	struct dp_soc *soc = pdev->soc;
8072 	struct htt_rx_ring_tlv_filter htt_tlv_filter = {0};
8073 	int max_mac_rings = wlan_cfg_get_num_mac_rings
8074 					(pdev->wlan_cfg_ctx);
8075 	uint8_t mac_id = 0;
8076 
8077 	dp_is_hw_dbs_enable(soc, &max_mac_rings);
8078 
8079 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
8080 			FL("Max_mac_rings %d "),
8081 			max_mac_rings);
8082 
8083 	if (enable) {
8084 		switch (event) {
8085 		case WDI_EVENT_RX_DESC:
8086 			if (pdev->monitor_vdev) {
8087 				/* Nothing needs to be done if monitor mode is
8088 				 * enabled
8089 				 */
8090 				return 0;
8091 			}
8092 			if (pdev->rx_pktlog_mode != DP_RX_PKTLOG_FULL) {
8093 				pdev->rx_pktlog_mode = DP_RX_PKTLOG_FULL;
8094 				htt_tlv_filter.mpdu_start = 1;
8095 				htt_tlv_filter.msdu_start = 1;
8096 				htt_tlv_filter.msdu_end = 1;
8097 				htt_tlv_filter.mpdu_end = 1;
8098 				htt_tlv_filter.packet_header = 1;
8099 				htt_tlv_filter.attention = 1;
8100 				htt_tlv_filter.ppdu_start = 1;
8101 				htt_tlv_filter.ppdu_end = 1;
8102 				htt_tlv_filter.ppdu_end_user_stats = 1;
8103 				htt_tlv_filter.ppdu_end_user_stats_ext = 1;
8104 				htt_tlv_filter.ppdu_end_status_done = 1;
8105 				htt_tlv_filter.enable_fp = 1;
8106 				htt_tlv_filter.fp_mgmt_filter = FILTER_MGMT_ALL;
8107 				htt_tlv_filter.fp_ctrl_filter = FILTER_CTRL_ALL;
8108 				htt_tlv_filter.fp_data_filter = FILTER_DATA_ALL;
8109 				htt_tlv_filter.mo_mgmt_filter = FILTER_MGMT_ALL;
8110 				htt_tlv_filter.mo_ctrl_filter = FILTER_CTRL_ALL;
8111 				htt_tlv_filter.mo_data_filter = FILTER_DATA_ALL;
8112 
8113 				for (mac_id = 0; mac_id < max_mac_rings;
8114 								mac_id++) {
8115 					int mac_for_pdev =
8116 						dp_get_mac_id_for_pdev(mac_id,
8117 								pdev->pdev_id);
8118 
8119 					htt_h2t_rx_ring_cfg(soc->htt_handle,
8120 					 mac_for_pdev,
8121 					 pdev->rxdma_mon_status_ring[mac_id]
8122 					 .hal_srng,
8123 					 RXDMA_MONITOR_STATUS,
8124 					 RX_BUFFER_SIZE,
8125 					 &htt_tlv_filter);
8126 
8127 				}
8128 
8129 				if (soc->reap_timer_init)
8130 					qdf_timer_mod(&soc->mon_reap_timer,
8131 					DP_INTR_POLL_TIMER_MS);
8132 			}
8133 			break;
8134 
8135 		case WDI_EVENT_LITE_RX:
8136 			if (pdev->monitor_vdev) {
8137 				/* Nothing needs to be done if monitor mode is
8138 				 * enabled
8139 				 */
8140 				return 0;
8141 			}
8142 
8143 			if (pdev->rx_pktlog_mode != DP_RX_PKTLOG_LITE) {
8144 				pdev->rx_pktlog_mode = DP_RX_PKTLOG_LITE;
8145 
8146 				htt_tlv_filter.ppdu_start = 1;
8147 				htt_tlv_filter.ppdu_end = 1;
8148 				htt_tlv_filter.ppdu_end_user_stats = 1;
8149 				htt_tlv_filter.ppdu_end_user_stats_ext = 1;
8150 				htt_tlv_filter.ppdu_end_status_done = 1;
8151 				htt_tlv_filter.mpdu_start = 1;
8152 				htt_tlv_filter.enable_fp = 1;
8153 				htt_tlv_filter.fp_mgmt_filter = FILTER_MGMT_ALL;
8154 				htt_tlv_filter.fp_ctrl_filter = FILTER_CTRL_ALL;
8155 				htt_tlv_filter.fp_data_filter = FILTER_DATA_ALL;
8156 				htt_tlv_filter.mo_mgmt_filter = FILTER_MGMT_ALL;
8157 				htt_tlv_filter.mo_ctrl_filter = FILTER_CTRL_ALL;
8158 				htt_tlv_filter.mo_data_filter = FILTER_DATA_ALL;
8159 
8160 				for (mac_id = 0; mac_id < max_mac_rings;
8161 								mac_id++) {
8162 					int mac_for_pdev =
8163 						dp_get_mac_id_for_pdev(mac_id,
8164 								pdev->pdev_id);
8165 
8166 					htt_h2t_rx_ring_cfg(soc->htt_handle,
8167 					mac_for_pdev,
8168 					pdev->rxdma_mon_status_ring[mac_id]
8169 					.hal_srng,
8170 					RXDMA_MONITOR_STATUS,
8171 					RX_BUFFER_SIZE_PKTLOG_LITE,
8172 					&htt_tlv_filter);
8173 				}
8174 
8175 				if (soc->reap_timer_init)
8176 					qdf_timer_mod(&soc->mon_reap_timer,
8177 					DP_INTR_POLL_TIMER_MS);
8178 			}
8179 			break;
8180 
8181 		case WDI_EVENT_LITE_T2H:
8182 			if (pdev->monitor_vdev) {
8183 				/* Nothing needs to be done if monitor mode is
8184 				 * enabled
8185 				 */
8186 				return 0;
8187 			}
8188 
8189 			for (mac_id = 0; mac_id < max_mac_rings; mac_id++) {
8190 				int mac_for_pdev = dp_get_mac_id_for_pdev(
8191 							mac_id,	pdev->pdev_id);
8192 
8193 				pdev->pktlog_ppdu_stats = true;
8194 				dp_h2t_cfg_stats_msg_send(pdev,
8195 					DP_PPDU_TXLITE_STATS_BITMASK_CFG,
8196 					mac_for_pdev);
8197 			}
8198 			break;
8199 
8200 		default:
8201 			/* Nothing needs to be done for other pktlog types */
8202 			break;
8203 		}
8204 	} else {
8205 		switch (event) {
8206 		case WDI_EVENT_RX_DESC:
8207 		case WDI_EVENT_LITE_RX:
8208 			if (pdev->monitor_vdev) {
8209 				/* Nothing needs to be done if monitor mode is
8210 				 * enabled
8211 				 */
8212 				return 0;
8213 			}
8214 			if (pdev->rx_pktlog_mode != DP_RX_PKTLOG_DISABLED) {
8215 				pdev->rx_pktlog_mode = DP_RX_PKTLOG_DISABLED;
8216 
8217 				for (mac_id = 0; mac_id < max_mac_rings;
8218 								mac_id++) {
8219 					int mac_for_pdev =
8220 						dp_get_mac_id_for_pdev(mac_id,
8221 								pdev->pdev_id);
8222 
8223 					htt_h2t_rx_ring_cfg(soc->htt_handle,
8224 					  mac_for_pdev,
8225 					  pdev->rxdma_mon_status_ring[mac_id]
8226 					  .hal_srng,
8227 					  RXDMA_MONITOR_STATUS,
8228 					  RX_BUFFER_SIZE,
8229 					  &htt_tlv_filter);
8230 				}
8231 
8232 				if (soc->reap_timer_init)
8233 					qdf_timer_stop(&soc->mon_reap_timer);
8234 			}
8235 			break;
8236 		case WDI_EVENT_LITE_T2H:
8237 			if (pdev->monitor_vdev) {
8238 				/* Nothing needs to be done if monitor mode is
8239 				 * enabled
8240 				 */
8241 				return 0;
8242 			}
			/* To disable HTT_H2T_MSG_TYPE_PPDU_STATS_CFG in FW,
			 * pass the value 0. Once these macros are defined in
			 * the htt header file, the proper macros will be used.
			 */
8247 			for (mac_id = 0; mac_id < max_mac_rings; mac_id++) {
8248 				int mac_for_pdev =
8249 						dp_get_mac_id_for_pdev(mac_id,
8250 								pdev->pdev_id);
8251 
8252 				pdev->pktlog_ppdu_stats = false;
				if (!pdev->enhanced_stats_en &&
				    !pdev->tx_sniffer_enable &&
				    !pdev->mcopy_mode) {
					dp_h2t_cfg_stats_msg_send(pdev, 0,
								mac_for_pdev);
				} else if (pdev->tx_sniffer_enable ||
					   pdev->mcopy_mode) {
					dp_h2t_cfg_stats_msg_send(pdev,
						DP_PPDU_STATS_CFG_SNIFFER,
						mac_for_pdev);
				} else if (pdev->enhanced_stats_en) {
					dp_h2t_cfg_stats_msg_send(pdev,
						DP_PPDU_STATS_CFG_ENH_STATS,
						mac_for_pdev);
				}
8263 			}
8264 
8265 			break;
8266 		default:
8267 			/* Nothing needs to be done for other pktlog types */
8268 			break;
8269 		}
8270 	}
8271 	return 0;
8272 }
8273 #endif
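
/*
 * Illustrative sketch (hypothetical caller): pktlog configuration is driven
 * per WDI event through dp_set_pktlog_wifi3(), e.g. enabling lite T2H PPDU
 * stats logging and later disabling it:
 *
 *	dp_set_pktlog_wifi3(pdev, WDI_EVENT_LITE_T2H, true);
 *	...
 *	dp_set_pktlog_wifi3(pdev, WDI_EVENT_LITE_T2H, false);
 */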
8274