xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/dp_main.c (revision 87a8e4458319c60b618522e263ed900e36aab528)
1 /*
2  * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include <qdf_types.h>
20 #include <qdf_lock.h>
21 #include <qdf_net_types.h>
22 #include <qdf_lro.h>
23 #include <qdf_module.h>
24 #include <hal_hw_headers.h>
25 #include <hal_api.h>
26 #include <hif.h>
27 #include <htt.h>
28 #include <wdi_event.h>
29 #include <queue.h>
30 #include "dp_htt.h"
31 #include "dp_types.h"
32 #include "dp_internal.h"
33 #include "dp_tx.h"
34 #include "dp_tx_desc.h"
35 #include "dp_rx.h"
36 #include <cdp_txrx_handle.h>
37 #include <wlan_cfg.h>
38 #include "cdp_txrx_cmn_struct.h"
39 #include "cdp_txrx_stats_struct.h"
40 #include "cdp_txrx_cmn_reg.h"
41 #include <qdf_util.h>
42 #include "dp_peer.h"
43 #include "dp_rx_mon.h"
44 #include "htt_stats.h"
45 #include "qdf_mem.h"   /* qdf_mem_malloc,free */
46 #include "cfg_ucfg_api.h"
47 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
48 #include "cdp_txrx_flow_ctrl_v2.h"
49 #else
50 static inline void
51 cdp_dump_flow_pool_info(struct cdp_soc_t *soc)
52 {
53 	return;
54 }
55 #endif
56 #include "dp_ipa.h"
57 
58 #ifdef CONFIG_MCL
59 #ifndef REMOVE_PKT_LOG
60 #include <pktlog_ac_api.h>
61 #include <pktlog_ac.h>
62 #endif
63 #endif
64 static void dp_pktlogmod_exit(struct dp_pdev *handle);
65 static void *dp_peer_create_wifi3(struct cdp_vdev *vdev_handle,
66 				uint8_t *peer_mac_addr,
67 				struct cdp_ctrl_objmgr_peer *ctrl_peer);
68 static void dp_peer_delete_wifi3(void *peer_handle, uint32_t bitmap);
69 static void dp_ppdu_ring_reset(struct dp_pdev *pdev);
70 static void dp_ppdu_ring_cfg(struct dp_pdev *pdev);
71 
72 #define DP_INTR_POLL_TIMER_MS	10
73 #define DP_WDS_AGING_TIMER_DEFAULT_MS	120000
74 #define DP_MCS_LENGTH (6*MAX_MCS)
75 #define DP_NSS_LENGTH (6*SS_COUNT)
76 #define DP_RXDMA_ERR_LENGTH (6*HAL_RXDMA_ERR_MAX)
77 #define DP_REO_ERR_LENGTH (6*HAL_REO_ERR_MAX)
78 #define DP_MAX_MCS_STRING_LEN 30
79 #define DP_CURR_FW_STATS_AVAIL 19
80 #define DP_HTT_DBG_EXT_STATS_MAX 256
81 #define DP_MAX_SLEEP_TIME 100
82 
83 #ifdef IPA_OFFLOAD
84 /* Exclude IPA rings from the interrupt context */
85 #define TX_RING_MASK_VAL	0xb
86 #define RX_RING_MASK_VAL	0x7
87 #else
88 #define TX_RING_MASK_VAL	0xF
89 #define RX_RING_MASK_VAL	0xF
90 #endif
91 
92 #define STR_MAXLEN	64
93 
94 #define DP_PPDU_STATS_CFG_ALL 0xFFFF
95 
96 /* PPDU stats mask sent to FW to enable enhanced stats */
97 #define DP_PPDU_STATS_CFG_ENH_STATS 0xE67
98 /* PPDU stats mask sent to FW to support debug sniffer feature */
99 #define DP_PPDU_STATS_CFG_SNIFFER 0x2FFF
100 /* PPDU stats mask sent to FW to support BPR feature*/
101 #define DP_PPDU_STATS_CFG_BPR 0x2000
102 /* PPDU stats mask sent to FW to support BPR and enhanced stats feature */
103 #define DP_PPDU_STATS_CFG_BPR_ENH (DP_PPDU_STATS_CFG_BPR | \
104 				   DP_PPDU_STATS_CFG_ENH_STATS)
105 /* PPDU stats mask sent to FW to support BPR and pcktlog stats feature */
106 #define DP_PPDU_STATS_CFG_BPR_PKTLOG (DP_PPDU_STATS_CFG_BPR | \
107 				      DP_PPDU_TXLITE_STATS_BITMASK_CFG)
108 
109 #define RNG_ERR		"SRNG setup failed for"
110 /**
111  * default_dscp_tid_map - Default DSCP-TID mapping
112  *
113  * DSCP        TID
114  * 000000      0
115  * 001000      1
116  * 010000      2
117  * 011000      3
118  * 100000      4
119  * 101000      5
120  * 110000      6
121  * 111000      7
122  */
123 static uint8_t default_dscp_tid_map[DSCP_TID_MAP_MAX] = {
124 	0, 0, 0, 0, 0, 0, 0, 0,
125 	1, 1, 1, 1, 1, 1, 1, 1,
126 	2, 2, 2, 2, 2, 2, 2, 2,
127 	3, 3, 3, 3, 3, 3, 3, 3,
128 	4, 4, 4, 4, 4, 4, 4, 4,
129 	5, 5, 5, 5, 5, 5, 5, 5,
130 	6, 6, 6, 6, 6, 6, 6, 6,
131 	7, 7, 7, 7, 7, 7, 7, 7,
132 };
133 
134 /*
135  * struct dp_rate_debug
136  *
137  * @mcs_type: print string for a given mcs
138  * @valid: valid mcs rate?
139  */
140 struct dp_rate_debug {
141 	char mcs_type[DP_MAX_MCS_STRING_LEN];
142 	uint8_t valid;
143 };
144 
145 #define MCS_VALID 1
146 #define MCS_INVALID 0
147 
148 static const struct dp_rate_debug dp_rate_string[DOT11_MAX][MAX_MCS] = {
149 
150 	{
151 		{"OFDM 48 Mbps", MCS_VALID},
152 		{"OFDM 24 Mbps", MCS_VALID},
153 		{"OFDM 12 Mbps", MCS_VALID},
154 		{"OFDM 6 Mbps ", MCS_VALID},
155 		{"OFDM 54 Mbps", MCS_VALID},
156 		{"OFDM 36 Mbps", MCS_VALID},
157 		{"OFDM 18 Mbps", MCS_VALID},
158 		{"OFDM 9 Mbps ", MCS_VALID},
159 		{"INVALID ", MCS_INVALID},
160 		{"INVALID ", MCS_INVALID},
161 		{"INVALID ", MCS_INVALID},
162 		{"INVALID ", MCS_INVALID},
163 		{"INVALID ", MCS_VALID},
164 	},
165 	{
166 		{"CCK 11 Mbps Long  ", MCS_VALID},
167 		{"CCK 5.5 Mbps Long ", MCS_VALID},
168 		{"CCK 2 Mbps Long   ", MCS_VALID},
169 		{"CCK 1 Mbps Long   ", MCS_VALID},
170 		{"CCK 11 Mbps Short ", MCS_VALID},
171 		{"CCK 5.5 Mbps Short", MCS_VALID},
172 		{"CCK 2 Mbps Short  ", MCS_VALID},
173 		{"INVALID ", MCS_INVALID},
174 		{"INVALID ", MCS_INVALID},
175 		{"INVALID ", MCS_INVALID},
176 		{"INVALID ", MCS_INVALID},
177 		{"INVALID ", MCS_INVALID},
178 		{"INVALID ", MCS_VALID},
179 	},
180 	{
181 		{"HT MCS 0 (BPSK 1/2)  ", MCS_VALID},
182 		{"HT MCS 1 (QPSK 1/2)  ", MCS_VALID},
183 		{"HT MCS 2 (QPSK 3/4)  ", MCS_VALID},
184 		{"HT MCS 3 (16-QAM 1/2)", MCS_VALID},
185 		{"HT MCS 4 (16-QAM 3/4)", MCS_VALID},
186 		{"HT MCS 5 (64-QAM 2/3)", MCS_VALID},
187 		{"HT MCS 6 (64-QAM 3/4)", MCS_VALID},
188 		{"HT MCS 7 (64-QAM 5/6)", MCS_VALID},
189 		{"INVALID ", MCS_INVALID},
190 		{"INVALID ", MCS_INVALID},
191 		{"INVALID ", MCS_INVALID},
192 		{"INVALID ", MCS_INVALID},
193 		{"INVALID ", MCS_VALID},
194 	},
195 	{
196 		{"VHT MCS 0 (BPSK 1/2)     ", MCS_VALID},
197 		{"VHT MCS 1 (QPSK 1/2)     ", MCS_VALID},
198 		{"VHT MCS 2 (QPSK 3/4)     ", MCS_VALID},
199 		{"VHT MCS 3 (16-QAM 1/2)   ", MCS_VALID},
200 		{"VHT MCS 4 (16-QAM 3/4)   ", MCS_VALID},
201 		{"VHT MCS 5 (64-QAM 2/3)   ", MCS_VALID},
202 		{"VHT MCS 6 (64-QAM 3/4)   ", MCS_VALID},
203 		{"VHT MCS 7 (64-QAM 5/6)   ", MCS_VALID},
204 		{"VHT MCS 8 (256-QAM 3/4)  ", MCS_VALID},
205 		{"VHT MCS 9 (256-QAM 5/6)  ", MCS_VALID},
206 		{"VHT MCS 10 (1024-QAM 3/4)", MCS_VALID},
207 		{"VHT MCS 11 (1024-QAM 5/6)", MCS_VALID},
208 		{"INVALID ", MCS_VALID},
209 	},
210 	{
211 		{"HE MCS 0 (BPSK 1/2)     ", MCS_VALID},
212 		{"HE MCS 1 (QPSK 1/2)     ", MCS_VALID},
213 		{"HE MCS 2 (QPSK 3/4)     ", MCS_VALID},
214 		{"HE MCS 3 (16-QAM 1/2)   ", MCS_VALID},
215 		{"HE MCS 4 (16-QAM 3/4)   ", MCS_VALID},
216 		{"HE MCS 5 (64-QAM 2/3)   ", MCS_VALID},
217 		{"HE MCS 6 (64-QAM 3/4)   ", MCS_VALID},
218 		{"HE MCS 7 (64-QAM 5/6)   ", MCS_VALID},
219 		{"HE MCS 8 (256-QAM 3/4)  ", MCS_VALID},
220 		{"HE MCS 9 (256-QAM 5/6)  ", MCS_VALID},
221 		{"HE MCS 10 (1024-QAM 3/4)", MCS_VALID},
222 		{"HE MCS 11 (1024-QAM 5/6)", MCS_VALID},
223 		{"INVALID ", MCS_VALID},
224 	}
225 };
226 
227 /**
228  * @brief Cpu ring map types
229  */
230 enum dp_cpu_ring_map_types {
231 	DP_DEFAULT_MAP,
232 	DP_NSS_FIRST_RADIO_OFFLOADED_MAP,
233 	DP_NSS_SECOND_RADIO_OFFLOADED_MAP,
234 	DP_NSS_ALL_RADIO_OFFLOADED_MAP,
235 	DP_CPU_RING_MAP_MAX
236 };
237 
238 /**
239  * @brief Cpu to tx ring map
240  */
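/*
 * Each row corresponds to an entry of dp_cpu_ring_map_types and each column
 * to one interrupt context (WLAN_CFG_INT_NUM_CONTEXTS); the value is the Tx
 * (TCL) ring serviced from that context for the given NSS offload layout.
 */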
241 static uint8_t dp_cpu_ring_map[DP_CPU_RING_MAP_MAX][WLAN_CFG_INT_NUM_CONTEXTS] = {
242 	{0x0, 0x1, 0x2, 0x0},
243 	{0x1, 0x2, 0x1, 0x2},
244 	{0x0, 0x2, 0x0, 0x2},
245 	{0x2, 0x2, 0x2, 0x2}
246 };
247 
248 /**
249  * @brief Select the type of statistics
250  */
251 enum dp_stats_type {
252 	STATS_FW = 0,
253 	STATS_HOST = 1,
254 	STATS_TYPE_MAX = 2,
255 };
256 
257 /**
258  * @brief General Firmware statistics options
259  *
260  */
261 enum dp_fw_stats {
262 	TXRX_FW_STATS_INVALID	= -1,
263 };
264 
265 /**
266  * dp_stats_mapping_table - Firmware and Host statistics
267  * currently supported
268  */
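/*
 * Rows are indexed by the stats request id; column STATS_FW holds the
 * corresponding HTT firmware stats id and column STATS_HOST the host stats
 * id, with TXRX_FW_STATS_INVALID/TXRX_HOST_STATS_INVALID marking requests
 * that have no mapping of that kind.
 */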
269 const int dp_stats_mapping_table[][STATS_TYPE_MAX] = {
270 	{HTT_DBG_EXT_STATS_RESET, TXRX_HOST_STATS_INVALID},
271 	{HTT_DBG_EXT_STATS_PDEV_TX, TXRX_HOST_STATS_INVALID},
272 	{HTT_DBG_EXT_STATS_PDEV_RX, TXRX_HOST_STATS_INVALID},
273 	{HTT_DBG_EXT_STATS_PDEV_TX_HWQ, TXRX_HOST_STATS_INVALID},
274 	{HTT_DBG_EXT_STATS_PDEV_TX_SCHED, TXRX_HOST_STATS_INVALID},
275 	{HTT_DBG_EXT_STATS_PDEV_ERROR, TXRX_HOST_STATS_INVALID},
276 	{HTT_DBG_EXT_STATS_PDEV_TQM, TXRX_HOST_STATS_INVALID},
277 	{HTT_DBG_EXT_STATS_TQM_CMDQ, TXRX_HOST_STATS_INVALID},
278 	{HTT_DBG_EXT_STATS_TX_DE_INFO, TXRX_HOST_STATS_INVALID},
279 	{HTT_DBG_EXT_STATS_PDEV_TX_RATE, TXRX_HOST_STATS_INVALID},
280 	{HTT_DBG_EXT_STATS_PDEV_RX_RATE, TXRX_HOST_STATS_INVALID},
281 	{TXRX_FW_STATS_INVALID, TXRX_HOST_STATS_INVALID},
282 	{HTT_DBG_EXT_STATS_TX_SELFGEN_INFO, TXRX_HOST_STATS_INVALID},
283 	{HTT_DBG_EXT_STATS_TX_MU_HWQ, TXRX_HOST_STATS_INVALID},
284 	{HTT_DBG_EXT_STATS_RING_IF_INFO, TXRX_HOST_STATS_INVALID},
285 	{HTT_DBG_EXT_STATS_SRNG_INFO, TXRX_HOST_STATS_INVALID},
286 	{HTT_DBG_EXT_STATS_SFM_INFO, TXRX_HOST_STATS_INVALID},
287 	{HTT_DBG_EXT_STATS_PDEV_TX_MU, TXRX_HOST_STATS_INVALID},
288 	{HTT_DBG_EXT_STATS_ACTIVE_PEERS_LIST, TXRX_HOST_STATS_INVALID},
289 	/* Last ENUM for HTT FW STATS */
290 	{DP_HTT_DBG_EXT_STATS_MAX, TXRX_HOST_STATS_INVALID},
291 	{TXRX_FW_STATS_INVALID, TXRX_CLEAR_STATS},
292 	{TXRX_FW_STATS_INVALID, TXRX_RX_RATE_STATS},
293 	{TXRX_FW_STATS_INVALID, TXRX_TX_RATE_STATS},
294 	{TXRX_FW_STATS_INVALID, TXRX_TX_HOST_STATS},
295 	{TXRX_FW_STATS_INVALID, TXRX_RX_HOST_STATS},
296 	{TXRX_FW_STATS_INVALID, TXRX_AST_STATS},
297 	{TXRX_FW_STATS_INVALID, TXRX_SRNG_PTR_STATS},
298 	{TXRX_FW_STATS_INVALID, TXRX_RX_MON_STATS},
299 	{TXRX_FW_STATS_INVALID, TXRX_REO_QUEUE_STATS},
300 };
301 
302 /* MCL specific functions */
303 #ifdef CONFIG_MCL
304 /**
305  * dp_soc_get_mon_mask_for_interrupt_mode() - get mon mode mask for intr mode
306  * @soc: pointer to dp_soc handle
307  * @intr_ctx_num: interrupt context number for which mon mask is needed
308  *
309  * For MCL, monitor mode rings are being processed in timer contexts (polled).
310  * This function returns 0 because, in interrupt mode (softirq based RX),
311  * we do not want to process monitor mode rings in a softirq.
312  *
313  * So, when packet log is enabled for SAP/STA/P2P modes, regular interrupt
314  * processing will not service the monitor mode rings; they are instead
315  * reaped from a separate timer context.
316  *
317  * Return: 0
318  */
319 static inline
320 uint32_t dp_soc_get_mon_mask_for_interrupt_mode(struct dp_soc *soc, int intr_ctx_num)
321 {
322 	return 0;
323 }
324 
325 /*
326  * dp_service_mon_rings() - timer to reap monitor rings,
327  * required as we are not getting ppdu end interrupts
328  * @arg: SoC Handle
329  *
330  * Return: none
331  *
332  */
333 static void dp_service_mon_rings(void *arg)
334 {
335 	struct dp_soc *soc = (struct dp_soc *)arg;
336 	int ring = 0, work_done, mac_id;
337 	struct dp_pdev *pdev = NULL;
338 
339 	for  (ring = 0 ; ring < MAX_PDEV_CNT; ring++) {
340 		pdev = soc->pdev_list[ring];
341 		if (!pdev)
342 			continue;
343 		for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
344 			int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id,
345 								pdev->pdev_id);
346 			work_done = dp_mon_process(soc, mac_for_pdev,
347 						   QCA_NAPI_BUDGET);
348 
349 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
350 				  FL("Reaped %d descs from Monitor rings"),
351 				  work_done);
352 		}
353 	}
354 
355 	qdf_timer_mod(&soc->mon_reap_timer, DP_INTR_POLL_TIMER_MS);
356 }
357 
358 #ifndef REMOVE_PKT_LOG
359 /**
360  * dp_pkt_log_init() - API to initialize packet log
361  * @ppdev: physical device handle
362  * @scn: HIF context
363  *
364  * Return: none
365  */
366 void dp_pkt_log_init(struct cdp_pdev *ppdev, void *scn)
367 {
368 	struct dp_pdev *handle = (struct dp_pdev *)ppdev;
369 
370 	if (handle->pkt_log_init) {
371 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
372 			  "%s: Packet log already initialized", __func__);
373 		return;
374 	}
375 
376 	pktlog_sethandle(&handle->pl_dev, scn);
377 	pktlog_set_callback_regtype(PKTLOG_LITE_CALLBACK_REGISTRATION);
378 
379 	if (pktlogmod_init(scn)) {
380 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
381 			  "%s: pktlogmod_init failed", __func__);
382 		handle->pkt_log_init = false;
383 	} else {
384 		handle->pkt_log_init = true;
385 	}
386 }
387 
388 /**
389  * dp_pkt_log_con_service() - connect packet log service
390  * @ppdev: physical device handle
391  * @scn: device context
392  *
393  * Return: none
394  */
395 static void dp_pkt_log_con_service(struct cdp_pdev *ppdev, void *scn)
396 {
397 	struct dp_pdev *pdev = (struct dp_pdev *)ppdev;
398 
399 	dp_pkt_log_init((struct cdp_pdev *)pdev, scn);
400 	pktlog_htc_attach();
401 }
402 
403 /**
404  * dp_pktlogmod_exit() - API to cleanup pktlog info
405  * @handle: Pdev handle
406  *
407  * Return: none
408  */
409 static void dp_pktlogmod_exit(struct dp_pdev *handle)
410 {
411 	void *scn = (void *)handle->soc->hif_handle;
412 
413 	if (!scn) {
414 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
415 			  "%s: Invalid hif(scn) handle", __func__);
416 		return;
417 	}
418 
419 	pktlogmod_exit(scn);
420 	handle->pkt_log_init = false;
421 }
422 #endif
423 #else
424 static void dp_pktlogmod_exit(struct dp_pdev *handle) { }
425 
426 /**
427  * dp_soc_get_mon_mask_for_interrupt_mode() - get mon mode mask for intr mode
428  * @soc: pointer to dp_soc handle
429  * @intr_ctx_num: interrupt context number for which mon mask is needed
430  *
431  * Return: mon mask value
432  */
433 static inline
434 uint32_t dp_soc_get_mon_mask_for_interrupt_mode(struct dp_soc *soc, int intr_ctx_num)
435 {
436 	return wlan_cfg_get_rx_mon_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
437 }
438 #endif
439 
440 static int dp_peer_add_ast_wifi3(struct cdp_soc_t *soc_hdl,
441 					struct cdp_peer *peer_hdl,
442 					uint8_t *mac_addr,
443 					enum cdp_txrx_ast_entry_type type,
444 					uint32_t flags)
445 {
446 
447 	return dp_peer_add_ast((struct dp_soc *)soc_hdl,
448 				(struct dp_peer *)peer_hdl,
449 				mac_addr,
450 				type,
451 				flags);
452 }
453 
454 static void dp_peer_del_ast_wifi3(struct cdp_soc_t *soc_hdl,
455 					 void *ast_entry_hdl)
456 {
457 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
458 	qdf_spin_lock_bh(&soc->ast_lock);
459 	dp_peer_del_ast((struct dp_soc *)soc_hdl,
460 			(struct dp_ast_entry *)ast_entry_hdl);
461 	qdf_spin_unlock_bh(&soc->ast_lock);
462 }
463 
464 
465 static int dp_peer_update_ast_wifi3(struct cdp_soc_t *soc_hdl,
466 						struct cdp_peer *peer_hdl,
467 						uint8_t *wds_macaddr,
468 						uint32_t flags)
469 {
470 	int status = -1;
471 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
472 	struct dp_ast_entry  *ast_entry = NULL;
473 
474 	qdf_spin_lock_bh(&soc->ast_lock);
475 	ast_entry = dp_peer_ast_hash_find(soc, wds_macaddr);
476 
477 	if (ast_entry) {
478 		status = dp_peer_update_ast(soc,
479 					    (struct dp_peer *)peer_hdl,
480 					   ast_entry, flags);
481 	}
482 
483 	qdf_spin_unlock_bh(&soc->ast_lock);
484 
485 	return status;
486 }
487 
488 /*
489  * dp_wds_reset_ast_wifi3() - Reset the is_active param for ast entry
490  * @soc_handle:		Datapath SOC handle
491  * @wds_macaddr:	WDS entry MAC Address
492  * Return: None
493  */
494 static void dp_wds_reset_ast_wifi3(struct cdp_soc_t *soc_hdl,
495 				   uint8_t *wds_macaddr, void *vdev_handle)
496 {
497 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
498 	struct dp_ast_entry *ast_entry = NULL;
499 
500 	qdf_spin_lock_bh(&soc->ast_lock);
501 	ast_entry = dp_peer_ast_hash_find(soc, wds_macaddr);
502 
503 	if (ast_entry) {
504 		if ((ast_entry->type != CDP_TXRX_AST_TYPE_STATIC) &&
505 		    (ast_entry->type != CDP_TXRX_AST_TYPE_SELF)) {
506 			ast_entry->is_active = TRUE;
507 		}
508 	}
509 
510 	qdf_spin_unlock_bh(&soc->ast_lock);
511 }
512 
513 /*
514  * dp_wds_reset_ast_table_wifi3() - Reset the is_active param for all ast entries
515  * @soc:		Datapath SOC handle
516  *
517  * Return: None
518  */
519 static void dp_wds_reset_ast_table_wifi3(struct cdp_soc_t  *soc_hdl,
520 					 void *vdev_hdl)
521 {
522 	struct dp_soc *soc = (struct dp_soc *) soc_hdl;
523 	struct dp_pdev *pdev;
524 	struct dp_vdev *vdev;
525 	struct dp_peer *peer;
526 	struct dp_ast_entry *ase, *temp_ase;
527 	int i;
528 
529 	qdf_spin_lock_bh(&soc->ast_lock);
530 
531 	for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) {
532 		pdev = soc->pdev_list[i];
533 		qdf_spin_lock_bh(&pdev->vdev_list_lock);
534 		DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
535 			DP_VDEV_ITERATE_PEER_LIST(vdev, peer) {
536 				DP_PEER_ITERATE_ASE_LIST(peer, ase, temp_ase) {
537 					if ((ase->type ==
538 					     CDP_TXRX_AST_TYPE_STATIC) ||
539 					    (ase->type ==
540 					     CDP_TXRX_AST_TYPE_SELF))
541 						continue;
542 					ase->is_active = TRUE;
543 				}
544 			}
545 		}
546 		qdf_spin_unlock_bh(&pdev->vdev_list_lock);
547 	}
548 
549 	qdf_spin_unlock_bh(&soc->ast_lock);
550 }
551 
552 /*
553  * dp_wds_flush_ast_table_wifi3() - Delete all wds and hmwds ast entries
554  * @soc:		Datapath SOC handle
555  *
556  * Return: None
557  */
558 static void dp_wds_flush_ast_table_wifi3(struct cdp_soc_t  *soc_hdl)
559 {
560 	struct dp_soc *soc = (struct dp_soc *) soc_hdl;
561 	struct dp_pdev *pdev;
562 	struct dp_vdev *vdev;
563 	struct dp_peer *peer;
564 	struct dp_ast_entry *ase, *temp_ase;
565 	int i;
566 
567 	qdf_spin_lock_bh(&soc->ast_lock);
568 
569 	for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) {
570 		pdev = soc->pdev_list[i];
571 		qdf_spin_lock_bh(&pdev->vdev_list_lock);
572 		DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
573 			DP_VDEV_ITERATE_PEER_LIST(vdev, peer) {
574 				DP_PEER_ITERATE_ASE_LIST(peer, ase, temp_ase) {
575 					if ((ase->type ==
576 					     CDP_TXRX_AST_TYPE_STATIC) ||
577 					    (ase->type ==
578 					     CDP_TXRX_AST_TYPE_SELF))
579 						continue;
580 					dp_peer_del_ast(soc, ase);
581 				}
582 			}
583 		}
584 		qdf_spin_unlock_bh(&pdev->vdev_list_lock);
585 	}
586 
587 	qdf_spin_unlock_bh(&soc->ast_lock);
588 }
589 
590 static void *dp_peer_ast_hash_find_wifi3(struct cdp_soc_t *soc_hdl,
591 						uint8_t *ast_mac_addr)
592 {
593 	struct dp_ast_entry *ast_entry;
594 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
595 	qdf_spin_lock_bh(&soc->ast_lock);
596 	ast_entry = dp_peer_ast_hash_find(soc, ast_mac_addr);
597 	qdf_spin_unlock_bh(&soc->ast_lock);
598 	return (void *)ast_entry;
599 }
600 
601 static uint8_t dp_peer_ast_get_pdev_id_wifi3(struct cdp_soc_t *soc_hdl,
602 							void *ast_entry_hdl)
603 {
604 	return dp_peer_ast_get_pdev_id((struct dp_soc *)soc_hdl,
605 					(struct dp_ast_entry *)ast_entry_hdl);
606 }
607 
608 static uint8_t dp_peer_ast_get_next_hop_wifi3(struct cdp_soc_t *soc_hdl,
609 							void *ast_entry_hdl)
610 {
611 	return dp_peer_ast_get_next_hop((struct dp_soc *)soc_hdl,
612 					(struct dp_ast_entry *)ast_entry_hdl);
613 }
614 
615 static void dp_peer_ast_set_type_wifi3(
616 					struct cdp_soc_t *soc_hdl,
617 					void *ast_entry_hdl,
618 					enum cdp_txrx_ast_entry_type type)
619 {
620 	dp_peer_ast_set_type((struct dp_soc *)soc_hdl,
621 				(struct dp_ast_entry *)ast_entry_hdl,
622 				type);
623 }
624 
625 static enum cdp_txrx_ast_entry_type dp_peer_ast_get_type_wifi3(
626 					struct cdp_soc_t *soc_hdl,
627 					void *ast_entry_hdl)
628 {
629 	return ((struct dp_ast_entry *)ast_entry_hdl)->type;
630 }
631 
632 /**
633  * dp_srng_find_ring_in_mask() - find which ext_group a ring belongs to
634  * @ring_num: ring num of the ring being queried
635  * @grp_mask: the grp_mask array for the ring type in question.
636  *
637  * The grp_mask array is indexed by group number and the bit fields correspond
638  * to ring numbers.  We are finding which interrupt group a ring belongs to.
639  *
640  * Return: the index in the grp_mask array with the ring number.
641  * -QDF_STATUS_E_NOENT if no entry is found
642  */
643 static int dp_srng_find_ring_in_mask(int ring_num, int *grp_mask)
644 {
645 	int ext_group_num;
646 	int mask = 1 << ring_num;
647 
648 	for (ext_group_num = 0; ext_group_num < WLAN_CFG_INT_NUM_CONTEXTS;
649 	     ext_group_num++) {
650 		if (mask & grp_mask[ext_group_num])
651 			return ext_group_num;
652 	}
653 
654 	return -QDF_STATUS_E_NOENT;
655 }
656 
657 static int dp_srng_calculate_msi_group(struct dp_soc *soc,
658 				       enum hal_ring_type ring_type,
659 				       int ring_num)
660 {
661 	int *grp_mask;
662 
663 	switch (ring_type) {
664 	case WBM2SW_RELEASE:
665 		/* dp_tx_comp_handler - soc->tx_comp_ring */
666 		if (ring_num < 3)
667 			grp_mask = &soc->wlan_cfg_ctx->int_tx_ring_mask[0];
668 
669 		/* dp_rx_wbm_err_process - soc->rx_rel_ring */
670 		else if (ring_num == 3) {
671 			/* sw treats this as a separate ring type */
672 			grp_mask = &soc->wlan_cfg_ctx->
673 				int_rx_wbm_rel_ring_mask[0];
674 			ring_num = 0;
675 		} else {
676 			qdf_assert(0);
677 			return -QDF_STATUS_E_NOENT;
678 		}
679 	break;
680 
681 	case REO_EXCEPTION:
682 		/* dp_rx_err_process - &soc->reo_exception_ring */
683 		grp_mask = &soc->wlan_cfg_ctx->int_rx_err_ring_mask[0];
684 	break;
685 
686 	case REO_DST:
687 		/* dp_rx_process - soc->reo_dest_ring */
688 		grp_mask = &soc->wlan_cfg_ctx->int_rx_ring_mask[0];
689 	break;
690 
691 	case REO_STATUS:
692 		/* dp_reo_status_ring_handler - soc->reo_status_ring */
693 		grp_mask = &soc->wlan_cfg_ctx->int_reo_status_ring_mask[0];
694 	break;
695 
696 	/* dp_rx_mon_status_srng_process - pdev->rxdma_mon_status_ring */
697 	case RXDMA_MONITOR_STATUS:
698 	/* dp_rx_mon_dest_process - pdev->rxdma_mon_dst_ring */
699 	case RXDMA_MONITOR_DST:
700 		/* dp_mon_process */
701 		grp_mask = &soc->wlan_cfg_ctx->int_rx_mon_ring_mask[0];
702 	break;
703 	case RXDMA_DST:
704 		/* dp_rxdma_err_process */
705 		grp_mask = &soc->wlan_cfg_ctx->int_rxdma2host_ring_mask[0];
706 	break;
707 
708 	case RXDMA_BUF:
709 		grp_mask = &soc->wlan_cfg_ctx->int_host2rxdma_ring_mask[0];
710 	break;
711 
712 	case RXDMA_MONITOR_BUF:
713 		/* TODO: support low_thresh interrupt */
714 		return -QDF_STATUS_E_NOENT;
715 	break;
716 
717 	case TCL_DATA:
718 	case TCL_CMD:
719 	case REO_CMD:
720 	case SW2WBM_RELEASE:
721 	case WBM_IDLE_LINK:
722 		/* normally empty SW_TO_HW rings */
723 		return -QDF_STATUS_E_NOENT;
724 	break;
725 
726 	case TCL_STATUS:
727 	case REO_REINJECT:
728 		/* misc unused rings */
729 		return -QDF_STATUS_E_NOENT;
730 	break;
731 
732 	case CE_SRC:
733 	case CE_DST:
734 	case CE_DST_STATUS:
735 		/* CE_rings - currently handled by hif */
736 	default:
737 		return -QDF_STATUS_E_NOENT;
738 	break;
739 	}
740 
741 	return dp_srng_find_ring_in_mask(ring_num, grp_mask);
742 }
743 
744 static void dp_srng_msi_setup(struct dp_soc *soc, struct hal_srng_params
745 			      *ring_params, int ring_type, int ring_num)
746 {
747 	int msi_group_number;
748 	int msi_data_count;
749 	int ret;
750 	uint32_t msi_data_start, msi_irq_start, addr_low, addr_high;
751 
752 	ret = pld_get_user_msi_assignment(soc->osdev->dev, "DP",
753 					    &msi_data_count, &msi_data_start,
754 					    &msi_irq_start);
755 
756 	if (ret)
757 		return;
758 
759 	msi_group_number = dp_srng_calculate_msi_group(soc, ring_type,
760 						       ring_num);
761 	if (msi_group_number < 0) {
762 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW,
763 			FL("ring not part of an ext_group; ring_type: %d, ring_num %d"),
764 			ring_type, ring_num);
765 		ring_params->msi_addr = 0;
766 		ring_params->msi_data = 0;
767 		return;
768 	}
769 
770 	if (msi_group_number > msi_data_count) {
771 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_WARN,
772 			FL("2 msi_groups will share an msi; msi_group_num %d"),
773 			msi_group_number);
774 
775 		QDF_ASSERT(0);
776 	}
777 
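	/*
	 * Program the MSI target address from the platform layer and derive
	 * the MSI data for this ring by wrapping the group number around the
	 * number of granted MSI vectors, so several ext groups can share one
	 * vector when fewer vectors than groups are available.
	 */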
778 	pld_get_msi_address(soc->osdev->dev, &addr_low, &addr_high);
779 
780 	ring_params->msi_addr = addr_low;
781 	ring_params->msi_addr |= (qdf_dma_addr_t)(((uint64_t)addr_high) << 32);
782 	ring_params->msi_data = (msi_group_number % msi_data_count)
783 		+ msi_data_start;
784 	ring_params->flags |= HAL_SRNG_MSI_INTR;
785 }
786 
787 /**
788  * dp_print_ast_stats() - Dump AST table contents
789  * @soc: Datapath soc handle
790  *
791  * Return: void
792  */
793 #ifdef FEATURE_AST
794 static void dp_print_ast_stats(struct dp_soc *soc)
795 {
796 	uint8_t i;
797 	uint8_t num_entries = 0;
798 	struct dp_vdev *vdev;
799 	struct dp_pdev *pdev;
800 	struct dp_peer *peer;
801 	struct dp_ast_entry *ase, *tmp_ase;
802 	char type[CDP_TXRX_AST_TYPE_MAX][10] = {
803 			"NONE", "STATIC", "SELF", "WDS", "MEC", "HMWDS"};
804 
805 	DP_PRINT_STATS("AST Stats:");
806 	DP_PRINT_STATS("	Entries Added   = %d", soc->stats.ast.added);
807 	DP_PRINT_STATS("	Entries Deleted = %d", soc->stats.ast.deleted);
808 	DP_PRINT_STATS("	Entries Agedout = %d", soc->stats.ast.aged_out);
809 	DP_PRINT_STATS("AST Table:");
810 	for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) {
811 		pdev = soc->pdev_list[i];
812 		qdf_spin_lock_bh(&pdev->vdev_list_lock);
813 		DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
814 			DP_VDEV_ITERATE_PEER_LIST(vdev, peer) {
815 				DP_PEER_ITERATE_ASE_LIST(peer, ase, tmp_ase) {
816 					DP_PRINT_STATS("%6d mac_addr = %pM"
817 							" peer_mac_addr = %pM"
818 							" type = %s"
819 							" next_hop = %d"
820 							" is_active = %d"
821 							" is_bss = %d"
822 							" ast_idx = %d"
823 							" pdev_id = %d"
824 							" vdev_id = %d",
825 							++num_entries,
826 							ase->mac_addr.raw,
827 							ase->peer->mac_addr.raw,
828 							type[ase->type],
829 							ase->next_hop,
830 							ase->is_active,
831 							ase->is_bss,
832 							ase->ast_idx,
833 							ase->pdev_id,
834 							ase->vdev_id);
835 				}
836 			}
837 		}
838 		qdf_spin_unlock_bh(&pdev->vdev_list_lock);
839 	}
840 }
841 #else
842 static void dp_print_ast_stats(struct dp_soc *soc)
843 {
844 	DP_PRINT_STATS("AST Stats not available. Enable FEATURE_AST");
845 	return;
846 }
847 #endif
848 
849 static void dp_print_peer_table(struct dp_vdev *vdev)
850 {
851 	struct dp_peer *peer = NULL;
852 
853 	DP_PRINT_STATS("Dumping Peer Table  Stats:");
854 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
855 		if (!peer) {
856 			DP_PRINT_STATS("Invalid Peer");
857 			return;
858 		}
859 		DP_PRINT_STATS("    peer_mac_addr = %pM"
860 			" nawds_enabled = %d"
861 			" bss_peer = %d"
862 			" wapi = %d"
863 			" wds_enabled = %d"
864 			" delete in progress = %d",
865 			peer->mac_addr.raw,
866 			peer->nawds_enabled,
867 			peer->bss_peer,
868 			peer->wapi,
869 			peer->wds_enabled,
870 			peer->delete_in_progress);
871 	}
872 }
873 
874 /*
875  * dp_srng_setup - Internal function to setup SRNG rings used by data path
876  */
877 static int dp_srng_setup(struct dp_soc *soc, struct dp_srng *srng,
878 	int ring_type, int ring_num, int mac_id, uint32_t num_entries)
879 {
880 	void *hal_soc = soc->hal_soc;
881 	uint32_t entry_size = hal_srng_get_entrysize(hal_soc, ring_type);
882 	/* TODO: See if we should get align size from hal */
883 	uint32_t ring_base_align = 8;
884 	struct hal_srng_params ring_params;
885 	uint32_t max_entries = hal_srng_max_entries(hal_soc, ring_type);
886 
887 	/* TODO: Currently hal layer takes care of endianness related settings.
888 	 * See if these settings need to passed from DP layer
889 	 */
890 	ring_params.flags = 0;
891 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW,
892 		  FL("Ring type: %d, num:%d"), ring_type, ring_num);
893 
894 	num_entries = (num_entries > max_entries) ? max_entries : num_entries;
895 	srng->hal_srng = NULL;
896 	srng->alloc_size = (num_entries * entry_size) + ring_base_align - 1;
897 	srng->num_entries = num_entries;
898 	srng->base_vaddr_unaligned = qdf_mem_alloc_consistent(
899 		soc->osdev, soc->osdev->dev, srng->alloc_size,
900 		&(srng->base_paddr_unaligned));
901 
902 	if (!srng->base_vaddr_unaligned) {
903 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
904 			FL("alloc failed - ring_type: %d, ring_num %d"),
905 			ring_type, ring_num);
906 		return QDF_STATUS_E_NOMEM;
907 	}
908 
909 	ring_params.ring_base_vaddr = srng->base_vaddr_unaligned +
910 		((unsigned long)srng->base_vaddr_unaligned % ring_base_align);
911 	ring_params.ring_base_paddr = srng->base_paddr_unaligned +
912 		((unsigned long)(ring_params.ring_base_vaddr) -
913 		(unsigned long)srng->base_vaddr_unaligned);
914 	ring_params.num_entries = num_entries;
915 
916 	if (soc->intr_mode == DP_INTR_MSI) {
917 		dp_srng_msi_setup(soc, &ring_params, ring_type, ring_num);
918 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
919 			  FL("Using MSI for ring_type: %d, ring_num %d"),
920 			  ring_type, ring_num);
921 
922 	} else {
923 		ring_params.msi_data = 0;
924 		ring_params.msi_addr = 0;
925 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
926 			  FL("Skipping MSI for ring_type: %d, ring_num %d"),
927 			  ring_type, ring_num);
928 	}
929 
930 	/*
931 	 * Setup interrupt timer and batch counter thresholds for
932 	 * interrupt mitigation based on ring type
933 	 */
934 	if (ring_type == REO_DST) {
935 		ring_params.intr_timer_thres_us =
936 			wlan_cfg_get_int_timer_threshold_rx(soc->wlan_cfg_ctx);
937 		ring_params.intr_batch_cntr_thres_entries =
938 			wlan_cfg_get_int_batch_threshold_rx(soc->wlan_cfg_ctx);
939 	} else if (ring_type == WBM2SW_RELEASE && (ring_num < 3)) {
940 		ring_params.intr_timer_thres_us =
941 			wlan_cfg_get_int_timer_threshold_tx(soc->wlan_cfg_ctx);
942 		ring_params.intr_batch_cntr_thres_entries =
943 			wlan_cfg_get_int_batch_threshold_tx(soc->wlan_cfg_ctx);
944 	} else {
945 		ring_params.intr_timer_thres_us =
946 			wlan_cfg_get_int_timer_threshold_other(soc->wlan_cfg_ctx);
947 		ring_params.intr_batch_cntr_thres_entries =
948 			wlan_cfg_get_int_batch_threshold_other(soc->wlan_cfg_ctx);
949 	}
950 
951 	/* Enable low threshold interrupts for rx buffer rings (regular and
952 	 * monitor buffer rings).
953 	 * TODO: See if this is required for any other ring
954 	 */
955 	if ((ring_type == RXDMA_BUF) || (ring_type == RXDMA_MONITOR_BUF) ||
956 		(ring_type == RXDMA_MONITOR_STATUS)) {
957 		/* TODO: Setting low threshold to 1/8th of ring size
958 		 * see if this needs to be configurable
959 		 */
960 		ring_params.low_threshold = num_entries >> 3;
961 		ring_params.flags |= HAL_SRNG_LOW_THRES_INTR_ENABLE;
962 		ring_params.intr_timer_thres_us =
963 			wlan_cfg_get_int_timer_threshold_rx(soc->wlan_cfg_ctx);
964 		ring_params.intr_batch_cntr_thres_entries = 0;
965 	}
966 
967 	srng->hal_srng = hal_srng_setup(hal_soc, ring_type, ring_num,
968 		mac_id, &ring_params);
969 
970 	if (!srng->hal_srng) {
971 		qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
972 				srng->alloc_size,
973 				srng->base_vaddr_unaligned,
974 				srng->base_paddr_unaligned, 0);
975 	}
976 
977 	return 0;
978 }
979 
980 /**
981  * dp_srng_cleanup - Internal function to cleanup SRNG rings used by data path
982  * Any buffers allocated and attached to ring entries are expected to be freed
983  * before calling this function.
984  */
985 static void dp_srng_cleanup(struct dp_soc *soc, struct dp_srng *srng,
986 	int ring_type, int ring_num)
987 {
988 	if (!srng->hal_srng) {
989 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
990 			FL("Ring type: %d, num:%d not setup"),
991 			ring_type, ring_num);
992 		return;
993 	}
994 
995 	hal_srng_cleanup(soc->hal_soc, srng->hal_srng);
996 
997 	qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
998 				srng->alloc_size,
999 				srng->base_vaddr_unaligned,
1000 				srng->base_paddr_unaligned, 0);
1001 	srng->hal_srng = NULL;
1002 }
1003 
1004 /* TODO: Need this interface from HIF */
1005 void *hif_get_hal_handle(void *hif_handle);
1006 
1007 /*
1008  * dp_service_srngs() - Top level interrupt handler for DP Ring interrupts
1009  * @dp_ctx: DP interrupt context handle (struct dp_intr)
1010  * @dp_budget: Number of frames/descriptors that can be processed in one shot
1011  *
1012  * Return: amount of the budget consumed (work done), not the remaining quota
1013  */
1014 static uint32_t dp_service_srngs(void *dp_ctx, uint32_t dp_budget)
1015 {
1016 	struct dp_intr *int_ctx = (struct dp_intr *)dp_ctx;
1017 	struct dp_soc *soc = int_ctx->soc;
1018 	int ring = 0;
1019 	uint32_t work_done  = 0;
1020 	int budget = dp_budget;
1021 	uint8_t tx_mask = int_ctx->tx_ring_mask;
1022 	uint8_t rx_mask = int_ctx->rx_ring_mask;
1023 	uint8_t rx_err_mask = int_ctx->rx_err_ring_mask;
1024 	uint8_t rx_wbm_rel_mask = int_ctx->rx_wbm_rel_ring_mask;
1025 	uint8_t reo_status_mask = int_ctx->reo_status_ring_mask;
1026 	uint32_t remaining_quota = dp_budget;
1027 	struct dp_pdev *pdev = NULL;
1028 	int mac_id;
1029 
1030 	/* Process Tx completion interrupts first to return back buffers */
1031 	while (tx_mask) {
1032 		if (tx_mask & 0x1) {
1033 			work_done = dp_tx_comp_handler(soc,
1034 					soc->tx_comp_ring[ring].hal_srng,
1035 					remaining_quota);
1036 
1037 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
1038 				"tx mask 0x%x ring %d, budget %d, work_done %d",
1039 				tx_mask, ring, budget, work_done);
1040 
1041 			budget -= work_done;
1042 			if (budget <= 0)
1043 				goto budget_done;
1044 
1045 			remaining_quota = budget;
1046 		}
1047 		tx_mask = tx_mask >> 1;
1048 		ring++;
1049 	}
1050 
1051 
1052 	/* Process REO Exception ring interrupt */
1053 	if (rx_err_mask) {
1054 		work_done = dp_rx_err_process(soc,
1055 				soc->reo_exception_ring.hal_srng,
1056 				remaining_quota);
1057 
1058 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
1059 			"REO Exception Ring: work_done %d budget %d",
1060 			work_done, budget);
1061 
1062 		budget -=  work_done;
1063 		if (budget <= 0) {
1064 			goto budget_done;
1065 		}
1066 		remaining_quota = budget;
1067 	}
1068 
1069 	/* Process Rx WBM release ring interrupt */
1070 	if (rx_wbm_rel_mask) {
1071 		work_done = dp_rx_wbm_err_process(soc,
1072 				soc->rx_rel_ring.hal_srng, remaining_quota);
1073 
1074 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
1075 			"WBM Release Ring: work_done %d budget %d",
1076 			work_done, budget);
1077 
1078 		budget -=  work_done;
1079 		if (budget <= 0) {
1080 			goto budget_done;
1081 		}
1082 		remaining_quota = budget;
1083 	}
1084 
1085 	/* Process Rx interrupts */
1086 	if (rx_mask) {
1087 		for (ring = 0; ring < soc->num_reo_dest_rings; ring++) {
1088 			if (rx_mask & (1 << ring)) {
1089 				work_done = dp_rx_process(int_ctx,
1090 					    soc->reo_dest_ring[ring].hal_srng,
1091 					    remaining_quota);
1092 
1093 				QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
1094 					"rx mask 0x%x ring %d, work_done %d budget %d",
1095 					rx_mask, ring, work_done, budget);
1096 
1097 				budget -=  work_done;
1098 				if (budget <= 0)
1099 					goto budget_done;
1100 				remaining_quota = budget;
1101 			}
1102 		}
1103 		for (ring = 0; ring < MAX_RX_MAC_RINGS; ring++) {
1104 			work_done = dp_rxdma_err_process(soc, ring,
1105 						remaining_quota);
1106 			budget -= work_done;
1107 		}
1108 	}
1109 
1110 	if (reo_status_mask)
1111 		dp_reo_status_ring_handler(soc);
1112 
1113 	/* Process LMAC interrupts */
1114 	for  (ring = 0 ; ring < MAX_PDEV_CNT; ring++) {
1115 		pdev = soc->pdev_list[ring];
1116 		if (pdev == NULL)
1117 			continue;
1118 		for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
1119 			int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id,
1120 								pdev->pdev_id);
1121 
1122 			if (int_ctx->rx_mon_ring_mask & (1 << mac_for_pdev)) {
1123 				work_done = dp_mon_process(soc, mac_for_pdev,
1124 						remaining_quota);
1125 				budget -= work_done;
1126 				if (budget <= 0)
1127 					goto budget_done;
1128 				remaining_quota = budget;
1129 			}
1130 
1131 			if (int_ctx->rxdma2host_ring_mask &
1132 					(1 << mac_for_pdev)) {
1133 				work_done = dp_rxdma_err_process(soc,
1134 							mac_for_pdev,
1135 							remaining_quota);
1136 				budget -=  work_done;
1137 				if (budget <= 0)
1138 					goto budget_done;
1139 				remaining_quota = budget;
1140 			}
1141 
1142 			if (int_ctx->host2rxdma_ring_mask &
1143 						(1 << mac_for_pdev)) {
1144 				union dp_rx_desc_list_elem_t *desc_list = NULL;
1145 				union dp_rx_desc_list_elem_t *tail = NULL;
1146 				struct dp_srng *rx_refill_buf_ring =
1147 					&pdev->rx_refill_buf_ring;
1148 
1149 				DP_STATS_INC(pdev, replenish.low_thresh_intrs,
1150 						1);
1151 				dp_rx_buffers_replenish(soc, mac_for_pdev,
1152 					rx_refill_buf_ring,
1153 					&soc->rx_desc_buf[mac_for_pdev], 0,
1154 					&desc_list, &tail);
1155 			}
1156 		}
1157 	}
1158 
1159 	qdf_lro_flush(int_ctx->lro_ctx);
1160 
1161 budget_done:
1162 	return dp_budget - budget;
1163 }
1164 
1165 #ifdef DP_INTR_POLL_BASED
1166 /* dp_interrupt_timer() - timer poll for interrupts
1167  *
1168  * @arg: SoC Handle
1169  *
1170  * Return: none
1171  *
1172  */
1173 static void dp_interrupt_timer(void *arg)
1174 {
1175 	struct dp_soc *soc = (struct dp_soc *) arg;
1176 	int i;
1177 
1178 	if (qdf_atomic_read(&soc->cmn_init_done)) {
1179 		for (i = 0;
1180 			i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++)
1181 			dp_service_srngs(&soc->intr_ctx[i], 0xffff);
1182 
1183 		qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
1184 	}
1185 }
1186 
1187 /*
1188  * dp_soc_attach_poll() - Register handlers for DP interrupts (poll mode)
1189  * @txrx_soc: DP SOC handle
1190  *
1191  * Host driver will register for "DP_NUM_INTERRUPT_CONTEXTS" number of NAPI
1192  * contexts. Each NAPI context will have a tx_ring_mask, rx_ring_mask, and
1193  * rx_monitor_ring mask to indicate the rings that are processed by the handler.
1194  *
1195  * Return: 0 for success. nonzero for failure.
1196  */
1197 static QDF_STATUS dp_soc_attach_poll(void *txrx_soc)
1198 {
1199 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
1200 	int i;
1201 
1202 	soc->intr_mode = DP_INTR_POLL;
1203 
1204 	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
1205 		soc->intr_ctx[i].dp_intr_id = i;
1206 		soc->intr_ctx[i].tx_ring_mask =
1207 			wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, i);
1208 		soc->intr_ctx[i].rx_ring_mask =
1209 			wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, i);
1210 		soc->intr_ctx[i].rx_mon_ring_mask =
1211 			wlan_cfg_get_rx_mon_ring_mask(soc->wlan_cfg_ctx, i);
1212 		soc->intr_ctx[i].rx_err_ring_mask =
1213 			wlan_cfg_get_rx_err_ring_mask(soc->wlan_cfg_ctx, i);
1214 		soc->intr_ctx[i].rx_wbm_rel_ring_mask =
1215 			wlan_cfg_get_rx_wbm_rel_ring_mask(soc->wlan_cfg_ctx, i);
1216 		soc->intr_ctx[i].reo_status_ring_mask =
1217 			wlan_cfg_get_reo_status_ring_mask(soc->wlan_cfg_ctx, i);
1218 		soc->intr_ctx[i].rxdma2host_ring_mask =
1219 			wlan_cfg_get_rxdma2host_ring_mask(soc->wlan_cfg_ctx, i);
1220 		soc->intr_ctx[i].soc = soc;
1221 		soc->intr_ctx[i].lro_ctx = qdf_lro_init();
1222 	}
1223 
1224 	qdf_timer_init(soc->osdev, &soc->int_timer,
1225 			dp_interrupt_timer, (void *)soc,
1226 			QDF_TIMER_TYPE_WAKE_APPS);
1227 
1228 	return QDF_STATUS_SUCCESS;
1229 }
1230 #else
1231 static QDF_STATUS dp_soc_attach_poll(void *txrx_soc)
1232 {
1233 	return -QDF_STATUS_E_NOSUPPORT;
1234 }
1235 #endif
1236 
1237 static QDF_STATUS dp_soc_interrupt_attach(void *txrx_soc);
1238 #if defined(CONFIG_MCL)
1239 extern int con_mode_monitor;
1240 /*
1241  * dp_soc_interrupt_attach_wrapper() - Register handlers for DP interrupts
1242  * @txrx_soc: DP SOC handle
1243  *
1244  * Call the appropriate attach function based on the mode of operation.
1245  * This is a WAR for enabling monitor mode.
1246  *
1247  * Return: 0 for success. nonzero for failure.
1248  */
1249 static QDF_STATUS dp_soc_interrupt_attach_wrapper(void *txrx_soc)
1250 {
1251 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
1252 
1253 	if (!(soc->wlan_cfg_ctx->napi_enabled) ||
1254 	     con_mode_monitor == QDF_GLOBAL_MONITOR_MODE) {
1255 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
1256 				  "%s: Poll mode", __func__);
1257 		return dp_soc_attach_poll(txrx_soc);
1258 	} else {
1259 
1260 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
1261 				  "%s: Interrupt mode", __func__);
1262 		return dp_soc_interrupt_attach(txrx_soc);
1263 	}
1264 }
1265 #else
1266 #if defined(DP_INTR_POLL_BASED) && DP_INTR_POLL_BASED
1267 static QDF_STATUS dp_soc_interrupt_attach_wrapper(void *txrx_soc)
1268 {
1269 	return dp_soc_attach_poll(txrx_soc);
1270 }
1271 #else
1272 static QDF_STATUS dp_soc_interrupt_attach_wrapper(void *txrx_soc)
1273 {
1274 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
1275 
1276 	if (hif_is_polled_mode_enabled(soc->hif_handle))
1277 		return dp_soc_attach_poll(txrx_soc);
1278 	else
1279 		return dp_soc_interrupt_attach(txrx_soc);
1280 }
1281 #endif
1282 #endif
1283 
1284 static void dp_soc_interrupt_map_calculate_integrated(struct dp_soc *soc,
1285 		int intr_ctx_num, int *irq_id_map, int *num_irq_r)
1286 {
1287 	int j;
1288 	int num_irq = 0;
1289 
1290 	int tx_mask =
1291 		wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
1292 	int rx_mask =
1293 		wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
1294 	int rx_mon_mask =
1295 		wlan_cfg_get_rx_mon_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
1296 	int rx_err_ring_mask = wlan_cfg_get_rx_err_ring_mask(
1297 					soc->wlan_cfg_ctx, intr_ctx_num);
1298 	int rx_wbm_rel_ring_mask = wlan_cfg_get_rx_wbm_rel_ring_mask(
1299 					soc->wlan_cfg_ctx, intr_ctx_num);
1300 	int reo_status_ring_mask = wlan_cfg_get_reo_status_ring_mask(
1301 					soc->wlan_cfg_ctx, intr_ctx_num);
1302 	int rxdma2host_ring_mask = wlan_cfg_get_rxdma2host_ring_mask(
1303 					soc->wlan_cfg_ctx, intr_ctx_num);
1304 	int host2rxdma_ring_mask = wlan_cfg_get_host2rxdma_ring_mask(
1305 					soc->wlan_cfg_ctx, intr_ctx_num);
1306 
1307 	for (j = 0; j < HIF_MAX_GRP_IRQ; j++) {
1308 
1309 		if (tx_mask & (1 << j)) {
1310 			irq_id_map[num_irq++] =
1311 				(wbm2host_tx_completions_ring1 - j);
1312 		}
1313 
1314 		if (rx_mask & (1 << j)) {
1315 			irq_id_map[num_irq++] =
1316 				(reo2host_destination_ring1 - j);
1317 		}
1318 
1319 		if (rxdma2host_ring_mask & (1 << j)) {
1320 			irq_id_map[num_irq++] =
1321 				rxdma2host_destination_ring_mac1 -
1322 				wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, j);
1323 		}
1324 
1325 		if (host2rxdma_ring_mask & (1 << j)) {
1326 			irq_id_map[num_irq++] =
1327 				host2rxdma_host_buf_ring_mac1 -
1328 				wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, j);
1329 		}
1330 
1331 		if (rx_mon_mask & (1 << j)) {
1332 			irq_id_map[num_irq++] =
1333 				ppdu_end_interrupts_mac1 -
1334 				wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, j);
1335 			irq_id_map[num_irq++] =
1336 				rxdma2host_monitor_status_ring_mac1 -
1337 				wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, j);
1338 		}
1339 
1340 		if (rx_wbm_rel_ring_mask & (1 << j))
1341 			irq_id_map[num_irq++] = wbm2host_rx_release;
1342 
1343 		if (rx_err_ring_mask & (1 << j))
1344 			irq_id_map[num_irq++] = reo2host_exception;
1345 
1346 		if (reo_status_ring_mask & (1 << j))
1347 			irq_id_map[num_irq++] = reo2host_status;
1348 
1349 	}
1350 	*num_irq_r = num_irq;
1351 }
1352 
1353 static void dp_soc_interrupt_map_calculate_msi(struct dp_soc *soc,
1354 		int intr_ctx_num, int *irq_id_map, int *num_irq_r,
1355 		int msi_vector_count, int msi_vector_start)
1356 {
1357 	int tx_mask = wlan_cfg_get_tx_ring_mask(
1358 					soc->wlan_cfg_ctx, intr_ctx_num);
1359 	int rx_mask = wlan_cfg_get_rx_ring_mask(
1360 					soc->wlan_cfg_ctx, intr_ctx_num);
1361 	int rx_mon_mask = wlan_cfg_get_rx_mon_ring_mask(
1362 					soc->wlan_cfg_ctx, intr_ctx_num);
1363 	int rx_err_ring_mask = wlan_cfg_get_rx_err_ring_mask(
1364 					soc->wlan_cfg_ctx, intr_ctx_num);
1365 	int rx_wbm_rel_ring_mask = wlan_cfg_get_rx_wbm_rel_ring_mask(
1366 					soc->wlan_cfg_ctx, intr_ctx_num);
1367 	int reo_status_ring_mask = wlan_cfg_get_reo_status_ring_mask(
1368 					soc->wlan_cfg_ctx, intr_ctx_num);
1369 	int rxdma2host_ring_mask = wlan_cfg_get_rxdma2host_ring_mask(
1370 					soc->wlan_cfg_ctx, intr_ctx_num);
1371 
1372 	unsigned int vector =
1373 		(intr_ctx_num % msi_vector_count) + msi_vector_start;
1374 	int num_irq = 0;
1375 
1376 	soc->intr_mode = DP_INTR_MSI;
1377 
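	/*
	 * All rings serviced from this interrupt context share a single MSI
	 * vector; register that vector only if at least one ring mask of the
	 * context is non-zero.
	 */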
1378 	if (tx_mask | rx_mask | rx_mon_mask | rx_err_ring_mask |
1379 	    rx_wbm_rel_ring_mask | reo_status_ring_mask | rxdma2host_ring_mask)
1380 		irq_id_map[num_irq++] =
1381 			pld_get_msi_irq(soc->osdev->dev, vector);
1382 
1383 	*num_irq_r = num_irq;
1384 }
1385 
1386 static void dp_soc_interrupt_map_calculate(struct dp_soc *soc, int intr_ctx_num,
1387 				    int *irq_id_map, int *num_irq)
1388 {
1389 	int msi_vector_count, ret;
1390 	uint32_t msi_base_data, msi_vector_start;
1391 
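	/*
	 * When the platform does not provide a user MSI assignment for "DP",
	 * fall back to the integrated (line interrupt) mapping; otherwise map
	 * this interrupt context onto one of the granted MSI vectors.
	 */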
1392 	ret = pld_get_user_msi_assignment(soc->osdev->dev, "DP",
1393 					    &msi_vector_count,
1394 					    &msi_base_data,
1395 					    &msi_vector_start);
1396 	if (ret)
1397 		return dp_soc_interrupt_map_calculate_integrated(soc,
1398 				intr_ctx_num, irq_id_map, num_irq);
1399 
1400 	else
1401 		dp_soc_interrupt_map_calculate_msi(soc,
1402 				intr_ctx_num, irq_id_map, num_irq,
1403 				msi_vector_count, msi_vector_start);
1404 }
1405 
1406 /*
1407  * dp_soc_interrupt_attach() - Register handlers for DP interrupts
1408  * @txrx_soc: DP SOC handle
1409  *
1410  * Host driver will register for "DP_NUM_INTERRUPT_CONTEXTS" number of NAPI
1411  * contexts. Each NAPI context will have a tx_ring_mask, rx_ring_mask, and
1412  * rx_monitor_ring mask to indicate the rings that are processed by the handler.
1413  *
1414  * Return: 0 for success. nonzero for failure.
1415  */
1416 static QDF_STATUS dp_soc_interrupt_attach(void *txrx_soc)
1417 {
1418 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
1419 
1420 	int i = 0;
1421 	int num_irq = 0;
1422 
1423 	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
1424 		int ret = 0;
1425 
1426 		/* Map of IRQ ids registered with one interrupt context */
1427 		int irq_id_map[HIF_MAX_GRP_IRQ];
1428 
1429 		int tx_mask =
1430 			wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, i);
1431 		int rx_mask =
1432 			wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, i);
1433 		int rx_mon_mask =
1434 			dp_soc_get_mon_mask_for_interrupt_mode(soc, i);
1435 		int rx_err_ring_mask =
1436 			wlan_cfg_get_rx_err_ring_mask(soc->wlan_cfg_ctx, i);
1437 		int rx_wbm_rel_ring_mask =
1438 			wlan_cfg_get_rx_wbm_rel_ring_mask(soc->wlan_cfg_ctx, i);
1439 		int reo_status_ring_mask =
1440 			wlan_cfg_get_reo_status_ring_mask(soc->wlan_cfg_ctx, i);
1441 		int rxdma2host_ring_mask =
1442 			wlan_cfg_get_rxdma2host_ring_mask(soc->wlan_cfg_ctx, i);
1443 		int host2rxdma_ring_mask =
1444 			wlan_cfg_get_host2rxdma_ring_mask(soc->wlan_cfg_ctx, i);
1445 
1446 
1447 		soc->intr_ctx[i].dp_intr_id = i;
1448 		soc->intr_ctx[i].tx_ring_mask = tx_mask;
1449 		soc->intr_ctx[i].rx_ring_mask = rx_mask;
1450 		soc->intr_ctx[i].rx_mon_ring_mask = rx_mon_mask;
1451 		soc->intr_ctx[i].rx_err_ring_mask = rx_err_ring_mask;
1452 		soc->intr_ctx[i].rxdma2host_ring_mask = rxdma2host_ring_mask;
1453 		soc->intr_ctx[i].host2rxdma_ring_mask = host2rxdma_ring_mask;
1454 		soc->intr_ctx[i].rx_wbm_rel_ring_mask = rx_wbm_rel_ring_mask;
1455 		soc->intr_ctx[i].reo_status_ring_mask = reo_status_ring_mask;
1456 
1457 		soc->intr_ctx[i].soc = soc;
1458 
1459 		num_irq = 0;
1460 
1461 		dp_soc_interrupt_map_calculate(soc, i, &irq_id_map[0],
1462 					       &num_irq);
1463 
1464 		ret = hif_register_ext_group(soc->hif_handle,
1465 				num_irq, irq_id_map, dp_service_srngs,
1466 				&soc->intr_ctx[i], "dp_intr",
1467 				HIF_EXEC_NAPI_TYPE, QCA_NAPI_DEF_SCALE_BIN_SHIFT);
1468 
1469 		if (ret) {
1470 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1471 			FL("failed, ret = %d"), ret);
1472 
1473 			return QDF_STATUS_E_FAILURE;
1474 		}
1475 		soc->intr_ctx[i].lro_ctx = qdf_lro_init();
1476 	}
1477 
1478 	hif_configure_ext_group_interrupts(soc->hif_handle);
1479 
1480 	return QDF_STATUS_SUCCESS;
1481 }
1482 
1483 /*
1484  * dp_soc_interrupt_detach() - Deregister any allocations done for interrupts
1485  * @txrx_soc: DP SOC handle
1486  *
1487  * Return: void
1488  */
1489 static void dp_soc_interrupt_detach(void *txrx_soc)
1490 {
1491 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
1492 	int i;
1493 
1494 	if (soc->intr_mode == DP_INTR_POLL) {
1495 		qdf_timer_stop(&soc->int_timer);
1496 		qdf_timer_free(&soc->int_timer);
1497 	} else {
1498 		hif_deregister_exec_group(soc->hif_handle, "dp_intr");
1499 	}
1500 
1501 	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
1502 		soc->intr_ctx[i].tx_ring_mask = 0;
1503 		soc->intr_ctx[i].rx_ring_mask = 0;
1504 		soc->intr_ctx[i].rx_mon_ring_mask = 0;
1505 		soc->intr_ctx[i].rx_err_ring_mask = 0;
1506 		soc->intr_ctx[i].rx_wbm_rel_ring_mask = 0;
1507 		soc->intr_ctx[i].reo_status_ring_mask = 0;
1508 		soc->intr_ctx[i].rxdma2host_ring_mask = 0;
1509 		soc->intr_ctx[i].host2rxdma_ring_mask = 0;
1510 
1511 		qdf_lro_deinit(soc->intr_ctx[i].lro_ctx);
1512 	}
1513 }
1514 
1515 #define AVG_MAX_MPDUS_PER_TID 128
1516 #define AVG_TIDS_PER_CLIENT 2
1517 #define AVG_FLOWS_PER_TID 2
1518 #define AVG_MSDUS_PER_FLOW 128
1519 #define AVG_MSDUS_PER_MPDU 4
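/*
 * Rough per-client traffic averages used only to size the common link
 * descriptor pool in dp_hw_link_desc_pool_setup(); the client count itself
 * comes from wlan_cfg_get_max_clients().
 */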
1520 
1521 /*
1522  * Allocate and setup link descriptor pool that will be used by HW for
1523  * various link and queue descriptors and managed by WBM
1524  */
1525 static int dp_hw_link_desc_pool_setup(struct dp_soc *soc)
1526 {
1527 	int link_desc_size = hal_get_link_desc_size(soc->hal_soc);
1528 	int link_desc_align = hal_get_link_desc_align(soc->hal_soc);
1529 	uint32_t max_clients = wlan_cfg_get_max_clients(soc->wlan_cfg_ctx);
1530 	uint32_t num_mpdus_per_link_desc =
1531 		hal_num_mpdus_per_link_desc(soc->hal_soc);
1532 	uint32_t num_msdus_per_link_desc =
1533 		hal_num_msdus_per_link_desc(soc->hal_soc);
1534 	uint32_t num_mpdu_links_per_queue_desc =
1535 		hal_num_mpdu_links_per_queue_desc(soc->hal_soc);
1536 	uint32_t max_alloc_size = wlan_cfg_max_alloc_size(soc->wlan_cfg_ctx);
1537 	uint32_t total_link_descs, total_mem_size;
1538 	uint32_t num_mpdu_link_descs, num_mpdu_queue_descs;
1539 	uint32_t num_tx_msdu_link_descs, num_rx_msdu_link_descs;
1540 	uint32_t num_link_desc_banks;
1541 	uint32_t last_bank_size = 0;
1542 	uint32_t entry_size, num_entries;
1543 	int i;
1544 	uint32_t desc_id = 0;
1545 
1546 	/* Only Tx queue descriptors are allocated from the common link descriptor
1547 	 * pool. Rx queue descriptors (REO queue extension descriptors) are not
1548 	 * included here because they are expected to be allocated contiguously
1549 	 * with the REO queue descriptors.
1550 	 */
1551 	num_mpdu_link_descs = (max_clients * AVG_TIDS_PER_CLIENT *
1552 		AVG_MAX_MPDUS_PER_TID) / num_mpdus_per_link_desc;
1553 
1554 	num_mpdu_queue_descs = num_mpdu_link_descs /
1555 		num_mpdu_links_per_queue_desc;
1556 
1557 	num_tx_msdu_link_descs = (max_clients * AVG_TIDS_PER_CLIENT *
1558 		AVG_FLOWS_PER_TID * AVG_MSDUS_PER_FLOW) /
1559 		num_msdus_per_link_desc;
1560 
1561 	num_rx_msdu_link_descs = (max_clients * AVG_TIDS_PER_CLIENT *
1562 		AVG_MAX_MPDUS_PER_TID * AVG_MSDUS_PER_MPDU) / 6;
1563 
1564 	num_entries = num_mpdu_link_descs + num_mpdu_queue_descs +
1565 		num_tx_msdu_link_descs + num_rx_msdu_link_descs;
1566 
1567 	/* Round up to power of 2 */
1568 	total_link_descs = 1;
1569 	while (total_link_descs < num_entries)
1570 		total_link_descs <<= 1;
1571 
1572 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
1573 		FL("total_link_descs: %u, link_desc_size: %d"),
1574 		total_link_descs, link_desc_size);
1575 	total_mem_size =  total_link_descs * link_desc_size;
1576 
1577 	total_mem_size += link_desc_align;
1578 
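	/*
	 * Carve the pool into consistent-memory banks no larger than
	 * max_alloc_size; any remainder becomes a smaller final bank that is
	 * allocated separately below.
	 */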
1579 	if (total_mem_size <= max_alloc_size) {
1580 		num_link_desc_banks = 0;
1581 		last_bank_size = total_mem_size;
1582 	} else {
1583 		num_link_desc_banks = (total_mem_size) /
1584 			(max_alloc_size - link_desc_align);
1585 		last_bank_size = total_mem_size %
1586 			(max_alloc_size - link_desc_align);
1587 	}
1588 
1589 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
1590 		FL("total_mem_size: %d, num_link_desc_banks: %u"),
1591 		total_mem_size, num_link_desc_banks);
1592 
1593 	for (i = 0; i < num_link_desc_banks; i++) {
1594 		soc->link_desc_banks[i].base_vaddr_unaligned =
1595 			qdf_mem_alloc_consistent(soc->osdev, soc->osdev->dev,
1596 			max_alloc_size,
1597 			&(soc->link_desc_banks[i].base_paddr_unaligned));
1598 		soc->link_desc_banks[i].size = max_alloc_size;
1599 
1600 		soc->link_desc_banks[i].base_vaddr = (void *)((unsigned long)(
1601 			soc->link_desc_banks[i].base_vaddr_unaligned) +
1602 			((unsigned long)(
1603 			soc->link_desc_banks[i].base_vaddr_unaligned) %
1604 			link_desc_align));
1605 
1606 		soc->link_desc_banks[i].base_paddr = (unsigned long)(
1607 			soc->link_desc_banks[i].base_paddr_unaligned) +
1608 			((unsigned long)(soc->link_desc_banks[i].base_vaddr) -
1609 			(unsigned long)(
1610 			soc->link_desc_banks[i].base_vaddr_unaligned));
1611 
1612 		if (!soc->link_desc_banks[i].base_vaddr_unaligned) {
1613 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1614 				FL("Link descriptor memory alloc failed"));
1615 			goto fail;
1616 		}
1617 	}
1618 
1619 	if (last_bank_size) {
1620 		/* Allocate last bank in case total memory required is not an exact
1621 		 * multiple of max_alloc_size
1622 		 */
1623 		soc->link_desc_banks[i].base_vaddr_unaligned =
1624 			qdf_mem_alloc_consistent(soc->osdev, soc->osdev->dev,
1625 			last_bank_size,
1626 			&(soc->link_desc_banks[i].base_paddr_unaligned));
1627 		soc->link_desc_banks[i].size = last_bank_size;
1628 
1629 		soc->link_desc_banks[i].base_vaddr = (void *)((unsigned long)
1630 			(soc->link_desc_banks[i].base_vaddr_unaligned) +
1631 			((unsigned long)(
1632 			soc->link_desc_banks[i].base_vaddr_unaligned) %
1633 			link_desc_align));
1634 
1635 		soc->link_desc_banks[i].base_paddr =
1636 			(unsigned long)(
1637 			soc->link_desc_banks[i].base_paddr_unaligned) +
1638 			((unsigned long)(soc->link_desc_banks[i].base_vaddr) -
1639 			(unsigned long)(
1640 			soc->link_desc_banks[i].base_vaddr_unaligned));
1641 	}
1642 
1643 
1644 	/* Allocate and setup link descriptor idle list for HW internal use */
1645 	entry_size = hal_srng_get_entrysize(soc->hal_soc, WBM_IDLE_LINK);
1646 	total_mem_size = entry_size * total_link_descs;
1647 
1648 	if (total_mem_size <= max_alloc_size) {
1649 		void *desc;
1650 
1651 		if (dp_srng_setup(soc, &soc->wbm_idle_link_ring,
1652 			WBM_IDLE_LINK, 0, 0, total_link_descs)) {
1653 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1654 				FL("Link desc idle ring setup failed"));
1655 			goto fail;
1656 		}
1657 
1658 		hal_srng_access_start_unlocked(soc->hal_soc,
1659 			soc->wbm_idle_link_ring.hal_srng);
1660 
1661 		for (i = 0; i < MAX_LINK_DESC_BANKS &&
1662 			soc->link_desc_banks[i].base_paddr; i++) {
1663 			uint32_t num_entries = (soc->link_desc_banks[i].size -
1664 				((unsigned long)(
1665 				soc->link_desc_banks[i].base_vaddr) -
1666 				(unsigned long)(
1667 				soc->link_desc_banks[i].base_vaddr_unaligned)))
1668 				/ link_desc_size;
1669 			unsigned long paddr = (unsigned long)(
1670 				soc->link_desc_banks[i].base_paddr);
1671 
1672 			while (num_entries && (desc = hal_srng_src_get_next(
1673 				soc->hal_soc,
1674 				soc->wbm_idle_link_ring.hal_srng))) {
1675 				hal_set_link_desc_addr(desc,
1676 					LINK_DESC_COOKIE(desc_id, i), paddr);
1677 				num_entries--;
1678 				desc_id++;
1679 				paddr += link_desc_size;
1680 			}
1681 		}
1682 		hal_srng_access_end_unlocked(soc->hal_soc,
1683 			soc->wbm_idle_link_ring.hal_srng);
1684 	} else {
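		/*
		 * The idle link descriptor list does not fit into a single
		 * WBM_IDLE_LINK ring allocation, so it is provided to HW as a
		 * set of scatter buffers populated with link descriptor
		 * addresses instead.
		 */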
1685 		uint32_t num_scatter_bufs;
1686 		uint32_t num_entries_per_buf;
1687 		uint32_t rem_entries;
1688 		uint8_t *scatter_buf_ptr;
1689 		uint16_t scatter_buf_num;
1690 
1691 		soc->wbm_idle_scatter_buf_size =
1692 			hal_idle_list_scatter_buf_size(soc->hal_soc);
1693 		num_entries_per_buf = hal_idle_scatter_buf_num_entries(
1694 			soc->hal_soc, soc->wbm_idle_scatter_buf_size);
1695 		num_scatter_bufs = hal_idle_list_num_scatter_bufs(
1696 					soc->hal_soc, total_mem_size,
1697 					soc->wbm_idle_scatter_buf_size);
1698 
1699 		if (num_scatter_bufs > MAX_IDLE_SCATTER_BUFS) {
1700 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1701 					FL("scatter bufs size out of bounds"));
1702 			goto fail;
1703 		}
1704 
1705 		for (i = 0; i < num_scatter_bufs; i++) {
1706 			soc->wbm_idle_scatter_buf_base_vaddr[i] =
1707 				qdf_mem_alloc_consistent(soc->osdev,
1708 							soc->osdev->dev,
1709 				soc->wbm_idle_scatter_buf_size,
1710 				&(soc->wbm_idle_scatter_buf_base_paddr[i]));
1711 			if (soc->wbm_idle_scatter_buf_base_vaddr[i] == NULL) {
1712 				QDF_TRACE(QDF_MODULE_ID_DP,
1713 						QDF_TRACE_LEVEL_ERROR,
1714 					FL("Scatter list memory alloc failed"));
1715 				goto fail;
1716 			}
1717 		}
1718 
1719 		/* Populate idle list scatter buffers with link descriptor
1720 		 * pointers
1721 		 */
1722 		scatter_buf_num = 0;
1723 		scatter_buf_ptr = (uint8_t *)(
1724 			soc->wbm_idle_scatter_buf_base_vaddr[scatter_buf_num]);
1725 		rem_entries = num_entries_per_buf;
1726 
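		/*
		 * Record every link descriptor's physical address in the
		 * scatter buffers, moving on to the next scatter buffer
		 * once the current one is full.
		 */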
1727 		for (i = 0; i < MAX_LINK_DESC_BANKS &&
1728 			soc->link_desc_banks[i].base_paddr; i++) {
1729 			uint32_t num_link_descs =
1730 				(soc->link_desc_banks[i].size -
1731 				((unsigned long)(
1732 				soc->link_desc_banks[i].base_vaddr) -
1733 				(unsigned long)(
1734 				soc->link_desc_banks[i].base_vaddr_unaligned)))
1735 				/ link_desc_size;
1736 			unsigned long paddr = (unsigned long)(
1737 				soc->link_desc_banks[i].base_paddr);
1738 
1739 			while (num_link_descs) {
1740 				hal_set_link_desc_addr((void *)scatter_buf_ptr,
1741 					LINK_DESC_COOKIE(desc_id, i), paddr);
1742 				num_link_descs--;
1743 				desc_id++;
1744 				paddr += link_desc_size;
1745 				rem_entries--;
1746 				if (rem_entries) {
1747 					scatter_buf_ptr += entry_size;
1748 				} else {
1749 					rem_entries = num_entries_per_buf;
1750 					scatter_buf_num++;
1751 
1752 					if (scatter_buf_num >= num_scatter_bufs)
1753 						break;
1754 
1755 					scatter_buf_ptr = (uint8_t *)(
1756 						soc->wbm_idle_scatter_buf_base_vaddr[
1757 						scatter_buf_num]);
1758 				}
1759 			}
1760 		}
1761 		/* Setup link descriptor idle list in HW */
1762 		hal_setup_link_idle_list(soc->hal_soc,
1763 			soc->wbm_idle_scatter_buf_base_paddr,
1764 			soc->wbm_idle_scatter_buf_base_vaddr,
1765 			num_scatter_bufs, soc->wbm_idle_scatter_buf_size,
1766 			(uint32_t)(scatter_buf_ptr -
1767 			(uint8_t *)(soc->wbm_idle_scatter_buf_base_vaddr[
1768 			scatter_buf_num-1])), total_link_descs);
1769 	}
1770 	return 0;
1771 
1772 fail:
1773 	if (soc->wbm_idle_link_ring.hal_srng) {
1774 		dp_srng_cleanup(soc->hal_soc, &soc->wbm_idle_link_ring,
1775 			WBM_IDLE_LINK, 0);
1776 	}
1777 
1778 	for (i = 0; i < MAX_IDLE_SCATTER_BUFS; i++) {
1779 		if (soc->wbm_idle_scatter_buf_base_vaddr[i]) {
1780 			qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
1781 				soc->wbm_idle_scatter_buf_size,
1782 				soc->wbm_idle_scatter_buf_base_vaddr[i],
1783 				soc->wbm_idle_scatter_buf_base_paddr[i], 0);
1784 			soc->wbm_idle_scatter_buf_base_vaddr[i] = NULL;
1785 		}
1786 	}
1787 
1788 	for (i = 0; i < MAX_LINK_DESC_BANKS; i++) {
1789 		if (soc->link_desc_banks[i].base_vaddr_unaligned) {
1790 			qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
1791 				soc->link_desc_banks[i].size,
1792 				soc->link_desc_banks[i].base_vaddr_unaligned,
1793 				soc->link_desc_banks[i].base_paddr_unaligned,
1794 				0);
1795 			soc->link_desc_banks[i].base_vaddr_unaligned = NULL;
1796 		}
1797 	}
1798 	return QDF_STATUS_E_FAILURE;
1799 }
1800 
1801 /*
1802  * Free the link descriptor pool that was set up in HW
1803  */
1804 static void dp_hw_link_desc_pool_cleanup(struct dp_soc *soc)
1805 {
1806 	int i;
1807 
1808 	if (soc->wbm_idle_link_ring.hal_srng) {
1809 		dp_srng_cleanup(soc, &soc->wbm_idle_link_ring,
1810 			WBM_IDLE_LINK, 0);
1811 	}
1812 
1813 	for (i = 0; i < MAX_IDLE_SCATTER_BUFS; i++) {
1814 		if (soc->wbm_idle_scatter_buf_base_vaddr[i]) {
1815 			qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
1816 				soc->wbm_idle_scatter_buf_size,
1817 				soc->wbm_idle_scatter_buf_base_vaddr[i],
1818 				soc->wbm_idle_scatter_buf_base_paddr[i], 0);
1819 			soc->wbm_idle_scatter_buf_base_vaddr[i] = NULL;
1820 		}
1821 	}
1822 
1823 	for (i = 0; i < MAX_LINK_DESC_BANKS; i++) {
1824 		if (soc->link_desc_banks[i].base_vaddr_unaligned) {
1825 			qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
1826 				soc->link_desc_banks[i].size,
1827 				soc->link_desc_banks[i].base_vaddr_unaligned,
1828 				soc->link_desc_banks[i].base_paddr_unaligned,
1829 				0);
1830 			soc->link_desc_banks[i].base_vaddr_unaligned = NULL;
1831 		}
1832 	}
1833 }
1834 
1835 #define REO_DST_RING_SIZE_QCA6290 1024
1836 #define REO_DST_RING_SIZE_QCA8074 2048
1837 
1838 /*
1839  * dp_wds_aging_timer_fn() - Timer callback function for WDS aging
1840  * @soc_hdl: Datapath SOC handle
1841  *
1842  * This is a timer function used to age out stale AST entries from
1843  * the AST table
1844  */
1845 #ifdef FEATURE_WDS
1846 static void dp_wds_aging_timer_fn(void *soc_hdl)
1847 {
1848 	struct dp_soc *soc = (struct dp_soc *) soc_hdl;
1849 	struct dp_pdev *pdev;
1850 	struct dp_vdev *vdev;
1851 	struct dp_peer *peer;
1852 	struct dp_ast_entry *ase, *temp_ase;
1853 	int i;
1854 
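	/*
	 * Aging is two-pass: a WDS entry that was active since the last run
	 * only has its is_active flag cleared here; an entry still marked
	 * inactive on the next run is deleted from the AST table.
	 */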
1855 	qdf_spin_lock_bh(&soc->ast_lock);
1856 
1857 	for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) {
1858 		pdev = soc->pdev_list[i];
1859 		qdf_spin_lock_bh(&pdev->vdev_list_lock);
1860 		DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
1861 			DP_VDEV_ITERATE_PEER_LIST(vdev, peer) {
1862 				DP_PEER_ITERATE_ASE_LIST(peer, ase, temp_ase) {
1863 					/*
1864 					 * Do not expire static ast entries
1865 					 * and HM WDS entries
1866 					 */
1867 					if (ase->type != CDP_TXRX_AST_TYPE_WDS)
1868 						continue;
1869 
1870 					if (ase->is_active) {
1871 						ase->is_active = FALSE;
1872 						continue;
1873 					}
1874 
1875 					DP_STATS_INC(soc, ast.aged_out, 1);
1876 					dp_peer_del_ast(soc, ase);
1877 				}
1878 			}
1879 		}
1880 		qdf_spin_unlock_bh(&pdev->vdev_list_lock);
1881 	}
1882 
1883 	qdf_spin_unlock_bh(&soc->ast_lock);
1884 
1885 	if (qdf_atomic_read(&soc->cmn_init_done))
1886 		qdf_timer_mod(&soc->wds_aging_timer, DP_WDS_AGING_TIMER_DEFAULT_MS);
1887 }
1888 
1889 
1890 /*
1891  * dp_soc_wds_attach() - Setup WDS timer and AST table
1892  * @soc:		Datapath SOC handle
1893  *
1894  * Return: None
1895  */
1896 static void dp_soc_wds_attach(struct dp_soc *soc)
1897 {
1898 	qdf_timer_init(soc->osdev, &soc->wds_aging_timer,
1899 			dp_wds_aging_timer_fn, (void *)soc,
1900 			QDF_TIMER_TYPE_WAKE_APPS);
1901 
1902 	qdf_timer_mod(&soc->wds_aging_timer, DP_WDS_AGING_TIMER_DEFAULT_MS);
1903 }
1904 
1905 /*
1906  * dp_soc_wds_detach() - Detach WDS data structures and timers
1907  * @soc: Datapath SOC handle
1908  *
1909  * Return: None
1910  */
1911 static void dp_soc_wds_detach(struct dp_soc *soc)
1912 {
1913 	qdf_timer_stop(&soc->wds_aging_timer);
1914 	qdf_timer_free(&soc->wds_aging_timer);
1915 }
1916 #else
1917 static void dp_soc_wds_attach(struct dp_soc *soc)
1918 {
1919 }
1920 
1921 static void dp_soc_wds_detach(struct dp_soc *soc)
1922 {
1923 }
1924 #endif
1925 
1926 /*
1927  * dp_soc_reset_cpu_ring_map() - Reset CPU ring map
1928  * @soc: Datapath SOC handle
1929  *
1930  * This API resets the Tx CPU ring map based on the NSS offload configuration
1931  */
1932 
1933 static void dp_soc_reset_cpu_ring_map(struct dp_soc *soc)
1934 {
1935 	uint8_t i;
1936 	int nss_config = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
1937 
1938 	for (i = 0; i < WLAN_CFG_INT_NUM_CONTEXTS; i++) {
1939 		if (nss_config == 1) {
1940 			/*
1941 			 * Setting Tx ring map for one nss offloaded radio
1942 			 */
1943 			soc->tx_ring_map[i] = dp_cpu_ring_map[DP_NSS_FIRST_RADIO_OFFLOADED_MAP][i];
1944 		} else if (nss_config == 2) {
1945 			/*
1946 			 * Setting Tx ring for two nss offloaded radios
1947 			 */
1948 			soc->tx_ring_map[i] = dp_cpu_ring_map[DP_NSS_SECOND_RADIO_OFFLOADED_MAP][i];
1949 		} else {
1950 			/*
1951 			 * Setting Tx ring map for all nss offloaded radios
1952 			 */
1953 			soc->tx_ring_map[i] = dp_cpu_ring_map[DP_NSS_ALL_RADIO_OFFLOADED_MAP][i];
1954 		}
1955 	}
1956 }
1957 
1958 /*
1959  * dp_soc_ring_if_nss_offloaded() - find if ring is offloaded to NSS
1960  * @soc - DP SOC handle
1961  * @ring_type - ring type
1962  * @ring_num - ring number
1963  *
1964  * Return: non-zero if the ring is offloaded to NSS, 0 otherwise
1965  */
1966 static uint8_t dp_soc_ring_if_nss_offloaded(struct dp_soc *soc, enum hal_ring_type ring_type, int ring_num)
1967 {
1968 	uint8_t nss_config = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
1969 	uint8_t status = 0;
1970 
1971 	switch (ring_type) {
1972 	case WBM2SW_RELEASE:
1973 	case REO_DST:
1974 	case RXDMA_BUF:
1975 		status = ((nss_config) & (1 << ring_num));
1976 		break;
1977 	default:
1978 		break;
1979 	}
1980 
1981 	return status;
1982 }
1983 
1984 /*
1985  * dp_soc_reset_intr_mask() - reset interrupt mask
1986  * @soc - DP SOC handle
1987  *
1988  * Return: void
1989  */
1990 static void dp_soc_reset_intr_mask(struct dp_soc *soc)
1991 {
1992 	uint8_t j;
1993 	int *grp_mask = NULL;
1994 	int group_number, mask, num_ring;
1995 
1996 	/* number of tx ring */
1997 	num_ring = wlan_cfg_num_tcl_data_rings(soc->wlan_cfg_ctx);
1998 
1999 	/*
2000 	 * group mask for tx completion  ring.
2001 	 */
2002 	grp_mask =  &soc->wlan_cfg_ctx->int_tx_ring_mask[0];
2003 
2004 	/* loop and reset the mask for only offloaded ring */
2005 	for (j = 0; j < num_ring; j++) {
2006 		if (!dp_soc_ring_if_nss_offloaded(soc, WBM2SW_RELEASE, j)) {
2007 			continue;
2008 		}
2009 
2010 		/*
2011 		 * Group number corresponding to tx offloaded ring.
2012 		 */
2013 		group_number = dp_srng_find_ring_in_mask(j, grp_mask);
2014 		if (group_number < 0) {
2015 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2016 					FL("ring not part of any group; ring_type: %d,ring_num %d"),
2017 					WBM2SW_RELEASE, j);
2018 			return;
2019 		}
2020 
2021 		/* reset the tx mask for offloaded ring */
2022 		mask = wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, group_number);
2023 		mask &= (~(1 << j));
2024 
2025 		/*
2026 		 * reset the interrupt mask for offloaded ring.
2027 		 */
2028 		wlan_cfg_set_tx_ring_mask(soc->wlan_cfg_ctx, group_number, mask);
2029 	}
2030 
2031 	/* number of rx rings */
2032 	num_ring = wlan_cfg_num_reo_dest_rings(soc->wlan_cfg_ctx);
2033 
2034 	/*
2035 	 * group mask for reo destination ring.
2036 	 */
2037 	grp_mask = &soc->wlan_cfg_ctx->int_rx_ring_mask[0];
2038 
2039 	/* loop and reset the mask for only offloaded ring */
2040 	for (j = 0; j < num_ring; j++) {
2041 		if (!dp_soc_ring_if_nss_offloaded(soc, REO_DST, j)) {
2042 			continue;
2043 		}
2044 
2045 		/*
2046 		 * Group number corresponding to rx offloaded ring.
2047 		 */
2048 		group_number = dp_srng_find_ring_in_mask(j, grp_mask);
2049 		if (group_number < 0) {
2050 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2051 					FL("ring not part of any group; ring_type: %d,ring_num %d"),
2052 					REO_DST, j);
2053 			return;
2054 		}
2055 
2056 		/* reset the rx interrupt mask for the offloaded ring */
2057 		mask =  wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, group_number);
2058 		mask &= (~(1 << j));
2059 
2060 		/*
2061 		 * write back the rx mask with the offloaded ring's bit cleared.
2062 		 */
2063 		wlan_cfg_set_rx_ring_mask(soc->wlan_cfg_ctx, group_number, mask);
2064 	}
2065 
2066 	/*
2067 	 * group mask for Rx buffer refill ring
2068 	 */
2069 	grp_mask = &soc->wlan_cfg_ctx->int_host2rxdma_ring_mask[0];
2070 
2071 	/* loop and reset the mask for only offloaded ring */
2072 	for (j = 0; j < MAX_PDEV_CNT; j++) {
2073 		if (!dp_soc_ring_if_nss_offloaded(soc, RXDMA_BUF, j)) {
2074 			continue;
2075 		}
2076 
2077 		/*
2078 		 * Group number corresponding to rx offloaded ring.
2079 		 */
2080 		group_number = dp_srng_find_ring_in_mask(j, grp_mask);
2081 		if (group_number < 0) {
2082 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2083 					FL("ring not part of any group; ring_type: %d,ring_num %d"),
2084 					RXDMA_BUF, j);
2085 			return;
2086 		}
2087 
2088 		/* reset the host2rxdma interrupt mask for the offloaded ring */
2089 		mask =  wlan_cfg_get_host2rxdma_ring_mask(soc->wlan_cfg_ctx,
2090 				group_number);
2091 		mask &= (~(1 << j));
2092 
2093 		/*
2094 		 * write back the host2rxdma mask with the offloaded ring's bit cleared.
2095 		 */
2096 		wlan_cfg_set_host2rxdma_ring_mask(soc->wlan_cfg_ctx,
2097 			group_number, mask);
2098 	}
2099 }
2100 
2101 #ifdef IPA_OFFLOAD
2102 /**
2103  * dp_reo_remap_config() - configure reo remap register value based
2104  *                         on nss configuration.
2105  *		Based on the offload_radio value, one of the remap
2106  *		configurations below gets applied:
2107  *		0 - both Radios handled by host (remap rings 1, 2, 3 & 4)
2108  *		1 - 1st Radio handled by NSS (remap rings 2, 3 & 4)
2109  *		2 - 2nd Radio handled by NSS (remap rings 1, 2 & 4)
2110  *		3 - both Radios handled by NSS (remap not required)
2111  *		4 - IPA OFFLOAD enabled (remap rings 1,2 & 3)
2112  *
2113  * @remap1: output parameter indicates reo remap 1 register value
2114  * @remap2: output parameter indicates reo remap 2 register value
2115  * Return: bool type, true if remap is configured else false.
2116  */
2117 static bool dp_reo_remap_config(struct dp_soc *soc,
2118 				uint32_t *remap1,
2119 				uint32_t *remap2)
2120 {
2121 
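	/*
	 * With IPA offload enabled, REO2SW4 is left for IPA, so hashed RX
	 * traffic is spread only across REO2SW rings 1, 2 and 3; each 3-bit
	 * field below selects one destination ring.
	 */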
2122 	*remap1 = ((0x1 << 0) | (0x2 << 3) | (0x3 << 6) | (0x1 << 9) |
2123 		(0x2 << 12) | (0x3 << 15) | (0x1 << 18) | (0x2 << 21)) << 8;
2124 
2125 	*remap2 = ((0x3 << 0) | (0x1 << 3) | (0x2 << 6) | (0x3 << 9) |
2126 		(0x1 << 12) | (0x2 << 15) | (0x3 << 18) | (0x1 << 21)) << 8;
2127 
2128 	return true;
2129 }
2130 #else
2131 static bool dp_reo_remap_config(struct dp_soc *soc,
2132 				uint32_t *remap1,
2133 				uint32_t *remap2)
2134 {
2135 	uint8_t offload_radio = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
2136 
2137 	switch (offload_radio) {
2138 	case 0:
2139 		*remap1 = ((0x1 << 0) | (0x2 << 3) | (0x3 << 6) |
2140 			(0x4 << 9) | (0x1 << 12) | (0x2 << 15) |
2141 			(0x3 << 18) | (0x4 << 21)) << 8;
2142 
2143 		*remap2 = ((0x1 << 0) | (0x2 << 3) | (0x3 << 6) |
2144 			(0x4 << 9) | (0x1 << 12) | (0x2 << 15) |
2145 			(0x3 << 18) | (0x4 << 21)) << 8;
2146 		break;
2147 
2148 	case 1:
2149 		*remap1 = ((0x2 << 0) | (0x3 << 3) | (0x4 << 6) |
2150 			(0x2 << 9) | (0x3 << 12) | (0x4 << 15) |
2151 			(0x2 << 18) | (0x3 << 21)) << 8;
2152 
2153 		*remap2 = ((0x4 << 0) | (0x2 << 3) | (0x3 << 6) |
2154 			(0x4 << 9) | (0x2 << 12) | (0x3 << 15) |
2155 			(0x4 << 18) | (0x2 << 21)) << 8;
2156 		break;
2157 
2158 	case 2:
2159 		*remap1 = ((0x1 << 0) | (0x3 << 3) | (0x4 << 6) |
2160 			(0x1 << 9) | (0x3 << 12) | (0x4 << 15) |
2161 			(0x1 << 18) | (0x3 << 21)) << 8;
2162 
2163 		*remap2 = ((0x4 << 0) | (0x1 << 3) | (0x3 << 6) |
2164 			(0x4 << 9) | (0x1 << 12) | (0x3 << 15) |
2165 			(0x4 << 18) | (0x1 << 21)) << 8;
2166 		break;
2167 
2168 	case 3:
2169 		/* return false if both radios are offloaded to NSS */
2170 		return false;
2171 	}
2172 	return true;
2173 }
2174 #endif
2175 
2176 /*
2177  * dp_reo_frag_dst_set() - configure reo register to set the
2178  *                        fragment destination ring
2179  * @soc : Datapath soc
2180  * @frag_dst_ring : output parameter to set fragment destination ring
2181  *
2182  * Based on offload_radio, the fragment destination ring is selected as below:
2183  * 0 - TCL
2184  * 1 - SW1
2185  * 2 - SW2
2186  * 3 - SW3
2187  * 4 - SW4
2188  * 5 - Release
2189  * 6 - FW
2190  * 7 - alternate select
2191  *
2192  * return: void
2193  */
2194 static void dp_reo_frag_dst_set(struct dp_soc *soc, uint8_t *frag_dst_ring)
2195 {
2196 	uint8_t offload_radio = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
2197 
2198 	switch (offload_radio) {
2199 	case 0:
2200 		*frag_dst_ring = HAL_SRNG_REO_EXCEPTION;
2201 		break;
2202 	case 3:
2203 		*frag_dst_ring = HAL_SRNG_REO_ALTERNATE_SELECT;
2204 		break;
2205 	default:
2206 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2207 				FL("dp_reo_frag_dst_set invalid offload radio config"));
2208 		break;
2209 	}
2210 }
2211 
2212 /*
2213  * dp_soc_cmn_setup() - Common SoC level initialization
2214  * @soc:		Datapath SOC handle
2215  *
2216  * This is an internal function used to setup common SOC data structures,
2217  * to be called from PDEV attach after receiving HW mode capabilities from FW
2218  */
2219 static int dp_soc_cmn_setup(struct dp_soc *soc)
2220 {
2221 	int i;
2222 	struct hal_reo_params reo_params;
2223 	int tx_ring_size;
2224 	int tx_comp_ring_size;
2225 	int reo_dst_ring_size;
2226 	uint32_t entries;
2227 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
2228 
2229 	if (qdf_atomic_read(&soc->cmn_init_done))
2230 		return 0;
2231 
2232 	if (dp_hw_link_desc_pool_setup(soc))
2233 		goto fail1;
2234 
2235 	soc_cfg_ctx = soc->wlan_cfg_ctx;
2236 	/* Setup SRNG rings */
2237 	/* Common rings */
2238 	if (dp_srng_setup(soc, &soc->wbm_desc_rel_ring, SW2WBM_RELEASE, 0, 0,
2239 		wlan_cfg_get_dp_soc_wbm_release_ring_size(soc_cfg_ctx))) {
2240 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2241 			FL("dp_srng_setup failed for wbm_desc_rel_ring"));
2242 		goto fail1;
2243 	}
2244 
2245 
2246 	soc->num_tcl_data_rings = 0;
2247 	/* Tx data rings */
2248 	if (!wlan_cfg_per_pdev_tx_ring(soc_cfg_ctx)) {
2249 		soc->num_tcl_data_rings =
2250 			wlan_cfg_num_tcl_data_rings(soc_cfg_ctx);
2251 		tx_comp_ring_size =
2252 			wlan_cfg_tx_comp_ring_size(soc_cfg_ctx);
2253 		tx_ring_size =
2254 			wlan_cfg_tx_ring_size(soc_cfg_ctx);
2255 		for (i = 0; i < soc->num_tcl_data_rings; i++) {
2256 			if (dp_srng_setup(soc, &soc->tcl_data_ring[i],
2257 				TCL_DATA, i, 0, tx_ring_size)) {
2258 				QDF_TRACE(QDF_MODULE_ID_DP,
2259 					QDF_TRACE_LEVEL_ERROR,
2260 					FL("dp_srng_setup failed for tcl_data_ring[%d]"), i);
2261 				goto fail1;
2262 			}
2263 			/*
2264 			 * TBD: Set IPA WBM ring size with ini IPA UC tx buffer
2265 			 * count
2266 			 */
2267 			if (dp_srng_setup(soc, &soc->tx_comp_ring[i],
2268 				WBM2SW_RELEASE, i, 0, tx_comp_ring_size)) {
2269 				QDF_TRACE(QDF_MODULE_ID_DP,
2270 					QDF_TRACE_LEVEL_ERROR,
2271 					FL("dp_srng_setup failed for tx_comp_ring[%d]"), i);
2272 				goto fail1;
2273 			}
2274 		}
2275 	} else {
2276 		/* This will be incremented during per pdev ring setup */
2277 		soc->num_tcl_data_rings = 0;
2278 	}
2279 
2280 	if (dp_tx_soc_attach(soc)) {
2281 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2282 				FL("dp_tx_soc_attach failed"));
2283 		goto fail1;
2284 	}
2285 
2286 	entries = wlan_cfg_get_dp_soc_tcl_cmd_ring_size(soc_cfg_ctx);
2287 	/* TCL command and status rings */
2288 	if (dp_srng_setup(soc, &soc->tcl_cmd_ring, TCL_CMD, 0, 0,
2289 			  entries)) {
2290 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2291 			FL("dp_srng_setup failed for tcl_cmd_ring"));
2292 		goto fail1;
2293 	}
2294 
2295 	entries = wlan_cfg_get_dp_soc_tcl_status_ring_size(soc_cfg_ctx);
2296 	if (dp_srng_setup(soc, &soc->tcl_status_ring, TCL_STATUS, 0, 0,
2297 			  entries)) {
2298 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2299 			FL("dp_srng_setup failed for tcl_status_ring"));
2300 		goto fail1;
2301 	}
2302 
2303 	reo_dst_ring_size = wlan_cfg_get_reo_dst_ring_size(soc->wlan_cfg_ctx);
2304 
2305 	/* TBD: call dp_tx_init to setup Tx SW descriptors and MSDU extension
2306 	 * descriptors
2307 	 */
2308 
2309 	/* Rx data rings */
2310 	if (!wlan_cfg_per_pdev_rx_ring(soc_cfg_ctx)) {
2311 		soc->num_reo_dest_rings =
2312 			wlan_cfg_num_reo_dest_rings(soc_cfg_ctx);
2313 		QDF_TRACE(QDF_MODULE_ID_DP,
2314 			QDF_TRACE_LEVEL_INFO,
2315 			FL("num_reo_dest_rings %d"), soc->num_reo_dest_rings);
2316 		for (i = 0; i < soc->num_reo_dest_rings; i++) {
2317 			if (dp_srng_setup(soc, &soc->reo_dest_ring[i], REO_DST,
2318 				i, 0, reo_dst_ring_size)) {
2319 				QDF_TRACE(QDF_MODULE_ID_DP,
2320 					  QDF_TRACE_LEVEL_ERROR,
2321 					  FL(RNG_ERR "reo_dest_ring [%d]"), i);
2322 				goto fail1;
2323 			}
2324 		}
2325 	} else {
2326 		/* This will be incremented during per pdev ring setup */
2327 		soc->num_reo_dest_rings = 0;
2328 	}
2329 
2330 	entries = wlan_cfg_get_dp_soc_rxdma_err_dst_ring_size(soc_cfg_ctx);
2331 	/* LMAC RxDMA to SW Rings configuration */
2332 	if (!wlan_cfg_per_pdev_lmac_ring(soc_cfg_ctx)) {
2333 		/* Only valid for MCL */
2334 		struct dp_pdev *pdev = soc->pdev_list[0];
2335 
2336 		for (i = 0; i < MAX_RX_MAC_RINGS; i++) {
2337 			if (dp_srng_setup(soc, &pdev->rxdma_err_dst_ring[i],
2338 					  RXDMA_DST, 0, i,
2339 					  entries)) {
2340 				QDF_TRACE(QDF_MODULE_ID_DP,
2341 					  QDF_TRACE_LEVEL_ERROR,
2342 					  FL(RNG_ERR "rxdma_err_dst_ring"));
2343 				goto fail1;
2344 			}
2345 		}
2346 	}
2347 	/* TBD: call dp_rx_init to setup Rx SW descriptors */
2348 
2349 	/* REO reinjection ring */
2350 	entries = wlan_cfg_get_dp_soc_reo_reinject_ring_size(soc_cfg_ctx);
2351 	if (dp_srng_setup(soc, &soc->reo_reinject_ring, REO_REINJECT, 0, 0,
2352 			  entries)) {
2353 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2354 			  FL("dp_srng_setup failed for reo_reinject_ring"));
2355 		goto fail1;
2356 	}
2357 
2358 
2359 	/* Rx release ring */
2360 	if (dp_srng_setup(soc, &soc->rx_rel_ring, WBM2SW_RELEASE, 3, 0,
2361 		wlan_cfg_get_dp_soc_rx_release_ring_size(soc_cfg_ctx))) {
2362 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2363 			  FL("dp_srng_setup failed for rx_rel_ring"));
2364 		goto fail1;
2365 	}
2366 
2367 
2368 	/* Rx exception ring */
2369 	entries = wlan_cfg_get_dp_soc_reo_exception_ring_size(soc_cfg_ctx);
2370 	if (dp_srng_setup(soc, &soc->reo_exception_ring,
2371 			  REO_EXCEPTION, 0, MAX_REO_DEST_RINGS, entries)) {
2372 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2373 			  FL("dp_srng_setup failed for reo_exception_ring"));
2374 		goto fail1;
2375 	}
2376 
2377 
2378 	/* REO command and status rings */
2379 	if (dp_srng_setup(soc, &soc->reo_cmd_ring, REO_CMD, 0, 0,
2380 		wlan_cfg_get_dp_soc_reo_cmd_ring_size(soc_cfg_ctx))) {
2381 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2382 			FL("dp_srng_setup failed for reo_cmd_ring"));
2383 		goto fail1;
2384 	}
2385 
2386 	hal_reo_init_cmd_ring(soc->hal_soc, soc->reo_cmd_ring.hal_srng);
2387 	TAILQ_INIT(&soc->rx.reo_cmd_list);
2388 	qdf_spinlock_create(&soc->rx.reo_cmd_lock);
2389 
2390 	if (dp_srng_setup(soc, &soc->reo_status_ring, REO_STATUS, 0, 0,
2391 		wlan_cfg_get_dp_soc_reo_status_ring_size(soc_cfg_ctx))) {
2392 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2393 			FL("dp_srng_setup failed for reo_status_ring"));
2394 		goto fail1;
2395 	}
2396 
2397 	qdf_spinlock_create(&soc->ast_lock);
2398 	dp_soc_wds_attach(soc);
2399 
2400 	/* Reset the cpu ring map if radio is NSS offloaded */
2401 	if (wlan_cfg_get_dp_soc_nss_cfg(soc_cfg_ctx)) {
2402 		dp_soc_reset_cpu_ring_map(soc);
2403 		dp_soc_reset_intr_mask(soc);
2404 	}
2405 
2406 	/* Setup HW REO */
2407 	qdf_mem_zero(&reo_params, sizeof(reo_params));
2408 
2409 	if (wlan_cfg_is_rx_hash_enabled(soc_cfg_ctx)) {
2410 
2411 		/*
2412 		 * Reo ring remap is not required if both radios
2413 		 * are offloaded to NSS
2414 		 */
2415 		if (!dp_reo_remap_config(soc,
2416 					&reo_params.remap1,
2417 					&reo_params.remap2))
2418 			goto out;
2419 
2420 		reo_params.rx_hash_enabled = true;
2421 	}
2422 
2423 	/* setup the global rx defrag waitlist */
2424 	TAILQ_INIT(&soc->rx.defrag.waitlist);
2425 	soc->rx.defrag.timeout_ms =
2426 		wlan_cfg_get_rx_defrag_min_timeout(soc_cfg_ctx);
2427 	soc->rx.flags.defrag_timeout_check =
2428 		wlan_cfg_get_defrag_timeout_check(soc_cfg_ctx);
2429 	qdf_spinlock_create(&soc->rx.defrag.defrag_lock);
2430 
2431 out:
2432 	/*
2433 	 * set the fragment destination ring
2434 	 */
2435 	dp_reo_frag_dst_set(soc, &reo_params.frag_dst_ring);
2436 
2437 	hal_reo_setup(soc->hal_soc, &reo_params);
2438 
2439 	qdf_atomic_set(&soc->cmn_init_done, 1);
2440 	qdf_nbuf_queue_init(&soc->htt_stats.msg);
2441 	return 0;
2442 fail1:
2443 	/*
2444 	 * Cleanup will be done as part of soc_detach, which will
2445 	 * be called on pdev attach failure
2446 	 */
2447 	return QDF_STATUS_E_FAILURE;
2448 }
2449 
2450 static void dp_pdev_detach_wifi3(struct cdp_pdev *txrx_pdev, int force);
2451 
2452 static void dp_lro_hash_setup(struct dp_soc *soc)
2453 {
2454 	struct cdp_lro_hash_config lro_hash;
2455 
2456 	if (!wlan_cfg_is_lro_enabled(soc->wlan_cfg_ctx) &&
2457 		!wlan_cfg_is_rx_hash_enabled(soc->wlan_cfg_ctx)) {
2458 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2459 			 FL("LRO and RX hash are both disabled"));
2460 		return;
2461 	}
2462 
2463 	qdf_mem_zero(&lro_hash, sizeof(lro_hash));
2464 
2465 	if (wlan_cfg_is_lro_enabled(soc->wlan_cfg_ctx)) {
2466 		lro_hash.lro_enable = 1;
2467 		lro_hash.tcp_flag = QDF_TCPHDR_ACK;
2468 		lro_hash.tcp_flag_mask = QDF_TCPHDR_FIN | QDF_TCPHDR_SYN |
2469 			 QDF_TCPHDR_RST | QDF_TCPHDR_ACK | QDF_TCPHDR_URG |
2470 			 QDF_TCPHDR_ECE | QDF_TCPHDR_CWR;
2471 	}
2472 
2473 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW, FL("enabled"));
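	/* Seed the Toeplitz hash keys for IPv4 and IPv6 flows with random data */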
2474 	qdf_get_random_bytes(lro_hash.toeplitz_hash_ipv4,
2475 		 (sizeof(lro_hash.toeplitz_hash_ipv4[0]) *
2476 		 LRO_IPV4_SEED_ARR_SZ));
2477 	qdf_get_random_bytes(lro_hash.toeplitz_hash_ipv6,
2478 		 (sizeof(lro_hash.toeplitz_hash_ipv6[0]) *
2479 		 LRO_IPV6_SEED_ARR_SZ));
2480 
2481 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW,
2482 		 "lro_hash: lro_enable: 0x%x tcp_flag 0x%x tcp_flag_mask 0x%x",
2483 		 lro_hash.lro_enable, lro_hash.tcp_flag,
2484 		 lro_hash.tcp_flag_mask);
2485 
2486 	qdf_trace_hex_dump(QDF_MODULE_ID_DP,
2487 		 QDF_TRACE_LEVEL_ERROR,
2488 		 (void *)lro_hash.toeplitz_hash_ipv4,
2489 		 (sizeof(lro_hash.toeplitz_hash_ipv4[0]) *
2490 		 LRO_IPV4_SEED_ARR_SZ));
2491 
2492 	qdf_trace_hex_dump(QDF_MODULE_ID_DP,
2493 		 QDF_TRACE_LEVEL_ERROR,
2494 		 (void *)lro_hash.toeplitz_hash_ipv6,
2495 		 (sizeof(lro_hash.toeplitz_hash_ipv6[0]) *
2496 		 LRO_IPV6_SEED_ARR_SZ));
2497 
2498 	qdf_assert(soc->cdp_soc.ol_ops->lro_hash_config);
2499 
2500 	if (soc->cdp_soc.ol_ops->lro_hash_config)
2501 		(void)soc->cdp_soc.ol_ops->lro_hash_config
2502 			(soc->ctrl_psoc, &lro_hash);
2503 }
2504 
2505 /*
2506 * dp_rxdma_ring_setup() - configure the RX DMA rings
2507 * @soc: data path SoC handle
2508 * @pdev: Physical device handle
2509 *
2510 * Return: 0 - success, > 0 - failure
2511 */
2512 #ifdef QCA_HOST2FW_RXBUF_RING
2513 static int dp_rxdma_ring_setup(struct dp_soc *soc,
2514 	 struct dp_pdev *pdev)
2515 {
2516 	struct wlan_cfg_dp_pdev_ctxt *pdev_cfg_ctx;
2517 	int max_mac_rings;
2518 	int i;
2519 
2520 	pdev_cfg_ctx = pdev->wlan_cfg_ctx;
2521 	max_mac_rings = wlan_cfg_get_num_mac_rings(pdev_cfg_ctx);
2522 
2523 	for (i = 0; i < max_mac_rings; i++) {
2524 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
2525 			 "%s: pdev_id %d mac_id %d",
2526 			 __func__, pdev->pdev_id, i);
2527 		if (dp_srng_setup(soc, &pdev->rx_mac_buf_ring[i],
2528 			RXDMA_BUF, 1, i,
2529 			wlan_cfg_get_rx_dma_buf_ring_size(pdev_cfg_ctx))) {
2530 			QDF_TRACE(QDF_MODULE_ID_DP,
2531 				 QDF_TRACE_LEVEL_ERROR,
2532 				 FL("failed rx mac ring setup"));
2533 			return QDF_STATUS_E_FAILURE;
2534 		}
2535 	}
2536 	return QDF_STATUS_SUCCESS;
2537 }
2538 #else
2539 static int dp_rxdma_ring_setup(struct dp_soc *soc,
2540 	 struct dp_pdev *pdev)
2541 {
2542 	return QDF_STATUS_SUCCESS;
2543 }
2544 #endif
2545 
2546 /**
2547  * dp_dscp_tid_map_setup(): Initialize the dscp-tid maps
2548  * @pdev - DP_PDEV handle
2549  *
2550  * Return: void
2551  */
2552 static inline void
2553 dp_dscp_tid_map_setup(struct dp_pdev *pdev)
2554 {
2555 	uint8_t map_id;
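	/* Initialise all SW DSCP-TID maps with the default mapping */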
2556 	for (map_id = 0; map_id < DP_MAX_TID_MAPS; map_id++) {
2557 		qdf_mem_copy(pdev->dscp_tid_map[map_id], default_dscp_tid_map,
2558 				sizeof(default_dscp_tid_map));
2559 	}
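	/* Program the default maps into the HW DSCP-TID map registers */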
2560 	for (map_id = 0; map_id < HAL_MAX_HW_DSCP_TID_MAPS; map_id++) {
2561 		hal_tx_set_dscp_tid_map(pdev->soc->hal_soc,
2562 				pdev->dscp_tid_map[map_id],
2563 				map_id);
2564 	}
2565 }
2566 
2567 #ifdef QCA_SUPPORT_SON
2568 /**
2569  * dp_mark_peer_inact(): Update peer inactivity status
2570  * @peer_handle - datapath peer handle
 * @inactive - true to mark the peer inactive, false otherwise
2571  *
2572  * Return: void
2573  */
2574 void dp_mark_peer_inact(void *peer_handle, bool inactive)
2575 {
2576 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
2577 	struct dp_pdev *pdev;
2578 	struct dp_soc *soc;
2579 	bool inactive_old;
2580 
2581 	if (!peer)
2582 		return;
2583 
2584 	pdev = peer->vdev->pdev;
2585 	soc = pdev->soc;
2586 
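	/*
	 * Remember the previous state so that the control-path callback is
	 * invoked only on an actual active <-> inactive transition.
	 */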
2587 	inactive_old = peer->peer_bs_inact_flag == 1;
2588 	if (!inactive)
2589 		peer->peer_bs_inact = soc->pdev_bs_inact_reload;
2590 	peer->peer_bs_inact_flag = inactive ? 1 : 0;
2591 
2592 	if (inactive_old != inactive) {
2593 		/**
2594 		 * Note: a node lookup can happen in RX datapath context
2595 		 * when a node changes from inactive to active (at most once
2596 		 * per inactivity timeout threshold)
2597 		 */
2598 		if (soc->cdp_soc.ol_ops->record_act_change) {
2599 			soc->cdp_soc.ol_ops->record_act_change(
2600 					(void *)pdev->ctrl_pdev,
2601 					peer->mac_addr.raw, !inactive);
2602 		}
2603 	}
2604 }
2605 
2606 /**
2607  * dp_txrx_peer_find_inact_timeout_handler(): Inactivity timeout function
2608  *
2609  * Periodically checks the inactivity status
2610  */
2611 static os_timer_func(dp_txrx_peer_find_inact_timeout_handler)
2612 {
2613 	struct dp_pdev *pdev;
2614 	struct dp_vdev *vdev;
2615 	struct dp_peer *peer;
2616 	struct dp_soc *soc;
2617 	int i;
2618 
2619 	OS_GET_TIMER_ARG(soc, struct dp_soc *);
2620 
2621 	qdf_spin_lock(&soc->peer_ref_mutex);
2622 
2623 	for (i = 0; i < soc->pdev_count; i++) {
2624 		pdev = soc->pdev_list[i];
2625 		qdf_spin_lock_bh(&pdev->vdev_list_lock);
2626 		TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
2627 			if (vdev->opmode != wlan_op_mode_ap)
2628 				continue;
2629 
2630 			TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
2631 				if (!peer->authorize) {
2632 					/*
2633 					 * Inactivity check is only interested
2634 					 * in connected (authorized) nodes
2635 					 */
2636 					continue;
2637 				}
2638 				if (peer->peer_bs_inact > soc->pdev_bs_inact_reload) {
2639 					/*
2640 					 * This check ensures we do not wait extra long
2641 					 * due to the potential race condition
2642 					 */
2643 					peer->peer_bs_inact = soc->pdev_bs_inact_reload;
2644 				}
2645 				if (peer->peer_bs_inact > 0) {
2646 					/* Do not let it wrap around */
2647 					peer->peer_bs_inact--;
2648 				}
2649 				if (peer->peer_bs_inact == 0)
2650 					dp_mark_peer_inact(peer, true);
2651 			}
2652 		}
2653 		qdf_spin_unlock_bh(&pdev->vdev_list_lock);
2654 	}
2655 
2656 	qdf_spin_unlock(&soc->peer_ref_mutex);
2657 	qdf_timer_mod(&soc->pdev_bs_inact_timer,
2658 		      soc->pdev_bs_inact_interval * 1000);
2659 }
2660 
2661 
2662 /**
2663  * dp_free_inact_timer(): free inact timer
2664  * @soc - Datapath SOC handle
2665  *
2666  * Return: void
2667  */
2668 void dp_free_inact_timer(struct dp_soc *soc)
2669 {
2670 	qdf_timer_free(&soc->pdev_bs_inact_timer);
2671 }
2672 #else
2673 
2674 void dp_mark_peer_inact(void *peer, bool inactive)
2675 {
2676 	return;
2677 }
2678 
2679 void dp_free_inact_timer(struct dp_soc *soc)
2680 {
2681 	return;
2682 }
2683 
2684 #endif
2685 
2686 #ifdef IPA_OFFLOAD
2687 /**
2688  * dp_setup_ipa_rx_refill_buf_ring - Setup second Rx refill buffer ring
2689  * @soc: data path instance
2690  * @pdev: core txrx pdev context
2691  *
2692  * Return: QDF_STATUS_SUCCESS: success
2693  *         QDF_STATUS_E_FAILURE: Error return
2694  */
2695 static int dp_setup_ipa_rx_refill_buf_ring(struct dp_soc *soc,
2696 					   struct dp_pdev *pdev)
2697 {
2698 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
2699 	int entries;
2700 
2701 	soc_cfg_ctx = soc->wlan_cfg_ctx;
2702 	entries = wlan_cfg_get_dp_soc_rxdma_refill_ring_size(soc_cfg_ctx);
2703 
2704 	/* Setup second Rx refill buffer ring */
2705 	if (dp_srng_setup(soc, &pdev->rx_refill_buf_ring2, RXDMA_BUF,
2706 			  IPA_RX_REFILL_BUF_RING_IDX,
2707 			  pdev->pdev_id,
2708 			  entries)) {
2709 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2710 			FL("dp_srng_setup failed second rx refill ring"));
2711 		return QDF_STATUS_E_FAILURE;
2712 	}
2713 	return QDF_STATUS_SUCCESS;
2714 }
2715 
2716 /**
2717  * dp_cleanup_ipa_rx_refill_buf_ring - Cleanup second Rx refill buffer ring
2718  * @soc: data path instance
2719  * @pdev: core txrx pdev context
2720  *
2721  * Return: void
2722  */
2723 static void dp_cleanup_ipa_rx_refill_buf_ring(struct dp_soc *soc,
2724 					      struct dp_pdev *pdev)
2725 {
2726 	dp_srng_cleanup(soc, &pdev->rx_refill_buf_ring2, RXDMA_BUF,
2727 			IPA_RX_REFILL_BUF_RING_IDX);
2728 }
2729 
2730 #else
2731 static int dp_setup_ipa_rx_refill_buf_ring(struct dp_soc *soc,
2732 					   struct dp_pdev *pdev)
2733 {
2734 	return QDF_STATUS_SUCCESS;
2735 }
2736 
2737 static void dp_cleanup_ipa_rx_refill_buf_ring(struct dp_soc *soc,
2738 					      struct dp_pdev *pdev)
2739 {
2740 }
2741 #endif
2742 
2743 #ifndef QCA_WIFI_QCA6390
2744 static
2745 QDF_STATUS dp_mon_rings_setup(struct dp_soc *soc, struct dp_pdev *pdev)
2746 {
2747 	int mac_id = 0;
2748 	int pdev_id = pdev->pdev_id;
2749 	int entries;
2750 	struct wlan_cfg_dp_pdev_ctxt *pdev_cfg_ctx;
2751 
2752 	pdev_cfg_ctx = pdev->wlan_cfg_ctx;
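	/*
	 * Set up the monitor buffer, destination, status and descriptor
	 * rings for each RxDMA MAC serviced by this pdev.
	 */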
2753 
2754 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
2755 		int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, pdev_id);
2756 
2757 		entries = wlan_cfg_get_dma_mon_buf_ring_size(pdev_cfg_ctx);
2758 		if (dp_srng_setup(soc, &pdev->rxdma_mon_buf_ring[mac_id],
2759 				  RXDMA_MONITOR_BUF, 0, mac_for_pdev,
2760 				  entries)) {
2761 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2762 				  FL(RNG_ERR "rxdma_mon_buf_ring "));
2763 			return QDF_STATUS_E_NOMEM;
2764 		}
2765 
2766 		entries = wlan_cfg_get_dma_mon_dest_ring_size(pdev_cfg_ctx);
2767 		if (dp_srng_setup(soc, &pdev->rxdma_mon_dst_ring[mac_id],
2768 				  RXDMA_MONITOR_DST, 0, mac_for_pdev,
2769 				  entries)) {
2770 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2771 				  FL(RNG_ERR "rxdma_mon_dst_ring"));
2772 			return QDF_STATUS_E_NOMEM;
2773 		}
2774 
2775 		entries = wlan_cfg_get_dma_mon_stat_ring_size(pdev_cfg_ctx);
2776 		if (dp_srng_setup(soc, &pdev->rxdma_mon_status_ring[mac_id],
2777 				  RXDMA_MONITOR_STATUS, 0, mac_for_pdev,
2778 				  entries)) {
2779 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2780 				  FL(RNG_ERR "rxdma_mon_status_ring"));
2781 			return QDF_STATUS_E_NOMEM;
2782 		}
2783 
2784 		entries = wlan_cfg_get_dma_mon_desc_ring_size(pdev_cfg_ctx);
2785 		if (dp_srng_setup(soc, &pdev->rxdma_mon_desc_ring[mac_id],
2786 				  RXDMA_MONITOR_DESC, 0, mac_for_pdev,
2787 				  entries)) {
2788 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2789 				  FL(RNG_ERR "rxdma_mon_desc_ring"));
2790 			return QDF_STATUS_E_NOMEM;
2791 		}
2792 	}
2793 	return QDF_STATUS_SUCCESS;
2794 }
2795 #else
2796 static QDF_STATUS dp_mon_rings_setup(struct dp_soc *soc, struct dp_pdev *pdev)
2797 {
2798 	return QDF_STATUS_SUCCESS;
2799 }
2800 #endif
2801 
2802 /*
2803 * dp_pdev_attach_wifi3() - attach txrx pdev
2804 * @txrx_soc: Datapath SOC handle
2805 * @ctrl_pdev: Opaque PDEV object
2806 * @htc_handle: HTC handle for host-target interface
2807 * @qdf_osdev: QDF OS device
2808 * @pdev_id: PDEV ID
2809 *
2810 * Return: DP PDEV handle on success, NULL on failure
2811 */
2812 static struct cdp_pdev *dp_pdev_attach_wifi3(struct cdp_soc_t *txrx_soc,
2813 	struct cdp_ctrl_objmgr_pdev *ctrl_pdev,
2814 	HTC_HANDLE htc_handle, qdf_device_t qdf_osdev, uint8_t pdev_id)
2815 {
2816 	int tx_ring_size;
2817 	int tx_comp_ring_size;
2818 	int reo_dst_ring_size;
2819 	int entries;
2820 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
2821 	int nss_cfg;
2822 
2823 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
2824 	struct dp_pdev *pdev = qdf_mem_malloc(sizeof(*pdev));
2825 
2826 	if (!pdev) {
2827 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2828 			FL("DP PDEV memory allocation failed"));
2829 		goto fail0;
2830 	}
2831 
2832 	soc_cfg_ctx = soc->wlan_cfg_ctx;
2833 	pdev->wlan_cfg_ctx = wlan_cfg_pdev_attach(soc->ctrl_psoc);
2834 
2835 	if (!pdev->wlan_cfg_ctx) {
2836 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2837 			FL("pdev cfg_attach failed"));
2838 
2839 		qdf_mem_free(pdev);
2840 		goto fail0;
2841 	}
2842 
2843 	/*
2844 	 * set nss pdev config based on soc config
2845 	 */
2846 	nss_cfg = wlan_cfg_get_dp_soc_nss_cfg(soc_cfg_ctx);
2847 	wlan_cfg_set_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx,
2848 			(nss_cfg & (1 << pdev_id)));
2849 
2850 	pdev->soc = soc;
2851 	pdev->ctrl_pdev = ctrl_pdev;
2852 	pdev->pdev_id = pdev_id;
2853 	soc->pdev_list[pdev_id] = pdev;
2854 	soc->pdev_count++;
2855 
2856 	TAILQ_INIT(&pdev->vdev_list);
2857 	qdf_spinlock_create(&pdev->vdev_list_lock);
2858 	pdev->vdev_count = 0;
2859 
2860 	qdf_spinlock_create(&pdev->tx_mutex);
2861 	qdf_spinlock_create(&pdev->neighbour_peer_mutex);
2862 	TAILQ_INIT(&pdev->neighbour_peers_list);
2863 	pdev->neighbour_peers_added = false;
2864 
2865 	if (dp_soc_cmn_setup(soc)) {
2866 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2867 			FL("dp_soc_cmn_setup failed"));
2868 		goto fail1;
2869 	}
2870 
2871 	/* Setup per PDEV TCL rings if configured */
2872 	if (wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
2873 		tx_ring_size =
2874 			wlan_cfg_tx_ring_size(soc_cfg_ctx);
2875 		tx_comp_ring_size =
2876 			wlan_cfg_tx_comp_ring_size(soc_cfg_ctx);
2877 
2878 		if (dp_srng_setup(soc, &soc->tcl_data_ring[pdev_id], TCL_DATA,
2879 			pdev_id, pdev_id, tx_ring_size)) {
2880 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2881 				FL("dp_srng_setup failed for tcl_data_ring"));
2882 			goto fail1;
2883 		}
2884 		if (dp_srng_setup(soc, &soc->tx_comp_ring[pdev_id],
2885 			WBM2SW_RELEASE, pdev_id, pdev_id, tx_comp_ring_size)) {
2886 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2887 				FL("dp_srng_setup failed for tx_comp_ring"));
2888 			goto fail1;
2889 		}
2890 		soc->num_tcl_data_rings++;
2891 	}
2892 
2893 	/* Tx specific init */
2894 	if (dp_tx_pdev_attach(pdev)) {
2895 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2896 			FL("dp_tx_pdev_attach failed"));
2897 		goto fail1;
2898 	}
2899 
2900 	reo_dst_ring_size = wlan_cfg_get_reo_dst_ring_size(soc->wlan_cfg_ctx);
2901 	/* Setup per PDEV REO rings if configured */
2902 	if (wlan_cfg_per_pdev_rx_ring(soc_cfg_ctx)) {
2903 		if (dp_srng_setup(soc, &soc->reo_dest_ring[pdev_id], REO_DST,
2904 			pdev_id, pdev_id, reo_dst_ring_size)) {
2905 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2906 				FL("dp_srng_setup failed for reo_dest_ring"));
2907 			goto fail1;
2908 		}
2909 		soc->num_reo_dest_rings++;
2910 
2911 	}
2912 	if (dp_srng_setup(soc, &pdev->rx_refill_buf_ring, RXDMA_BUF, 0, pdev_id,
2913 		wlan_cfg_get_dp_soc_rxdma_refill_ring_size(soc_cfg_ctx))) {
2914 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2915 			 FL("dp_srng_setup failed rx refill ring"));
2916 		goto fail1;
2917 	}
2918 
2919 	if (dp_rxdma_ring_setup(soc, pdev)) {
2920 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2921 			 FL("RXDMA ring config failed"));
2922 		goto fail1;
2923 	}
2924 
2925 	if (dp_mon_rings_setup(soc, pdev)) {
2926 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2927 			  FL("MONITOR rings setup failed"));
2928 		goto fail1;
2929 	}
2930 
2931 	entries = wlan_cfg_get_dp_soc_rxdma_err_dst_ring_size(soc_cfg_ctx);
2932 	if (wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx)) {
2933 		if (dp_srng_setup(soc, &pdev->rxdma_err_dst_ring[0], RXDMA_DST,
2934 				  0, pdev_id,
2935 				  entries)) {
2936 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2937 				  FL(RNG_ERR "rxdma_err_dst_ring"));
2938 			goto fail1;
2939 		}
2940 	}
2941 
2942 	if (dp_setup_ipa_rx_refill_buf_ring(soc, pdev))
2943 		goto fail1;
2944 
2945 	if (dp_ipa_ring_resource_setup(soc, pdev))
2946 		goto fail1;
2947 
2948 	if (dp_ipa_uc_attach(soc, pdev) != QDF_STATUS_SUCCESS) {
2949 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2950 			FL("dp_ipa_uc_attach failed"));
2951 		goto fail1;
2952 	}
2953 
2954 	/* Rx specific init */
2955 	if (dp_rx_pdev_attach(pdev)) {
2956 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2957 			FL("dp_rx_pdev_attach failed"));
2958 		goto fail0;
2959 	}
2960 	DP_STATS_INIT(pdev);
2961 
2962 	/* Monitor filter init */
2963 	pdev->mon_filter_mode = MON_FILTER_ALL;
2964 	pdev->fp_mgmt_filter = FILTER_MGMT_ALL;
2965 	pdev->fp_ctrl_filter = FILTER_CTRL_ALL;
2966 	pdev->fp_data_filter = FILTER_DATA_ALL;
2967 	pdev->mo_mgmt_filter = FILTER_MGMT_ALL;
2968 	pdev->mo_ctrl_filter = FILTER_CTRL_ALL;
2969 	pdev->mo_data_filter = FILTER_DATA_ALL;
2970 
2971 	dp_local_peer_id_pool_init(pdev);
2972 
2973 	dp_dscp_tid_map_setup(pdev);
2974 
2975 	/* Rx monitor mode specific init */
2976 	if (dp_rx_pdev_mon_attach(pdev)) {
2977 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
2978 				"dp_rx_pdev_mon_attach failed");
2979 		goto fail1;
2980 	}
2981 
2982 	if (dp_wdi_event_attach(pdev)) {
2983 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
2984 				"dp_wdi_event_attach failed");
2985 		goto fail1;
2986 	}
2987 
2988 	/* set the reo destination during initialization */
2989 	pdev->reo_dest = pdev->pdev_id + 1;
2990 
2991 	/*
2992 	 * initialize ppdu tlv list
2993 	 */
2994 	TAILQ_INIT(&pdev->ppdu_info_list);
2995 	pdev->tlv_count = 0;
2996 	pdev->list_depth = 0;
2997 
2998 	return (struct cdp_pdev *)pdev;
2999 
3000 fail1:
3001 	dp_pdev_detach_wifi3((struct cdp_pdev *)pdev, 0);
3002 
3003 fail0:
3004 	return NULL;
3005 }
3006 
3007 /*
3008 * dp_rxdma_ring_cleanup() - clean up the RX DMA rings
3009 * @soc: data path SoC handle
3010 * @pdev: Physical device handle
3011 *
3012 * Return: void
3013 */
3014 #ifdef QCA_HOST2FW_RXBUF_RING
3015 static void dp_rxdma_ring_cleanup(struct dp_soc *soc,
3016 	 struct dp_pdev *pdev)
3017 {
3018 	int max_mac_rings =
3019 		 wlan_cfg_get_num_mac_rings(pdev->wlan_cfg_ctx);
3020 	int i;
3021 
3022 	max_mac_rings = max_mac_rings < MAX_RX_MAC_RINGS ?
3023 				max_mac_rings : MAX_RX_MAC_RINGS;
3024 	for (i = 0; i < max_mac_rings; i++)
3025 		dp_srng_cleanup(soc, &pdev->rx_mac_buf_ring[i],
3026 			 RXDMA_BUF, 1);
3027 
3028 	qdf_timer_free(&soc->mon_reap_timer);
3029 }
3030 #else
3031 static void dp_rxdma_ring_cleanup(struct dp_soc *soc,
3032 	 struct dp_pdev *pdev)
3033 {
3034 }
3035 #endif
3036 
3037 /*
3038  * dp_neighbour_peers_detach() - Detach neighbour peers(nac clients)
3039  * @pdev: device object
3040  *
3041  * Return: void
3042  */
3043 static void dp_neighbour_peers_detach(struct dp_pdev *pdev)
3044 {
3045 	struct dp_neighbour_peer *peer = NULL;
3046 	struct dp_neighbour_peer *temp_peer = NULL;
3047 
3048 	TAILQ_FOREACH_SAFE(peer, &pdev->neighbour_peers_list,
3049 			neighbour_peer_list_elem, temp_peer) {
3050 		/* delete this peer from the list */
3051 		TAILQ_REMOVE(&pdev->neighbour_peers_list,
3052 				peer, neighbour_peer_list_elem);
3053 		qdf_mem_free(peer);
3054 	}
3055 
3056 	qdf_spinlock_destroy(&pdev->neighbour_peer_mutex);
3057 }
3058 
3059 /**
3060 * dp_htt_ppdu_stats_detach() - detach stats resources
3061 * @pdev: Datapath PDEV handle
3062 *
3063 * Return: void
3064 */
3065 static void dp_htt_ppdu_stats_detach(struct dp_pdev *pdev)
3066 {
3067 	struct ppdu_info *ppdu_info, *ppdu_info_next;
3068 
3069 	TAILQ_FOREACH_SAFE(ppdu_info, &pdev->ppdu_info_list,
3070 			ppdu_info_list_elem, ppdu_info_next) {
3071 		if (!ppdu_info)
3072 			break;
3073 		qdf_assert_always(ppdu_info->nbuf);
3074 		qdf_nbuf_free(ppdu_info->nbuf);
3075 		qdf_mem_free(ppdu_info);
3076 	}
3077 }
3078 
3079 #ifndef QCA_WIFI_QCA6390
3080 static
3081 void dp_mon_ring_deinit(struct dp_soc *soc, struct dp_pdev *pdev,
3082 			int mac_id)
3083 {
3084 		dp_srng_cleanup(soc, &pdev->rxdma_mon_buf_ring[mac_id],
3085 				RXDMA_MONITOR_BUF, 0);
3086 		dp_srng_cleanup(soc, &pdev->rxdma_mon_dst_ring[mac_id],
3087 				RXDMA_MONITOR_DST, 0);
3088 
3089 		dp_srng_cleanup(soc, &pdev->rxdma_mon_status_ring[mac_id],
3090 				RXDMA_MONITOR_STATUS, 0);
3091 
3092 		dp_srng_cleanup(soc, &pdev->rxdma_mon_desc_ring[mac_id],
3093 				RXDMA_MONITOR_DESC, 0);
3094 		dp_srng_cleanup(soc, &pdev->rxdma_err_dst_ring[mac_id],
3095 				RXDMA_DST, 0);
3096 }
3097 #else
3098 static void dp_mon_ring_deinit(struct dp_soc *soc, struct dp_pdev *pdev,
3099 			       int mac_id)
3100 {
3101 }
3102 #endif
3103 
3104 /*
3105 * dp_pdev_detach_wifi3() - detach txrx pdev
3106 * @txrx_pdev: Datapath PDEV handle
3107 * @force: Force detach
3108 *
3109 */
3110 static void dp_pdev_detach_wifi3(struct cdp_pdev *txrx_pdev, int force)
3111 {
3112 	struct dp_pdev *pdev = (struct dp_pdev *)txrx_pdev;
3113 	struct dp_soc *soc = pdev->soc;
3114 	qdf_nbuf_t curr_nbuf, next_nbuf;
3115 	int mac_id;
3116 
3117 	dp_wdi_event_detach(pdev);
3118 
3119 	dp_tx_pdev_detach(pdev);
3120 
3121 	if (wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
3122 		dp_srng_cleanup(soc, &soc->tcl_data_ring[pdev->pdev_id],
3123 			TCL_DATA, pdev->pdev_id);
3124 		dp_srng_cleanup(soc, &soc->tx_comp_ring[pdev->pdev_id],
3125 			WBM2SW_RELEASE, pdev->pdev_id);
3126 	}
3127 
3128 	dp_pktlogmod_exit(pdev);
3129 
3130 	dp_rx_pdev_detach(pdev);
3131 	dp_rx_pdev_mon_detach(pdev);
3132 	dp_neighbour_peers_detach(pdev);
3133 	qdf_spinlock_destroy(&pdev->tx_mutex);
3134 	qdf_spinlock_destroy(&pdev->vdev_list_lock);
3135 
3136 	dp_ipa_uc_detach(soc, pdev);
3137 
3138 	dp_cleanup_ipa_rx_refill_buf_ring(soc, pdev);
3139 
3140 	/* Cleanup per PDEV REO rings if configured */
3141 	if (wlan_cfg_per_pdev_rx_ring(soc->wlan_cfg_ctx)) {
3142 		dp_srng_cleanup(soc, &soc->reo_dest_ring[pdev->pdev_id],
3143 			REO_DST, pdev->pdev_id);
3144 	}
3145 
3146 	dp_srng_cleanup(soc, &pdev->rx_refill_buf_ring, RXDMA_BUF, 0);
3147 
3148 	dp_rxdma_ring_cleanup(soc, pdev);
3149 
3150 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
3151 		dp_mon_ring_deinit(soc, pdev, mac_id);
3152 		dp_srng_cleanup(soc, &pdev->rxdma_err_dst_ring[mac_id],
3153 			RXDMA_DST, 0);
3154 	}
3155 
3156 	curr_nbuf = pdev->invalid_peer_head_msdu;
3157 	while (curr_nbuf) {
3158 		next_nbuf = qdf_nbuf_next(curr_nbuf);
3159 		qdf_nbuf_free(curr_nbuf);
3160 		curr_nbuf = next_nbuf;
3161 	}
3162 
3163 	dp_htt_ppdu_stats_detach(pdev);
3164 
3165 	soc->pdev_list[pdev->pdev_id] = NULL;
3166 	soc->pdev_count--;
3167 	wlan_cfg_pdev_detach(pdev->wlan_cfg_ctx);
3168 	qdf_mem_free(pdev->dp_txrx_handle);
3169 	qdf_mem_free(pdev);
3170 }
3171 
3172 /*
3173  * dp_reo_desc_freelist_destroy() - Flush REO descriptors from deferred freelist
3174  * @soc: DP SOC handle
3175  */
3176 static inline void dp_reo_desc_freelist_destroy(struct dp_soc *soc)
3177 {
3178 	struct reo_desc_list_node *desc;
3179 	struct dp_rx_tid *rx_tid;
3180 
3181 	qdf_spin_lock_bh(&soc->reo_desc_freelist_lock);
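	/*
	 * Drain the deferred freelist: unmap and free every queued REO
	 * queue descriptor before destroying the list and its lock.
	 */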
3182 	while (qdf_list_remove_front(&soc->reo_desc_freelist,
3183 		(qdf_list_node_t **)&desc) == QDF_STATUS_SUCCESS) {
3184 		rx_tid = &desc->rx_tid;
3185 		qdf_mem_unmap_nbytes_single(soc->osdev,
3186 			rx_tid->hw_qdesc_paddr,
3187 			QDF_DMA_BIDIRECTIONAL,
3188 			rx_tid->hw_qdesc_alloc_size);
3189 		qdf_mem_free(rx_tid->hw_qdesc_vaddr_unaligned);
3190 		qdf_mem_free(desc);
3191 	}
3192 	qdf_spin_unlock_bh(&soc->reo_desc_freelist_lock);
3193 	qdf_list_destroy(&soc->reo_desc_freelist);
3194 	qdf_spinlock_destroy(&soc->reo_desc_freelist_lock);
3195 }
3196 
3197 /*
3198  * dp_soc_detach_wifi3() - Detach txrx SOC
3199  * @txrx_soc: DP SOC handle, struct cdp_soc_t is first element of struct dp_soc.
3200  */
3201 static void dp_soc_detach_wifi3(void *txrx_soc)
3202 {
3203 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
3204 	int i;
3205 
3206 	qdf_atomic_set(&soc->cmn_init_done, 0);
3207 
3208 	qdf_flush_work(&soc->htt_stats.work);
3209 	qdf_disable_work(&soc->htt_stats.work);
3210 
3211 	/* Free pending htt stats messages */
3212 	qdf_nbuf_queue_free(&soc->htt_stats.msg);
3213 
3214 	dp_free_inact_timer(soc);
3215 
3216 	for (i = 0; i < MAX_PDEV_CNT; i++) {
3217 		if (soc->pdev_list[i])
3218 			dp_pdev_detach_wifi3(
3219 				(struct cdp_pdev *)soc->pdev_list[i], 1);
3220 	}
3221 
3222 	dp_peer_find_detach(soc);
3223 
3224 	/* TBD: Call Tx and Rx cleanup functions to free buffers and
3225 	 * SW descriptors
3226 	 */
3227 
3228 	/* Free the ring memories */
3229 	/* Common rings */
3230 	dp_srng_cleanup(soc, &soc->wbm_desc_rel_ring, SW2WBM_RELEASE, 0);
3231 
3232 	dp_tx_soc_detach(soc);
3233 	/* Tx data rings */
3234 	if (!wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
3235 		for (i = 0; i < soc->num_tcl_data_rings; i++) {
3236 			dp_srng_cleanup(soc, &soc->tcl_data_ring[i],
3237 				TCL_DATA, i);
3238 			dp_srng_cleanup(soc, &soc->tx_comp_ring[i],
3239 				WBM2SW_RELEASE, i);
3240 		}
3241 	}
3242 
3243 	/* TCL command and status rings */
3244 	dp_srng_cleanup(soc, &soc->tcl_cmd_ring, TCL_CMD, 0);
3245 	dp_srng_cleanup(soc, &soc->tcl_status_ring, TCL_STATUS, 0);
3246 
3247 	/* Rx data rings */
3248 	if (!wlan_cfg_per_pdev_rx_ring(soc->wlan_cfg_ctx)) {
3249 		soc->num_reo_dest_rings =
3250 			wlan_cfg_num_reo_dest_rings(soc->wlan_cfg_ctx);
3251 		for (i = 0; i < soc->num_reo_dest_rings; i++) {
3252 			/* TODO: Get number of rings and ring sizes
3253 			 * from wlan_cfg
3254 			 */
3255 			dp_srng_cleanup(soc, &soc->reo_dest_ring[i],
3256 				REO_DST, i);
3257 		}
3258 	}
3259 	/* REO reinjection ring */
3260 	dp_srng_cleanup(soc, &soc->reo_reinject_ring, REO_REINJECT, 0);
3261 
3262 	/* Rx release ring */
3263 	dp_srng_cleanup(soc, &soc->rx_rel_ring, WBM2SW_RELEASE, 0);
3264 
3265 	/* Rx exception ring */
3266 	/* TODO: Better to store ring_type and ring_num in
3267 	 * dp_srng during setup
3268 	 */
3269 	dp_srng_cleanup(soc, &soc->reo_exception_ring, REO_EXCEPTION, 0);
3270 
3271 	/* REO command and status rings */
3272 	dp_srng_cleanup(soc, &soc->reo_cmd_ring, REO_CMD, 0);
3273 	dp_srng_cleanup(soc, &soc->reo_status_ring, REO_STATUS, 0);
3274 	dp_hw_link_desc_pool_cleanup(soc);
3275 
3276 	qdf_spinlock_destroy(&soc->peer_ref_mutex);
3277 	qdf_spinlock_destroy(&soc->htt_stats.lock);
3278 
3279 	htt_soc_detach(soc->htt_handle);
3280 
3281 	qdf_spinlock_destroy(&soc->rx.defrag.defrag_lock);
3282 
3283 	dp_reo_cmdlist_destroy(soc);
3284 	qdf_spinlock_destroy(&soc->rx.reo_cmd_lock);
3285 	dp_reo_desc_freelist_destroy(soc);
3286 
3287 	wlan_cfg_soc_detach(soc->wlan_cfg_ctx);
3288 
3289 	dp_soc_wds_detach(soc);
3290 	qdf_spinlock_destroy(&soc->ast_lock);
3291 
3292 	qdf_mem_free(soc);
3293 }
3294 
3295 #ifndef QCA_WIFI_QCA6390
3296 static void dp_mon_htt_srng_setup(struct dp_soc *soc,
3297 				  struct dp_pdev *pdev,
3298 				  int mac_id,
3299 				  int mac_for_pdev)
3300 {
3301 	htt_srng_setup(soc->htt_handle, mac_for_pdev,
3302 		       pdev->rxdma_mon_buf_ring[mac_id].hal_srng,
3303 		       RXDMA_MONITOR_BUF);
3304 
3305 	htt_srng_setup(soc->htt_handle, mac_for_pdev,
3306 		       pdev->rxdma_mon_dst_ring[mac_id].hal_srng,
3307 		       RXDMA_MONITOR_DST);
3308 
3309 	htt_srng_setup(soc->htt_handle, mac_for_pdev,
3310 		       pdev->rxdma_mon_status_ring[mac_id].hal_srng,
3311 		       RXDMA_MONITOR_STATUS);
3312 
3313 	htt_srng_setup(soc->htt_handle, mac_for_pdev,
3314 		       pdev->rxdma_mon_desc_ring[mac_id].hal_srng,
3315 		       RXDMA_MONITOR_DESC);
3316 }
3317 #else
3318 static void dp_mon_htt_srng_setup(struct dp_soc *soc,
3319 				  struct dp_pdev *pdev,
3320 				  int mac_id,
3321 				  int mac_for_pdev)
3322 {
3323 }
3324 #endif
3325 /*
3326  * dp_rxdma_ring_config() - configure the RX DMA rings
3327  *
3328  * This function is used to configure the MAC rings.
3329  * On MCL, the host provides buffers in the Host2FW ring;
3330  * FW refills (copies) buffers to the RxDMA ring and updates
3331  * the ring index in a register
3332  *
3333  * @soc: data path SoC handle
3334  *
3335  * Return: void
3336  */
3337 #ifdef QCA_HOST2FW_RXBUF_RING
3338 static void dp_rxdma_ring_config(struct dp_soc *soc)
3339 {
3340 	int i;
3341 
3342 	for (i = 0; i < MAX_PDEV_CNT; i++) {
3343 		struct dp_pdev *pdev = soc->pdev_list[i];
3344 
3345 		if (pdev) {
3346 			int mac_id;
3347 			bool dbs_enable = 0;
3348 			int max_mac_rings =
3349 				 wlan_cfg_get_num_mac_rings
3350 				(pdev->wlan_cfg_ctx);
3351 
3352 			htt_srng_setup(soc->htt_handle, 0,
3353 				 pdev->rx_refill_buf_ring.hal_srng,
3354 				 RXDMA_BUF);
3355 
3356 			if (pdev->rx_refill_buf_ring2.hal_srng)
3357 				htt_srng_setup(soc->htt_handle, 0,
3358 					pdev->rx_refill_buf_ring2.hal_srng,
3359 					RXDMA_BUF);
3360 
3361 			if (soc->cdp_soc.ol_ops->
3362 				is_hw_dbs_2x2_capable) {
3363 				dbs_enable = soc->cdp_soc.ol_ops->
3364 					is_hw_dbs_2x2_capable(soc->ctrl_psoc);
3365 			}
3366 
3367 			if (dbs_enable) {
3368 				QDF_TRACE(QDF_MODULE_ID_TXRX,
3369 				QDF_TRACE_LEVEL_ERROR,
3370 				FL("DBS enabled max_mac_rings %d"),
3371 					 max_mac_rings);
3372 			} else {
3373 				max_mac_rings = 1;
3374 				QDF_TRACE(QDF_MODULE_ID_TXRX,
3375 					 QDF_TRACE_LEVEL_ERROR,
3376 					 FL("DBS disabled, max_mac_rings %d"),
3377 					 max_mac_rings);
3378 			}
3379 
3380 			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
3381 					 FL("pdev_id %d max_mac_rings %d"),
3382 					 pdev->pdev_id, max_mac_rings);
3383 
3384 			for (mac_id = 0; mac_id < max_mac_rings; mac_id++) {
3385 				int mac_for_pdev = dp_get_mac_id_for_pdev(
3386 							mac_id, pdev->pdev_id);
3387 
3388 				QDF_TRACE(QDF_MODULE_ID_TXRX,
3389 					 QDF_TRACE_LEVEL_ERROR,
3390 					 FL("mac_id %d"), mac_for_pdev);
3391 				htt_srng_setup(soc->htt_handle, mac_for_pdev,
3392 					 pdev->rx_mac_buf_ring[mac_id]
3393 						.hal_srng,
3394 					 RXDMA_BUF);
3395 				htt_srng_setup(soc->htt_handle, mac_for_pdev,
3396 					pdev->rxdma_err_dst_ring[mac_id]
3397 						.hal_srng,
3398 					RXDMA_DST);
3399 
3400 				/* Configure monitor mode rings */
3401 				dp_mon_htt_srng_setup(soc, pdev, mac_id,
3402 						      mac_for_pdev);
3403 
3404 			}
3405 		}
3406 	}
3407 
3408 	/*
3409 	 * Timer to reap rxdma status rings.
3410 	 * Needed until we enable ppdu end interrupts
3411 	 */
3412 	qdf_timer_init(soc->osdev, &soc->mon_reap_timer,
3413 			dp_service_mon_rings, (void *)soc,
3414 			QDF_TIMER_TYPE_WAKE_APPS);
3415 	soc->reap_timer_init = 1;
3416 }
3417 #else
3418 /* This is only for WIN */
3419 static void dp_rxdma_ring_config(struct dp_soc *soc)
3420 {
3421 	int i;
3422 	int mac_id;
3423 
3424 	for (i = 0; i < MAX_PDEV_CNT; i++) {
3425 		struct dp_pdev *pdev = soc->pdev_list[i];
3426 
3427 		if (pdev == NULL)
3428 			continue;
3429 
3430 		for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
3431 			int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, i);
3432 
3433 			htt_srng_setup(soc->htt_handle, mac_for_pdev,
3434 				pdev->rx_refill_buf_ring.hal_srng, RXDMA_BUF);
3435 
3436 			htt_srng_setup(soc->htt_handle, mac_for_pdev,
3437 				pdev->rxdma_mon_buf_ring[mac_id].hal_srng,
3438 				RXDMA_MONITOR_BUF);
3439 			htt_srng_setup(soc->htt_handle, mac_for_pdev,
3440 				pdev->rxdma_mon_dst_ring[mac_id].hal_srng,
3441 				RXDMA_MONITOR_DST);
3442 			htt_srng_setup(soc->htt_handle, mac_for_pdev,
3443 				pdev->rxdma_mon_status_ring[mac_id].hal_srng,
3444 				RXDMA_MONITOR_STATUS);
3445 			htt_srng_setup(soc->htt_handle, mac_for_pdev,
3446 				pdev->rxdma_mon_desc_ring[mac_id].hal_srng,
3447 				RXDMA_MONITOR_DESC);
3448 			htt_srng_setup(soc->htt_handle, mac_for_pdev,
3449 				pdev->rxdma_err_dst_ring[mac_id].hal_srng,
3450 				RXDMA_DST);
3451 		}
3452 	}
3453 }
3454 #endif
3455 
3456 /*
3457  * dp_soc_attach_target_wifi3() - SOC initialization in the target
3458  * @cdp_soc: Datapath SOC handle
3459  */
3460 static int dp_soc_attach_target_wifi3(struct cdp_soc_t *cdp_soc)
3461 {
3462 	struct dp_soc *soc = (struct dp_soc *)cdp_soc;
3463 
3464 	htt_soc_attach_target(soc->htt_handle);
3465 
3466 	dp_rxdma_ring_config(soc);
3467 
3468 	DP_STATS_INIT(soc);
3469 
3470 	/* initialize work queue for stats processing */
3471 	qdf_create_work(0, &soc->htt_stats.work, htt_t2h_stats_handler, soc);
3472 
3473 	return 0;
3474 }
3475 
3476 /*
3477  * dp_soc_get_nss_cfg_wifi3() - SOC get nss config
3478  * @cdp_soc: Datapath SOC handle
3479  */
3480 static int dp_soc_get_nss_cfg_wifi3(struct cdp_soc_t *cdp_soc)
3481 {
3482 	struct dp_soc *dsoc = (struct dp_soc *)cdp_soc;
3483 	return wlan_cfg_get_dp_soc_nss_cfg(dsoc->wlan_cfg_ctx);
3484 }
3485 /*
3486  * dp_soc_set_nss_cfg_wifi3() - SOC set nss config
3487  * @cdp_soc: Datapath SOC handle
3488  * @config: nss config
3489  */
3490 static void dp_soc_set_nss_cfg_wifi3(struct cdp_soc_t *cdp_soc, int config)
3491 {
3492 	struct dp_soc *dsoc = (struct dp_soc *)cdp_soc;
3493 	struct wlan_cfg_dp_soc_ctxt *wlan_cfg_ctx = dsoc->wlan_cfg_ctx;
3494 
3495 	wlan_cfg_set_dp_soc_nss_cfg(wlan_cfg_ctx, config);
3496 
3497 	/*
3498 	 * TODO: masked out based on the per offloaded radio
3499 	 */
3500 	if (config == dp_nss_cfg_dbdc) {
3501 		wlan_cfg_set_num_tx_desc_pool(wlan_cfg_ctx, 0);
3502 		wlan_cfg_set_num_tx_ext_desc_pool(wlan_cfg_ctx, 0);
3503 		wlan_cfg_set_num_tx_desc(wlan_cfg_ctx, 0);
3504 		wlan_cfg_set_num_tx_ext_desc(wlan_cfg_ctx, 0);
3505 	}
3506 
3507 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
3508 		  FL("nss-wifi<0> nss config is enabled"));
3509 }
3510 /*
3511  * dp_vdev_attach_wifi3() - attach txrx vdev
3512  * @txrx_pdev: Datapath PDEV handle
3513  * @vdev_mac_addr: MAC address of the virtual interface
3514  * @vdev_id: VDEV Id
3515  * @op_mode: VDEV operating mode
3516  *
3517  * Return: DP VDEV handle on success, NULL on failure
3518  */
3519 static struct cdp_vdev *dp_vdev_attach_wifi3(struct cdp_pdev *txrx_pdev,
3520 	uint8_t *vdev_mac_addr, uint8_t vdev_id, enum wlan_op_mode op_mode)
3521 {
3522 	struct dp_pdev *pdev = (struct dp_pdev *)txrx_pdev;
3523 	struct dp_soc *soc = pdev->soc;
3524 	struct dp_vdev *vdev = qdf_mem_malloc(sizeof(*vdev));
3525 
3526 	if (!vdev) {
3527 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3528 			FL("DP VDEV memory allocation failed"));
3529 		goto fail0;
3530 	}
3531 
3532 	vdev->pdev = pdev;
3533 	vdev->vdev_id = vdev_id;
3534 	vdev->opmode = op_mode;
3535 	vdev->osdev = soc->osdev;
3536 
3537 	vdev->osif_rx = NULL;
3538 	vdev->osif_rsim_rx_decap = NULL;
3539 	vdev->osif_get_key = NULL;
3540 	vdev->osif_rx_mon = NULL;
3541 	vdev->osif_tx_free_ext = NULL;
3542 	vdev->osif_vdev = NULL;
3543 
3544 	vdev->delete.pending = 0;
3545 	vdev->safemode = 0;
3546 	vdev->drop_unenc = 1;
3547 	vdev->sec_type = cdp_sec_type_none;
3548 #ifdef notyet
3549 	vdev->filters_num = 0;
3550 #endif
3551 
3552 	qdf_mem_copy(
3553 		&vdev->mac_addr.raw[0], vdev_mac_addr, OL_TXRX_MAC_ADDR_LEN);
3554 
3555 	vdev->tx_encap_type = wlan_cfg_pkt_type(soc->wlan_cfg_ctx);
3556 	vdev->rx_decap_type = wlan_cfg_pkt_type(soc->wlan_cfg_ctx);
3557 	vdev->dscp_tid_map_id = 0;
3558 	vdev->mcast_enhancement_en = 0;
3559 
3560 	/* TODO: Initialize default HTT meta data that will be used in
3561 	 * TCL descriptors for packets transmitted from this VDEV
3562 	 */
3563 
3564 	TAILQ_INIT(&vdev->peer_list);
3565 
3566 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
3567 	/* add this vdev into the pdev's list */
3568 	TAILQ_INSERT_TAIL(&pdev->vdev_list, vdev, vdev_list_elem);
3569 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
3570 	pdev->vdev_count++;
3571 
3572 	dp_tx_vdev_attach(vdev);
3573 
3574 
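	/*
	 * In polled interrupt mode, start the periodic interrupt service
	 * timer when the first vdev on this pdev comes up.
	 */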
3575 	if ((soc->intr_mode == DP_INTR_POLL) &&
3576 			wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx) != 0) {
3577 		if (pdev->vdev_count == 1)
3578 			qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
3579 	}
3580 
3581 	dp_lro_hash_setup(soc);
3582 
3583 	/* LRO */
3584 	if (wlan_cfg_is_lro_enabled(soc->wlan_cfg_ctx) &&
3585 		wlan_op_mode_sta == vdev->opmode)
3586 		vdev->lro_enable = true;
3587 
3588 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
3589 		 "LRO: vdev_id %d lro_enable %d", vdev_id, vdev->lro_enable);
3590 
3591 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3592 		"Created vdev %pK (%pM)", vdev, vdev->mac_addr.raw);
3593 	DP_STATS_INIT(vdev);
3594 
3595 	if (wlan_op_mode_sta == vdev->opmode)
3596 		dp_peer_create_wifi3((struct cdp_vdev *)vdev,
3597 							vdev->mac_addr.raw,
3598 							NULL);
3599 
3600 	return (struct cdp_vdev *)vdev;
3601 
3602 fail0:
3603 	return NULL;
3604 }
3605 
3606 /**
3607  * dp_vdev_register_wifi3() - Register VDEV operations from osif layer
3608  * @vdev_handle: Datapath VDEV handle
3609  * @osif_vdev: OSIF vdev handle
3610  * @ctrl_vdev: UMAC vdev handle
3611  * @txrx_ops: Tx and Rx operations
3612  *
3613  * Return: void
3614  */
3615 static void dp_vdev_register_wifi3(struct cdp_vdev *vdev_handle,
3616 	void *osif_vdev, struct cdp_ctrl_objmgr_vdev *ctrl_vdev,
3617 	struct ol_txrx_ops *txrx_ops)
3618 {
3619 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
3620 	vdev->osif_vdev = osif_vdev;
3621 	vdev->ctrl_vdev = ctrl_vdev;
3622 	vdev->osif_rx = txrx_ops->rx.rx;
3623 	vdev->osif_rsim_rx_decap = txrx_ops->rx.rsim_rx_decap;
3624 	vdev->osif_get_key = txrx_ops->get_key;
3625 	vdev->osif_rx_mon = txrx_ops->rx.mon;
3626 	vdev->osif_tx_free_ext = txrx_ops->tx.tx_free_ext;
3627 #ifdef notyet
3628 #if ATH_SUPPORT_WAPI
3629 	vdev->osif_check_wai = txrx_ops->rx.wai_check;
3630 #endif
3631 #endif
3632 #ifdef UMAC_SUPPORT_PROXY_ARP
3633 	vdev->osif_proxy_arp = txrx_ops->proxy_arp;
3634 #endif
3635 	vdev->me_convert = txrx_ops->me_convert;
3636 
3637 	/* TODO: Enable the following once Tx code is integrated */
3638 	if (vdev->mesh_vdev)
3639 		txrx_ops->tx.tx = dp_tx_send_mesh;
3640 	else
3641 		txrx_ops->tx.tx = dp_tx_send;
3642 
3643 	txrx_ops->tx.tx_exception = dp_tx_send_exception;
3644 
3645 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW,
3646 		"DP Vdev Register success");
3647 }
3648 
3649 /**
3650  * dp_vdev_flush_peers() - Forcibly flush peers of vdev
3651  * @vdev: Datapath VDEV handle
3652  *
3653  * Return: void
3654  */
3655 static void dp_vdev_flush_peers(struct dp_vdev *vdev)
3656 {
3657 	struct dp_pdev *pdev = vdev->pdev;
3658 	struct dp_soc *soc = pdev->soc;
3659 	struct dp_peer *peer;
3660 	uint16_t *peer_ids;
3661 	uint8_t i = 0, j = 0;
3662 
3663 	peer_ids = qdf_mem_malloc(soc->max_peers * sizeof(peer_ids[0]));
3664 	if (!peer_ids) {
3665 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3666 			"DP alloc failure - unable to flush peers");
3667 		return;
3668 	}
3669 
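	/*
	 * Collect the valid peer IDs while holding peer_ref_mutex, then
	 * issue the unmap events after dropping the lock, since the unmap
	 * handler releases peer references itself.
	 */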
3670 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
3671 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
3672 		for (i = 0; i < MAX_NUM_PEER_ID_PER_PEER; i++)
3673 			if (peer->peer_ids[i] != HTT_INVALID_PEER)
3674 				if (j < soc->max_peers)
3675 					peer_ids[j++] = peer->peer_ids[i];
3676 	}
3677 	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
3678 
3679 	for (i = 0; i < j ; i++)
3680 		dp_rx_peer_unmap_handler(soc, peer_ids[i]);
3681 
3682 	qdf_mem_free(peer_ids);
3683 
3684 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
3685 		FL("Flushed peers for vdev object %pK "), vdev);
3686 }
3687 
3688 /*
3689  * dp_vdev_detach_wifi3() - Detach txrx vdev
3690  * @vdev_handle:	Datapath VDEV handle
3691  * @callback:		Callback OL_IF on completion of detach
3692  * @cb_context:	Callback context
3693  *
3694  */
3695 static void dp_vdev_detach_wifi3(struct cdp_vdev *vdev_handle,
3696 	ol_txrx_vdev_delete_cb callback, void *cb_context)
3697 {
3698 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
3699 	struct dp_pdev *pdev = vdev->pdev;
3700 	struct dp_soc *soc = pdev->soc;
3701 	struct dp_neighbour_peer *peer = NULL;
3702 
3703 	/* preconditions */
3704 	qdf_assert(vdev);
3705 
3706 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
3707 	/* remove the vdev from its parent pdev's list */
3708 	TAILQ_REMOVE(&pdev->vdev_list, vdev, vdev_list_elem);
3709 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
3710 
3711 	if (wlan_op_mode_sta == vdev->opmode)
3712 		dp_peer_delete_wifi3(vdev->vap_bss_peer, 0);
3713 
3714 	/*
3715 	 * If Target is hung, flush all peers before detaching vdev
3716 	 * this will free all references held due to missing
3717 	 * unmap commands from Target
3718 	 */
3719 	if (hif_get_target_status(soc->hif_handle) == TARGET_STATUS_RESET)
3720 		dp_vdev_flush_peers(vdev);
3721 
3722 	/*
3723 	 * Use peer_ref_mutex while accessing peer_list, in case
3724 	 * a peer is in the process of being removed from the list.
3725 	 */
3726 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
3727 	/* check that the vdev has no peers allocated */
3728 	if (!TAILQ_EMPTY(&vdev->peer_list)) {
3729 		/* debug print - will be removed later */
3730 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_WARN,
3731 			FL("not deleting vdev object %pK (%pM)"
3732 			" until deletion finishes for all its peers"),
3733 			vdev, vdev->mac_addr.raw);
3734 		/* indicate that the vdev needs to be deleted */
3735 		vdev->delete.pending = 1;
3736 		vdev->delete.callback = callback;
3737 		vdev->delete.context = cb_context;
3738 		qdf_spin_unlock_bh(&soc->peer_ref_mutex);
3739 		return;
3740 	}
3741 	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
3742 
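	/*
	 * Sanity check: no neighbour (NAC) peer entry should still be
	 * pointing at the vdev that is being deleted.
	 */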
3743 	qdf_spin_lock_bh(&pdev->neighbour_peer_mutex);
3744 	TAILQ_FOREACH(peer, &pdev->neighbour_peers_list,
3745 		      neighbour_peer_list_elem) {
3746 		QDF_ASSERT(peer->vdev != vdev);
3747 	}
3748 	qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex);
3749 
3750 	dp_tx_vdev_detach(vdev);
3751 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
3752 		FL("deleting vdev object %pK (%pM)"), vdev, vdev->mac_addr.raw);
3753 
3754 	qdf_mem_free(vdev);
3755 
3756 	if (callback)
3757 		callback(cb_context);
3758 }
3759 
3760 /*
3761  * dp_peer_delete_ast_entries(): Delete all AST entries for a peer
3762  * @soc - datapath soc handle
3763  * @peer - datapath peer handle
3764  *
3765  * Delete the AST entries belonging to a peer
3766  */
3767 #ifdef FEATURE_AST
3768 static inline void dp_peer_delete_ast_entries(struct dp_soc *soc,
3769 					      struct dp_peer *peer)
3770 {
3771 	struct dp_ast_entry *ast_entry, *temp_ast_entry;
3772 
3773 	qdf_spin_lock_bh(&soc->ast_lock);
3774 	DP_PEER_ITERATE_ASE_LIST(peer, ast_entry, temp_ast_entry)
3775 		dp_peer_del_ast(soc, ast_entry);
3776 
3777 	peer->self_ast_entry = NULL;
3778 	TAILQ_INIT(&peer->ast_entry_list);
3779 	qdf_spin_unlock_bh(&soc->ast_lock);
3780 }
3781 #else
3782 static inline void dp_peer_delete_ast_entries(struct dp_soc *soc,
3783 					      struct dp_peer *peer)
3784 {
3785 }
3786 #endif
3787 
3788 #if ATH_SUPPORT_WRAP
3789 static inline struct dp_peer *dp_peer_can_reuse(struct dp_vdev *vdev,
3790 						uint8_t *peer_mac_addr)
3791 {
3792 	struct dp_peer *peer;
3793 
3794 	peer = dp_peer_find_hash_find(vdev->pdev->soc, peer_mac_addr,
3795 				      0, vdev->vdev_id);
3796 	if (!peer)
3797 		return NULL;
3798 
3799 	if (peer->bss_peer)
3800 		return peer;
3801 
3802 	qdf_atomic_dec(&peer->ref_cnt);
3803 	return NULL;
3804 }
3805 #else
3806 static inline struct dp_peer *dp_peer_can_reuse(struct dp_vdev *vdev,
3807 						uint8_t *peer_mac_addr)
3808 {
3809 	struct dp_peer *peer;
3810 
3811 	peer = dp_peer_find_hash_find(vdev->pdev->soc, peer_mac_addr,
3812 				      0, vdev->vdev_id);
3813 	if (!peer)
3814 		return NULL;
3815 
3816 	if (peer->bss_peer && (peer->vdev->vdev_id == vdev->vdev_id))
3817 		return peer;
3818 
3819 	qdf_atomic_dec(&peer->ref_cnt);
3820 	return NULL;
3821 }
3822 #endif
3823 
3824 /*
3825  * dp_peer_create_wifi3() - attach txrx peer
3826  * @vdev_handle: Datapath VDEV handle
3827  * @peer_mac_addr: Peer MAC address
3828  * @ctrl_peer: Control path peer handle
3829  * Return: DP peer handle on success, NULL on failure
3830  */
3831 static void *dp_peer_create_wifi3(struct cdp_vdev *vdev_handle,
3832 		uint8_t *peer_mac_addr, struct cdp_ctrl_objmgr_peer *ctrl_peer)
3833 {
3834 	struct dp_peer *peer;
3835 	int i;
3836 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
3837 	struct dp_pdev *pdev;
3838 	struct dp_soc *soc;
3839 	struct dp_ast_entry *ast_entry;
3840 	enum cdp_txrx_ast_entry_type ast_type = CDP_TXRX_AST_TYPE_STATIC;
3841 
3842 	/* preconditions */
3843 	qdf_assert(vdev);
3844 	qdf_assert(peer_mac_addr);
3845 
3846 	pdev = vdev->pdev;
3847 	soc = pdev->soc;
3848 
3849 	/*
3850 	 * If a peer entry with given MAC address already exists,
3851 	 * reuse the peer and reset the state of peer.
3852 	 */
3853 	peer = dp_peer_can_reuse(vdev, peer_mac_addr);
3854 
3855 	if (peer) {
3856 		peer->delete_in_progress = false;
3857 
3858 		dp_peer_delete_ast_entries(soc, peer);
3859 
3860 		if ((vdev->opmode == wlan_op_mode_sta) &&
3861 		    !qdf_mem_cmp(peer_mac_addr, &vdev->mac_addr.raw[0],
3862 		     DP_MAC_ADDR_LEN)) {
3863 			ast_type = CDP_TXRX_AST_TYPE_SELF;
3864 		}
3865 
3866 		dp_peer_add_ast(soc, peer, peer_mac_addr, ast_type, 0);
3867 
3868 		/*
3869 		 * Control path maintains a node count which is incremented
3870 		 * for every new peer create command. Since new peer is not being
3871 		 * created and earlier reference is reused here,
3872 		 * peer_unref_delete event is sent to control path to
3873 		 * increment the count back.
3874 		 */
3875 		if (soc->cdp_soc.ol_ops->peer_unref_delete) {
3876 			soc->cdp_soc.ol_ops->peer_unref_delete(pdev->ctrl_pdev,
3877 				vdev->vdev_id, peer->mac_addr.raw);
3878 		}
3879 		peer->ctrl_peer = ctrl_peer;
3880 
3881 		dp_local_peer_id_alloc(pdev, peer);
3882 		DP_STATS_INIT(peer);
3883 
3884 		return (void *)peer;
3885 	} else {
3886 		/*
3887 		 * When a STA roams from RPTR AP to ROOT AP and vice versa, we
3888 		 * need to remove the AST entry which was earlier added as a WDS
3889 		 * entry.
3890 		 * If an AST entry exists, but no peer entry exists with a given
3891 		 * MAC addresses, we could deduce it as a WDS entry
3892 		 */
3893 		ast_entry = dp_peer_ast_hash_find(soc, peer_mac_addr);
3894 		if (ast_entry)
3895 			dp_peer_del_ast(soc, ast_entry);
3896 	}
3897 
3898 #ifdef notyet
3899 	peer = (struct dp_peer *)qdf_mempool_alloc(soc->osdev,
3900 		soc->mempool_ol_ath_peer);
3901 #else
3902 	peer = (struct dp_peer *)qdf_mem_malloc(sizeof(*peer));
3903 #endif
3904 
3905 	if (!peer)
3906 		return NULL; /* failure */
3907 
3908 	qdf_mem_zero(peer, sizeof(struct dp_peer));
3909 
3910 	TAILQ_INIT(&peer->ast_entry_list);
3911 
3912 	/* store provided params */
3913 	peer->vdev = vdev;
3914 	peer->ctrl_peer = ctrl_peer;
3915 
3916 	if ((vdev->opmode == wlan_op_mode_sta) &&
3917 	    !qdf_mem_cmp(peer_mac_addr, &vdev->mac_addr.raw[0],
3918 			 DP_MAC_ADDR_LEN)) {
3919 		ast_type = CDP_TXRX_AST_TYPE_SELF;
3920 	}
3921 
3922 	dp_peer_add_ast(soc, peer, peer_mac_addr, ast_type, 0);
3923 
3924 	qdf_spinlock_create(&peer->peer_info_lock);
3925 
3926 	qdf_mem_copy(
3927 		&peer->mac_addr.raw[0], peer_mac_addr, OL_TXRX_MAC_ADDR_LEN);
3928 
3929 	/* TODO: See if rx_opt_proc is really required */
3930 	peer->rx_opt_proc = soc->rx_opt_proc;
3931 
3932 	/* initialize the peer_id */
3933 	for (i = 0; i < MAX_NUM_PEER_ID_PER_PEER; i++)
3934 		peer->peer_ids[i] = HTT_INVALID_PEER;
3935 
3936 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
3937 
3938 	qdf_atomic_init(&peer->ref_cnt);
3939 
3940 	/* keep one reference for attach */
3941 	qdf_atomic_inc(&peer->ref_cnt);
3942 
3943 	/* add this peer into the vdev's list */
3944 	if (wlan_op_mode_sta == vdev->opmode)
3945 		TAILQ_INSERT_HEAD(&vdev->peer_list, peer, peer_list_elem);
3946 	else
3947 		TAILQ_INSERT_TAIL(&vdev->peer_list, peer, peer_list_elem);
3948 
3949 	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
3950 
3951 	/* TODO: See if hash based search is required */
3952 	dp_peer_find_hash_add(soc, peer);
3953 
3954 	/* Initialize the peer state */
3955 	peer->state = OL_TXRX_PEER_STATE_DISC;
3956 
3957 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
3958 		"vdev %pK created peer %pK (%pM) ref_cnt: %d",
3959 		vdev, peer, peer->mac_addr.raw,
3960 		qdf_atomic_read(&peer->ref_cnt));
3961 	/*
3962 	 * For every peer map message, search and set bss_peer if applicable
3963 	 */
3964 	if (memcmp(peer->mac_addr.raw, vdev->mac_addr.raw, 6) == 0) {
3965 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
3966 			"vdev bss_peer!!!!");
3967 		peer->bss_peer = 1;
3968 		vdev->vap_bss_peer = peer;
3969 	}
3970 	for (i = 0; i < DP_MAX_TIDS; i++)
3971 		qdf_spinlock_create(&peer->rx_tid[i].tid_lock);
3972 
3973 	dp_local_peer_id_alloc(pdev, peer);
3974 	DP_STATS_INIT(peer);
3975 	return (void *)peer;
3976 }
3977 
3978 /*
3979  * dp_peer_setup_wifi3() - initialize the peer
3980  * @vdev_hdl: virtual device object
3981  * @peer_hdl: peer object
3982  *
3983  * Return: void
3984  */
3985 static void dp_peer_setup_wifi3(struct cdp_vdev *vdev_hdl, void *peer_hdl)
3986 {
3987 	struct dp_peer *peer = (struct dp_peer *)peer_hdl;
3988 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_hdl;
3989 	struct dp_pdev *pdev;
3990 	struct dp_soc *soc;
3991 	bool hash_based = 0;
3992 	enum cdp_host_reo_dest_ring reo_dest;
3993 
3994 	/* preconditions */
3995 	qdf_assert(vdev);
3996 	qdf_assert(peer);
3997 
3998 	pdev = vdev->pdev;
3999 	soc = pdev->soc;
4000 
4001 	peer->last_assoc_rcvd = 0;
4002 	peer->last_disassoc_rcvd = 0;
4003 	peer->last_deauth_rcvd = 0;
4004 
4005 	/*
4006 	 * hash based steering is disabled for Radios which are offloaded
4007 	 * to NSS
4008 	 */
4009 	if (!wlan_cfg_get_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx))
4010 		hash_based = wlan_cfg_is_rx_hash_enabled(soc->wlan_cfg_ctx);
4011 
4012 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
4013 		FL("hash based steering for pdev: %d is %d"),
4014 		pdev->pdev_id, hash_based);
4015 
4016 	/*
4017 	 * Below line of code will ensure the proper reo_dest ring is chosen
4018 	 * for cases where toeplitz hash cannot be generated (ex: non TCP/UDP)
4019 	 */
4020 	reo_dest = pdev->reo_dest;
4021 
4022 	if (soc->cdp_soc.ol_ops->peer_set_default_routing) {
4023 		/* TODO: Check the destination ring number to be passed to FW */
4024 		soc->cdp_soc.ol_ops->peer_set_default_routing(
4025 				pdev->ctrl_pdev, peer->mac_addr.raw,
4026 				peer->vdev->vdev_id, hash_based, reo_dest);
4027 	}
4028 
4029 	dp_peer_rx_init(pdev, peer);
4030 	return;
4031 }
4032 
4033 /*
4034  * dp_set_vdev_tx_encap_type() - set the encap type of the vdev
4035  * @vdev_handle: virtual device object
4036  * @val: encap type of the packets (enum htt_cmn_pkt_type)
4037  *
4038  * Return: void
4039  */
4040 static void dp_set_vdev_tx_encap_type(struct cdp_vdev *vdev_handle,
4041 	 enum htt_cmn_pkt_type val)
4042 {
4043 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
4044 	vdev->tx_encap_type = val;
4045 }
4046 
4047 /*
4048  * dp_set_vdev_rx_decap_type() - set the decap type of the vdev
4049  * @vdev_handle: virtual device object
4050  * @val: decap type of the packets (enum htt_cmn_pkt_type)
4051  *
4052  * Return: void
4053  */
4054 static void dp_set_vdev_rx_decap_type(struct cdp_vdev *vdev_handle,
4055 	 enum htt_cmn_pkt_type val)
4056 {
4057 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
4058 	vdev->rx_decap_type = val;
4059 }
4060 
4061 /*
4062  * dp_set_pdev_reo_dest() - set the reo destination ring for this pdev
4063  * @pdev_handle: physical device object
4064  * @val: reo destination ring index (1 - 4)
4065  *
4066  * Return: void
4067  */
4068 static void dp_set_pdev_reo_dest(struct cdp_pdev *pdev_handle,
4069 	 enum cdp_host_reo_dest_ring val)
4070 {
4071 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
4072 
4073 	if (pdev)
4074 		pdev->reo_dest = val;
4075 }
4076 
4077 /*
4078  * dp_get_pdev_reo_dest() - get the reo destination for this pdev
4079  * @pdev_handle: physical device object
4080  *
4081  * Return: reo destination ring index
4082  */
4083 static enum cdp_host_reo_dest_ring
4084 dp_get_pdev_reo_dest(struct cdp_pdev *pdev_handle)
4085 {
4086 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
4087 
4088 	if (pdev)
4089 		return pdev->reo_dest;
4090 	else
4091 		return cdp_host_reo_dest_ring_unknown;
4092 }
4093 
4094 #ifdef QCA_SUPPORT_SON
4095 static void dp_son_peer_authorize(struct dp_peer *peer)
4096 {
4097 	struct dp_soc *soc;
4098 	soc = peer->vdev->pdev->soc;
4099 	peer->peer_bs_inact_flag = 0;
4100 	peer->peer_bs_inact = soc->pdev_bs_inact_reload;
4101 	return;
4102 }
4103 #else
4104 static void dp_son_peer_authorize(struct dp_peer *peer)
4105 {
4106 	return;
4107 }
4108 #endif
4109 /*
4110  * dp_set_filter_neighbour_peers() - set filter neighbour peers for smart mesh
4111  * @pdev_handle: device object
4112  * @val: value to be set
4113  *
4114  * Return: 0 on success
4115  */
4116 static int dp_set_filter_neighbour_peers(struct cdp_pdev *pdev_handle,
4117 	 uint32_t val)
4118 {
4119 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
4120 
4121 	/* Enable/Disable smart mesh filtering. This flag will be checked
4122 	 * during rx processing to determine whether packets are from NAC clients.
4123 	 */
4124 	pdev->filter_neighbour_peers = val;
4125 	return 0;
4126 }
4127 
4128 /*
4129  * dp_update_filter_neighbour_peers() - set neighbour peers(nac clients)
4130  * address for smart mesh filtering
4131  * @vdev_handle: virtual device object
4132  * @cmd: Add/Del command
4133  * @macaddr: nac client mac address
4134  *
4135  * Return: 1 on success, 0 on failure
4136  */
4137 static int dp_update_filter_neighbour_peers(struct cdp_vdev *vdev_handle,
4138 					    uint32_t cmd, uint8_t *macaddr)
4139 {
4140 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
4141 	struct dp_pdev *pdev = vdev->pdev;
4142 	struct dp_neighbour_peer *peer = NULL;
4143 
4144 	if (!macaddr)
4145 		goto fail0;
4146 
4147 	/* Store address of NAC (neighbour peer) which will be checked
4148 	 * against TA of received packets.
4149 	 */
4150 	if (cmd == DP_NAC_PARAM_ADD) {
4151 		peer = (struct dp_neighbour_peer *) qdf_mem_malloc(
4152 				sizeof(*peer));
4153 
4154 		if (!peer) {
4155 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
4156 				FL("DP neighbour peer node memory allocation failed"));
4157 			goto fail0;
4158 		}
4159 
4160 		qdf_mem_copy(&peer->neighbour_peers_macaddr.raw[0],
4161 			macaddr, DP_MAC_ADDR_LEN);
4162 		peer->vdev = vdev;
4163 
4164 		qdf_spin_lock_bh(&pdev->neighbour_peer_mutex);
4165 
4166 		/* add this neighbour peer into the list */
4167 		TAILQ_INSERT_TAIL(&pdev->neighbour_peers_list, peer,
4168 				neighbour_peer_list_elem);
4169 		qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex);
4170 
4171 		/* first neighbour */
4172 		if (!pdev->neighbour_peers_added) {
4173 			if (!pdev->mcopy_mode && !pdev->enhanced_stats_en)
4174 				dp_ppdu_ring_cfg(pdev);
4175 			pdev->neighbour_peers_added = true;
4176 		}
4177 		return 1;
4178 
4179 	} else if (cmd == DP_NAC_PARAM_DEL) {
4180 		qdf_spin_lock_bh(&pdev->neighbour_peer_mutex);
4181 		TAILQ_FOREACH(peer, &pdev->neighbour_peers_list,
4182 				neighbour_peer_list_elem) {
4183 			if (!qdf_mem_cmp(&peer->neighbour_peers_macaddr.raw[0],
4184 				macaddr, DP_MAC_ADDR_LEN)) {
4185 				/* delete this peer from the list */
4186 				TAILQ_REMOVE(&pdev->neighbour_peers_list,
4187 					peer, neighbour_peer_list_elem);
4188 				qdf_mem_free(peer);
4189 				break;
4190 			}
4191 		}
4192 		/* last neighbour deleted */
4193 		if (TAILQ_EMPTY(&pdev->neighbour_peers_list))
4194 			pdev->neighbour_peers_added = false;
4195 
4196 		qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex);
4197 
4198 		if (!pdev->mcopy_mode && !pdev->neighbour_peers_added &&
4199 		    !pdev->enhanced_stats_en)
4200 			dp_ppdu_ring_reset(pdev);
4201 		return 1;
4202 
4203 	}
4204 
4205 fail0:
4206 	return 0;
4207 }
4208 
4209 /*
4210  * dp_get_sec_type() - Get the security type
4211  * @peer:		Datapath peer handle
4212  * @sec_idx:    Security id (mcast, ucast)
4213  *
4214  * return sec_type: Security type
4215  */
4216 static int dp_get_sec_type(struct cdp_peer *peer, uint8_t sec_idx)
4217 {
4218 	struct dp_peer *dpeer = (struct dp_peer *)peer;
4219 
4220 	return dpeer->security[sec_idx].sec_type;
4221 }
4222 
4223 /*
4224  * dp_peer_authorize() - authorize txrx peer
4225  * @peer_handle:		Datapath peer handle
4226  * @authorize: authorize flag
4227  *
4228  */
4229 static void dp_peer_authorize(struct cdp_peer *peer_handle, uint32_t authorize)
4230 {
4231 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
4232 	struct dp_soc *soc;
4233 
4234 	if (peer != NULL) {
4235 		soc = peer->vdev->pdev->soc;
4236 		qdf_spin_lock_bh(&soc->peer_ref_mutex);
4237 		dp_son_peer_authorize(peer);
4238 		peer->authorize = authorize ? 1 : 0;
4239 		qdf_spin_unlock_bh(&soc->peer_ref_mutex);
4240 	}
4241 }
4242 
4243 #ifdef QCA_SUPPORT_SON
4244 /*
4245  * dp_txrx_update_inact_threshold() - Update inact timer threshold
4246  * @pdev_handle: Device handle
4247  * @new_threshold : updated threshold value
4248  *
4249  */
4250 static void
4251 dp_txrx_update_inact_threshold(struct cdp_pdev *pdev_handle,
4252 			       u_int16_t new_threshold)
4253 {
4254 	struct dp_vdev *vdev;
4255 	struct dp_peer *peer;
4256 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
4257 	struct dp_soc *soc = pdev->soc;
4258 	u_int16_t old_threshold = soc->pdev_bs_inact_reload;
4259 
4260 	if (old_threshold == new_threshold)
4261 		return;
4262 
4263 	soc->pdev_bs_inact_reload = new_threshold;
4264 
4265 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
4266 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
4267 	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
4268 		if (vdev->opmode != wlan_op_mode_ap)
4269 			continue;
4270 
4271 		TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
4272 			if (!peer->authorize)
4273 				continue;
4274 
4275 			if (old_threshold - peer->peer_bs_inact >=
4276 					new_threshold) {
4277 				dp_mark_peer_inact((void *)peer, true);
4278 				peer->peer_bs_inact = 0;
4279 			} else {
4280 				peer->peer_bs_inact = new_threshold -
4281 					(old_threshold - peer->peer_bs_inact);
4282 			}
4283 		}
4284 	}
4285 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
4286 	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
4287 }
4288 
4289 /**
4290  * dp_txrx_reset_inact_count(): Reset inact count
4291  * @pdev_handle - device handle
4292  *
4293  * Return: void
4294  */
4295 static void
4296 dp_txrx_reset_inact_count(struct cdp_pdev *pdev_handle)
4297 {
4298 	struct dp_vdev *vdev = NULL;
4299 	struct dp_peer *peer = NULL;
4300 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
4301 	struct dp_soc *soc = pdev->soc;
4302 
4303 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
4304 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
4305 	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
4306 		if (vdev->opmode != wlan_op_mode_ap)
4307 			continue;
4308 
4309 		TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
4310 			if (!peer->authorize)
4311 				continue;
4312 
4313 			peer->peer_bs_inact = soc->pdev_bs_inact_reload;
4314 		}
4315 	}
4316 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
4317 	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
4318 }
4319 
4320 /**
4321  * dp_set_inact_params(): set inactivity params
4322  * @pdev_handle - device handle
4323  * @inact_check_interval - inactivity interval
4324  * @inact_normal - Inactivity normal
4325  * @inact_overload - Inactivity overload
4326  *
4327  * Return: bool
4328  */
4329 bool dp_set_inact_params(struct cdp_pdev *pdev_handle,
4330 			 u_int16_t inact_check_interval,
4331 			 u_int16_t inact_normal, u_int16_t inact_overload)
4332 {
4333 	struct dp_soc *soc;
4334 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
4335 
4336 	if (!pdev)
4337 		return false;
4338 
4339 	soc = pdev->soc;
4340 	if (!soc)
4341 		return false;
4342 
4343 	soc->pdev_bs_inact_interval = inact_check_interval;
4344 	soc->pdev_bs_inact_normal = inact_normal;
4345 	soc->pdev_bs_inact_overload = inact_overload;
4346 
4347 	dp_txrx_update_inact_threshold((struct cdp_pdev *)pdev,
4348 					soc->pdev_bs_inact_normal);
4349 
4350 	return true;
4351 }
4352 
4353 /**
4354  * dp_start_inact_timer(): Inactivity timer start
4355  * @pdev_handle - device handle
4356  * @enable - Inactivity timer start/stop
4357  *
4358  * Return: bool
4359  */
4360 bool dp_start_inact_timer(struct cdp_pdev *pdev_handle, bool enable)
4361 {
4362 	struct dp_soc *soc;
4363 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
4364 
4365 	if (!pdev)
4366 		return false;
4367 
4368 	soc = pdev->soc;
4369 	if (!soc)
4370 		return false;
4371 
4372 	if (enable) {
4373 		dp_txrx_reset_inact_count((struct cdp_pdev *)pdev);
4374 		qdf_timer_mod(&soc->pdev_bs_inact_timer,
4375 			      soc->pdev_bs_inact_interval * 1000);
4376 	} else {
4377 		qdf_timer_stop(&soc->pdev_bs_inact_timer);
4378 	}
4379 
4380 	return true;
4381 }
4382 
4383 /**
4384  * dp_set_overload(): Set inactivity overload
4385  * @pdev_handle - device handle
4386  * @overload - overload status
4387  *
4388  * Return: void
4389  */
4390 void dp_set_overload(struct cdp_pdev *pdev_handle, bool overload)
4391 {
4392 	struct dp_soc *soc;
4393 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
4394 
4395 	if (!pdev)
4396 		return;
4397 
4398 	soc = pdev->soc;
4399 	if (!soc)
4400 		return;
4401 
4402 	dp_txrx_update_inact_threshold((struct cdp_pdev *)pdev,
4403 			overload ? soc->pdev_bs_inact_overload :
4404 			soc->pdev_bs_inact_normal);
4405 }
4406 
4407 /**
4408  * dp_peer_is_inact(): check whether peer is inactive
4409  * @peer_handle - datapath peer handle
4410  *
4411  * Return: bool
4412  */
4413 bool dp_peer_is_inact(void *peer_handle)
4414 {
4415 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
4416 
4417 	if (!peer)
4418 		return false;
4419 
4420 	return peer->peer_bs_inact_flag == 1;
4421 }
4422 
4423 /**
4424  * dp_init_inact_timer: initialize the inact timer
4425  * @soc - SOC handle
4426  *
4427  * Return: void
4428  */
4429 void dp_init_inact_timer(struct dp_soc *soc)
4430 {
4431 	qdf_timer_init(soc->osdev, &soc->pdev_bs_inact_timer,
4432 		dp_txrx_peer_find_inact_timeout_handler,
4433 		(void *)soc, QDF_TIMER_TYPE_WAKE_APPS);
4434 }
4435 
4436 #else
4437 
4438 bool dp_set_inact_params(struct cdp_pdev *pdev, u_int16_t inact_check_interval,
4439 			 u_int16_t inact_normal, u_int16_t inact_overload)
4440 {
4441 	return false;
4442 }
4443 
4444 bool dp_start_inact_timer(struct cdp_pdev *pdev, bool enable)
4445 {
4446 	return false;
4447 }
4448 
4449 void dp_set_overload(struct cdp_pdev *pdev, bool overload)
4450 {
4451 	return;
4452 }
4453 
4454 void dp_init_inact_timer(struct dp_soc *soc)
4455 {
4456 	return;
4457 }
4458 
4459 bool dp_peer_is_inact(void *peer)
4460 {
4461 	return false;
4462 }
4463 #endif
4464 
4465 /*
4466  * dp_peer_unref_delete() - unref and delete peer
4467  * @peer_handle:		Datapath peer handle
4468  *
4469  */
4470 void dp_peer_unref_delete(void *peer_handle)
4471 {
4472 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
4473 	struct dp_peer *bss_peer = NULL;
4474 	struct dp_vdev *vdev = peer->vdev;
4475 	struct dp_pdev *pdev = vdev->pdev;
4476 	struct dp_soc *soc = pdev->soc;
4477 	struct dp_peer *tmppeer;
4478 	int found = 0;
4479 	uint16_t peer_id;
4480 	uint16_t vdev_id;
4481 
4482 	/*
4483 	 * Hold the lock all the way from checking if the peer ref count
4484 	 * is zero until the peer references are removed from the hash
4485 	 * table and vdev list (if the peer ref count is zero).
4486 	 * This protects against a new HL tx operation starting to use the
4487 	 * peer object just after this function concludes it's done being used.
4488 	 * Furthermore, the lock needs to be held while checking whether the
4489 	 * vdev's list of peers is empty, to make sure that list is not modified
4490 	 * concurrently with the empty check.
4491 	 */
4492 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
4493 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
4494 		  "%s: peer %pK ref_cnt(before decrement): %d", __func__,
4495 		  peer, qdf_atomic_read(&peer->ref_cnt));
4496 	if (qdf_atomic_dec_and_test(&peer->ref_cnt)) {
4497 		peer_id = peer->peer_ids[0];
4498 		vdev_id = vdev->vdev_id;
4499 
4500 		/*
4501 		 * Make sure that the reference to the peer in
4502 		 * peer object map is removed
4503 		 */
4504 		if (peer_id != HTT_INVALID_PEER)
4505 			soc->peer_id_to_obj_map[peer_id] = NULL;
4506 
4507 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
4508 			"Deleting peer %pK (%pM)", peer, peer->mac_addr.raw);
4509 
4510 		/* remove the reference to the peer from the hash table */
4511 		dp_peer_find_hash_remove(soc, peer);
4512 
4513 		qdf_spin_lock_bh(&soc->ast_lock);
4514 		if (peer->self_ast_entry) {
4515 			dp_peer_del_ast(soc, peer->self_ast_entry);
4516 			peer->self_ast_entry = NULL;
4517 		}
4518 		qdf_spin_unlock_bh(&soc->ast_lock);
4519 
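		/*
		 * Remove the peer from its vdev's peer list; warn if it is
		 * unexpectedly not found there.
		 */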
4520 		TAILQ_FOREACH(tmppeer, &peer->vdev->peer_list, peer_list_elem) {
4521 			if (tmppeer == peer) {
4522 				found = 1;
4523 				break;
4524 			}
4525 		}
4526 		if (found) {
4527 			TAILQ_REMOVE(&peer->vdev->peer_list, peer,
4528 				peer_list_elem);
4529 		} else {
4530 			/* Ignoring the remove operation as peer not found */
4531 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_WARN,
4532 				"peer %pK not found in vdev (%pK)->peer_list:%pK",
4533 				peer, vdev, &peer->vdev->peer_list);
4534 		}
4535 
4536 		/* cleanup the peer data */
4537 		dp_peer_cleanup(vdev, peer);
4538 
4539 		/* check whether the parent vdev has no peers left */
4540 		if (TAILQ_EMPTY(&vdev->peer_list)) {
4541 			/*
4542 			 * Now that there are no references to the peer, we can
4543 			 * release the peer reference lock.
4544 			 */
4545 			qdf_spin_unlock_bh(&soc->peer_ref_mutex);
4546 			/*
4547 			 * Check if the parent vdev was waiting for its peers
4548 			 * to be deleted, in order for it to be deleted too.
4549 			 */
4550 			if (vdev->delete.pending) {
4551 				ol_txrx_vdev_delete_cb vdev_delete_cb =
4552 					vdev->delete.callback;
4553 				void *vdev_delete_context =
4554 					vdev->delete.context;
4555 
4556 				QDF_TRACE(QDF_MODULE_ID_DP,
4557 					QDF_TRACE_LEVEL_INFO_HIGH,
4558 					FL("deleting vdev object %pK (%pM)"
4559 					" - its last peer is done"),
4560 					vdev, vdev->mac_addr.raw);
4561 				/* all peers are gone, go ahead and delete it */
4562 				dp_tx_flow_pool_unmap_handler(pdev, vdev_id,
4563 								FLOW_TYPE_VDEV,
4564 								vdev_id);
4565 				dp_tx_vdev_detach(vdev);
4566 				QDF_TRACE(QDF_MODULE_ID_DP,
4567 					QDF_TRACE_LEVEL_INFO_HIGH,
4568 					FL("deleting vdev object %pK (%pM)"),
4569 					vdev, vdev->mac_addr.raw);
4570 
4571 				qdf_mem_free(vdev);
4572 				vdev = NULL;
4573 				if (vdev_delete_cb)
4574 					vdev_delete_cb(vdev_delete_context);
4575 			}
4576 		} else {
4577 			qdf_spin_unlock_bh(&soc->peer_ref_mutex);
4578 		}
4579 
4580 		if (vdev) {
4581 			if (vdev->vap_bss_peer == peer) {
4582 				vdev->vap_bss_peer = NULL;
4583 			}
4584 		}
4585 
4586 		if (soc->cdp_soc.ol_ops->peer_unref_delete) {
4587 			soc->cdp_soc.ol_ops->peer_unref_delete(pdev->ctrl_pdev,
4588 					vdev_id, peer->mac_addr.raw);
4589 		}
4590 
4591 		if (!vdev || !vdev->vap_bss_peer) {
4592 			goto free_peer;
4593 		}
4594 
4595 #ifdef notyet
4596 		qdf_mempool_free(soc->osdev, soc->mempool_ol_ath_peer, peer);
4597 #else
4598 		bss_peer = vdev->vap_bss_peer;
4599 		DP_UPDATE_STATS(vdev, peer);
4600 
4601 free_peer:
4602 		qdf_mem_free(peer);
4603 
4604 #endif
4605 	} else {
4606 		qdf_spin_unlock_bh(&soc->peer_ref_mutex);
4607 	}
4608 }
4609 
4610 /*
4611  * dp_peer_delete_wifi3() - Detach txrx peer
4612  * @peer_handle: Datapath peer handle
4613  * @bitmap: bitmap indicating special handling of request.
4614  *
4615  */
4616 static void dp_peer_delete_wifi3(void *peer_handle, uint32_t bitmap)
4617 {
4618 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
4619 
4620 	/* redirect the peer's rx delivery function to point to a
4621 	 * discard func
4622 	 */
4623 
4624 	peer->rx_opt_proc = dp_rx_discard;
4625 	peer->ctrl_peer = NULL;
4626 
4627 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
4628 		FL("peer %pK (%pM)"),  peer, peer->mac_addr.raw);
4629 
4630 	dp_local_peer_id_free(peer->vdev->pdev, peer);
4631 	qdf_spinlock_destroy(&peer->peer_info_lock);
4632 
4633 	/*
4634 	 * Remove the reference added during peer_attach.
4635 	 * The peer will still be left allocated until the
4636 	 * PEER_UNMAP message arrives to remove the other
4637 	 * reference, added by the PEER_MAP message.
4638 	 */
4639 	dp_peer_unref_delete(peer_handle);
4640 }
4641 
4642 /*
4643  * dp_get_vdev_mac_addr_wifi3() - Get vdev MAC address
4644  * @pvdev: Datapath VDEV handle
4645  * Return: pointer to the vdev MAC address
4646  */
4647 static uint8 *dp_get_vdev_mac_addr_wifi3(struct cdp_vdev *pvdev)
4648 {
4649 	struct dp_vdev *vdev = (struct dp_vdev *)pvdev;
4650 	return vdev->mac_addr.raw;
4651 }
4652 
4653 /*
4654  * dp_vdev_set_wds() - Enable/disable WDS on the vdev
4655  * @vdev_handle: DP VDEV handle
4656  * @val: WDS enable value
4657  *
4658  * Return: 0
4659  */
4660 static int dp_vdev_set_wds(void *vdev_handle, uint32_t val)
4661 {
4662 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
4663 
4664 	vdev->wds_enabled = val;
4665 	return 0;
4666 }
4667 
4668 /*
4669  * dp_get_vdev_from_vdev_id_wifi3() - Get vdev from pdev by vdev_id
4670  * @dev: Datapath PDEV handle
4671  * @vdev_id: VDEV Id
4672  */
4673 static struct cdp_vdev *dp_get_vdev_from_vdev_id_wifi3(struct cdp_pdev *dev,
4674 						uint8_t vdev_id)
4675 {
4676 	struct dp_pdev *pdev = (struct dp_pdev *)dev;
4677 	struct dp_vdev *vdev = NULL;
4678 
4679 	if (qdf_unlikely(!pdev))
4680 		return NULL;
4681 
4682 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
4683 	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
4684 		if (vdev->vdev_id == vdev_id)
4685 			break;
4686 	}
4687 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
4688 
4689 	return (struct cdp_vdev *)vdev;
4690 }
4691 
4692 static int dp_get_opmode(struct cdp_vdev *vdev_handle)
4693 {
4694 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
4695 
4696 	return vdev->opmode;
4697 }
4698 
4699 static struct cdp_cfg *dp_get_ctrl_pdev_from_vdev_wifi3(struct cdp_vdev *pvdev)
4700 {
4701 	struct dp_vdev *vdev = (struct dp_vdev *)pvdev;
4702 	struct dp_pdev *pdev = vdev->pdev;
4703 
4704 	return (struct cdp_cfg *)pdev->wlan_cfg_ctx;
4705 }
4706 
4707 /**
4708  * dp_reset_monitor_mode() - Disable monitor mode
4709  * @pdev_handle: Datapath PDEV handle
4710  *
4711  * Return: 0 on success, not 0 on failure
4712  */
4713 static int dp_reset_monitor_mode(struct cdp_pdev *pdev_handle)
4714 {
4715 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
4716 	struct htt_rx_ring_tlv_filter htt_tlv_filter;
4717 	struct dp_soc *soc = pdev->soc;
4718 	uint8_t pdev_id;
4719 	int mac_id;
4720 
4721 	pdev_id = pdev->pdev_id;
4722 	soc = pdev->soc;
4723 
4724 	qdf_spin_lock_bh(&pdev->mon_lock);
4725 
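	/*
	 * Program an all-zero TLV filter on the monitor buffer and status
	 * rings so that no frames are steered to the monitor destination.
	 */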
4726 	qdf_mem_set(&(htt_tlv_filter), sizeof(htt_tlv_filter), 0x0);
4727 
4728 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
4729 		int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, pdev_id);
4730 
4731 		htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
4732 			pdev->rxdma_mon_buf_ring[mac_id].hal_srng,
4733 			RXDMA_MONITOR_BUF, RX_BUFFER_SIZE, &htt_tlv_filter);
4734 
4735 		htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
4736 			pdev->rxdma_mon_status_ring[mac_id].hal_srng,
4737 			RXDMA_MONITOR_STATUS, RX_BUFFER_SIZE, &htt_tlv_filter);
4738 	}
4739 
4740 	pdev->monitor_vdev = NULL;
4741 
4742 	qdf_spin_unlock_bh(&pdev->mon_lock);
4743 
4744 	return 0;
4745 }
4746 
4747 /**
4748  * dp_set_nac() - set peer_nac
4749  * @peer_handle: Datapath PEER handle
4750  *
4751  * Return: void
4752  */
4753 static void dp_set_nac(struct cdp_peer *peer_handle)
4754 {
4755 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
4756 
4757 	peer->nac = 1;
4758 }
4759 
4760 /**
4761  * dp_get_tx_pending() - read pending tx
4762  * @pdev_handle: Datapath PDEV handle
4763  *
4764  * Return: outstanding tx
4765  */
4766 static int dp_get_tx_pending(struct cdp_pdev *pdev_handle)
4767 {
4768 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
4769 
4770 	return qdf_atomic_read(&pdev->num_tx_outstanding);
4771 }
4772 
4773 /**
4774  * dp_get_peer_mac_from_peer_id() - get peer mac
4775  * @pdev_handle: Datapath PDEV handle
4776  * @peer_id: Peer ID
4777  * @peer_mac: MAC addr of PEER
4778  *
4779  * Return: void
4780  */
4781 static void dp_get_peer_mac_from_peer_id(struct cdp_pdev *pdev_handle,
4782 	uint32_t peer_id, uint8_t *peer_mac)
4783 {
4784 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
4785 	struct dp_peer *peer;
4786 
4787 	if (pdev && peer_mac) {
4788 		peer = dp_peer_find_by_id(pdev->soc, (uint16_t)peer_id);
4789 		if (peer && peer->mac_addr.raw) {
4790 			qdf_mem_copy(peer_mac, peer->mac_addr.raw,
4791 					DP_MAC_ADDR_LEN);
4792 		}
4793 	}
4794 }
4795 
4796 /**
4797  * dp_vdev_set_monitor_mode() - Set DP VDEV to monitor mode
4798  * @vdev_handle: Datapath VDEV handle
4799  * @smart_monitor: Flag to denote if it is smart monitor mode
4800  *
4801  * Return: 0 on success, not 0 on failure
4802  */
4803 static int dp_vdev_set_monitor_mode(struct cdp_vdev *vdev_handle,
4804 		uint8_t smart_monitor)
4805 {
4806 	/* Many monitor VAPs can exist in a system but only one can be up at
4807 	 * any time
4808 	 */
4809 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
4810 	struct dp_pdev *pdev;
4811 	struct htt_rx_ring_tlv_filter htt_tlv_filter;
4812 	struct dp_soc *soc;
4813 	uint8_t pdev_id;
4814 	int mac_id;
4815 
4816 	qdf_assert(vdev);
4817 
4818 	pdev = vdev->pdev;
4819 	pdev_id = pdev->pdev_id;
4820 	soc = pdev->soc;
4821 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_WARN,
4822 		"pdev=%pK, pdev_id=%d, soc=%pK vdev=%pK",
4823 		pdev, pdev_id, soc, vdev);
4824 
4825 	/* Check if current pdev's monitor_vdev exists */
4826 	if (pdev->monitor_vdev) {
4827 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
4828 			"vdev=%pK", vdev);
4829 		qdf_assert(vdev);
4830 	}
4831 
4832 	pdev->monitor_vdev = vdev;
4833 
4834 	/* If smart monitor mode, do not configure monitor ring */
4835 	if (smart_monitor)
4836 		return QDF_STATUS_SUCCESS;
4837 
4838 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
4839 		"MODE[%x] FP[%02x|%02x|%02x] MO[%02x|%02x|%02x]",
4840 		pdev->mon_filter_mode, pdev->fp_mgmt_filter,
4841 		pdev->fp_ctrl_filter, pdev->fp_data_filter,
4842 		pdev->mo_mgmt_filter, pdev->mo_ctrl_filter,
4843 		pdev->mo_data_filter);
4844 
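	/*
	 * Configure the monitor buffer ring to deliver per-packet TLVs
	 * (mpdu/msdu start and end, packet header, attention), filtered
	 * according to the pdev FP/MO filter settings.
	 */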
4845 	qdf_mem_set(&(htt_tlv_filter), sizeof(htt_tlv_filter), 0x0);
4846 
4847 	htt_tlv_filter.mpdu_start = 1;
4848 	htt_tlv_filter.msdu_start = 1;
4849 	htt_tlv_filter.packet = 1;
4850 	htt_tlv_filter.msdu_end = 1;
4851 	htt_tlv_filter.mpdu_end = 1;
4852 	htt_tlv_filter.packet_header = 1;
4853 	htt_tlv_filter.attention = 1;
4854 	htt_tlv_filter.ppdu_start = 0;
4855 	htt_tlv_filter.ppdu_end = 0;
4856 	htt_tlv_filter.ppdu_end_user_stats = 0;
4857 	htt_tlv_filter.ppdu_end_user_stats_ext = 0;
4858 	htt_tlv_filter.ppdu_end_status_done = 0;
4859 	htt_tlv_filter.header_per_msdu = 1;
4860 	htt_tlv_filter.enable_fp =
4861 		(pdev->mon_filter_mode & MON_FILTER_PASS) ? 1 : 0;
4862 	htt_tlv_filter.enable_md = 0;
4863 	htt_tlv_filter.enable_mo =
4864 		(pdev->mon_filter_mode & MON_FILTER_OTHER) ? 1 : 0;
4865 	htt_tlv_filter.fp_mgmt_filter = pdev->fp_mgmt_filter;
4866 	htt_tlv_filter.fp_ctrl_filter = pdev->fp_ctrl_filter;
4867 	htt_tlv_filter.fp_data_filter = pdev->fp_data_filter;
4868 	htt_tlv_filter.mo_mgmt_filter = pdev->mo_mgmt_filter;
4869 	htt_tlv_filter.mo_ctrl_filter = pdev->mo_ctrl_filter;
4870 	htt_tlv_filter.mo_data_filter = pdev->mo_data_filter;
4871 
4872 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
4873 		int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, pdev_id);
4874 
4875 		htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
4876 			pdev->rxdma_mon_buf_ring[mac_id].hal_srng,
4877 			RXDMA_MONITOR_BUF, RX_BUFFER_SIZE, &htt_tlv_filter);
4878 	}
4879 
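	/*
	 * Configure the monitor status ring for PPDU-level TLVs only
	 * (ppdu start/end and user stats), with catch-all mgmt/ctrl/data
	 * filters.
	 */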
4880 	qdf_mem_set(&(htt_tlv_filter), sizeof(htt_tlv_filter), 0x0);
4881 
4882 	htt_tlv_filter.mpdu_start = 1;
4883 	htt_tlv_filter.msdu_start = 0;
4884 	htt_tlv_filter.packet = 0;
4885 	htt_tlv_filter.msdu_end = 0;
4886 	htt_tlv_filter.mpdu_end = 0;
4887 	htt_tlv_filter.attention = 0;
4888 	htt_tlv_filter.ppdu_start = 1;
4889 	htt_tlv_filter.ppdu_end = 1;
4890 	htt_tlv_filter.ppdu_end_user_stats = 1;
4891 	htt_tlv_filter.ppdu_end_user_stats_ext = 1;
4892 	htt_tlv_filter.ppdu_end_status_done = 1;
4893 	htt_tlv_filter.enable_fp = 1;
4894 	htt_tlv_filter.enable_md = 0;
4895 	htt_tlv_filter.enable_mo = 1;
4896 	if (pdev->mcopy_mode) {
4897 		htt_tlv_filter.packet_header = 1;
4898 	}
4899 	htt_tlv_filter.fp_mgmt_filter = FILTER_MGMT_ALL;
4900 	htt_tlv_filter.fp_ctrl_filter = FILTER_CTRL_ALL;
4901 	htt_tlv_filter.fp_data_filter = FILTER_DATA_ALL;
4902 	htt_tlv_filter.mo_mgmt_filter = FILTER_MGMT_ALL;
4903 	htt_tlv_filter.mo_ctrl_filter = FILTER_CTRL_ALL;
4904 	htt_tlv_filter.mo_data_filter = FILTER_DATA_ALL;
4905 
4906 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
4907 		int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id,
4908 						pdev->pdev_id);
4909 
4910 		htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
4911 			pdev->rxdma_mon_status_ring[mac_id].hal_srng,
4912 			RXDMA_MONITOR_STATUS, RX_BUFFER_SIZE, &htt_tlv_filter);
4913 	}
4914 
4915 	return QDF_STATUS_SUCCESS;
4916 }
4917 
4918 /**
4919  * dp_pdev_set_advance_monitor_filter() - Set DP PDEV monitor filter
4920  * @pdev_handle: Datapath PDEV handle
4921  * @filter_val: Flag to select Filter for monitor mode
4922  * Return: 0 on success, not 0 on failure
4923  */
4924 static int dp_pdev_set_advance_monitor_filter(struct cdp_pdev *pdev_handle,
4925 	struct cdp_monitor_filter *filter_val)
4926 {
4927 	/* Many monitor VAPs can exist in a system but only one can be up at
4928 	 * any time
4929 	 */
4930 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
4931 	struct dp_vdev *vdev = pdev->monitor_vdev;
4932 	struct htt_rx_ring_tlv_filter htt_tlv_filter;
4933 	struct dp_soc *soc;
4934 	uint8_t pdev_id;
4935 	int mac_id;
4936 
4937 	pdev_id = pdev->pdev_id;
4938 	soc = pdev->soc;
4939 
4940 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_WARN,
4941 		"pdev=%pK, pdev_id=%d, soc=%pK vdev=%pK",
4942 		pdev, pdev_id, soc, vdev);
4943 
4944 	/* Check if current pdev's monitor_vdev exists */
4945 	if (!pdev->monitor_vdev) {
4946 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
4947 			"vdev=%pK", vdev);
4948 		qdf_assert(vdev);
4949 	}
4950 
4951 	/* update filter mode, type in pdev structure */
4952 	pdev->mon_filter_mode = filter_val->mode;
4953 	pdev->fp_mgmt_filter = filter_val->fp_mgmt;
4954 	pdev->fp_ctrl_filter = filter_val->fp_ctrl;
4955 	pdev->fp_data_filter = filter_val->fp_data;
4956 	pdev->mo_mgmt_filter = filter_val->mo_mgmt;
4957 	pdev->mo_ctrl_filter = filter_val->mo_ctrl;
4958 	pdev->mo_data_filter = filter_val->mo_data;
4959 
4960 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
4961 		"MODE[%x] FP[%02x|%02x|%02x] MO[%02x|%02x|%02x]",
4962 		pdev->mon_filter_mode, pdev->fp_mgmt_filter,
4963 		pdev->fp_ctrl_filter, pdev->fp_data_filter,
4964 		pdev->mo_mgmt_filter, pdev->mo_ctrl_filter,
4965 		pdev->mo_data_filter);
4966 
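	/*
	 * First clear the existing filter on both the monitor buffer and
	 * status rings before re-programming them with the updated filter.
	 */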
4967 	qdf_mem_set(&(htt_tlv_filter), sizeof(htt_tlv_filter), 0x0);
4968 
4969 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
4970 		int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, pdev_id);
4971 
4972 		htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
4973 			pdev->rxdma_mon_buf_ring[mac_id].hal_srng,
4974 			RXDMA_MONITOR_BUF, RX_BUFFER_SIZE, &htt_tlv_filter);
4975 
4976 		htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
4977 			pdev->rxdma_mon_status_ring[mac_id].hal_srng,
4978 			RXDMA_MONITOR_STATUS, RX_BUFFER_SIZE, &htt_tlv_filter);
4979 	}
4980 
4981 	htt_tlv_filter.mpdu_start = 1;
4982 	htt_tlv_filter.msdu_start = 1;
4983 	htt_tlv_filter.packet = 1;
4984 	htt_tlv_filter.msdu_end = 1;
4985 	htt_tlv_filter.mpdu_end = 1;
4986 	htt_tlv_filter.packet_header = 1;
4987 	htt_tlv_filter.attention = 1;
4988 	htt_tlv_filter.ppdu_start = 0;
4989 	htt_tlv_filter.ppdu_end = 0;
4990 	htt_tlv_filter.ppdu_end_user_stats = 0;
4991 	htt_tlv_filter.ppdu_end_user_stats_ext = 0;
4992 	htt_tlv_filter.ppdu_end_status_done = 0;
4993 	htt_tlv_filter.header_per_msdu = 1;
4994 	htt_tlv_filter.enable_fp =
4995 		(pdev->mon_filter_mode & MON_FILTER_PASS) ? 1 : 0;
4996 	htt_tlv_filter.enable_md = 0;
4997 	htt_tlv_filter.enable_mo =
4998 		(pdev->mon_filter_mode & MON_FILTER_OTHER) ? 1 : 0;
4999 	htt_tlv_filter.fp_mgmt_filter = pdev->fp_mgmt_filter;
5000 	htt_tlv_filter.fp_ctrl_filter = pdev->fp_ctrl_filter;
5001 	htt_tlv_filter.fp_data_filter = pdev->fp_data_filter;
5002 	htt_tlv_filter.mo_mgmt_filter = pdev->mo_mgmt_filter;
5003 	htt_tlv_filter.mo_ctrl_filter = pdev->mo_ctrl_filter;
5004 	htt_tlv_filter.mo_data_filter = pdev->mo_data_filter;
5005 
5006 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
5007 		int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, pdev_id);
5008 
5009 		htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
5010 			pdev->rxdma_mon_buf_ring[mac_id].hal_srng,
5011 			RXDMA_MONITOR_BUF, RX_BUFFER_SIZE, &htt_tlv_filter);
5012 	}
5013 
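	/*
	 * Re-program the monitor status ring for PPDU-level TLVs with
	 * catch-all mgmt/ctrl/data filters.
	 */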
5014 	qdf_mem_set(&(htt_tlv_filter), sizeof(htt_tlv_filter), 0x0);
5015 
5016 	htt_tlv_filter.mpdu_start = 1;
5017 	htt_tlv_filter.msdu_start = 0;
5018 	htt_tlv_filter.packet = 0;
5019 	htt_tlv_filter.msdu_end = 0;
5020 	htt_tlv_filter.mpdu_end = 0;
5021 	htt_tlv_filter.attention = 0;
5022 	htt_tlv_filter.ppdu_start = 1;
5023 	htt_tlv_filter.ppdu_end = 1;
5024 	htt_tlv_filter.ppdu_end_user_stats = 1;
5025 	htt_tlv_filter.ppdu_end_user_stats_ext = 1;
5026 	htt_tlv_filter.ppdu_end_status_done = 1;
5027 	htt_tlv_filter.enable_fp = 1;
5028 	htt_tlv_filter.enable_md = 0;
5029 	htt_tlv_filter.enable_mo = 1;
5030 	if (pdev->mcopy_mode) {
5031 		htt_tlv_filter.packet_header = 1;
5032 	}
5033 	htt_tlv_filter.fp_mgmt_filter = FILTER_MGMT_ALL;
5034 	htt_tlv_filter.fp_ctrl_filter = FILTER_CTRL_ALL;
5035 	htt_tlv_filter.fp_data_filter = FILTER_DATA_ALL;
5036 	htt_tlv_filter.mo_mgmt_filter = FILTER_MGMT_ALL;
5037 	htt_tlv_filter.mo_ctrl_filter = FILTER_CTRL_ALL;
5038 	htt_tlv_filter.mo_data_filter = FILTER_DATA_ALL;
5039 
5040 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
5041 		int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id,
5042 						pdev->pdev_id);
5043 
5044 		htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
5045 			pdev->rxdma_mon_status_ring[mac_id].hal_srng,
5046 			RXDMA_MONITOR_STATUS, RX_BUFFER_SIZE, &htt_tlv_filter);
5047 	}
5048 
5049 	return QDF_STATUS_SUCCESS;
5050 }
5051 
5052 /**
5053  * dp_get_pdev_id_frm_pdev() - get pdev_id
5054  * @pdev_handle: Datapath PDEV handle
5055  *
5056  * Return: pdev_id
5057  */
5058 static
5059 uint8_t dp_get_pdev_id_frm_pdev(struct cdp_pdev *pdev_handle)
5060 {
5061 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
5062 
5063 	return pdev->pdev_id;
5064 }
5065 
5066 /**
5067  * dp_pdev_set_chan_noise_floor() - set channel noise floor
5068  * @pdev_handle: Datapath PDEV handle
5069  * @chan_noise_floor: Channel Noise Floor
5070  *
5071  * Return: void
5072  */
5073 static
5074 void dp_pdev_set_chan_noise_floor(struct cdp_pdev *pdev_handle,
5075 				  int16_t chan_noise_floor)
5076 {
5077 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
5078 
5079 	pdev->chan_noise_floor = chan_noise_floor;
5080 }
5081 
5082 /**
5083  * dp_vdev_get_filter_ucast_data() - get DP VDEV monitor ucast filter
5084  * @vdev_handle: Datapath VDEV handle
5085  * Return: true on ucast filter flag set
5086  */
5087 static bool dp_vdev_get_filter_ucast_data(struct cdp_vdev *vdev_handle)
5088 {
5089 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
5090 	struct dp_pdev *pdev;
5091 
5092 	pdev = vdev->pdev;
5093 
5094 	if ((pdev->fp_data_filter & FILTER_DATA_UCAST) ||
5095 	    (pdev->mo_data_filter & FILTER_DATA_UCAST))
5096 		return true;
5097 
5098 	return false;
5099 }
5100 
5101 /**
5102  * dp_vdev_get_filter_mcast_data() - get DP VDEV monitor mcast filter
5103  * @vdev_handle: Datapath VDEV handle
5104  * Return: true on mcast filter flag set
5105  */
5106 static bool dp_vdev_get_filter_mcast_data(struct cdp_vdev *vdev_handle)
5107 {
5108 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
5109 	struct dp_pdev *pdev;
5110 
5111 	pdev = vdev->pdev;
5112 
5113 	if ((pdev->fp_data_filter & FILTER_DATA_MCAST) ||
5114 	    (pdev->mo_data_filter & FILTER_DATA_MCAST))
5115 		return true;
5116 
5117 	return false;
5118 }
5119 
5120 /**
5121  * dp_vdev_get_filter_non_data() - get DP VDEV monitor non_data filter
5122  * @vdev_handle: Datapath VDEV handle
5123  * Return: true on non data filter flag set
5124  */
5125 static bool dp_vdev_get_filter_non_data(struct cdp_vdev *vdev_handle)
5126 {
5127 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
5128 	struct dp_pdev *pdev;
5129 
5130 	pdev = vdev->pdev;
5131 
5132 	if ((pdev->fp_mgmt_filter & FILTER_MGMT_ALL) ||
5133 	    (pdev->mo_mgmt_filter & FILTER_MGMT_ALL)) {
5134 		if ((pdev->fp_ctrl_filter & FILTER_CTRL_ALL) ||
5135 		    (pdev->mo_ctrl_filter & FILTER_CTRL_ALL)) {
5136 			return true;
5137 		}
5138 	}
5139 
5140 	return false;
5141 }
5142 
5143 #ifdef MESH_MODE_SUPPORT
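/*
 * dp_peer_set_mesh_mode() - flag the vdev as a mesh vdev
 * @vdev_hdl: virtual device object
 * @val: value to be set
 *
 * Return: void
 */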
5144 void dp_peer_set_mesh_mode(struct cdp_vdev *vdev_hdl, uint32_t val)
5145 {
5146 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_hdl;
5147 
5148 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
5149 		FL("val %d"), val);
5150 	vdev->mesh_vdev = val;
5151 }
5152 
5153 /*
5154  * dp_peer_set_mesh_rx_filter() - to set the mesh rx filter
5155  * @vdev_hdl: virtual device object
5156  * @val: value to be set
5157  *
5158  * Return: void
5159  */
5160 void dp_peer_set_mesh_rx_filter(struct cdp_vdev *vdev_hdl, uint32_t val)
5161 {
5162 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_hdl;
5163 
5164 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
5165 		FL("val %d"), val);
5166 	vdev->mesh_rx_filter = val;
5167 }
5168 #endif
5169 
5170 /*
5171  * dp_aggregate_pdev_ctrl_frames_stats() - function to aggregate peer stats
5172  * Current scope is bar received count
5173  *
5174  * @pdev_handle: DP_PDEV handle
5175  *
5176  * Return: void
5177  */
5178 #define STATS_PROC_TIMEOUT        (HZ/1000)
5179 
5180 static void
5181 dp_aggregate_pdev_ctrl_frames_stats(struct dp_pdev *pdev)
5182 {
5183 	struct dp_vdev *vdev;
5184 	struct dp_peer *peer;
5185 	uint32_t waitcnt;
5186 
5187 	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
5188 		TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
5189 			if (!peer) {
5190 				QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
5191 					FL("DP Invalid Peer reference"));
5192 				return;
5193 			}
5194 
5195 			if (peer->delete_in_progress) {
5196 				QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
5197 					FL("DP Peer deletion in progress"));
5198 				continue;
5199 			}
5200 
5201 			qdf_atomic_inc(&peer->ref_cnt);
5202 			waitcnt = 0;
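			/*
			 * Post a REO queue stats command for this peer and
			 * wait (bounded) for dp_rx_bar_stats_cb to signal
			 * completion via stats_cmd_complete.
			 */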
5203 			dp_peer_rxtid_stats(peer, dp_rx_bar_stats_cb, pdev);
5204 			while (!(qdf_atomic_read(&(pdev->stats_cmd_complete)))
5205 				&& waitcnt < 10) {
5206 				schedule_timeout_interruptible(
5207 						STATS_PROC_TIMEOUT);
5208 				waitcnt++;
5209 			}
5210 			qdf_atomic_set(&(pdev->stats_cmd_complete), 0);
5211 			dp_peer_unref_delete(peer);
5212 		}
5213 	}
5214 }
5215 
5216 /**
5217  * dp_rx_bar_stats_cb(): BAR received stats callback
5218  * @soc: SOC handle
5219  * @cb_ctxt: Call back context
5220  * @reo_status: Reo status
5221  *
5222  * return: void
5223  */
5224 void dp_rx_bar_stats_cb(struct dp_soc *soc, void *cb_ctxt,
5225 	union hal_reo_status *reo_status)
5226 {
5227 	struct dp_pdev *pdev = (struct dp_pdev *)cb_ctxt;
5228 	struct hal_reo_queue_status *queue_status = &(reo_status->queue_status);
5229 
5230 	if (queue_status->header.status != HAL_REO_CMD_SUCCESS) {
5231 		DP_TRACE_STATS(FATAL, "REO stats failure %d \n",
5232 			queue_status->header.status);
5233 		qdf_atomic_set(&(pdev->stats_cmd_complete), 1);
5234 		return;
5235 	}
5236 
5237 	pdev->stats.rx.bar_recv_cnt += queue_status->bar_rcvd_cnt;
5238 	qdf_atomic_set(&(pdev->stats_cmd_complete), 1);
5239 
5240 }
5241 
5242 /**
5243  * dp_aggregate_vdev_stats(): Consolidate stats at VDEV level
5244  * @vdev: DP VDEV handle
5245  * @vdev_stats: buffer to hold the consolidated vdev stats
5246  * return: void
5247  */
5248 void dp_aggregate_vdev_stats(struct dp_vdev *vdev,
5249 			     struct cdp_vdev_stats *vdev_stats)
5250 {
5251 	struct dp_peer *peer = NULL;
5252 	struct dp_soc *soc = vdev->pdev->soc;
5253 
5254 	qdf_mem_copy(vdev_stats, &vdev->stats, sizeof(vdev->stats));
5255 
5256 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem)
5257 		dp_update_vdev_stats(vdev_stats, peer);
5258 
5259 	if (soc->cdp_soc.ol_ops->update_dp_stats)
5260 		soc->cdp_soc.ol_ops->update_dp_stats(vdev->pdev->ctrl_pdev,
5261 			&vdev->stats, (uint16_t) vdev->vdev_id,
5262 			UPDATE_VDEV_STATS);
5263 
5264 }
5265 
5266 /**
5267  * dp_aggregate_pdev_stats(): Consolidate stats at PDEV level
5268  * @pdev: DP PDEV handle
5269  *
5270  * return: void
5271  */
5272 static inline void dp_aggregate_pdev_stats(struct dp_pdev *pdev)
5273 {
5274 	struct dp_vdev *vdev = NULL;
5275 	struct dp_soc *soc = pdev->soc;
5276 	struct cdp_vdev_stats *vdev_stats =
5277 			qdf_mem_malloc(sizeof(struct cdp_vdev_stats));
5278 
5279 	if (!vdev_stats) {
5280 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
5281 			  "DP alloc failure - unable to get alloc vdev stats");
5282 		return;
5283 	}
5284 
5285 	qdf_mem_set(&(pdev->stats.tx), sizeof(pdev->stats.tx), 0x0);
5286 	qdf_mem_set(&(pdev->stats.rx), sizeof(pdev->stats.rx), 0x0);
5287 	qdf_mem_set(&(pdev->stats.tx_i), sizeof(pdev->stats.tx_i), 0x0);
5288 
5289 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
5290 	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
5291 
5292 		dp_aggregate_vdev_stats(vdev, vdev_stats);
5293 		dp_update_pdev_stats(pdev, vdev_stats);
5294 
5295 		DP_STATS_AGGR_PKT(pdev, vdev, tx_i.nawds_mcast);
5296 
5297 		DP_STATS_AGGR_PKT(pdev, vdev, tx_i.rcvd);
5298 		DP_STATS_AGGR_PKT(pdev, vdev, tx_i.processed);
5299 		DP_STATS_AGGR_PKT(pdev, vdev, tx_i.reinject_pkts);
5300 		DP_STATS_AGGR_PKT(pdev, vdev, tx_i.inspect_pkts);
5301 		DP_STATS_AGGR_PKT(pdev, vdev, tx_i.raw.raw_pkt);
5302 		DP_STATS_AGGR(pdev, vdev, tx_i.raw.dma_map_error);
5303 		DP_STATS_AGGR_PKT(pdev, vdev, tx_i.tso.tso_pkt);
5304 		DP_STATS_AGGR(pdev, vdev, tx_i.tso.dropped_host.num);
5305 		DP_STATS_AGGR(pdev, vdev, tx_i.tso.dropped_target);
5306 		DP_STATS_AGGR(pdev, vdev, tx_i.sg.dropped_host.num);
5307 		DP_STATS_AGGR(pdev, vdev, tx_i.sg.dropped_target);
5308 		DP_STATS_AGGR_PKT(pdev, vdev, tx_i.sg.sg_pkt);
5309 		DP_STATS_AGGR_PKT(pdev, vdev, tx_i.mcast_en.mcast_pkt);
5310 		DP_STATS_AGGR(pdev, vdev,
5311 				tx_i.mcast_en.dropped_map_error);
5312 		DP_STATS_AGGR(pdev, vdev,
5313 				tx_i.mcast_en.dropped_self_mac);
5314 		DP_STATS_AGGR(pdev, vdev,
5315 				tx_i.mcast_en.dropped_send_fail);
5316 		DP_STATS_AGGR(pdev, vdev, tx_i.mcast_en.ucast);
5317 		DP_STATS_AGGR(pdev, vdev, tx_i.dropped.dma_error);
5318 		DP_STATS_AGGR(pdev, vdev, tx_i.dropped.ring_full);
5319 		DP_STATS_AGGR(pdev, vdev, tx_i.dropped.enqueue_fail);
5320 		DP_STATS_AGGR(pdev, vdev, tx_i.dropped.desc_na.num);
5321 		DP_STATS_AGGR(pdev, vdev, tx_i.dropped.res_full);
5322 		DP_STATS_AGGR(pdev, vdev, tx_i.cce_classified);
5323 		DP_STATS_AGGR(pdev, vdev, tx_i.cce_classified_raw);
5324 		DP_STATS_AGGR(pdev, vdev, tx_i.mesh.exception_fw);
5325 		DP_STATS_AGGR(pdev, vdev, tx_i.mesh.completion_fw);
5326 
5327 		pdev->stats.tx_i.dropped.dropped_pkt.num =
5328 			pdev->stats.tx_i.dropped.dma_error +
5329 			pdev->stats.tx_i.dropped.ring_full +
5330 			pdev->stats.tx_i.dropped.enqueue_fail +
5331 			pdev->stats.tx_i.dropped.desc_na.num +
5332 			pdev->stats.tx_i.dropped.res_full;
5333 
5334 		pdev->stats.tx.last_ack_rssi =
5335 			vdev->stats.tx.last_ack_rssi;
5336 		pdev->stats.tx_i.tso.num_seg =
5337 			vdev->stats.tx_i.tso.num_seg;
5338 	}
5339 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
5340 	qdf_mem_free(vdev_stats);
5341 
5342 	if (soc->cdp_soc.ol_ops->update_dp_stats)
5343 		soc->cdp_soc.ol_ops->update_dp_stats(pdev->ctrl_pdev,
5344 				&pdev->stats, pdev->pdev_id, UPDATE_PDEV_STATS);
5345 
5346 }
5347 
5348 /**
5349  * dp_vdev_getstats() - get vdev packet level stats
5350  * @vdev_handle: Datapath VDEV handle
5351  * @stats: cdp network device stats structure
5352  *
5353  * Return: void
5354  */
5355 static void dp_vdev_getstats(void *vdev_handle,
5356 		struct cdp_dev_stats *stats)
5357 {
5358 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
5359 	struct cdp_vdev_stats *vdev_stats =
5360 			qdf_mem_malloc(sizeof(struct cdp_vdev_stats));
5361 
5362 	if (!vdev_stats) {
5363 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
5364 			  "DP alloc failure - unable to allocate vdev stats");
5365 		return;
5366 	}
5367 
5368 	dp_aggregate_vdev_stats(vdev, vdev_stats);
5369 
5370 	stats->tx_packets = vdev_stats->tx_i.rcvd.num;
5371 	stats->tx_bytes = vdev_stats->tx_i.rcvd.bytes;
5372 
5373 	stats->tx_errors = vdev_stats->tx.tx_failed +
5374 		vdev_stats->tx_i.dropped.dropped_pkt.num;
5375 	stats->tx_dropped = stats->tx_errors;
5376 
5377 	stats->rx_packets = vdev_stats->rx.unicast.num +
5378 		vdev_stats->rx.multicast.num +
5379 		vdev_stats->rx.bcast.num;
5380 	stats->rx_bytes = vdev_stats->rx.unicast.bytes +
5381 		vdev_stats->rx.multicast.bytes +
5382 		vdev_stats->rx.bcast.bytes;
5383 
5384 }
5385 
5386 
5387 /**
5388  * dp_pdev_getstats() - get pdev packet level stats
5389  * @pdev_handle: Datapath PDEV handle
5390  * @stats: cdp network device stats structure
5391  *
5392  * Return: void
5393  */
5394 static void dp_pdev_getstats(void *pdev_handle,
5395 		struct cdp_dev_stats *stats)
5396 {
5397 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
5398 
5399 	dp_aggregate_pdev_stats(pdev);
5400 
5401 	stats->tx_packets = pdev->stats.tx_i.rcvd.num;
5402 	stats->tx_bytes = pdev->stats.tx_i.rcvd.bytes;
5403 
5404 	stats->tx_errors = pdev->stats.tx.tx_failed +
5405 		pdev->stats.tx_i.dropped.dropped_pkt.num;
5406 	stats->tx_dropped = stats->tx_errors;
5407 
5408 	stats->rx_packets = pdev->stats.rx.unicast.num +
5409 		pdev->stats.rx.multicast.num +
5410 		pdev->stats.rx.bcast.num;
5411 	stats->rx_bytes = pdev->stats.rx.unicast.bytes +
5412 		pdev->stats.rx.multicast.bytes +
5413 		pdev->stats.rx.bcast.bytes;
5414 }
5415 
5416 /**
5417  * dp_get_device_stats() - get interface level packet stats
5418  * @handle: device handle
5419  * @stats: cdp network device stats structure
5420  * @type: device type pdev/vdev
5421  *
5422  * Return: void
5423  */
5424 static void dp_get_device_stats(void *handle,
5425 		struct cdp_dev_stats *stats, uint8_t type)
5426 {
5427 	switch (type) {
5428 	case UPDATE_VDEV_STATS:
5429 		dp_vdev_getstats(handle, stats);
5430 		break;
5431 	case UPDATE_PDEV_STATS:
5432 		dp_pdev_getstats(handle, stats);
5433 		break;
5434 	default:
5435 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
5436 			"apstats cannot be updated for this input "
5437 			"type %d", type);
5438 		break;
5439 	}
5440 
5441 }
5442 
5443 
5444 /**
5445  * dp_print_pdev_tx_stats(): Print Pdev level TX stats
5446  * @pdev: DP_PDEV Handle
5447  *
5448  * Return:void
5449  */
5450 static inline void
5451 dp_print_pdev_tx_stats(struct dp_pdev *pdev)
5452 {
5453 	uint8_t index = 0;
5454 	DP_PRINT_STATS("PDEV Tx Stats:\n");
5455 	DP_PRINT_STATS("Received From Stack:");
5456 	DP_PRINT_STATS("	Packets = %d",
5457 			pdev->stats.tx_i.rcvd.num);
5458 	DP_PRINT_STATS("	Bytes = %llu",
5459 			pdev->stats.tx_i.rcvd.bytes);
5460 	DP_PRINT_STATS("Processed:");
5461 	DP_PRINT_STATS("	Packets = %d",
5462 			pdev->stats.tx_i.processed.num);
5463 	DP_PRINT_STATS("	Bytes = %llu",
5464 			pdev->stats.tx_i.processed.bytes);
5465 	DP_PRINT_STATS("Total Completions:");
5466 	DP_PRINT_STATS("	Packets = %u",
5467 			pdev->stats.tx.comp_pkt.num);
5468 	DP_PRINT_STATS("	Bytes = %llu",
5469 			pdev->stats.tx.comp_pkt.bytes);
5470 	DP_PRINT_STATS("Successful Completions:");
5471 	DP_PRINT_STATS("	Packets = %u",
5472 			pdev->stats.tx.tx_success.num);
5473 	DP_PRINT_STATS("	Bytes = %llu",
5474 			pdev->stats.tx.tx_success.bytes);
5475 	DP_PRINT_STATS("Dropped:");
5476 	DP_PRINT_STATS("	Total = %d",
5477 			pdev->stats.tx_i.dropped.dropped_pkt.num);
5478 	DP_PRINT_STATS("	Dma_map_error = %d",
5479 			pdev->stats.tx_i.dropped.dma_error);
5480 	DP_PRINT_STATS("	Ring Full = %d",
5481 			pdev->stats.tx_i.dropped.ring_full);
5482 	DP_PRINT_STATS("	Descriptor Not available = %d",
5483 			pdev->stats.tx_i.dropped.desc_na.num);
5484 	DP_PRINT_STATS("	HW enqueue failed = %d",
5485 			pdev->stats.tx_i.dropped.enqueue_fail);
5486 	DP_PRINT_STATS("	Resources Full = %d",
5487 			pdev->stats.tx_i.dropped.res_full);
5488 	DP_PRINT_STATS("	FW removed = %d",
5489 			pdev->stats.tx.dropped.fw_rem);
5490 	DP_PRINT_STATS("	FW removed transmitted = %d",
5491 			pdev->stats.tx.dropped.fw_rem_tx);
5492 	DP_PRINT_STATS("	FW removed untransmitted = %d",
5493 			pdev->stats.tx.dropped.fw_rem_notx);
5494 	DP_PRINT_STATS("	FW removed untransmitted fw_reason1 = %d",
5495 			pdev->stats.tx.dropped.fw_reason1);
5496 	DP_PRINT_STATS("	FW removed untransmitted fw_reason2 = %d",
5497 			pdev->stats.tx.dropped.fw_reason2);
5498 	DP_PRINT_STATS("	FW removed untransmitted fw_reason3 = %d",
5499 			pdev->stats.tx.dropped.fw_reason3);
5500 	DP_PRINT_STATS("	Aged Out from msdu/mpdu queues = %d",
5501 			pdev->stats.tx.dropped.age_out);
5502 	DP_PRINT_STATS("	Multicast:");
5503 	DP_PRINT_STATS("	Packets: %u",
5504 		       pdev->stats.tx.mcast.num);
5505 	DP_PRINT_STATS("	Bytes: %llu",
5506 		       pdev->stats.tx.mcast.bytes);
5507 	DP_PRINT_STATS("Scatter Gather:");
5508 	DP_PRINT_STATS("	Packets = %d",
5509 			pdev->stats.tx_i.sg.sg_pkt.num);
5510 	DP_PRINT_STATS("	Bytes = %llu",
5511 			pdev->stats.tx_i.sg.sg_pkt.bytes);
5512 	DP_PRINT_STATS("	Dropped By Host = %d",
5513 			pdev->stats.tx_i.sg.dropped_host.num);
5514 	DP_PRINT_STATS("	Dropped By Target = %d",
5515 			pdev->stats.tx_i.sg.dropped_target);
5516 	DP_PRINT_STATS("TSO:");
5517 	DP_PRINT_STATS("	Number of Segments = %d",
5518 			pdev->stats.tx_i.tso.num_seg);
5519 	DP_PRINT_STATS("	Packets = %d",
5520 			pdev->stats.tx_i.tso.tso_pkt.num);
5521 	DP_PRINT_STATS("	Bytes = %llu",
5522 			pdev->stats.tx_i.tso.tso_pkt.bytes);
5523 	DP_PRINT_STATS("	Dropped By Host = %d",
5524 			pdev->stats.tx_i.tso.dropped_host.num);
5525 	DP_PRINT_STATS("Mcast Enhancement:");
5526 	DP_PRINT_STATS("	Packets = %d",
5527 			pdev->stats.tx_i.mcast_en.mcast_pkt.num);
5528 	DP_PRINT_STATS("	Bytes = %llu",
5529 			pdev->stats.tx_i.mcast_en.mcast_pkt.bytes);
5530 	DP_PRINT_STATS("	Dropped: Map Errors = %d",
5531 			pdev->stats.tx_i.mcast_en.dropped_map_error);
5532 	DP_PRINT_STATS("	Dropped: Self Mac = %d",
5533 			pdev->stats.tx_i.mcast_en.dropped_self_mac);
5534 	DP_PRINT_STATS("	Dropped: Send Fail = %d",
5535 			pdev->stats.tx_i.mcast_en.dropped_send_fail);
5536 	DP_PRINT_STATS("	Unicast sent = %d",
5537 			pdev->stats.tx_i.mcast_en.ucast);
5538 	DP_PRINT_STATS("Raw:");
5539 	DP_PRINT_STATS("	Packets = %d",
5540 			pdev->stats.tx_i.raw.raw_pkt.num);
5541 	DP_PRINT_STATS("	Bytes = %llu",
5542 			pdev->stats.tx_i.raw.raw_pkt.bytes);
5543 	DP_PRINT_STATS("	DMA map error = %d",
5544 			pdev->stats.tx_i.raw.dma_map_error);
5545 	DP_PRINT_STATS("Reinjected:");
5546 	DP_PRINT_STATS("	Packets = %d",
5547 			pdev->stats.tx_i.reinject_pkts.num);
5548 	DP_PRINT_STATS("	Bytes = %llu\n",
5549 			pdev->stats.tx_i.reinject_pkts.bytes);
5550 	DP_PRINT_STATS("Inspected:");
5551 	DP_PRINT_STATS("	Packets = %d",
5552 			pdev->stats.tx_i.inspect_pkts.num);
5553 	DP_PRINT_STATS("	Bytes = %llu",
5554 			pdev->stats.tx_i.inspect_pkts.bytes);
5555 	DP_PRINT_STATS("Nawds Multicast:");
5556 	DP_PRINT_STATS("	Packets = %d",
5557 			pdev->stats.tx_i.nawds_mcast.num);
5558 	DP_PRINT_STATS("	Bytes = %llu",
5559 			pdev->stats.tx_i.nawds_mcast.bytes);
5560 	DP_PRINT_STATS("CCE Classified:");
5561 	DP_PRINT_STATS("	CCE Classified Packets: %u",
5562 			pdev->stats.tx_i.cce_classified);
5563 	DP_PRINT_STATS("	RAW CCE Classified Packets: %u",
5564 			pdev->stats.tx_i.cce_classified_raw);
5565 	DP_PRINT_STATS("Mesh stats:");
5566 	DP_PRINT_STATS("	frames to firmware: %u",
5567 			pdev->stats.tx_i.mesh.exception_fw);
5568 	DP_PRINT_STATS("	completions from fw: %u",
5569 			pdev->stats.tx_i.mesh.completion_fw);
5570 	DP_PRINT_STATS("PPDU stats counter");
5571 	for (index = 0; index < CDP_PPDU_STATS_MAX_TAG; index++) {
5572 		DP_PRINT_STATS("	Tag[%d] = %llu", index,
5573 				pdev->stats.ppdu_stats_counter[index]);
5574 	}
5575 }
5576 
5577 /**
5578  * dp_print_pdev_rx_stats(): Print Pdev level RX stats
5579  * @pdev: DP_PDEV Handle
5580  *
5581  * Return: void
5582  */
5583 static inline void
5584 dp_print_pdev_rx_stats(struct dp_pdev *pdev)
5585 {
5586 	DP_PRINT_STATS("PDEV Rx Stats:\n");
5587 	DP_PRINT_STATS("Received From HW (Per Rx Ring):");
5588 	DP_PRINT_STATS("	Packets = %d %d %d %d",
5589 			pdev->stats.rx.rcvd_reo[0].num,
5590 			pdev->stats.rx.rcvd_reo[1].num,
5591 			pdev->stats.rx.rcvd_reo[2].num,
5592 			pdev->stats.rx.rcvd_reo[3].num);
5593 	DP_PRINT_STATS("	Bytes = %llu %llu %llu %llu",
5594 			pdev->stats.rx.rcvd_reo[0].bytes,
5595 			pdev->stats.rx.rcvd_reo[1].bytes,
5596 			pdev->stats.rx.rcvd_reo[2].bytes,
5597 			pdev->stats.rx.rcvd_reo[3].bytes);
5598 	DP_PRINT_STATS("Replenished:");
5599 	DP_PRINT_STATS("	Packets = %d",
5600 			pdev->stats.replenish.pkts.num);
5601 	DP_PRINT_STATS("	Bytes = %llu",
5602 			pdev->stats.replenish.pkts.bytes);
5603 	DP_PRINT_STATS("	Buffers Added To Freelist = %d",
5604 			pdev->stats.buf_freelist);
5605 	DP_PRINT_STATS("	Low threshold intr = %d",
5606 			pdev->stats.replenish.low_thresh_intrs);
5607 	DP_PRINT_STATS("Dropped:");
5608 	DP_PRINT_STATS("	msdu_not_done = %d",
5609 			pdev->stats.dropped.msdu_not_done);
5610 	DP_PRINT_STATS("        mon_rx_drop = %d",
5611 			pdev->stats.dropped.mon_rx_drop);
5612 	DP_PRINT_STATS("Sent To Stack:");
5613 	DP_PRINT_STATS("	Packets = %d",
5614 			pdev->stats.rx.to_stack.num);
5615 	DP_PRINT_STATS("	Bytes = %llu",
5616 			pdev->stats.rx.to_stack.bytes);
5617 	DP_PRINT_STATS("Multicast/Broadcast:");
5618 	DP_PRINT_STATS("	Packets = %d",
5619 			(pdev->stats.rx.multicast.num +
5620 			pdev->stats.rx.bcast.num));
5621 	DP_PRINT_STATS("	Bytes = %llu",
5622 			(pdev->stats.rx.multicast.bytes +
5623 			pdev->stats.rx.bcast.bytes));
5624 	DP_PRINT_STATS("Errors:");
5625 	DP_PRINT_STATS("	Rxdma Ring Un-initialized = %d",
5626 			pdev->stats.replenish.rxdma_err);
5627 	DP_PRINT_STATS("	Desc Alloc Failed = %d",
5628 			pdev->stats.err.desc_alloc_fail);
5629 	DP_PRINT_STATS("	IP checksum error = %d",
5630 		       pdev->stats.err.ip_csum_err);
5631 	DP_PRINT_STATS("	TCP/UDP checksum error = %d",
5632 		       pdev->stats.err.tcp_udp_csum_err);
5633 
5634 	/* Get bar_recv_cnt */
5635 	dp_aggregate_pdev_ctrl_frames_stats(pdev);
5636 	DP_PRINT_STATS("BAR Received Count = %d",
5637 			pdev->stats.rx.bar_recv_cnt);
5638 
5639 }
5640 
5641 /**
5642  * dp_print_pdev_rx_mon_stats(): Print Pdev level RX monitor stats
5643  * @pdev: DP_PDEV Handle
5644  *
5645  * Return: void
5646  */
5647 static inline void
5648 dp_print_pdev_rx_mon_stats(struct dp_pdev *pdev)
5649 {
5650 	struct cdp_pdev_mon_stats *rx_mon_stats;
5651 
5652 	rx_mon_stats = &pdev->rx_mon_stats;
5653 
5654 	DP_PRINT_STATS("PDEV Rx Monitor Stats:\n");
5655 
5656 	dp_rx_mon_print_dbg_ppdu_stats(rx_mon_stats);
5657 
5658 	DP_PRINT_STATS("status_ppdu_done_cnt = %d",
5659 		       rx_mon_stats->status_ppdu_done);
5660 	DP_PRINT_STATS("dest_ppdu_done_cnt = %d",
5661 		       rx_mon_stats->dest_ppdu_done);
5662 	DP_PRINT_STATS("dest_mpdu_done_cnt = %d",
5663 		       rx_mon_stats->dest_mpdu_done);
5664 	DP_PRINT_STATS("dest_mpdu_drop_cnt = %d",
5665 		       rx_mon_stats->dest_mpdu_drop);
5666 }
5667 
5668 /**
5669  * dp_print_soc_tx_stats(): Print SOC level Tx stats
5670  * @soc: DP_SOC Handle
5671  *
5672  * Return: void
5673  */
5674 static inline void
5675 dp_print_soc_tx_stats(struct dp_soc *soc)
5676 {
5677 	uint8_t desc_pool_id;
5678 	soc->stats.tx.desc_in_use = 0;
5679 
5680 	DP_PRINT_STATS("SOC Tx Stats:\n");
5681 
5682 	for (desc_pool_id = 0;
5683 	     desc_pool_id < wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
5684 	     desc_pool_id++)
5685 		soc->stats.tx.desc_in_use +=
5686 			soc->tx_desc[desc_pool_id].num_allocated;
5687 
5688 	DP_PRINT_STATS("Tx Descriptors In Use = %d",
5689 			soc->stats.tx.desc_in_use);
5690 	DP_PRINT_STATS("Invalid peer:");
5691 	DP_PRINT_STATS("	Packets = %d",
5692 			soc->stats.tx.tx_invalid_peer.num);
5693 	DP_PRINT_STATS("	Bytes = %llu",
5694 			soc->stats.tx.tx_invalid_peer.bytes);
5695 	DP_PRINT_STATS("Packets dropped due to TCL ring full = %d %d %d",
5696 			soc->stats.tx.tcl_ring_full[0],
5697 			soc->stats.tx.tcl_ring_full[1],
5698 			soc->stats.tx.tcl_ring_full[2]);
5699 
5700 }
5701 /**
5702  * dp_print_soc_rx_stats(): Print SOC level Rx stats
5703  * @soc: DP_SOC Handle
5704  *
5705  * Return:void
5706  */
5707 static inline void
5708 dp_print_soc_rx_stats(struct dp_soc *soc)
5709 {
5710 	uint32_t i;
5711 	char reo_error[DP_REO_ERR_LENGTH];
5712 	char rxdma_error[DP_RXDMA_ERR_LENGTH];
5713 	uint8_t index = 0;
5714 
5715 	DP_PRINT_STATS("SOC Rx Stats:\n");
5716 	DP_PRINT_STATS("Errors:\n");
5717 	DP_PRINT_STATS("Rx Decrypt Errors = %d",
5718 			(soc->stats.rx.err.rxdma_error[HAL_RXDMA_ERR_DECRYPT] +
5719 			soc->stats.rx.err.rxdma_error[HAL_RXDMA_ERR_TKIP_MIC]));
5720 	DP_PRINT_STATS("Invalid RBM = %d",
5721 			soc->stats.rx.err.invalid_rbm);
5722 	DP_PRINT_STATS("Invalid Vdev = %d",
5723 			soc->stats.rx.err.invalid_vdev);
5724 	DP_PRINT_STATS("Invalid Pdev = %d",
5725 			soc->stats.rx.err.invalid_pdev);
5726 	DP_PRINT_STATS("Invalid Peer = %d",
5727 			soc->stats.rx.err.rx_invalid_peer.num);
5728 	DP_PRINT_STATS("HAL Ring Access Fail = %d",
5729 			soc->stats.rx.err.hal_ring_access_fail);
5730 
5731 	for (i = 0; i < HAL_RXDMA_ERR_MAX; i++) {
5732 		index += qdf_snprint(&rxdma_error[index],
5733 				DP_RXDMA_ERR_LENGTH - index,
5734 				" %d", soc->stats.rx.err.rxdma_error[i]);
5735 	}
5736 	DP_PRINT_STATS("RXDMA Error (0-31):%s",
5737 			rxdma_error);
5738 
5739 	index = 0;
5740 	for (i = 0; i < HAL_REO_ERR_MAX; i++) {
5741 		index += qdf_snprint(&reo_error[index],
5742 				DP_REO_ERR_LENGTH - index,
5743 				" %d", soc->stats.rx.err.reo_error[i]);
5744 	}
5745 	DP_PRINT_STATS("REO Error(0-14):%s",
5746 			reo_error);
5747 }
5748 
5749 
5750 /**
5751  * dp_print_ring_stat_from_hal(): Print hal level ring stats
5752  * @soc: DP_SOC handle
5753  * @srng: DP_SRNG handle
5754  * @ring_name: SRNG name
5755  *
5756  * Return: void
5757  */
5758 static inline void
5759 dp_print_ring_stat_from_hal(struct dp_soc *soc,  struct dp_srng *srng,
5760 	char *ring_name)
5761 {
5762 	uint32_t tailp;
5763 	uint32_t headp;
5764 
5765 	if (srng->hal_srng != NULL) {
5766 		hal_api_get_tphp(soc->hal_soc, srng->hal_srng, &tailp, &headp);
5767 		DP_PRINT_STATS("%s : Head pointer = %d  Tail Pointer = %d\n",
5768 				ring_name, headp, tailp);
5769 	}
5770 }
5771 
5772 /**
5773  * dp_print_ring_stats(): Print tail and head pointer
5774  * @pdev: DP_PDEV handle
5775  *
5776  * Return:void
5777  */
5778 static inline void
5779 dp_print_ring_stats(struct dp_pdev *pdev)
5780 {
5781 	uint32_t i;
5782 	char ring_name[STR_MAXLEN + 1];
5783 	int mac_id;
5784 
5785 	dp_print_ring_stat_from_hal(pdev->soc,
5786 			&pdev->soc->reo_exception_ring,
5787 			"Reo Exception Ring");
5788 	dp_print_ring_stat_from_hal(pdev->soc,
5789 			&pdev->soc->reo_reinject_ring,
5790 			"Reo Inject Ring");
5791 	dp_print_ring_stat_from_hal(pdev->soc,
5792 			&pdev->soc->reo_cmd_ring,
5793 			"Reo Command Ring");
5794 	dp_print_ring_stat_from_hal(pdev->soc,
5795 			&pdev->soc->reo_status_ring,
5796 			"Reo Status Ring");
5797 	dp_print_ring_stat_from_hal(pdev->soc,
5798 			&pdev->soc->rx_rel_ring,
5799 			"Rx Release ring");
5800 	dp_print_ring_stat_from_hal(pdev->soc,
5801 			&pdev->soc->tcl_cmd_ring,
5802 			"Tcl command Ring");
5803 	dp_print_ring_stat_from_hal(pdev->soc,
5804 			&pdev->soc->tcl_status_ring,
5805 			"Tcl Status Ring");
5806 	dp_print_ring_stat_from_hal(pdev->soc,
5807 			&pdev->soc->wbm_desc_rel_ring,
5808 			"Wbm Desc Rel Ring");
5809 	for (i = 0; i < MAX_REO_DEST_RINGS; i++) {
5810 		snprintf(ring_name, STR_MAXLEN, "Reo Dest Ring %d", i);
5811 		dp_print_ring_stat_from_hal(pdev->soc,
5812 				&pdev->soc->reo_dest_ring[i],
5813 				ring_name);
5814 	}
5815 	for (i = 0; i < pdev->soc->num_tcl_data_rings; i++) {
5816 		snprintf(ring_name, STR_MAXLEN, "Tcl Data Ring %d", i);
5817 		dp_print_ring_stat_from_hal(pdev->soc,
5818 				&pdev->soc->tcl_data_ring[i],
5819 				ring_name);
5820 	}
5821 	for (i = 0; i < MAX_TCL_DATA_RINGS; i++) {
5822 		snprintf(ring_name, STR_MAXLEN, "Tx Comp Ring %d", i);
5823 		dp_print_ring_stat_from_hal(pdev->soc,
5824 				&pdev->soc->tx_comp_ring[i],
5825 				ring_name);
5826 	}
5827 	dp_print_ring_stat_from_hal(pdev->soc,
5828 			&pdev->rx_refill_buf_ring,
5829 			"Rx Refill Buf Ring");
5830 
5831 	dp_print_ring_stat_from_hal(pdev->soc,
5832 			&pdev->rx_refill_buf_ring2,
5833 			"Second Rx Refill Buf Ring");
5834 
5835 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
5836 		dp_print_ring_stat_from_hal(pdev->soc,
5837 				&pdev->rxdma_mon_buf_ring[mac_id],
5838 				"Rxdma Mon Buf Ring");
5839 		dp_print_ring_stat_from_hal(pdev->soc,
5840 				&pdev->rxdma_mon_dst_ring[mac_id],
5841 				"Rxdma Mon Dst Ring");
5842 		dp_print_ring_stat_from_hal(pdev->soc,
5843 				&pdev->rxdma_mon_status_ring[mac_id],
5844 				"Rxdma Mon Status Ring");
5845 		dp_print_ring_stat_from_hal(pdev->soc,
5846 				&pdev->rxdma_mon_desc_ring[mac_id],
5847 				"Rxdma mon desc Ring");
5848 	}
5849 
5850 	for (i = 0; i < NUM_RXDMA_RINGS_PER_PDEV; i++) {
5851 		snprintf(ring_name, STR_MAXLEN, "Rxdma err dst ring %d", i);
5852 		dp_print_ring_stat_from_hal(pdev->soc,
5853 			&pdev->rxdma_err_dst_ring[i],
5854 			ring_name);
5855 	}
5856 
5857 	for (i = 0; i < MAX_RX_MAC_RINGS; i++) {
5858 		snprintf(ring_name, STR_MAXLEN, "Rx mac buf ring %d", i);
5859 		dp_print_ring_stat_from_hal(pdev->soc,
5860 				&pdev->rx_mac_buf_ring[i],
5861 				ring_name);
5862 	}
5863 }
5864 
5865 /**
5866  * dp_txrx_host_stats_clr(): Reinitialize the txrx stats
5867  * @vdev: DP_VDEV handle
5868  *
5869  * Return:void
5870  */
5871 static inline void
5872 dp_txrx_host_stats_clr(struct dp_vdev *vdev)
5873 {
5874 	struct dp_peer *peer = NULL;
5875 	struct dp_soc *soc = (struct dp_soc *)vdev->pdev->soc;
5876 
5877 	DP_STATS_CLR(vdev->pdev);
5878 	DP_STATS_CLR(vdev->pdev->soc);
5879 	DP_STATS_CLR(vdev);
5880 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
5881 		if (!peer)
5882 			return;
5883 		DP_STATS_CLR(peer);
5884 
5885 		if (soc->cdp_soc.ol_ops->update_dp_stats) {
5886 			soc->cdp_soc.ol_ops->update_dp_stats(
5887 					vdev->pdev->ctrl_pdev,
5888 					&peer->stats,
5889 					peer->peer_ids[0],
5890 					UPDATE_PEER_STATS);
5891 		}
5892 
5893 	}
5894 
5895 	if (soc->cdp_soc.ol_ops->update_dp_stats)
5896 		soc->cdp_soc.ol_ops->update_dp_stats(vdev->pdev->ctrl_pdev,
5897 				&vdev->stats, (uint16_t)vdev->vdev_id,
5898 				UPDATE_VDEV_STATS);
5899 }
5900 
5901 /**
5902  * dp_print_rx_rates(): Print Rx rate stats
5903  * @vdev: DP_VDEV handle
5904  *
5905  * Return:void
5906  */
5907 static inline void
5908 dp_print_rx_rates(struct dp_vdev *vdev)
5909 {
5910 	struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;
5911 	uint8_t i, mcs, pkt_type;
5912 	uint8_t index = 0;
5913 	char nss[DP_NSS_LENGTH];
5914 
5915 	DP_PRINT_STATS("Rx Rate Info:\n");
5916 
5917 	for (pkt_type = 0; pkt_type < DOT11_MAX; pkt_type++) {
5918 		index = 0;
5919 		for (mcs = 0; mcs < MAX_MCS; mcs++) {
5920 			if (!dp_rate_string[pkt_type][mcs].valid)
5921 				continue;
5922 
5923 			DP_PRINT_STATS("	%s = %d",
5924 					dp_rate_string[pkt_type][mcs].mcs_type,
5925 					pdev->stats.rx.pkt_type[pkt_type].
5926 					mcs_count[mcs]);
5927 		}
5928 
5929 		DP_PRINT_STATS("\n");
5930 	}
5931 
5932 	index = 0;
5933 	for (i = 0; i < SS_COUNT; i++) {
5934 		index += qdf_snprint(&nss[index], DP_NSS_LENGTH - index,
5935 				" %d", pdev->stats.rx.nss[i]);
5936 	}
5937 	DP_PRINT_STATS("NSS(1-8) = %s",
5938 			nss);
5939 
5940 	DP_PRINT_STATS("SGI ="
5941 			" 0.8us %d,"
5942 			" 0.4us %d,"
5943 			" 1.6us %d,"
5944 			" 3.2us %d,",
5945 			pdev->stats.rx.sgi_count[0],
5946 			pdev->stats.rx.sgi_count[1],
5947 			pdev->stats.rx.sgi_count[2],
5948 			pdev->stats.rx.sgi_count[3]);
5949 	DP_PRINT_STATS("BW Counts = 20MHZ %d, 40MHZ %d, 80MHZ %d, 160MHZ %d",
5950 			pdev->stats.rx.bw[0], pdev->stats.rx.bw[1],
5951 			pdev->stats.rx.bw[2], pdev->stats.rx.bw[3]);
5952 	DP_PRINT_STATS("Reception Type ="
5953 			" SU: %d,"
5954 			" MU_MIMO:%d,"
5955 			" MU_OFDMA:%d,"
5956 			" MU_OFDMA_MIMO:%d\n",
5957 			pdev->stats.rx.reception_type[0],
5958 			pdev->stats.rx.reception_type[1],
5959 			pdev->stats.rx.reception_type[2],
5960 			pdev->stats.rx.reception_type[3]);
5961 	DP_PRINT_STATS("Aggregation:\n");
5962 	DP_PRINT_STATS("Number of Msdu's Part of Ampdus = %d",
5963 			pdev->stats.rx.ampdu_cnt);
5964 	DP_PRINT_STATS("Number of Msdu's With No Mpdu Level Aggregation : %d",
5965 			pdev->stats.rx.non_ampdu_cnt);
5966 	DP_PRINT_STATS("Number of Msdu's Part of Amsdu: %d",
5967 			pdev->stats.rx.amsdu_cnt);
5968 	DP_PRINT_STATS("Number of Msdu's With No Msdu Level Aggregation: %d",
5969 			pdev->stats.rx.non_amsdu_cnt);
5970 }
5971 
5972 /**
5973  * dp_print_tx_rates(): Print tx rates
5974  * @vdev: DP_VDEV handle
5975  *
5976  * Return:void
5977  */
5978 static inline void
5979 dp_print_tx_rates(struct dp_vdev *vdev)
5980 {
5981 	struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;
5982 	uint8_t mcs, pkt_type;
5983 	uint8_t index;
5984 	char nss[DP_NSS_LENGTH];
5985 	int nss_index;
5986 
5987 	DP_PRINT_STATS("Tx Rate Info:\n");
5988 
5989 	for (pkt_type = 0; pkt_type < DOT11_MAX; pkt_type++) {
5990 		index = 0;
5991 		for (mcs = 0; mcs < MAX_MCS; mcs++) {
5992 			if (!dp_rate_string[pkt_type][mcs].valid)
5993 				continue;
5994 
5995 			DP_PRINT_STATS("	%s = %d",
5996 					dp_rate_string[pkt_type][mcs].mcs_type,
5997 					pdev->stats.tx.pkt_type[pkt_type].
5998 					mcs_count[mcs]);
5999 		}
6000 
6001 		DP_PRINT_STATS("\n");
6002 	}
6003 
6004 	DP_PRINT_STATS("SGI ="
6005 			" 0.8us %d"
6006 			" 0.4us %d"
6007 			" 1.6us %d"
6008 			" 3.2us %d",
6009 			pdev->stats.tx.sgi_count[0],
6010 			pdev->stats.tx.sgi_count[1],
6011 			pdev->stats.tx.sgi_count[2],
6012 			pdev->stats.tx.sgi_count[3]);
6013 
6014 	DP_PRINT_STATS("BW Counts = 20MHZ %d, 40MHZ %d, 80MHZ %d, 160MHZ %d",
6015 			pdev->stats.tx.bw[0], pdev->stats.tx.bw[1],
6016 			pdev->stats.tx.bw[2], pdev->stats.tx.bw[3]);
6017 
6018 	index = 0;
6019 	for (nss_index = 0; nss_index < SS_COUNT; nss_index++) {
6020 		index += qdf_snprint(&nss[index], DP_NSS_LENGTH - index,
6021 				" %d", pdev->stats.tx.nss[nss_index]);
6022 	}
6023 
6024 	DP_PRINT_STATS("NSS(1-8) = %s", nss);
6025 	DP_PRINT_STATS("OFDMA = %d", pdev->stats.tx.ofdma);
6026 	DP_PRINT_STATS("STBC = %d", pdev->stats.tx.stbc);
6027 	DP_PRINT_STATS("LDPC = %d", pdev->stats.tx.ldpc);
6028 	DP_PRINT_STATS("Retries = %d", pdev->stats.tx.retries);
6029 	DP_PRINT_STATS("Last ack rssi = %d\n", pdev->stats.tx.last_ack_rssi);
6030 
6031 	DP_PRINT_STATS("Aggregation:\n");
6032 	DP_PRINT_STATS("Number of Msdu's Part of Amsdu = %d",
6033 			pdev->stats.tx.amsdu_cnt);
6034 	DP_PRINT_STATS("Number of Msdu's With No Msdu Level Aggregation = %d",
6035 			pdev->stats.tx.non_amsdu_cnt);
6036 }
6037 
6038 /**
6039  * dp_print_peer_stats():print peer stats
6040  * @peer: DP_PEER handle
6041  *
6042  * return void
6043  */
6044 static inline void dp_print_peer_stats(struct dp_peer *peer)
6045 {
6046 	uint8_t i, mcs, pkt_type;
6047 	uint32_t index;
6048 	char nss[DP_NSS_LENGTH];
6049 	DP_PRINT_STATS("Node Tx Stats:\n");
6050 	DP_PRINT_STATS("Total Packet Completions = %d",
6051 			peer->stats.tx.comp_pkt.num);
6052 	DP_PRINT_STATS("Total Bytes Completions = %llu",
6053 			peer->stats.tx.comp_pkt.bytes);
6054 	DP_PRINT_STATS("Success Packets = %d",
6055 			peer->stats.tx.tx_success.num);
6056 	DP_PRINT_STATS("Success Bytes = %llu",
6057 			peer->stats.tx.tx_success.bytes);
6058 	DP_PRINT_STATS("Unicast Success Packets = %d",
6059 			peer->stats.tx.ucast.num);
6060 	DP_PRINT_STATS("Unicast Success Bytes = %llu",
6061 			peer->stats.tx.ucast.bytes);
6062 	DP_PRINT_STATS("Multicast Success Packets = %d",
6063 			peer->stats.tx.mcast.num);
6064 	DP_PRINT_STATS("Multicast Success Bytes = %llu",
6065 			peer->stats.tx.mcast.bytes);
6066 	DP_PRINT_STATS("Broadcast Success Packets = %d",
6067 			peer->stats.tx.bcast.num);
6068 	DP_PRINT_STATS("Broadcast Success Bytes = %llu",
6069 			peer->stats.tx.bcast.bytes);
6070 	DP_PRINT_STATS("Packets Failed = %d",
6071 			peer->stats.tx.tx_failed);
6072 	DP_PRINT_STATS("Packets In OFDMA = %d",
6073 			peer->stats.tx.ofdma);
6074 	DP_PRINT_STATS("Packets In STBC = %d",
6075 			peer->stats.tx.stbc);
6076 	DP_PRINT_STATS("Packets In LDPC = %d",
6077 			peer->stats.tx.ldpc);
6078 	DP_PRINT_STATS("Packet Retries = %d",
6079 			peer->stats.tx.retries);
6080 	DP_PRINT_STATS("MSDU's Part of AMSDU = %d",
6081 			peer->stats.tx.amsdu_cnt);
6082 	DP_PRINT_STATS("Last Packet RSSI = %d",
6083 			peer->stats.tx.last_ack_rssi);
6084 	DP_PRINT_STATS("Dropped At FW: Removed = %d",
6085 			peer->stats.tx.dropped.fw_rem);
6086 	DP_PRINT_STATS("Dropped At FW: Removed transmitted = %d",
6087 			peer->stats.tx.dropped.fw_rem_tx);
6088 	DP_PRINT_STATS("Dropped At FW: Removed Untransmitted = %d",
6089 			peer->stats.tx.dropped.fw_rem_notx);
6090 	DP_PRINT_STATS("Dropped : Age Out = %d",
6091 			peer->stats.tx.dropped.age_out);
6092 	DP_PRINT_STATS("NAWDS : ");
6093 	DP_PRINT_STATS("	Nawds multicast Drop Tx Packet = %d",
6094 			peer->stats.tx.nawds_mcast_drop);
6095 	DP_PRINT_STATS("	Nawds multicast  Tx Packet Count = %d",
6096 			peer->stats.tx.nawds_mcast.num);
6097 	DP_PRINT_STATS("	Nawds multicast  Tx Packet Bytes = %llu",
6098 			peer->stats.tx.nawds_mcast.bytes);
6099 
6100 	DP_PRINT_STATS("Rate Info:");
6101 
6102 	for (pkt_type = 0; pkt_type < DOT11_MAX; pkt_type++) {
6103 		index = 0;
6104 		for (mcs = 0; mcs < MAX_MCS; mcs++) {
6105 			if (!dp_rate_string[pkt_type][mcs].valid)
6106 				continue;
6107 
6108 			DP_PRINT_STATS("	%s = %d",
6109 					dp_rate_string[pkt_type][mcs].mcs_type,
6110 					peer->stats.tx.pkt_type[pkt_type].
6111 					mcs_count[mcs]);
6112 		}
6113 
6114 		DP_PRINT_STATS("\n");
6115 	}
6116 
6117 	DP_PRINT_STATS("SGI = "
6118 			" 0.8us %d"
6119 			" 0.4us %d"
6120 			" 1.6us %d"
6121 			" 3.2us %d",
6122 			peer->stats.tx.sgi_count[0],
6123 			peer->stats.tx.sgi_count[1],
6124 			peer->stats.tx.sgi_count[2],
6125 			peer->stats.tx.sgi_count[3]);
6126 	DP_PRINT_STATS("Excess Retries per AC ");
6127 	DP_PRINT_STATS("	 Best effort = %d",
6128 			peer->stats.tx.excess_retries_per_ac[0]);
6129 	DP_PRINT_STATS("	 Background= %d",
6130 			peer->stats.tx.excess_retries_per_ac[1]);
6131 	DP_PRINT_STATS("	 Video = %d",
6132 			peer->stats.tx.excess_retries_per_ac[2]);
6133 	DP_PRINT_STATS("	 Voice = %d",
6134 			peer->stats.tx.excess_retries_per_ac[3]);
6135 	DP_PRINT_STATS("BW Counts = 20MHZ %d 40MHZ %d 80MHZ %d 160MHZ %d\n",
6136 			peer->stats.tx.bw[0], peer->stats.tx.bw[1],
6137 			peer->stats.tx.bw[2], peer->stats.tx.bw[3]);
6138 
6139 	index = 0;
6140 	for (i = 0; i < SS_COUNT; i++) {
6141 		index += qdf_snprint(&nss[index], DP_NSS_LENGTH - index,
6142 				" %d", peer->stats.tx.nss[i]);
6143 	}
6144 	DP_PRINT_STATS("NSS(1-8) = %s",
6145 			nss);
6146 
6147 	DP_PRINT_STATS("Aggregation:");
6148 	DP_PRINT_STATS("	Number of Msdu's Part of Amsdu = %d",
6149 			peer->stats.tx.amsdu_cnt);
6150 	DP_PRINT_STATS("	Number of Msdu's With No Msdu Level Aggregation = %d\n",
6151 			peer->stats.tx.non_amsdu_cnt);
6152 
6153 	DP_PRINT_STATS("Node Rx Stats:");
6154 	DP_PRINT_STATS("Packets Sent To Stack = %d",
6155 			peer->stats.rx.to_stack.num);
6156 	DP_PRINT_STATS("Bytes Sent To Stack = %llu",
6157 			peer->stats.rx.to_stack.bytes);
6158 	for (i = 0; i <  CDP_MAX_RX_RINGS; i++) {
6159 		DP_PRINT_STATS("Ring Id = %d", i);
6160 		DP_PRINT_STATS("	Packets Received = %d",
6161 				peer->stats.rx.rcvd_reo[i].num);
6162 		DP_PRINT_STATS("	Bytes Received = %llu",
6163 				peer->stats.rx.rcvd_reo[i].bytes);
6164 	}
6165 	DP_PRINT_STATS("Multicast Packets Received = %d",
6166 			peer->stats.rx.multicast.num);
6167 	DP_PRINT_STATS("Multicast Bytes Received = %llu",
6168 			peer->stats.rx.multicast.bytes);
6169 	DP_PRINT_STATS("Broadcast Packets Received = %d",
6170 			peer->stats.rx.bcast.num);
6171 	DP_PRINT_STATS("Broadcast Bytes Received = %llu",
6172 			peer->stats.rx.bcast.bytes);
6173 	DP_PRINT_STATS("Intra BSS Packets Received = %d",
6174 			peer->stats.rx.intra_bss.pkts.num);
6175 	DP_PRINT_STATS("Intra BSS Bytes Received = %llu",
6176 			peer->stats.rx.intra_bss.pkts.bytes);
6177 	DP_PRINT_STATS("Raw Packets Received = %d",
6178 			peer->stats.rx.raw.num);
6179 	DP_PRINT_STATS("Raw Bytes Received = %llu",
6180 			peer->stats.rx.raw.bytes);
6181 	DP_PRINT_STATS("Errors: MIC Errors = %d",
6182 			peer->stats.rx.err.mic_err);
6183 	DP_PRINT_STATS("Errors: Decryption Errors = %d",
6184 			peer->stats.rx.err.decrypt_err);
6185 	DP_PRINT_STATS("Msdu's Received With No Mpdu Level Aggregation = %d",
6186 			peer->stats.rx.non_ampdu_cnt);
6187 	DP_PRINT_STATS("Msdu's Received As Part of Ampdu = %d",
6188 			peer->stats.rx.ampdu_cnt);
6189 	DP_PRINT_STATS("Msdu's Received Not Part of Amsdu's = %d",
6190 			peer->stats.rx.non_amsdu_cnt);
6191 	DP_PRINT_STATS("MSDUs Received As Part of Amsdu = %d",
6192 			peer->stats.rx.amsdu_cnt);
6193 	DP_PRINT_STATS("NAWDS : ");
6194 	DP_PRINT_STATS("	Nawds multicast Drop Rx Packet = %d",
6195 			peer->stats.rx.nawds_mcast_drop);
6196 	DP_PRINT_STATS("SGI ="
6197 			" 0.8us %d"
6198 			" 0.4us %d"
6199 			" 1.6us %d"
6200 			" 3.2us %d",
6201 			peer->stats.rx.sgi_count[0],
6202 			peer->stats.rx.sgi_count[1],
6203 			peer->stats.rx.sgi_count[2],
6204 			peer->stats.rx.sgi_count[3]);
6205 	DP_PRINT_STATS("BW Counts = 20MHZ %d 40MHZ %d 80MHZ %d 160MHZ %d",
6206 			peer->stats.rx.bw[0], peer->stats.rx.bw[1],
6207 			peer->stats.rx.bw[2], peer->stats.rx.bw[3]);
6208 	DP_PRINT_STATS("Reception Type ="
6209 			" SU %d,"
6210 			" MU_MIMO %d,"
6211 			" MU_OFDMA %d,"
6212 			" MU_OFDMA_MIMO %d",
6213 			peer->stats.rx.reception_type[0],
6214 			peer->stats.rx.reception_type[1],
6215 			peer->stats.rx.reception_type[2],
6216 			peer->stats.rx.reception_type[3]);
6217 
6218 
6219 	for (pkt_type = 0; pkt_type < DOT11_MAX; pkt_type++) {
6220 		index = 0;
6221 		for (mcs = 0; mcs < MAX_MCS; mcs++) {
6222 			if (!dp_rate_string[pkt_type][mcs].valid)
6223 				continue;
6224 
6225 			DP_PRINT_STATS("	%s = %d",
6226 					dp_rate_string[pkt_type][mcs].mcs_type,
6227 					peer->stats.rx.pkt_type[pkt_type].
6228 					mcs_count[mcs]);
6229 		}
6230 
6231 		DP_PRINT_STATS("\n");
6232 	}
6233 
6234 	index = 0;
6235 	for (i = 0; i < SS_COUNT; i++) {
6236 		index += qdf_snprint(&nss[index], DP_NSS_LENGTH - index,
6237 				" %d", peer->stats.rx.nss[i]);
6238 	}
6239 	DP_PRINT_STATS("NSS(1-8) = %s",
6240 			nss);
6241 
6242 	DP_PRINT_STATS("Aggregation:");
6243 	DP_PRINT_STATS("	Msdu's Part of Ampdu = %d",
6244 			peer->stats.rx.ampdu_cnt);
6245 	DP_PRINT_STATS("	Msdu's With No Mpdu Level Aggregation = %d",
6246 			peer->stats.rx.non_ampdu_cnt);
6247 	DP_PRINT_STATS("	Msdu's Part of Amsdu = %d",
6248 			peer->stats.rx.amsdu_cnt);
6249 	DP_PRINT_STATS("	Msdu's With No Msdu Level Aggregation = %d",
6250 			peer->stats.rx.non_amsdu_cnt);
6251 }
6252 
6253 /*
6254  * dp_get_host_peer_stats()- function to print peer stats
6255  * @pdev_handle: DP_PDEV handle
6256  * @mac_addr: mac address of the peer
6257  *
6258  * Return: void
6259  */
6260 static void
6261 dp_get_host_peer_stats(struct cdp_pdev *pdev_handle, char *mac_addr)
6262 {
6263 	struct dp_peer *peer;
6264 	uint8_t local_id;
6265 
6266 	peer = (struct dp_peer *)dp_find_peer_by_addr(pdev_handle, mac_addr,
6267 			&local_id);
6268 
6269 	if (!peer) {
6270 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
6271 			  "%s: Invalid peer\n", __func__);
6272 		return;
6273 	}
6274 
6275 	dp_print_peer_stats(peer);
6276 	dp_peer_rxtid_stats(peer, dp_rx_tid_stats_cb, NULL);
6277 }
6278 
6279 /**
6280  * dp_print_host_stats()- Function to print the stats aggregated at host
6281  * @vdev_handle: DP_VDEV handle
6282  * @req: stats request, mapped internally to a host stats type
6283  *
6284  * Available Stat types
6285  * TXRX_CLEAR_STATS  : Clear the stats
6286  * TXRX_RX_RATE_STATS: Print Rx Rate Info
6287  * TXRX_TX_RATE_STATS: Print Tx Rate Info
6288  * TXRX_TX_HOST_STATS: Print Tx Stats
6289  * TXRX_RX_HOST_STATS: Print Rx Stats
6290  * TXRX_AST_STATS: Print AST Stats
6291  * TXRX_SRNG_PTR_STATS: Print SRNG ring pointer stats
 * TXRX_RX_MON_STATS: Print monitor mode Rx stats
 * TXRX_REO_QUEUE_STATS: Print peer stats and Rx REO queue stats
6292  *
6293  * Return: 0 on success, print error message in case of failure
6294  */
6295 static int
6296 dp_print_host_stats(struct cdp_vdev *vdev_handle,
6297 		    struct cdp_txrx_stats_req *req)
6298 {
6299 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
6300 	struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;
6301 	enum cdp_host_txrx_stats type =
6302 			dp_stats_mapping_table[req->stats][STATS_HOST];
6303 
6304 	dp_aggregate_pdev_stats(pdev);
6305 
6306 	switch (type) {
6307 	case TXRX_CLEAR_STATS:
6308 		dp_txrx_host_stats_clr(vdev);
6309 		break;
6310 	case TXRX_RX_RATE_STATS:
6311 		dp_print_rx_rates(vdev);
6312 		break;
6313 	case TXRX_TX_RATE_STATS:
6314 		dp_print_tx_rates(vdev);
6315 		break;
6316 	case TXRX_TX_HOST_STATS:
6317 		dp_print_pdev_tx_stats(pdev);
6318 		dp_print_soc_tx_stats(pdev->soc);
6319 		break;
6320 	case TXRX_RX_HOST_STATS:
6321 		dp_print_pdev_rx_stats(pdev);
6322 		dp_print_soc_rx_stats(pdev->soc);
6323 		break;
6324 	case TXRX_AST_STATS:
6325 		dp_print_ast_stats(pdev->soc);
6326 		dp_print_peer_table(vdev);
6327 		break;
6328 	case TXRX_SRNG_PTR_STATS:
6329 		dp_print_ring_stats(pdev);
6330 		break;
6331 	case TXRX_RX_MON_STATS:
6332 		dp_print_pdev_rx_mon_stats(pdev);
6333 		break;
6334 	case TXRX_REO_QUEUE_STATS:
6335 		dp_get_host_peer_stats((struct cdp_pdev *)pdev, req->peer_addr);
6336 		break;
6337 	default:
6338 		DP_TRACE(FATAL, "Wrong Input For TxRx Host Stats");
6339 		break;
6340 	}
6341 	return 0;
6342 }
6343 
6344 /*
6345  * dp_ppdu_ring_reset()- Reset PPDU Stats ring
6346  * @pdev: DP_PDEV handle
6347  *
6348  * Return: void
6349  */
6350 static void
6351 dp_ppdu_ring_reset(struct dp_pdev *pdev)
6352 {
6353 	struct htt_rx_ring_tlv_filter htt_tlv_filter;
6354 	int mac_id;
6355 
6356 	qdf_mem_set(&(htt_tlv_filter), sizeof(htt_tlv_filter), 0x0);
6357 
6358 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
6359 		int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id,
6360 							pdev->pdev_id);
6361 
6362 		htt_h2t_rx_ring_cfg(pdev->soc->htt_handle, mac_for_pdev,
6363 			pdev->rxdma_mon_status_ring[mac_id].hal_srng,
6364 			RXDMA_MONITOR_STATUS, RX_BUFFER_SIZE, &htt_tlv_filter);
6365 	}
6366 }
6367 
6368 /*
6369  * dp_ppdu_ring_cfg()- Configure PPDU Stats ring
6370  * @pdev: DP_PDEV handle
6371  *
6372  * Return: void
6373  */
6374 static void
6375 dp_ppdu_ring_cfg(struct dp_pdev *pdev)
6376 {
6377 	struct htt_rx_ring_tlv_filter htt_tlv_filter = {0};
6378 	int mac_id;
6379 
6380 	htt_tlv_filter.mpdu_start = 1;
6381 	htt_tlv_filter.msdu_start = 0;
6382 	htt_tlv_filter.packet = 0;
6383 	htt_tlv_filter.msdu_end = 0;
6384 	htt_tlv_filter.mpdu_end = 0;
6385 	htt_tlv_filter.attention = 0;
6386 	htt_tlv_filter.ppdu_start = 1;
6387 	htt_tlv_filter.ppdu_end = 1;
6388 	htt_tlv_filter.ppdu_end_user_stats = 1;
6389 	htt_tlv_filter.ppdu_end_user_stats_ext = 1;
6390 	htt_tlv_filter.ppdu_end_status_done = 1;
6391 	htt_tlv_filter.enable_fp = 1;
6392 	htt_tlv_filter.enable_md = 0;
6393 	if (pdev->mcopy_mode) {
6394 		htt_tlv_filter.packet_header = 1;
6395 		htt_tlv_filter.enable_mo = 1;
6396 	}
6397 	htt_tlv_filter.fp_mgmt_filter = FILTER_MGMT_ALL;
6398 	htt_tlv_filter.fp_ctrl_filter = FILTER_CTRL_ALL;
6399 	htt_tlv_filter.fp_data_filter = FILTER_DATA_ALL;
6400 	htt_tlv_filter.mo_mgmt_filter = FILTER_MGMT_ALL;
6401 	htt_tlv_filter.mo_ctrl_filter = FILTER_CTRL_ALL;
6402 	htt_tlv_filter.mo_data_filter = FILTER_DATA_ALL;
6403 
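	/*
	 * The filter above subscribes the monitor status ring to the PPDU
	 * start/end and per-user stats TLVs only (plus packet headers and
	 * the MO filter when M-copy mode is active); the loop below applies
	 * it to every RXDMA status ring of this pdev.
	 */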
6404 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
6405 		int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id,
6406 						pdev->pdev_id);
6407 
6408 		htt_h2t_rx_ring_cfg(pdev->soc->htt_handle, mac_for_pdev,
6409 			pdev->rxdma_mon_status_ring[mac_id].hal_srng,
6410 			RXDMA_MONITOR_STATUS, RX_BUFFER_SIZE, &htt_tlv_filter);
6411 	}
6412 }
6413 
6414 /*
6415  * is_ppdu_txrx_capture_enabled() - API to check whether pktlog, tx sniffer
6416  *                              and M-copy modes are all disabled.
6417  * @pdev: dp pdev handle.
6418  *
6419  * Return: true if none of these capture modes is enabled, else false
6420  */
6421 static inline bool is_ppdu_txrx_capture_enabled(struct dp_pdev *pdev)
6422 {
6423 	if (!pdev->pktlog_ppdu_stats && !pdev->tx_sniffer_enable &&
6424 	    !pdev->mcopy_mode)
6425 		return true;
6426 	else
6427 		return false;
6428 }
6429 
6430 /*
6431  * dp_set_bpr_enable() - API to enable/disable bpr feature
6432  * @pdev_handle: DP_PDEV handle.
6433  * @val: Provided value.
6434  *
6435  * Return: void
6436  */
6437 static void
6438 dp_set_bpr_enable(struct cdp_pdev *pdev_handle, int val)
6439 {
6440 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
6441 
6442 	switch (val) {
6443 	case CDP_BPR_DISABLE:
6444 		pdev->bpr_enable = CDP_BPR_DISABLE;
6445 		if (!pdev->pktlog_ppdu_stats && !pdev->enhanced_stats_en &&
6446 		    !pdev->tx_sniffer_enable && !pdev->mcopy_mode) {
6447 			dp_h2t_cfg_stats_msg_send(pdev, 0, pdev->pdev_id);
6448 		} else if (pdev->enhanced_stats_en &&
6449 			   !pdev->tx_sniffer_enable && !pdev->mcopy_mode &&
6450 			   !pdev->pktlog_ppdu_stats) {
6451 			dp_h2t_cfg_stats_msg_send(pdev,
6452 						  DP_PPDU_STATS_CFG_ENH_STATS,
6453 						  pdev->pdev_id);
6454 		}
6455 		break;
6456 	case CDP_BPR_ENABLE:
6457 		pdev->bpr_enable = CDP_BPR_ENABLE;
6458 		if (!pdev->enhanced_stats_en && !pdev->tx_sniffer_enable &&
6459 		    !pdev->mcopy_mode && !pdev->pktlog_ppdu_stats) {
6460 			dp_h2t_cfg_stats_msg_send(pdev,
6461 						  DP_PPDU_STATS_CFG_BPR,
6462 						  pdev->pdev_id);
6463 		} else if (pdev->enhanced_stats_en &&
6464 			   !pdev->tx_sniffer_enable && !pdev->mcopy_mode &&
6465 			   !pdev->pktlog_ppdu_stats) {
6466 			dp_h2t_cfg_stats_msg_send(pdev,
6467 						  DP_PPDU_STATS_CFG_BPR_ENH,
6468 						  pdev->pdev_id);
6469 		} else if (pdev->pktlog_ppdu_stats) {
6470 			dp_h2t_cfg_stats_msg_send(pdev,
6471 						  DP_PPDU_STATS_CFG_BPR_PKTLOG,
6472 						  pdev->pdev_id);
6473 		}
6474 		break;
6475 	default:
6476 		break;
6477 	}
6478 }
6479 
6480 /*
6481  * dp_config_debug_sniffer()- API to enable/disable debug sniffer
6482  * @pdev_handle: DP_PDEV handle
6483  * @val: user provided value
6484  *
6485  * Return: void
6486  */
6487 static void
6488 dp_config_debug_sniffer(struct cdp_pdev *pdev_handle, int val)
6489 {
6490 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
6491 
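	/*
	 * val encodes the requested capture mode (per the cases below):
	 * 0 - disable tx sniffer and M-copy, 1 - tx sniffer, 2 - M-copy.
	 */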
6492 	switch (val) {
6493 	case 0:
6494 		pdev->tx_sniffer_enable = 0;
6495 		pdev->mcopy_mode = 0;
6496 
6497 		if (!pdev->pktlog_ppdu_stats && !pdev->enhanced_stats_en &&
6498 		    !pdev->bpr_enable) {
6499 			dp_h2t_cfg_stats_msg_send(pdev, 0, pdev->pdev_id);
6500 			dp_ppdu_ring_reset(pdev);
6501 		} else if (pdev->enhanced_stats_en && !pdev->bpr_enable) {
6502 			dp_h2t_cfg_stats_msg_send(pdev,
6503 				DP_PPDU_STATS_CFG_ENH_STATS, pdev->pdev_id);
6504 		} else if (!pdev->enhanced_stats_en && pdev->bpr_enable) {
6505 			dp_h2t_cfg_stats_msg_send(pdev,
6506 						  DP_PPDU_STATS_CFG_BPR_ENH,
6507 						  pdev->pdev_id);
6508 		} else {
6509 			dp_h2t_cfg_stats_msg_send(pdev,
6510 						  DP_PPDU_STATS_CFG_BPR,
6511 						  pdev->pdev_id);
6512 		}
6513 		break;
6514 
6515 	case 1:
6516 		pdev->tx_sniffer_enable = 1;
6517 		pdev->mcopy_mode = 0;
6518 
6519 		if (!pdev->pktlog_ppdu_stats)
6520 			dp_h2t_cfg_stats_msg_send(pdev,
6521 				DP_PPDU_STATS_CFG_SNIFFER, pdev->pdev_id);
6522 		break;
6523 	case 2:
6524 		pdev->mcopy_mode = 1;
6525 		pdev->tx_sniffer_enable = 0;
6526 		dp_ppdu_ring_cfg(pdev);
6527 
6528 		if (!pdev->pktlog_ppdu_stats)
6529 			dp_h2t_cfg_stats_msg_send(pdev,
6530 				DP_PPDU_STATS_CFG_SNIFFER, pdev->pdev_id);
6531 		break;
6532 	default:
6533 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
6534 			"Invalid value");
6535 		break;
6536 	}
6537 }
6538 
6539 /*
6540  * dp_enable_enhanced_stats()- API to enable enhanced statistics
6541  * @pdev_handle: DP_PDEV handle
6542  *
6543  * Return: void
6544  */
6545 static void
6546 dp_enable_enhanced_stats(struct cdp_pdev *pdev_handle)
6547 {
6548 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
6549 	pdev->enhanced_stats_en = 1;
6550 
6551 	if (!pdev->mcopy_mode && !pdev->neighbour_peers_added)
6552 		dp_ppdu_ring_cfg(pdev);
6553 
6554 	if (is_ppdu_txrx_capture_enabled(pdev) && !pdev->bpr_enable) {
6555 		dp_h2t_cfg_stats_msg_send(pdev, DP_PPDU_STATS_CFG_ENH_STATS, pdev->pdev_id);
6556 	} else if (is_ppdu_txrx_capture_enabled(pdev) && pdev->bpr_enable) {
6557 		dp_h2t_cfg_stats_msg_send(pdev,
6558 					  DP_PPDU_STATS_CFG_BPR_ENH,
6559 					  pdev->pdev_id);
6560 	}
6561 }
6562 
6563 /*
6564  * dp_disable_enhanced_stats()- API to disable enhanced statistics
6565  * @pdev_handle: DP_PDEV handle
6566  *
6567  * Return: void
6568  */
6569 static void
6570 dp_disable_enhanced_stats(struct cdp_pdev *pdev_handle)
6571 {
6572 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
6573 
6574 	pdev->enhanced_stats_en = 0;
6575 
6576 	if (is_ppdu_txrx_capture_enabled(pdev) && !pdev->bpr_enable) {
6577 		dp_h2t_cfg_stats_msg_send(pdev, 0, pdev->pdev_id);
6578 	} else if (is_ppdu_txrx_capture_enabled(pdev) && pdev->bpr_enable) {
6579 		dp_h2t_cfg_stats_msg_send(pdev,
6580 					  DP_PPDU_STATS_CFG_BPR,
6581 					  pdev->pdev_id);
6582 	}
6583 
6584 	if (!pdev->mcopy_mode && !pdev->neighbour_peers_added)
6585 		dp_ppdu_ring_reset(pdev);
6586 }
6587 
6588 /*
6589  * dp_get_fw_peer_stats()- function to request peer stats from FW
6590  * @pdev_handle: DP_PDEV handle
6591  * @mac_addr: mac address of the peer
6592  * @cap: Type of htt stats requested
6593  *
6594  * Currently supporting only MAC address based requests
6595  *	1: HTT_PEER_STATS_REQ_MODE_NO_QUERY
6596  *	2: HTT_PEER_STATS_REQ_MODE_QUERY_TQM
6597  *	3: HTT_PEER_STATS_REQ_MODE_FLUSH_TQM
6598  *
6599  * Return: void
6600  */
6601 static void
6602 dp_get_fw_peer_stats(struct cdp_pdev *pdev_handle, uint8_t *mac_addr,
6603 		uint32_t cap)
6604 {
6605 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
6606 	int i;
6607 	uint32_t config_param0 = 0;
6608 	uint32_t config_param1 = 0;
6609 	uint32_t config_param2 = 0;
6610 	uint32_t config_param3 = 0;
6611 
6612 	HTT_DBG_EXT_STATS_PEER_INFO_IS_MAC_ADDR_SET(config_param0, 1);
6613 	config_param0 |= (1 << (cap + 1));
6614 
6615 	for (i = 0; i < HTT_PEER_STATS_MAX_TLV; i++) {
6616 		config_param1 |= (1 << i);
6617 	}
6618 
6619 	config_param2 |= (mac_addr[0] & 0x000000ff);
6620 	config_param2 |= ((mac_addr[1] << 8) & 0x0000ff00);
6621 	config_param2 |= ((mac_addr[2] << 16) & 0x00ff0000);
6622 	config_param2 |= ((mac_addr[3] << 24) & 0xff000000);
6623 
6624 	config_param3 |= (mac_addr[4] & 0x000000ff);
6625 	config_param3 |= ((mac_addr[5] << 8) & 0x0000ff00);
6626 
6627 	dp_h2t_ext_stats_msg_send(pdev, HTT_DBG_EXT_STATS_PEER_INFO,
6628 			config_param0, config_param1, config_param2,
6629 			config_param3, 0, 0, 0);
6630 
6631 }
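
/*
 * Illustration (not part of the request path): for a peer MAC of
 * 00:11:22:33:44:55, the packing in dp_get_fw_peer_stats() above yields
 *	config_param2 = 0x33221100	(mac_addr[0..3])
 *	config_param3 = 0x00005544	(mac_addr[4..5])
 * while bit (cap + 1) of config_param0 selects the HTT request mode and
 * config_param1 requests every peer-stats TLV.
 */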
6632 
6633 /* This struct definition will be removed from here
6634  * once it gets added in FW headers */
6635 struct httstats_cmd_req {
6636 	uint32_t    config_param0;
6637 	uint32_t    config_param1;
6638 	uint32_t    config_param2;
6639 	uint32_t    config_param3;
6640 	int         cookie;
6641 	u_int8_t    stats_id;
6642 };
6643 
6644 /*
6645  * dp_get_htt_stats: function to process the httstats request
6646  * @pdev_handle: DP pdev handle
6647  * @data: pointer to request data
6648  * @data_len: length for request data
6649  *
6650  * return: void
6651  */
6652 static void
6653 dp_get_htt_stats(struct cdp_pdev *pdev_handle, void *data, uint32_t data_len)
6654 {
6655 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
6656 	struct httstats_cmd_req *req = (struct httstats_cmd_req *)data;
6657 
6658 	QDF_ASSERT(data_len == sizeof(struct httstats_cmd_req));
6659 	dp_h2t_ext_stats_msg_send(pdev, req->stats_id,
6660 				req->config_param0, req->config_param1,
6661 				req->config_param2, req->config_param3,
6662 				req->cookie, 0, 0);
6663 }
6664 
6665 /*
6666  * dp_set_pdev_param: function to set parameters in pdev
6667  * @pdev_handle: DP pdev handle
6668  * @param: parameter type to be set
6669  * @val: value of parameter to be set
6670  *
6671  * return: void
6672  */
6673 static void dp_set_pdev_param(struct cdp_pdev *pdev_handle,
6674 		enum cdp_pdev_param_type param, uint8_t val)
6675 {
6676 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
6677 	switch (param) {
6678 	case CDP_CONFIG_DEBUG_SNIFFER:
6679 		dp_config_debug_sniffer(pdev_handle, val);
6680 		break;
6681 	case CDP_CONFIG_BPR_ENABLE:
6682 		dp_set_bpr_enable(pdev_handle, val);
6683 		break;
6684 	case CDP_CONFIG_PRIMARY_RADIO:
6685 		pdev->is_primary = val;
6686 		break;
6687 	default:
6688 		break;
6689 	}
6690 }
6691 
6692 /*
6693  * dp_set_vdev_param: function to set parameters in vdev
 * @vdev_handle: DP vdev handle
6694  * @param: parameter type to be set
6695  * @val: value of parameter to be set
6696  *
6697  * return: void
6698  */
6699 static void dp_set_vdev_param(struct cdp_vdev *vdev_handle,
6700 		enum cdp_vdev_param_type param, uint32_t val)
6701 {
6702 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
6703 	switch (param) {
6704 	case CDP_ENABLE_WDS:
6705 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
6706 			  "wds_enable %d for vdev(%p) id(%d)\n",
6707 			  val, vdev, vdev->vdev_id);
6708 		vdev->wds_enabled = val;
6709 		break;
6710 	case CDP_ENABLE_NAWDS:
6711 		vdev->nawds_enabled = val;
6712 		break;
6713 	case CDP_ENABLE_MCAST_EN:
6714 		vdev->mcast_enhancement_en = val;
6715 		break;
6716 	case CDP_ENABLE_PROXYSTA:
6717 		vdev->proxysta_vdev = val;
6718 		break;
6719 	case CDP_UPDATE_TDLS_FLAGS:
6720 		vdev->tdls_link_connected = val;
6721 		break;
6722 	case CDP_CFG_WDS_AGING_TIMER:
6723 		if (val == 0)
6724 			qdf_timer_stop(&vdev->pdev->soc->wds_aging_timer);
6725 		else if (val != vdev->wds_aging_timer_val)
6726 			qdf_timer_mod(&vdev->pdev->soc->wds_aging_timer, val);
6727 
6728 		vdev->wds_aging_timer_val = val;
6729 		break;
6730 	case CDP_ENABLE_AP_BRIDGE:
6731 		if (wlan_op_mode_sta != vdev->opmode)
6732 			vdev->ap_bridge_enabled = val;
6733 		else
6734 			vdev->ap_bridge_enabled = false;
6735 		break;
6736 	case CDP_ENABLE_CIPHER:
6737 		vdev->sec_type = val;
6738 		break;
6739 	case CDP_ENABLE_QWRAP_ISOLATION:
6740 		vdev->isolation_vdev = val;
6741 		break;
6742 	default:
6743 		break;
6744 	}
6745 
6746 	dp_tx_vdev_update_search_flags(vdev);
6747 }
6748 
6749 /**
6750  * dp_peer_set_nawds: set nawds bit in peer
6751  * @peer_handle: pointer to peer
6752  * @value: enable/disable nawds
6753  *
6754  * return: void
6755  */
6756 static void dp_peer_set_nawds(struct cdp_peer *peer_handle, uint8_t value)
6757 {
6758 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
6759 	peer->nawds_enabled = value;
6760 }
6761 
6762 /*
6763  * dp_set_vdev_dscp_tid_map_wifi3(): Update Map ID selected for particular vdev
6764  * @vdev_handle: DP_VDEV handle
6765  * @map_id: ID of map that needs to be updated
6766  *
6767  * Return: void
6768  */
6769 static void dp_set_vdev_dscp_tid_map_wifi3(struct cdp_vdev *vdev_handle,
6770 		uint8_t map_id)
6771 {
6772 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
6773 	vdev->dscp_tid_map_id = map_id;
6774 	return;
6775 }
6776 
6777 /* dp_txrx_get_peer_stats - will return cdp_peer_stats
6778  * @peer_handle: DP_PEER handle
6779  *
6780  * return : cdp_peer_stats pointer
6781  */
6782 static struct cdp_peer_stats*
6783 		dp_txrx_get_peer_stats(struct cdp_peer *peer_handle)
6784 {
6785 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
6786 
6787 	qdf_assert(peer);
6788 
6789 	return &peer->stats;
6790 }
6791 
6792 /* dp_txrx_reset_peer_stats - reset cdp_peer_stats for particular peer
6793  * @peer_handle: DP_PEER handle
6794  *
6795  * return : void
6796  */
6797 static void dp_txrx_reset_peer_stats(struct cdp_peer *peer_handle)
6798 {
6799 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
6800 
6801 	qdf_assert(peer);
6802 
6803 	qdf_mem_set(&peer->stats, sizeof(peer->stats), 0);
6804 }
6805 
6806 /* dp_txrx_get_vdev_stats - Update buffer with cdp_vdev_stats
6807  * @vdev_handle: DP_VDEV handle
6808  * @buf: buffer for vdev stats
 * @is_aggregate: if true, aggregate per-peer stats into the vdev stats
6809  *
6810  * return : int
6811  */
6812 static int  dp_txrx_get_vdev_stats(struct cdp_vdev *vdev_handle, void *buf,
6813 				   bool is_aggregate)
6814 {
6815 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
6816 	struct cdp_vdev_stats *vdev_stats = (struct cdp_vdev_stats *)buf;
6817 
6818 	if (is_aggregate)
6819 		dp_aggregate_vdev_stats(vdev, buf);
6820 	else
6821 		qdf_mem_copy(vdev_stats, &vdev->stats, sizeof(vdev->stats));
6822 
6823 	return 0;
6824 }
6825 
6826 /*
6827  * dp_txrx_stats_publish(): publish pdev stats into a buffer
6828  * @pdev_handle: DP_PDEV handle
6829  * @buf: to hold pdev_stats
6830  *
6831  * Return: int
6832  */
6833 static int
6834 dp_txrx_stats_publish(struct cdp_pdev *pdev_handle, void *buf)
6835 {
6836 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
6837 	struct cdp_pdev_stats *buffer = (struct cdp_pdev_stats *) buf;
6838 	struct cdp_txrx_stats_req req = {0,};
6839 
6840 	dp_aggregate_pdev_stats(pdev);
6841 	req.stats = HTT_DBG_EXT_STATS_PDEV_TX;
6842 	req.cookie_val = 1;
6843 	dp_h2t_ext_stats_msg_send(pdev, req.stats, req.param0,
6844 				req.param1, req.param2, req.param3, 0,
6845 				req.cookie_val, 0);
6846 
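	/*
	 * The HTT ext-stats response arrives asynchronously; the sleeps in
	 * this function act as a simple grace period (an assumption of this
	 * publish path) allowing the stats handler to fold the FW response
	 * into pdev->stats before it is copied into the caller's buffer.
	 */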
6847 	msleep(DP_MAX_SLEEP_TIME);
6848 
6849 	req.stats = HTT_DBG_EXT_STATS_PDEV_RX;
6850 	req.cookie_val = 1;
6851 	dp_h2t_ext_stats_msg_send(pdev, req.stats, req.param0,
6852 				req.param1, req.param2, req.param3, 0,
6853 				req.cookie_val, 0);
6854 
6855 	msleep(DP_MAX_SLEEP_TIME);
6856 	qdf_mem_copy(buffer, &pdev->stats, sizeof(pdev->stats));
6857 
6858 	return TXRX_STATS_LEVEL;
6859 }
6860 
6861 /**
6862  * dp_set_pdev_dscp_tid_map_wifi3(): update dscp tid map in pdev
6863  * @pdev_handle: DP_PDEV handle
6864  * @map_id: ID of map that needs to be updated
6865  * @tos: index value in map
6866  * @tid: tid value passed by the user
6867  *
6868  * Return: void
6869  */
6870 static void dp_set_pdev_dscp_tid_map_wifi3(struct cdp_pdev *pdev_handle,
6871 		uint8_t map_id, uint8_t tos, uint8_t tid)
6872 {
6873 	uint8_t dscp;
6874 	struct dp_pdev *pdev = (struct dp_pdev *) pdev_handle;
6875 	dscp = (tos >> DP_IP_DSCP_SHIFT) & DP_IP_DSCP_MASK;
6876 	pdev->dscp_tid_map[map_id][dscp] = tid;
6877 	if (map_id < HAL_MAX_HW_DSCP_TID_MAPS)
6878 		hal_tx_update_dscp_tid(pdev->soc->hal_soc, tid,
6879 			map_id, dscp);
6880 	return;
6881 }
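
/*
 * Worked example (assuming the usual DP_IP_DSCP_SHIFT of 2 and
 * DP_IP_DSCP_MASK of 0x3f): a TOS byte of 0xb8 (DSCP EF) gives
 * dscp = (0xb8 >> 2) & 0x3f = 46, so dscp_tid_map[map_id][46] is set to
 * the requested tid and, for HW maps, pushed to HAL via
 * hal_tx_update_dscp_tid().
 */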
6882 
6883 /**
6884  * dp_fw_stats_process(): Process TxRX FW stats request
6885  * @vdev_handle: DP VDEV handle
6886  * @req: stats request
6887  *
6888  * return: int
6889  */
6890 static int dp_fw_stats_process(struct cdp_vdev *vdev_handle,
6891 		struct cdp_txrx_stats_req *req)
6892 {
6893 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
6894 	struct dp_pdev *pdev = NULL;
6895 	uint32_t stats = req->stats;
6896 	uint8_t mac_id = req->mac_id;
6897 
6898 	if (!vdev) {
6899 		DP_TRACE(NONE, "VDEV not found");
6900 		return 1;
6901 	}
6902 	pdev = vdev->pdev;
6903 
6904 	/*
6905 	 * For the HTT_DBG_EXT_STATS_RESET command, the FW expects
6906 	 * config_param0 through config_param3 to be set per the rule below:
6907 	 *
6908 	 * PARAM:
6909 	 *   - config_param0 : start_offset (stats type)
6910 	 *   - config_param1 : stats bmask from start offset
6911 	 *   - config_param2 : stats bmask from start offset + 32
6912 	 *   - config_param3 : stats bmask from start offset + 64
6913 	 */
6914 	if (req->stats == CDP_TXRX_STATS_0) {
6915 		req->param0 = HTT_DBG_EXT_STATS_PDEV_TX;
6916 		req->param1 = 0xFFFFFFFF;
6917 		req->param2 = 0xFFFFFFFF;
6918 		req->param3 = 0xFFFFFFFF;
6919 	}
6920 
6921 	return dp_h2t_ext_stats_msg_send(pdev, stats, req->param0,
6922 				req->param1, req->param2, req->param3,
6923 				0, 0, mac_id);
6924 }
6925 
6926 /**
6927  * dp_txrx_stats_request - function to map to firmware and host stats
6928  * @vdev: virtual handle
6929  * @req: stats request
6930  *
6931  * Return: integer
6932  */
6933 static int dp_txrx_stats_request(struct cdp_vdev *vdev,
6934 		struct cdp_txrx_stats_req *req)
6935 {
6936 	int host_stats;
6937 	int fw_stats;
6938 	enum cdp_stats stats;
6939 
6940 	if (!vdev || !req) {
6941 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
6942 				"Invalid vdev/req instance");
6943 		return 0;
6944 	}
6945 
6946 	stats = req->stats;
6947 	if (stats >= CDP_TXRX_MAX_STATS)
6948 		return 0;
6949 
6950 	/*
6951 	 * DP_CURR_FW_STATS_AVAIL: number of FW stats currently available;
6952 	 *			must be updated whenever new FW HTT stats are added
6953 	 */
6954 	if (stats > CDP_TXRX_STATS_HTT_MAX)
6955 		stats = stats + DP_CURR_FW_STATS_AVAIL - DP_HTT_DBG_EXT_STATS_MAX;
6956 	fw_stats = dp_stats_mapping_table[stats][STATS_FW];
6957 	host_stats = dp_stats_mapping_table[stats][STATS_HOST];
6958 
6959 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
6960 		 "stats: %u fw_stats_type: %d host_stats_type: %d",
6961 		  stats, fw_stats, host_stats);
6962 
6963 	if (fw_stats != TXRX_FW_STATS_INVALID) {
6964 		/* update request with FW stats type */
6965 		req->stats = fw_stats;
6966 		return dp_fw_stats_process(vdev, req);
6967 	}
6968 
6969 	if ((host_stats != TXRX_HOST_STATS_INVALID) &&
6970 			(host_stats <= TXRX_HOST_STATS_MAX))
6971 		return dp_print_host_stats(vdev, req);
6972 	else
6973 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
6974 				"Wrong Input for TxRx Stats");
6975 
6976 	return 0;
6977 }
6978 
6979 /*
6980  * dp_print_napi_stats(): NAPI stats
6981  * @soc - soc handle
6982  */
6983 static void dp_print_napi_stats(struct dp_soc *soc)
6984 {
6985 	hif_print_napi_stats(soc->hif_handle);
6986 }
6987 
6988 /*
6989  * dp_print_per_ring_stats(): Packet count per ring
6990  * @soc - soc handle
6991  */
6992 static void dp_print_per_ring_stats(struct dp_soc *soc)
6993 {
6994 	uint8_t ring;
6995 	uint16_t core;
6996 	uint64_t total_packets;
6997 
6998 	DP_TRACE(FATAL, "Reo packets per ring:");
6999 	for (ring = 0; ring < MAX_REO_DEST_RINGS; ring++) {
7000 		total_packets = 0;
7001 		DP_TRACE(FATAL, "Packets on ring %u:", ring);
7002 		for (core = 0; core < NR_CPUS; core++) {
7003 			DP_TRACE(FATAL, "Packets arriving on core %u: %llu",
7004 				core, soc->stats.rx.ring_packets[core][ring]);
7005 			total_packets += soc->stats.rx.ring_packets[core][ring];
7006 		}
7007 		DP_TRACE(FATAL, "Total packets on ring %u: %llu",
7008 			ring, total_packets);
7009 	}
7010 }
7011 
7012 /*
7013  * dp_txrx_path_stats() - Function to display dump stats
7014  * @soc - soc handle
7015  *
7016  * return: none
7017  */
7018 static void dp_txrx_path_stats(struct dp_soc *soc)
7019 {
7020 	uint8_t error_code;
7021 	uint8_t loop_pdev;
7022 	struct dp_pdev *pdev;
7023 	uint8_t i;
7024 
7025 	for (loop_pdev = 0; loop_pdev < soc->pdev_count; loop_pdev++) {
7026 
7027 		pdev = soc->pdev_list[loop_pdev];
7028 		dp_aggregate_pdev_stats(pdev);
7029 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
7030 			"Tx path Statistics:");
7031 
7032 		DP_TRACE(FATAL, "from stack: %u msdus (%llu bytes)",
7033 			pdev->stats.tx_i.rcvd.num,
7034 			pdev->stats.tx_i.rcvd.bytes);
7035 		DP_TRACE(FATAL, "processed from host: %u msdus (%llu bytes)",
7036 			pdev->stats.tx_i.processed.num,
7037 			pdev->stats.tx_i.processed.bytes);
7038 		DP_TRACE(FATAL, "successfully transmitted: %u msdus (%llu bytes)",
7039 			pdev->stats.tx.tx_success.num,
7040 			pdev->stats.tx.tx_success.bytes);
7041 
7042 		DP_TRACE(FATAL, "Dropped in host:");
7043 		DP_TRACE(FATAL, "Total packets dropped: %u,",
7044 			pdev->stats.tx_i.dropped.dropped_pkt.num);
7045 		DP_TRACE(FATAL, "Descriptor not available: %u",
7046 			pdev->stats.tx_i.dropped.desc_na.num);
7047 		DP_TRACE(FATAL, "Ring full: %u",
7048 			pdev->stats.tx_i.dropped.ring_full);
7049 		DP_TRACE(FATAL, "Enqueue fail: %u",
7050 			pdev->stats.tx_i.dropped.enqueue_fail);
7051 		DP_TRACE(FATAL, "DMA Error: %u",
7052 			pdev->stats.tx_i.dropped.dma_error);
7053 
7054 		DP_TRACE(FATAL, "Dropped in hardware:");
7055 		DP_TRACE(FATAL, "total packets dropped: %u",
7056 			pdev->stats.tx.tx_failed);
7057 		DP_TRACE(FATAL, "mpdu age out: %u",
7058 			pdev->stats.tx.dropped.age_out);
7059 		DP_TRACE(FATAL, "firmware removed: %u",
7060 			pdev->stats.tx.dropped.fw_rem);
7061 		DP_TRACE(FATAL, "firmware removed tx: %u",
7062 			pdev->stats.tx.dropped.fw_rem_tx);
7063 		DP_TRACE(FATAL, "firmware removed notx: %u",
7064 			pdev->stats.tx.dropped.fw_rem_notx);
7065 		DP_TRACE(FATAL, "peer_invalid: %u",
7066 			pdev->soc->stats.tx.tx_invalid_peer.num);
7067 
7068 
7069 		DP_TRACE(FATAL, "Tx packets sent per interrupt:");
7070 		DP_TRACE(FATAL, "Single Packet: %u",
7071 			pdev->stats.tx_comp_histogram.pkts_1);
7072 		DP_TRACE(FATAL, "2-20 Packets:  %u",
7073 			pdev->stats.tx_comp_histogram.pkts_2_20);
7074 		DP_TRACE(FATAL, "21-40 Packets: %u",
7075 			pdev->stats.tx_comp_histogram.pkts_21_40);
7076 		DP_TRACE(FATAL, "41-60 Packets: %u",
7077 			pdev->stats.tx_comp_histogram.pkts_41_60);
7078 		DP_TRACE(FATAL, "61-80 Packets: %u",
7079 			pdev->stats.tx_comp_histogram.pkts_61_80);
7080 		DP_TRACE(FATAL, "81-100 Packets: %u",
7081 			pdev->stats.tx_comp_histogram.pkts_81_100);
7082 		DP_TRACE(FATAL, "101-200 Packets: %u",
7083 			pdev->stats.tx_comp_histogram.pkts_101_200);
7084 		DP_TRACE(FATAL, "   201+ Packets: %u",
7085 			pdev->stats.tx_comp_histogram.pkts_201_plus);
7086 
7087 		DP_TRACE(FATAL, "Rx path statistics");
7088 
7089 		DP_TRACE(FATAL, "delivered %u msdus ( %llu bytes),",
7090 			pdev->stats.rx.to_stack.num,
7091 			pdev->stats.rx.to_stack.bytes);
7092 		for (i = 0; i <  CDP_MAX_RX_RINGS; i++)
7093 			DP_TRACE(FATAL, "received on reo[%d] %u msdus ( %llu bytes),",
7094 					i, pdev->stats.rx.rcvd_reo[i].num,
7095 					pdev->stats.rx.rcvd_reo[i].bytes);
7096 		DP_TRACE(FATAL, "intra-bss packets %u msdus ( %llu bytes),",
7097 			pdev->stats.rx.intra_bss.pkts.num,
7098 			pdev->stats.rx.intra_bss.pkts.bytes);
7099 		DP_TRACE(FATAL, "intra-bss fails %u msdus ( %llu bytes),",
7100 			pdev->stats.rx.intra_bss.fail.num,
7101 			pdev->stats.rx.intra_bss.fail.bytes);
7102 		DP_TRACE(FATAL, "raw packets %u msdus ( %llu bytes),",
7103 			pdev->stats.rx.raw.num,
7104 			pdev->stats.rx.raw.bytes);
7105 		DP_TRACE(FATAL, "dropped: error %u msdus",
7106 			pdev->stats.rx.err.mic_err);
7107 		DP_TRACE(FATAL, "peer invalid %u",
7108 			pdev->soc->stats.rx.err.rx_invalid_peer.num);
7109 
7110 		DP_TRACE(FATAL, "Reo Statistics");
7111 		DP_TRACE(FATAL, "rbm error: %u msdus",
7112 			pdev->soc->stats.rx.err.invalid_rbm);
7113 		DP_TRACE(FATAL, "hal ring access fail: %u msdus",
7114 			pdev->soc->stats.rx.err.hal_ring_access_fail);
7115 
7116 		for (error_code = 0; error_code < HAL_REO_ERR_MAX;
7117 				error_code++) {
7118 			if (!pdev->soc->stats.rx.err.reo_error[error_code])
7119 				continue;
7120 			DP_TRACE(FATAL, "Reo error number (%u): %u msdus",
7121 				error_code,
7122 				pdev->soc->stats.rx.err.reo_error[error_code]);
7123 		}
7124 
7125 		for (error_code = 0; error_code < HAL_RXDMA_ERR_MAX;
7126 				error_code++) {
7127 			if (!pdev->soc->stats.rx.err.rxdma_error[error_code])
7128 				continue;
7129 			DP_TRACE(FATAL, "Rxdma error number (%u): %u msdus",
7130 				error_code,
7131 				pdev->soc->stats.rx.err
7132 				.rxdma_error[error_code]);
7133 		}
7134 
7135 		DP_TRACE(FATAL, "Rx packets reaped per interrupt:");
7136 		DP_TRACE(FATAL, "Single Packet: %u",
7137 			 pdev->stats.rx_ind_histogram.pkts_1);
7138 		DP_TRACE(FATAL, "2-20 Packets:  %u",
7139 			 pdev->stats.rx_ind_histogram.pkts_2_20);
7140 		DP_TRACE(FATAL, "21-40 Packets: %u",
7141 			 pdev->stats.rx_ind_histogram.pkts_21_40);
7142 		DP_TRACE(FATAL, "41-60 Packets: %u",
7143 			 pdev->stats.rx_ind_histogram.pkts_41_60);
7144 		DP_TRACE(FATAL, "61-80 Packets: %u",
7145 			 pdev->stats.rx_ind_histogram.pkts_61_80);
7146 		DP_TRACE(FATAL, "81-100 Packets: %u",
7147 			 pdev->stats.rx_ind_histogram.pkts_81_100);
7148 		DP_TRACE(FATAL, "101-200 Packets: %u",
7149 			 pdev->stats.rx_ind_histogram.pkts_101_200);
7150 		DP_TRACE(FATAL, "   201+ Packets: %u",
7151 			 pdev->stats.rx_ind_histogram.pkts_201_plus);
7152 
7153 		DP_TRACE_STATS(ERROR, "%s: tso_enable: %u lro_enable: %u rx_hash: %u napi_enable: %u",
7154 			__func__,
7155 			pdev->soc->wlan_cfg_ctx->tso_enabled,
7156 			pdev->soc->wlan_cfg_ctx->lro_enabled,
7157 			pdev->soc->wlan_cfg_ctx->rx_hash,
7158 			pdev->soc->wlan_cfg_ctx->napi_enabled);
7159 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
7160 		DP_TRACE_STATS(ERROR, "%s: Tx flow stop queue: %u tx flow start queue offset: %u",
7161 			__func__,
7162 			pdev->soc->wlan_cfg_ctx->tx_flow_stop_queue_threshold,
7163 			pdev->soc->wlan_cfg_ctx->tx_flow_start_queue_offset);
7164 #endif
7165 	}
7166 }
7167 
7168 /*
7169  * dp_txrx_dump_stats() - Dump statistics
7170  * @psoc: soc handle
 * @value: statistics option
 * @level: verbosity level
 *
 * Return: QDF_STATUS
7171  */
7172 static QDF_STATUS dp_txrx_dump_stats(void *psoc, uint16_t value,
7173 				     enum qdf_stats_verbosity_level level)
7174 {
7175 	struct dp_soc *soc =
7176 		(struct dp_soc *)psoc;
7177 	QDF_STATUS status = QDF_STATUS_SUCCESS;
7178 
7179 	if (!soc) {
7180 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
7181 			"%s: soc is NULL", __func__);
7182 		return QDF_STATUS_E_INVAL;
7183 	}
7184 
7185 	switch (value) {
7186 	case CDP_TXRX_PATH_STATS:
7187 		dp_txrx_path_stats(soc);
7188 		break;
7189 
7190 	case CDP_RX_RING_STATS:
7191 		dp_print_per_ring_stats(soc);
7192 		break;
7193 
7194 	case CDP_TXRX_TSO_STATS:
7195 		/* TODO: NOT IMPLEMENTED */
7196 		break;
7197 
7198 	case CDP_DUMP_TX_FLOW_POOL_INFO:
7199 		cdp_dump_flow_pool_info((struct cdp_soc_t *)soc);
7200 		break;
7201 
7202 	case CDP_DP_NAPI_STATS:
7203 		dp_print_napi_stats(soc);
7204 		break;
7205 
7206 	case CDP_TXRX_DESC_STATS:
7207 		/* TODO: NOT IMPLEMENTED */
7208 		break;
7209 
7210 	default:
7211 		status = QDF_STATUS_E_INVAL;
7212 		break;
7213 	}
7214 
7215 	return status;
7216 
7217 }
7218 
7219 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
7220 /**
7221  * dp_update_flow_control_parameters() - API to store datapath
7222  *                            config parameters
7223  *                            flow control config parameters
7224  * @soc: soc handle
7225  * @params: ini parameter handle
7226  * Return: void
7227  */
7228 static inline
7229 void dp_update_flow_control_parameters(struct dp_soc *soc,
7230 				struct cdp_config_params *params)
7231 {
7232 	soc->wlan_cfg_ctx->tx_flow_stop_queue_threshold =
7233 					params->tx_flow_stop_queue_threshold;
7234 	soc->wlan_cfg_ctx->tx_flow_start_queue_offset =
7235 					params->tx_flow_start_queue_offset;
7236 }
7237 #else
7238 static inline
7239 void dp_update_flow_control_parameters(struct dp_soc *soc,
7240 				struct cdp_config_params *params)
7241 {
7242 }
7243 #endif
7244 
7245 /**
7246  * dp_update_config_parameters() - API to store datapath
7247  *                            config parameters
7248  * @psoc: soc handle
7249  * @params: ini parameter handle
7250  *
7251  * Return: status
7252  */
7253 static
7254 QDF_STATUS dp_update_config_parameters(struct cdp_soc *psoc,
7255 				struct cdp_config_params *params)
7256 {
7257 	struct dp_soc *soc = (struct dp_soc *)psoc;
7258 
7259 	if (!(soc)) {
7260 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
7261 				"%s: Invalid handle", __func__);
7262 		return QDF_STATUS_E_INVAL;
7263 	}
7264 
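	/* Cache the INI-provided datapath feature flags in the soc cfg context */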
7265 	soc->wlan_cfg_ctx->tso_enabled = params->tso_enable;
7266 	soc->wlan_cfg_ctx->lro_enabled = params->lro_enable;
7267 	soc->wlan_cfg_ctx->rx_hash = params->flow_steering_enable;
7268 	soc->wlan_cfg_ctx->tcp_udp_checksumoffload =
7269 				params->tcp_udp_checksumoffload;
7270 	soc->wlan_cfg_ctx->napi_enabled = params->napi_enable;
7271 	dp_update_flow_control_parameters(soc, params);
7272 
7273 	return QDF_STATUS_SUCCESS;
7274 }
7275 
7276 /**
7277  * dp_txrx_set_wds_rx_policy() - API to set the WDS rx policy
7278  *                            on a vdev
7279  * @vdev_handle: datapath vdev handle
7280  * @val: WDS rx policy bitmask
7281  *
7282  * Return: void
7283  */
7284 #ifdef WDS_VENDOR_EXTENSION
7285 void
7286 dp_txrx_set_wds_rx_policy(
7287 		struct cdp_vdev *vdev_handle,
7288 		u_int32_t val)
7289 {
7290 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
7291 	struct dp_peer *peer;
7292 	if (vdev->opmode == wlan_op_mode_ap) {
7293 		/* for ap, set it on bss_peer */
7294 		TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
7295 			if (peer->bss_peer) {
7296 				peer->wds_ecm.wds_rx_filter = 1;
7297 				peer->wds_ecm.wds_rx_ucast_4addr = (val & WDS_POLICY_RX_UCAST_4ADDR) ? 1:0;
7298 				peer->wds_ecm.wds_rx_mcast_4addr = (val & WDS_POLICY_RX_MCAST_4ADDR) ? 1:0;
7299 				break;
7300 			}
7301 		}
7302 	} else if (vdev->opmode == wlan_op_mode_sta) {
7303 		peer = TAILQ_FIRST(&vdev->peer_list);
7304 		peer = TAILQ_FIRST(&vdev->peer_list);
		/* Guard against an empty peer list before dereferencing */
		if (!peer)
			return;
7305 		peer->wds_ecm.wds_rx_filter = 1;
7306 		peer->wds_ecm.wds_rx_mcast_4addr = (val & WDS_POLICY_RX_MCAST_4ADDR) ? 1:0;
7307 	}
7308 }
7309 
7310 /**
7311  * dp_txrx_peer_wds_tx_policy_update() - API to set tx wds policy
7312  *
7313  * @peer_handle - datapath peer handle
7314  * @wds_tx_ucast: policy for unicast transmission
7315  * @wds_tx_mcast: policy for multicast transmission
7316  *
7317  * Return: void
7318  */
7319 void
7320 dp_txrx_peer_wds_tx_policy_update(struct cdp_peer *peer_handle,
7321 		int wds_tx_ucast, int wds_tx_mcast)
7322 {
7323 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
7324 	if (wds_tx_ucast || wds_tx_mcast) {
7325 		peer->wds_enabled = 1;
7326 		peer->wds_ecm.wds_tx_ucast_4addr = wds_tx_ucast;
7327 		peer->wds_ecm.wds_tx_mcast_4addr = wds_tx_mcast;
7328 	} else {
7329 		peer->wds_enabled = 0;
7330 		peer->wds_ecm.wds_tx_ucast_4addr = 0;
7331 		peer->wds_ecm.wds_tx_mcast_4addr = 0;
7332 	}
7333 
7334 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
7335 			FL("Policy Update set to: peer->wds_enabled %d "
7336 			   "peer->wds_ecm.wds_tx_ucast_4addr %d "
7337 			   "peer->wds_ecm.wds_tx_mcast_4addr %d"),
7338 			peer->wds_enabled, peer->wds_ecm.wds_tx_ucast_4addr,
7339 			peer->wds_ecm.wds_tx_mcast_4addr);
7342 }
7343 #endif
7344 
7345 static struct cdp_wds_ops dp_ops_wds = {
7346 	.vdev_set_wds = dp_vdev_set_wds,
7347 #ifdef WDS_VENDOR_EXTENSION
7348 	.txrx_set_wds_rx_policy = dp_txrx_set_wds_rx_policy,
7349 	.txrx_wds_peer_tx_policy_update = dp_txrx_peer_wds_tx_policy_update,
7350 #endif
7351 };
7352 
7353 /*
7354  * dp_txrx_data_tx_cb_set(): set the callback for non standard tx
7355  * @vdev_handle - datapath vdev handle
7356  * @callback - callback function
7357  * @ctxt: callback context
7358  *
7359  */
7360 static void
7361 dp_txrx_data_tx_cb_set(struct cdp_vdev *vdev_handle,
7362 		       ol_txrx_data_tx_cb callback, void *ctxt)
7363 {
7364 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
7365 
7366 	vdev->tx_non_std_data_callback.func = callback;
7367 	vdev->tx_non_std_data_callback.ctxt = ctxt;
7368 }
7369 
7370 /**
7371  * dp_pdev_get_dp_txrx_handle() - get dp handle from pdev
7372  * @pdev_hdl: datapath pdev handle
7373  *
7374  * Return: opaque pointer to dp txrx handle
7375  */
7376 static void *dp_pdev_get_dp_txrx_handle(struct cdp_pdev *pdev_hdl)
7377 {
7378 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_hdl;
7379 
7380 	return pdev->dp_txrx_handle;
7381 }
7382 
7383 /**
7384  * dp_pdev_set_dp_txrx_handle() - set dp handle in pdev
7385  * @pdev_hdl: datapath pdev handle
7386  * @dp_txrx_hdl: opaque pointer for dp_txrx_handle
7387  *
7388  * Return: void
7389  */
7390 static void
7391 dp_pdev_set_dp_txrx_handle(struct cdp_pdev *pdev_hdl, void *dp_txrx_hdl)
7392 {
7393 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_hdl;
7394 
7395 	pdev->dp_txrx_handle = dp_txrx_hdl;
7396 }
7397 
7398 /**
7399  * dp_soc_get_dp_txrx_handle() - get context for external-dp from dp soc
7400  * @soc_handle: datapath soc handle
7401  *
7402  * Return: opaque pointer to external dp (non-core DP)
7403  */
7404 static void *dp_soc_get_dp_txrx_handle(struct cdp_soc *soc_handle)
7405 {
7406 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
7407 
7408 	return soc->external_txrx_handle;
7409 }
7410 
7411 /**
7412  * dp_soc_set_dp_txrx_handle() - set external dp handle in soc
7413  * @soc_handle: datapath soc handle
7414  * @txrx_handle: opaque pointer to external dp (non-core DP)
7415  *
7416  * Return: void
7417  */
7418 static void
7419 dp_soc_set_dp_txrx_handle(struct cdp_soc *soc_handle, void *txrx_handle)
7420 {
7421 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
7422 
7423 	soc->external_txrx_handle = txrx_handle;
7424 }
7425 
7426 #ifdef FEATURE_AST
7427 static void dp_peer_teardown_wifi3(struct cdp_vdev *vdev_hdl, void *peer_hdl)
7428 {
7429 	struct dp_vdev *vdev = (struct dp_vdev *) vdev_hdl;
7430 	struct dp_peer *peer = (struct dp_peer *) peer_hdl;
7431 	struct dp_soc *soc = (struct dp_soc *) vdev->pdev->soc;
7432 
7433 	/*
7434 	 * For the BSS peer, a new peer is not created on alloc_node if a
7435 	 * peer with the same address already exists; instead the refcnt of
7436 	 * the existing peer is increased. Correspondingly, in the delete
7437 	 * path only the refcnt is decreased, and the peer is deleted only
7438 	 * when all references are dropped. So delete_in_progress should not
7439 	 * be set for the bss_peer unless only 2 references remain (the peer
7440 	 * map reference and the peer hash table reference).
7441 	 */
7442 	if (peer->bss_peer && (qdf_atomic_read(&peer->ref_cnt) > 2)) {
7443 		return;
7444 	}
7445 
7446 	peer->delete_in_progress = true;
7447 	dp_peer_delete_ast_entries(soc, peer);
7448 }
7449 #endif
7450 
7451 #ifdef ATH_SUPPORT_NAC_RSSI
7452 /**
7453  * dp_vdev_get_neighbour_rssi(): Store RSSI for configured NAC
7454  * @vdev_hdl: DP vdev handle
7455  * @mac_addr: neighbour peer mac address
 * @rssi: rssi value
7456  *
7457  * Return: 0 for success. nonzero for failure.
7458  */
7459 QDF_STATUS  dp_vdev_get_neighbour_rssi(struct cdp_vdev *vdev_hdl,
7460 				       char *mac_addr,
7461 				       uint8_t *rssi)
7462 {
7463 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_hdl;
7464 	struct dp_pdev *pdev = vdev->pdev;
7465 	struct dp_neighbour_peer *peer = NULL;
7466 	QDF_STATUS status = QDF_STATUS_E_FAILURE;
7467 
7468 	*rssi = 0;
7469 	qdf_spin_lock_bh(&pdev->neighbour_peer_mutex);
7470 	TAILQ_FOREACH(peer, &pdev->neighbour_peers_list,
7471 		      neighbour_peer_list_elem) {
7472 		if (qdf_mem_cmp(&peer->neighbour_peers_macaddr.raw[0],
7473 				mac_addr, DP_MAC_ADDR_LEN) == 0) {
7474 			*rssi = peer->rssi;
7475 			status = QDF_STATUS_SUCCESS;
7476 			break;
7477 		}
7478 	}
7479 	qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex);
7480 	return status;
7481 }
7482 
7483 static QDF_STATUS dp_config_for_nac_rssi(struct cdp_vdev *vdev_handle,
7484 		enum cdp_nac_param_cmd cmd, char *bssid, char *client_macaddr,
7485 		uint8_t chan_num)
7486 {
7487 
7488 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
7489 	struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;
7490 	struct dp_soc *soc = (struct dp_soc *) vdev->pdev->soc;
7491 
7492 	pdev->nac_rssi_filtering = 1;
7493 	/* Store address of NAC (neighbour peer) which will be checked
7494 	 * against TA of received packets.
7495 	 */
7496 
7497 	if (cmd == CDP_NAC_PARAM_ADD) {
7498 		dp_update_filter_neighbour_peers(vdev_handle, DP_NAC_PARAM_ADD,
7499 						 client_macaddr);
7500 	} else if (cmd == CDP_NAC_PARAM_DEL) {
7501 		dp_update_filter_neighbour_peers(vdev_handle,
7502 						 DP_NAC_PARAM_DEL,
7503 						 client_macaddr);
7504 	}
7505 
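	/* Also notify FW of the BSSID so it can report RSSI for this NAC client */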
7506 	if (soc->cdp_soc.ol_ops->config_bssid_in_fw_for_nac_rssi)
7507 		soc->cdp_soc.ol_ops->config_bssid_in_fw_for_nac_rssi
7508 			((void *)vdev->pdev->ctrl_pdev,
7509 			 vdev->vdev_id, cmd, bssid);
7510 
7511 	return QDF_STATUS_SUCCESS;
7512 }
7513 #endif
7514 
7515 static QDF_STATUS dp_peer_map_attach_wifi3(struct cdp_soc_t  *soc_hdl,
7516 		uint32_t max_peers)
7517 {
7518 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
7519 
7520 	soc->max_peers = max_peers;
7521 
7522 	qdf_print("%s max_peers %u\n", __func__, max_peers);
7523 
7524 	if (dp_peer_find_attach(soc))
7525 		return QDF_STATUS_E_FAILURE;
7526 
7527 	return QDF_STATUS_SUCCESS;
7528 }
7529 
7530 /**
7531  * dp_pdev_set_ctrl_pdev() - set ctrl pdev handle in dp pdev
7532  * @dp_pdev: dp pdev handle
7533  * @ctrl_pdev: UMAC ctrl pdev handle
7534  *
7535  * Return: void
7536  */
7537 static void dp_pdev_set_ctrl_pdev(struct cdp_pdev *dp_pdev,
7538 				  struct cdp_ctrl_objmgr_pdev *ctrl_pdev)
7539 {
7540 	struct dp_pdev *pdev = (struct dp_pdev *)dp_pdev;
7541 
7542 	pdev->ctrl_pdev = ctrl_pdev;
7543 }
7544 
7545 static struct cdp_cmn_ops dp_ops_cmn = {
7546 	.txrx_soc_attach_target = dp_soc_attach_target_wifi3,
7547 	.txrx_vdev_attach = dp_vdev_attach_wifi3,
7548 	.txrx_vdev_detach = dp_vdev_detach_wifi3,
7549 	.txrx_pdev_attach = dp_pdev_attach_wifi3,
7550 	.txrx_pdev_detach = dp_pdev_detach_wifi3,
7551 	.txrx_peer_create = dp_peer_create_wifi3,
7552 	.txrx_peer_setup = dp_peer_setup_wifi3,
7553 #ifdef FEATURE_AST
7554 	.txrx_peer_teardown = dp_peer_teardown_wifi3,
7555 #else
7556 	.txrx_peer_teardown = NULL,
7557 #endif
7558 	.txrx_peer_add_ast = dp_peer_add_ast_wifi3,
7559 	.txrx_peer_del_ast = dp_peer_del_ast_wifi3,
7560 	.txrx_peer_update_ast = dp_peer_update_ast_wifi3,
7561 	.txrx_peer_ast_hash_find = dp_peer_ast_hash_find_wifi3,
7562 	.txrx_peer_ast_get_pdev_id = dp_peer_ast_get_pdev_id_wifi3,
7563 	.txrx_peer_ast_get_next_hop = dp_peer_ast_get_next_hop_wifi3,
7564 	.txrx_peer_ast_set_type = dp_peer_ast_set_type_wifi3,
7565 	.txrx_peer_ast_get_type = dp_peer_ast_get_type_wifi3,
7566 	.txrx_peer_delete = dp_peer_delete_wifi3,
7567 	.txrx_vdev_register = dp_vdev_register_wifi3,
7568 	.txrx_soc_detach = dp_soc_detach_wifi3,
7569 	.txrx_get_vdev_mac_addr = dp_get_vdev_mac_addr_wifi3,
7570 	.txrx_get_vdev_from_vdev_id = dp_get_vdev_from_vdev_id_wifi3,
7571 	.txrx_get_ctrl_pdev_from_vdev = dp_get_ctrl_pdev_from_vdev_wifi3,
7572 	.txrx_ath_getstats = dp_get_device_stats,
7573 	.addba_requestprocess = dp_addba_requestprocess_wifi3,
7574 	.addba_responsesetup = dp_addba_responsesetup_wifi3,
7575 	.addba_resp_tx_completion = dp_addba_resp_tx_completion_wifi3,
7576 	.delba_process = dp_delba_process_wifi3,
7577 	.set_addba_response = dp_set_addba_response,
7578 	.get_peer_mac_addr_frm_id = dp_get_peer_mac_addr_frm_id,
7579 	.flush_cache_rx_queue = NULL,
7580 	/* TODO: get API's for dscp-tid need to be added*/
7581 	.set_vdev_dscp_tid_map = dp_set_vdev_dscp_tid_map_wifi3,
7582 	.set_pdev_dscp_tid_map = dp_set_pdev_dscp_tid_map_wifi3,
7583 	.txrx_stats_request = dp_txrx_stats_request,
7584 	.txrx_set_monitor_mode = dp_vdev_set_monitor_mode,
7585 	.txrx_get_pdev_id_frm_pdev = dp_get_pdev_id_frm_pdev,
7586 	.txrx_pdev_set_chan_noise_floor = dp_pdev_set_chan_noise_floor,
7587 	.txrx_set_nac = dp_set_nac,
7588 	.txrx_get_tx_pending = dp_get_tx_pending,
7589 	.txrx_set_pdev_tx_capture = dp_config_debug_sniffer,
7590 	.txrx_get_peer_mac_from_peer_id = dp_get_peer_mac_from_peer_id,
7591 	.display_stats = dp_txrx_dump_stats,
7592 	.txrx_soc_set_nss_cfg = dp_soc_set_nss_cfg_wifi3,
7593 	.txrx_soc_get_nss_cfg = dp_soc_get_nss_cfg_wifi3,
7594 	.txrx_intr_attach = dp_soc_interrupt_attach_wrapper,
7595 	.txrx_intr_detach = dp_soc_interrupt_detach,
7596 	.set_pn_check = dp_set_pn_check_wifi3,
7597 	.update_config_parameters = dp_update_config_parameters,
7598 	/* TODO: Add other functions */
7599 	.txrx_data_tx_cb_set = dp_txrx_data_tx_cb_set,
7600 	.get_dp_txrx_handle = dp_pdev_get_dp_txrx_handle,
7601 	.set_dp_txrx_handle = dp_pdev_set_dp_txrx_handle,
7602 	.get_soc_dp_txrx_handle = dp_soc_get_dp_txrx_handle,
7603 	.set_soc_dp_txrx_handle = dp_soc_set_dp_txrx_handle,
7604 	.tx_send = dp_tx_send,
7605 	.txrx_peer_reset_ast = dp_wds_reset_ast_wifi3,
7606 	.txrx_peer_reset_ast_table = dp_wds_reset_ast_table_wifi3,
7607 	.txrx_peer_flush_ast_table = dp_wds_flush_ast_table_wifi3,
7608 	.txrx_peer_map_attach = dp_peer_map_attach_wifi3,
7609 	.txrx_pdev_set_ctrl_pdev = dp_pdev_set_ctrl_pdev,
7610 	.delba_tx_completion = dp_delba_tx_completion_wifi3,
7611 };
7612 
7613 static struct cdp_ctrl_ops dp_ops_ctrl = {
7614 	.txrx_peer_authorize = dp_peer_authorize,
7615 #ifdef QCA_SUPPORT_SON
7616 	.txrx_set_inact_params = dp_set_inact_params,
7617 	.txrx_start_inact_timer = dp_start_inact_timer,
7618 	.txrx_set_overload = dp_set_overload,
7619 	.txrx_peer_is_inact = dp_peer_is_inact,
7620 	.txrx_mark_peer_inact = dp_mark_peer_inact,
7621 #endif
7622 	.txrx_set_vdev_rx_decap_type = dp_set_vdev_rx_decap_type,
7623 	.txrx_set_tx_encap_type = dp_set_vdev_tx_encap_type,
7624 #ifdef MESH_MODE_SUPPORT
7625 	.txrx_set_mesh_mode  = dp_peer_set_mesh_mode,
7626 	.txrx_set_mesh_rx_filter = dp_peer_set_mesh_rx_filter,
7627 #endif
7628 	.txrx_set_vdev_param = dp_set_vdev_param,
7629 	.txrx_peer_set_nawds = dp_peer_set_nawds,
7630 	.txrx_set_pdev_reo_dest = dp_set_pdev_reo_dest,
7631 	.txrx_get_pdev_reo_dest = dp_get_pdev_reo_dest,
7632 	.txrx_set_filter_neighbour_peers = dp_set_filter_neighbour_peers,
7633 	.txrx_update_filter_neighbour_peers =
7634 		dp_update_filter_neighbour_peers,
7635 	.txrx_get_sec_type = dp_get_sec_type,
7636 	/* TODO: Add other functions */
7637 	.txrx_wdi_event_sub = dp_wdi_event_sub,
7638 	.txrx_wdi_event_unsub = dp_wdi_event_unsub,
7639 #ifdef WDI_EVENT_ENABLE
7640 	.txrx_get_pldev = dp_get_pldev,
7641 #endif
7642 	.txrx_set_pdev_param = dp_set_pdev_param,
7643 #ifdef ATH_SUPPORT_NAC_RSSI
7644 	.txrx_vdev_config_for_nac_rssi = dp_config_for_nac_rssi,
7645 	.txrx_vdev_get_neighbour_rssi = dp_vdev_get_neighbour_rssi,
7646 #endif
7647 	.set_key = dp_set_michael_key,
7648 };
7649 
7650 static struct cdp_me_ops dp_ops_me = {
7651 #ifdef ATH_SUPPORT_IQUE
7652 	.tx_me_alloc_descriptor = dp_tx_me_alloc_descriptor,
7653 	.tx_me_free_descriptor = dp_tx_me_free_descriptor,
7654 	.tx_me_convert_ucast = dp_tx_me_send_convert_ucast,
7655 #endif
7656 };
7657 
7658 static struct cdp_mon_ops dp_ops_mon = {
7659 	.txrx_monitor_set_filter_ucast_data = NULL,
7660 	.txrx_monitor_set_filter_mcast_data = NULL,
7661 	.txrx_monitor_set_filter_non_data = NULL,
7662 	.txrx_monitor_get_filter_ucast_data = dp_vdev_get_filter_ucast_data,
7663 	.txrx_monitor_get_filter_mcast_data = dp_vdev_get_filter_mcast_data,
7664 	.txrx_monitor_get_filter_non_data = dp_vdev_get_filter_non_data,
7665 	.txrx_reset_monitor_mode = dp_reset_monitor_mode,
7666 	/* Added support for HK advance filter */
7667 	.txrx_set_advance_monitor_filter = dp_pdev_set_advance_monitor_filter,
7668 };
7669 
7670 static struct cdp_host_stats_ops dp_ops_host_stats = {
7671 	.txrx_per_peer_stats = dp_get_host_peer_stats,
7672 	.get_fw_peer_stats = dp_get_fw_peer_stats,
7673 	.get_htt_stats = dp_get_htt_stats,
7674 	.txrx_enable_enhanced_stats = dp_enable_enhanced_stats,
7675 	.txrx_disable_enhanced_stats = dp_disable_enhanced_stats,
7676 	.txrx_stats_publish = dp_txrx_stats_publish,
7677 	.txrx_get_vdev_stats  = dp_txrx_get_vdev_stats,
7678 	.txrx_get_peer_stats = dp_txrx_get_peer_stats,
7679 	.txrx_reset_peer_stats = dp_txrx_reset_peer_stats,
7680 	/* TODO */
7681 };
7682 
7683 static struct cdp_raw_ops dp_ops_raw = {
7684 	/* TODO */
7685 };
7686 
7687 #ifdef CONFIG_WIN
7688 static struct cdp_pflow_ops dp_ops_pflow = {
7689 	/* TODO */
7690 };
7691 #endif /* CONFIG_WIN */
7692 
7693 #ifdef FEATURE_RUNTIME_PM
7694 /**
7695  * dp_runtime_suspend() - ensure DP is ready to runtime suspend
7696  * @opaque_pdev: DP pdev context
7697  *
7698  * DP is ready to runtime suspend if there are no pending TX packets.
7699  *
7700  * Return: QDF_STATUS
7701  */
7702 static QDF_STATUS dp_runtime_suspend(struct cdp_pdev *opaque_pdev)
7703 {
7704 	struct dp_pdev *pdev = (struct dp_pdev *)opaque_pdev;
7705 	struct dp_soc *soc = pdev->soc;
7706 
7707 	/* Call DP TX flow control API to check if there are any
7708 	   pending packets */
7709 
7710 	if (soc->intr_mode == DP_INTR_POLL)
7711 		qdf_timer_stop(&soc->int_timer);
7712 
7713 	return QDF_STATUS_SUCCESS;
7714 }
7715 
7716 /**
7717  * dp_runtime_resume() - ensure DP is ready to runtime resume
7718  * @opaque_pdev: DP pdev context
7719  *
7720  * Resume DP for runtime PM.
7721  *
7722  * Return: QDF_STATUS
7723  */
7724 static QDF_STATUS dp_runtime_resume(struct cdp_pdev *opaque_pdev)
7725 {
7726 	struct dp_pdev *pdev = (struct dp_pdev *)opaque_pdev;
7727 	struct dp_soc *soc = pdev->soc;
7728 	void *hal_srng;
7729 	int i;
7730 
7731 	if (soc->intr_mode == DP_INTR_POLL)
7732 		qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
7733 
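	/* Kick each TCL data ring so the head pointer is synced and HW can
	   drain packets queued during suspend */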
7734 	for (i = 0; i < MAX_TCL_DATA_RINGS; i++) {
7735 		hal_srng = soc->tcl_data_ring[i].hal_srng;
7736 		if (hal_srng) {
7737 			/* We actually only need to acquire the lock */
7738 			hal_srng_access_start(soc->hal_soc, hal_srng);
7739 			/* Update SRC ring head pointer for HW to send
7740 			   all pending packets */
7741 			hal_srng_access_end(soc->hal_soc, hal_srng);
7742 		}
7743 	}
7744 
7745 	return QDF_STATUS_SUCCESS;
7746 }
7747 #endif /* FEATURE_RUNTIME_PM */
7748 
7749 static QDF_STATUS dp_bus_suspend(struct cdp_pdev *opaque_pdev)
7750 {
7751 	struct dp_pdev *pdev = (struct dp_pdev *)opaque_pdev;
7752 	struct dp_soc *soc = pdev->soc;
7753 
7754 	if (soc->intr_mode == DP_INTR_POLL)
7755 		qdf_timer_stop(&soc->int_timer);
7756 
7757 	return QDF_STATUS_SUCCESS;
7758 }
7759 
7760 static QDF_STATUS dp_bus_resume(struct cdp_pdev *opaque_pdev)
7761 {
7762 	struct dp_pdev *pdev = (struct dp_pdev *)opaque_pdev;
7763 	struct dp_soc *soc = pdev->soc;
7764 
7765 	if (soc->intr_mode == DP_INTR_POLL)
7766 		qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
7767 
7768 	return QDF_STATUS_SUCCESS;
7769 }
7770 
7771 #ifndef CONFIG_WIN
7772 static struct cdp_misc_ops dp_ops_misc = {
7773 	.tx_non_std = dp_tx_non_std,
7774 	.get_opmode = dp_get_opmode,
7775 #ifdef FEATURE_RUNTIME_PM
7776 	.runtime_suspend = dp_runtime_suspend,
7777 	.runtime_resume = dp_runtime_resume,
7778 #endif /* FEATURE_RUNTIME_PM */
7779 	.pkt_log_init = dp_pkt_log_init,
7780 	.pkt_log_con_service = dp_pkt_log_con_service,
7781 };
7782 
7783 static struct cdp_flowctl_ops dp_ops_flowctl = {
7784 	/* WIFI 3.0 DP implement as required. */
7785 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
7786 	.flow_pool_map_handler = dp_tx_flow_pool_map,
7787 	.flow_pool_unmap_handler = dp_tx_flow_pool_unmap,
7788 	.register_pause_cb = dp_txrx_register_pause_cb,
7789 	.dump_flow_pool_info = dp_tx_dump_flow_pool_info,
7790 #endif /* QCA_LL_TX_FLOW_CONTROL_V2 */
7791 };
7792 
7793 static struct cdp_lflowctl_ops dp_ops_l_flowctl = {
7794 	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
7795 };
7796 
7797 #ifdef IPA_OFFLOAD
7798 static struct cdp_ipa_ops dp_ops_ipa = {
7799 	.ipa_get_resource = dp_ipa_get_resource,
7800 	.ipa_set_doorbell_paddr = dp_ipa_set_doorbell_paddr,
7801 	.ipa_op_response = dp_ipa_op_response,
7802 	.ipa_register_op_cb = dp_ipa_register_op_cb,
7803 	.ipa_get_stat = dp_ipa_get_stat,
7804 	.ipa_tx_data_frame = dp_tx_send_ipa_data_frame,
7805 	.ipa_enable_autonomy = dp_ipa_enable_autonomy,
7806 	.ipa_disable_autonomy = dp_ipa_disable_autonomy,
7807 	.ipa_setup = dp_ipa_setup,
7808 	.ipa_cleanup = dp_ipa_cleanup,
7809 	.ipa_setup_iface = dp_ipa_setup_iface,
7810 	.ipa_cleanup_iface = dp_ipa_cleanup_iface,
7811 	.ipa_enable_pipes = dp_ipa_enable_pipes,
7812 	.ipa_disable_pipes = dp_ipa_disable_pipes,
7813 	.ipa_set_perf_level = dp_ipa_set_perf_level
7814 };
7815 #endif
7816 
7817 static struct cdp_bus_ops dp_ops_bus = {
7818 	.bus_suspend = dp_bus_suspend,
7819 	.bus_resume = dp_bus_resume
7820 };
7821 
7822 static struct cdp_ocb_ops dp_ops_ocb = {
7823 	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
7824 };
7825 
7826 
7827 static struct cdp_throttle_ops dp_ops_throttle = {
7828 	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
7829 };
7830 
7831 static struct cdp_mob_stats_ops dp_ops_mob_stats = {
7832 	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
7833 };
7834 
7835 static struct cdp_cfg_ops dp_ops_cfg = {
7836 	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
7837 };
7838 
7839 /*
7840  * dp_wrapper_peer_get_ref_by_addr - wrapper function to get to peer
7841  * dp_wrapper_peer_get_ref_by_addr - wrapper function to get a peer by address
7842  * @peer_mac_addr: peer mac address
7843  * @local_id: local id for the peer
7844  * @debug_id: to track enum peer access
7845  *
7846  * Return: peer instance pointer
7847  */
7848 static inline void *
7849 dp_wrapper_peer_get_ref_by_addr(struct cdp_pdev *dev, u8 *peer_mac_addr,
7850 				u8 *local_id,
7851 				enum peer_debug_id_type debug_id)
7852 {
7853 	/*
7854 	 * Currently this function does not implement the "get ref"
7855 	 * functionality and is mapped to dp_find_peer_by_addr which does not
7856 	 * increment the peer ref count. So the peer state is uncertain after
7857 	 * calling this API. The functionality needs to be implemented.
7858 	 * Accordingly the corresponding release_ref function is NULL.
7859 	 */
7860 	return dp_find_peer_by_addr(dev, peer_mac_addr, local_id);
7861 }
7862 
7863 static struct cdp_peer_ops dp_ops_peer = {
7864 	.register_peer = dp_register_peer,
7865 	.clear_peer = dp_clear_peer,
7866 	.find_peer_by_addr = dp_find_peer_by_addr,
7867 	.find_peer_by_addr_and_vdev = dp_find_peer_by_addr_and_vdev,
7868 	.peer_get_ref_by_addr = dp_wrapper_peer_get_ref_by_addr,
7869 	.peer_release_ref = NULL,
7870 	.local_peer_id = dp_local_peer_id,
7871 	.peer_find_by_local_id = dp_peer_find_by_local_id,
7872 	.peer_state_update = dp_peer_state_update,
7873 	.get_vdevid = dp_get_vdevid,
7874 	.get_vdev_by_sta_id = dp_get_vdev_by_sta_id,
7875 	.peer_get_peer_mac_addr = dp_peer_get_peer_mac_addr,
7876 	.get_vdev_for_peer = dp_get_vdev_for_peer,
7877 	.get_peer_state = dp_get_peer_state,
7878 	.get_last_mgmt_timestamp = dp_get_last_mgmt_timestamp,
7879 	.update_last_mgmt_timestamp = dp_update_last_mgmt_timestamp,
7880 };
7881 #endif
7882 
7883 static struct cdp_ops dp_txrx_ops = {
7884 	.cmn_drv_ops = &dp_ops_cmn,
7885 	.ctrl_ops = &dp_ops_ctrl,
7886 	.me_ops = &dp_ops_me,
7887 	.mon_ops = &dp_ops_mon,
7888 	.host_stats_ops = &dp_ops_host_stats,
7889 	.wds_ops = &dp_ops_wds,
7890 	.raw_ops = &dp_ops_raw,
7891 #ifdef CONFIG_WIN
7892 	.pflow_ops = &dp_ops_pflow,
7893 #endif /* CONFIG_WIN */
7894 #ifndef CONFIG_WIN
7895 	.misc_ops = &dp_ops_misc,
7896 	.cfg_ops = &dp_ops_cfg,
7897 	.flowctl_ops = &dp_ops_flowctl,
7898 	.l_flowctl_ops = &dp_ops_l_flowctl,
7899 #ifdef IPA_OFFLOAD
7900 	.ipa_ops = &dp_ops_ipa,
7901 #endif
7902 	.bus_ops = &dp_ops_bus,
7903 	.ocb_ops = &dp_ops_ocb,
7904 	.peer_ops = &dp_ops_peer,
7905 	.throttle_ops = &dp_ops_throttle,
7906 	.mob_stats_ops = &dp_ops_mob_stats,
7907 #endif
7908 };
7909 
7910 /*
7911  * dp_soc_set_txrx_ring_map()
7912  * dp_soc_set_txrx_ring_map() - set default Tx ring map for interrupt contexts
7913  * @soc: DP handle for soc
7914  * Return: Void
7915  */
7916 static void dp_soc_set_txrx_ring_map(struct dp_soc *soc)
7917 {
7918 	uint32_t i;
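	/* Program the default CPU-to-TCL ring map for every interrupt context */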
7919 	for (i = 0; i < WLAN_CFG_INT_NUM_CONTEXTS; i++) {
7920 		soc->tx_ring_map[i] = dp_cpu_ring_map[DP_DEFAULT_MAP][i];
7921 	}
7922 }
7923 
7924 #ifdef QCA_WIFI_QCA8074
7925 /**
7926  * dp_soc_attach_wifi3() - Attach txrx SOC
7927  * @ctrl_psoc:	Opaque SOC handle from control plane
7928  * @hif_handle:	Opaque HIF handle
7929  * @htc_handle:	Opaque HTC handle
7930  * @qdf_osdev:	QDF device
7931  * @ol_ops:	Offload Operations
7932  * @device_id:	Device ID
7933  *
7934  * Return: DP SOC handle on success, NULL on failure
7935  */
7936 void *dp_soc_attach_wifi3(void *ctrl_psoc, void *hif_handle,
7937 			  HTC_HANDLE htc_handle, qdf_device_t qdf_osdev,
7938 			  struct ol_if_ops *ol_ops, uint16_t device_id)
7939 {
7940 	struct dp_soc *soc = qdf_mem_malloc(sizeof(*soc));
7941 	int target_type;
7942 
7943 	if (!soc) {
7944 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
7945 			FL("DP SOC memory allocation failed"));
7946 		goto fail0;
7947 	}
7948 
7949 	soc->device_id = device_id;
7950 	soc->cdp_soc.ops = &dp_txrx_ops;
7951 	soc->cdp_soc.ol_ops = ol_ops;
7952 	soc->ctrl_psoc = ctrl_psoc;
7953 	soc->osdev = qdf_osdev;
7954 	soc->hif_handle = hif_handle;
7955 
7956 	soc->hal_soc = hif_get_hal_handle(hif_handle);
7957 	soc->htt_handle = htt_soc_attach(soc, ctrl_psoc, htc_handle,
7958 		soc->hal_soc, qdf_osdev);
7959 	if (!soc->htt_handle) {
7960 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
7961 			FL("HTT attach failed"));
7962 		goto fail1;
7963 	}
7964 
7965 	soc->wlan_cfg_ctx = wlan_cfg_soc_attach(soc->ctrl_psoc);
7966 	if (!soc->wlan_cfg_ctx) {
7967 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
7968 			FL("wlan_cfg_soc_attach failed"));
7969 		goto fail2;
7970 	}
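	/* Size the REO destination rings according to the target family */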
7971 	target_type = hal_get_target_type(soc->hal_soc);
7972 	switch (target_type) {
7973 	case TARGET_TYPE_QCA6290:
7974 #ifdef QCA_WIFI_QCA6390
7975 	case TARGET_TYPE_QCA6390:
7976 #endif
7977 		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
7978 					       REO_DST_RING_SIZE_QCA6290);
7979 		break;
7980 	case TARGET_TYPE_QCA8074:
7981 	case TARGET_TYPE_QCA8074V2:
7982 		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
7983 					       REO_DST_RING_SIZE_QCA8074);
7984 		break;
7985 	default:
7986 		qdf_print("%s: Unknown tgt type %d\n", __func__, target_type);
7987 		qdf_assert_always(0);
7988 		break;
7989 	}
7990 
7991 	wlan_cfg_set_rx_hash(soc->wlan_cfg_ctx,
7992 			     cfg_get(ctrl_psoc, CFG_DP_RX_HASH));
7993 	soc->cce_disable = false;
7994 
7995 	if (soc->cdp_soc.ol_ops->get_dp_cfg_param) {
7996 		int ret = soc->cdp_soc.ol_ops->get_dp_cfg_param(soc->ctrl_psoc,
7997 				CDP_CFG_MAX_PEER_ID);
7998 
7999 		if (ret != -EINVAL) {
8000 			wlan_cfg_set_max_peer_id(soc->wlan_cfg_ctx, ret);
8001 		}
8002 
8003 		ret = soc->cdp_soc.ol_ops->get_dp_cfg_param(soc->ctrl_psoc,
8004 				CDP_CFG_CCE_DISABLE);
8005 		if (ret == 1)
8006 			soc->cce_disable = true;
8007 	}
8008 
8009 	qdf_spinlock_create(&soc->peer_ref_mutex);
8010 
8011 	qdf_spinlock_create(&soc->reo_desc_freelist_lock);
8012 	qdf_list_create(&soc->reo_desc_freelist, REO_DESC_FREELIST_SIZE);
8013 
8014 	/* fill the tx/rx cpu ring map*/
8015 	dp_soc_set_txrx_ring_map(soc);
8016 
8017 	qdf_spinlock_create(&soc->htt_stats.lock);
8018 	/* initialize work queue for stats processing */
8019 	qdf_create_work(0, &soc->htt_stats.work, htt_t2h_stats_handler, soc);
8020 
8021 	/*Initialize inactivity timer for wifison */
8022 	dp_init_inact_timer(soc);
8023 
8024 	return (void *)soc;
8025 
8026 fail2:
8027 	htt_soc_detach(soc->htt_handle);
8028 fail1:
8029 	qdf_mem_free(soc);
8030 fail0:
8031 	return NULL;
8032 }
8033 #endif
8034 
8035 /*
8036  * dp_get_pdev_for_mac_id() -  Return pdev for mac_id
8037  *
8038  * @soc: handle to DP soc
8039  * @mac_id: MAC id
8040  *
8041  * Return: Return pdev corresponding to MAC
8042  * Return: pdev corresponding to MAC
8043 void *dp_get_pdev_for_mac_id(struct dp_soc *soc, uint32_t mac_id)
8044 {
8045 	if (wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx))
8046 		return soc->pdev_list[mac_id];
8047 
8048 	/* Typically for MCL as there only 1 PDEV*/
8049 	/* Typically for MCL, as there is only 1 PDEV */
8050 }
8051 
8052 /*
8053  * dp_is_hw_dbs_enable() - Procedure to check if DBS is supported
8054  * @soc:		DP SoC context
8055  * @max_mac_rings:	No of MAC rings
8056  * @max_mac_rings:	No of MAC rings; reduced to 1 when DBS is not supported
8057  * Return: None
8058  */
8059 static
8060 void dp_is_hw_dbs_enable(struct dp_soc *soc,
8061 				int *max_mac_rings)
8062 {
8063 	bool dbs_enable = false;
8064 	if (soc->cdp_soc.ol_ops->is_hw_dbs_2x2_capable)
8065 		dbs_enable = soc->cdp_soc.ol_ops->
8066 		is_hw_dbs_2x2_capable(soc->ctrl_psoc);
8067 
8068 	*max_mac_rings = (dbs_enable)?(*max_mac_rings):1;
8069 }
8070 
8071 /*
8072 * dp_set_pktlog_wifi3() - enable/disable packet log for a WDI event
8073 * @pdev: Datapath PDEV handle
8074 * @event: which event's notifications are being subscribed to
8075 * @enable: WDI event subscribe or not. (True or False)
8076 *
8077 * Return: 0 on success
8078 */
8079 #ifdef WDI_EVENT_ENABLE
8080 int dp_set_pktlog_wifi3(struct dp_pdev *pdev, uint32_t event,
8081 	bool enable)
8082 {
8083 	struct dp_soc *soc = pdev->soc;
8084 	struct htt_rx_ring_tlv_filter htt_tlv_filter = {0};
8085 	int max_mac_rings = wlan_cfg_get_num_mac_rings
8086 					(pdev->wlan_cfg_ctx);
8087 	uint8_t mac_id = 0;
8088 
8089 	dp_is_hw_dbs_enable(soc, &max_mac_rings);
8090 
8091 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
8092 			FL("Max_mac_rings %d "),
8093 			max_mac_rings);
8094 
8095 	if (enable) {
8096 		switch (event) {
8097 		case WDI_EVENT_RX_DESC:
8098 			if (pdev->monitor_vdev) {
8099 				/* Nothing needs to be done if monitor mode is
8100 				 * enabled
8101 				 */
8102 				return 0;
8103 			}
8104 			if (pdev->rx_pktlog_mode != DP_RX_PKTLOG_FULL) {
8105 				pdev->rx_pktlog_mode = DP_RX_PKTLOG_FULL;
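				/* Full pktlog: subscribe to all RX TLVs,
				 * including packet headers, on the monitor
				 * status ring
				 */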
8106 				htt_tlv_filter.mpdu_start = 1;
8107 				htt_tlv_filter.msdu_start = 1;
8108 				htt_tlv_filter.msdu_end = 1;
8109 				htt_tlv_filter.mpdu_end = 1;
8110 				htt_tlv_filter.packet_header = 1;
8111 				htt_tlv_filter.attention = 1;
8112 				htt_tlv_filter.ppdu_start = 1;
8113 				htt_tlv_filter.ppdu_end = 1;
8114 				htt_tlv_filter.ppdu_end_user_stats = 1;
8115 				htt_tlv_filter.ppdu_end_user_stats_ext = 1;
8116 				htt_tlv_filter.ppdu_end_status_done = 1;
8117 				htt_tlv_filter.enable_fp = 1;
8118 				htt_tlv_filter.fp_mgmt_filter = FILTER_MGMT_ALL;
8119 				htt_tlv_filter.fp_ctrl_filter = FILTER_CTRL_ALL;
8120 				htt_tlv_filter.fp_data_filter = FILTER_DATA_ALL;
8121 				htt_tlv_filter.mo_mgmt_filter = FILTER_MGMT_ALL;
8122 				htt_tlv_filter.mo_ctrl_filter = FILTER_CTRL_ALL;
8123 				htt_tlv_filter.mo_data_filter = FILTER_DATA_ALL;
8124 
8125 				for (mac_id = 0; mac_id < max_mac_rings;
8126 								mac_id++) {
8127 					int mac_for_pdev =
8128 						dp_get_mac_id_for_pdev(mac_id,
8129 								pdev->pdev_id);
8130 
8131 					htt_h2t_rx_ring_cfg(soc->htt_handle,
8132 					 mac_for_pdev,
8133 					 pdev->rxdma_mon_status_ring[mac_id]
8134 					 .hal_srng,
8135 					 RXDMA_MONITOR_STATUS,
8136 					 RX_BUFFER_SIZE,
8137 					 &htt_tlv_filter);
8138 
8139 				}
8140 
8141 				if (soc->reap_timer_init)
8142 					qdf_timer_mod(&soc->mon_reap_timer,
8143 					DP_INTR_POLL_TIMER_MS);
8144 			}
8145 			break;
8146 
8147 		case WDI_EVENT_LITE_RX:
8148 			if (pdev->monitor_vdev) {
8149 				/* Nothing needs to be done if monitor mode is
8150 				 * enabled
8151 				 */
8152 				return 0;
8153 			}
8154 
8155 			if (pdev->rx_pktlog_mode != DP_RX_PKTLOG_LITE) {
8156 				pdev->rx_pktlog_mode = DP_RX_PKTLOG_LITE;
8157 
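				/* Lite pktlog: only PPDU-level TLVs and
				 * mpdu_start are needed
				 */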
8158 				htt_tlv_filter.ppdu_start = 1;
8159 				htt_tlv_filter.ppdu_end = 1;
8160 				htt_tlv_filter.ppdu_end_user_stats = 1;
8161 				htt_tlv_filter.ppdu_end_user_stats_ext = 1;
8162 				htt_tlv_filter.ppdu_end_status_done = 1;
8163 				htt_tlv_filter.mpdu_start = 1;
8164 				htt_tlv_filter.enable_fp = 1;
8165 				htt_tlv_filter.fp_mgmt_filter = FILTER_MGMT_ALL;
8166 				htt_tlv_filter.fp_ctrl_filter = FILTER_CTRL_ALL;
8167 				htt_tlv_filter.fp_data_filter = FILTER_DATA_ALL;
8168 				htt_tlv_filter.mo_mgmt_filter = FILTER_MGMT_ALL;
8169 				htt_tlv_filter.mo_ctrl_filter = FILTER_CTRL_ALL;
8170 				htt_tlv_filter.mo_data_filter = FILTER_DATA_ALL;
8171 
8172 				for (mac_id = 0; mac_id < max_mac_rings;
8173 								mac_id++) {
8174 					int mac_for_pdev =
8175 						dp_get_mac_id_for_pdev(mac_id,
8176 								pdev->pdev_id);
8177 
8178 					htt_h2t_rx_ring_cfg(soc->htt_handle,
8179 					mac_for_pdev,
8180 					pdev->rxdma_mon_status_ring[mac_id]
8181 					.hal_srng,
8182 					RXDMA_MONITOR_STATUS,
8183 					RX_BUFFER_SIZE_PKTLOG_LITE,
8184 					&htt_tlv_filter);
8185 				}
8186 
8187 				if (soc->reap_timer_init)
8188 					qdf_timer_mod(&soc->mon_reap_timer,
8189 					DP_INTR_POLL_TIMER_MS);
8190 			}
8191 			break;
8192 
8193 		case WDI_EVENT_LITE_T2H:
8194 			if (pdev->monitor_vdev) {
8195 				/* Nothing needs to be done if monitor mode is
8196 				 * enabled
8197 				 */
8198 				return 0;
8199 			}
8200 
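			/* Ask FW for lite PPDU TX stats on every MAC ring of this pdev */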
8201 			for (mac_id = 0; mac_id < max_mac_rings; mac_id++) {
8202 				int mac_for_pdev = dp_get_mac_id_for_pdev(
8203 							mac_id,	pdev->pdev_id);
8204 
8205 				pdev->pktlog_ppdu_stats = true;
8206 				dp_h2t_cfg_stats_msg_send(pdev,
8207 					DP_PPDU_TXLITE_STATS_BITMASK_CFG,
8208 					mac_for_pdev);
8209 			}
8210 			break;
8211 
8212 		default:
8213 			/* Nothing needs to be done for other pktlog types */
8214 			break;
8215 		}
8216 	} else {
8217 		switch (event) {
8218 		case WDI_EVENT_RX_DESC:
8219 		case WDI_EVENT_LITE_RX:
8220 			if (pdev->monitor_vdev) {
8221 				/* Nothing needs to be done if monitor mode is
8222 				 * enabled
8223 				 */
8224 				return 0;
8225 			}
8226 			if (pdev->rx_pktlog_mode != DP_RX_PKTLOG_DISABLED) {
8227 				pdev->rx_pktlog_mode = DP_RX_PKTLOG_DISABLED;
8228 
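				/* Push the zeroed TLV filter to stop RX pktlog
				 * delivery on each MAC ring
				 */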
8229 				for (mac_id = 0; mac_id < max_mac_rings;
8230 								mac_id++) {
8231 					int mac_for_pdev =
8232 						dp_get_mac_id_for_pdev(mac_id,
8233 								pdev->pdev_id);
8234 
8235 					htt_h2t_rx_ring_cfg(soc->htt_handle,
8236 					  mac_for_pdev,
8237 					  pdev->rxdma_mon_status_ring[mac_id]
8238 					  .hal_srng,
8239 					  RXDMA_MONITOR_STATUS,
8240 					  RX_BUFFER_SIZE,
8241 					  &htt_tlv_filter);
8242 				}
8243 
8244 				if (soc->reap_timer_init)
8245 					qdf_timer_stop(&soc->mon_reap_timer);
8246 			}
8247 			break;
8248 		case WDI_EVENT_LITE_T2H:
8249 			if (pdev->monitor_vdev) {
8250 				/* Nothing needs to be done if monitor mode is
8251 				 * enabled
8252 				 */
8253 				return 0;
8254 			}
8255 			/* To disable HTT_H2T_MSG_TYPE_PPDU_STATS_CFG in FW,
8256 			 * pass the value 0. Once these macros are defined in
8257 			 * the htt header file, the proper macros will be used.
8258 			 */
8259 			for (mac_id = 0; mac_id < max_mac_rings; mac_id++) {
8260 				int mac_for_pdev =
8261 						dp_get_mac_id_for_pdev(mac_id,
8262 								pdev->pdev_id);
8263 
8264 				pdev->pktlog_ppdu_stats = false;
8265 				if (!pdev->enhanced_stats_en && !pdev->tx_sniffer_enable && !pdev->mcopy_mode) {
8266 					dp_h2t_cfg_stats_msg_send(pdev, 0,
8267 								mac_for_pdev);
8268 				} else if (pdev->tx_sniffer_enable || pdev->mcopy_mode) {
8269 					dp_h2t_cfg_stats_msg_send(pdev, DP_PPDU_STATS_CFG_SNIFFER,
8270 								mac_for_pdev);
8271 				} else if (pdev->enhanced_stats_en) {
8272 					dp_h2t_cfg_stats_msg_send(pdev, DP_PPDU_STATS_CFG_ENH_STATS,
8273 								mac_for_pdev);
8274 				}
8275 			}
8276 
8277 			break;
8278 		default:
8279 			/* Nothing needs to be done for other pktlog types */
8280 			break;
8281 		}
8282 	}
8283 	return 0;
8284 }
8285 #endif
8286