xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/dp_main.c (revision 6e4b9c54b687e18b0132e53b73b6cc7445a0ba3d)
1 /*
2  * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include <qdf_types.h>
20 #include <qdf_lock.h>
21 #include <qdf_net_types.h>
22 #include <qdf_lro.h>
23 #include <qdf_module.h>
24 #include <hal_hw_headers.h>
25 #include <hal_api.h>
26 #include <hif.h>
27 #include <htt.h>
28 #include <wdi_event.h>
29 #include <queue.h>
30 #include "dp_htt.h"
31 #include "dp_types.h"
32 #include "dp_internal.h"
33 #include "dp_tx.h"
34 #include "dp_tx_desc.h"
35 #include "dp_rx.h"
36 #include <cdp_txrx_handle.h>
37 #include <wlan_cfg.h>
38 #include "cdp_txrx_cmn_struct.h"
39 #include "cdp_txrx_stats_struct.h"
40 #include "cdp_txrx_cmn_reg.h"
41 #include <qdf_util.h>
42 #include "dp_peer.h"
43 #include "dp_rx_mon.h"
44 #include "htt_stats.h"
45 #include "qdf_mem.h"   /* qdf_mem_malloc,free */
46 #include "cfg_ucfg_api.h"
47 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
48 #include "cdp_txrx_flow_ctrl_v2.h"
49 #else
50 static inline void
51 cdp_dump_flow_pool_info(struct cdp_soc_t *soc)
52 {
53 	return;
54 }
55 #endif
56 #include "dp_ipa.h"
57 
58 #ifdef CONFIG_MCL
59 #ifndef REMOVE_PKT_LOG
60 #include <pktlog_ac_api.h>
61 #include <pktlog_ac.h>
62 #endif
63 #endif
64 static void dp_pktlogmod_exit(struct dp_pdev *handle);
65 static void *dp_peer_create_wifi3(struct cdp_vdev *vdev_handle,
66 				uint8_t *peer_mac_addr,
67 				struct cdp_ctrl_objmgr_peer *ctrl_peer);
68 static void dp_peer_delete_wifi3(void *peer_handle, uint32_t bitmap);
69 static void dp_ppdu_ring_reset(struct dp_pdev *pdev);
70 static void dp_ppdu_ring_cfg(struct dp_pdev *pdev);
71 
72 #define DP_INTR_POLL_TIMER_MS	10
73 #define DP_WDS_AGING_TIMER_DEFAULT_MS	120000
74 #define DP_MCS_LENGTH (6*MAX_MCS)
75 #define DP_NSS_LENGTH (6*SS_COUNT)
76 #define DP_RXDMA_ERR_LENGTH (6*HAL_RXDMA_ERR_MAX)
77 #define DP_REO_ERR_LENGTH (6*HAL_REO_ERR_MAX)
78 #define DP_MAX_MCS_STRING_LEN 30
79 #define DP_CURR_FW_STATS_AVAIL 19
80 #define DP_HTT_DBG_EXT_STATS_MAX 256
81 #define DP_MAX_SLEEP_TIME 100
82 
83 #ifdef IPA_OFFLOAD
84 /* Exclude IPA rings from the interrupt context */
85 #define TX_RING_MASK_VAL	0xb
86 #define RX_RING_MASK_VAL	0x7
87 #else
88 #define TX_RING_MASK_VAL	0xF
89 #define RX_RING_MASK_VAL	0xF
90 #endif
91 
92 #define STR_MAXLEN	64
93 
94 #define DP_PPDU_STATS_CFG_ALL 0xFFFF
95 
96 /* PPDU stats mask sent to FW to enable enhanced stats */
97 #define DP_PPDU_STATS_CFG_ENH_STATS 0xE67
98 /* PPDU stats mask sent to FW to support debug sniffer feature */
99 #define DP_PPDU_STATS_CFG_SNIFFER 0x2FFF
100 /* PPDU stats mask sent to FW to support BPR feature */
101 #define DP_PPDU_STATS_CFG_BPR 0x2000
102 /* PPDU stats mask sent to FW to support BPR and enhanced stats feature */
103 #define DP_PPDU_STATS_CFG_BPR_ENH (DP_PPDU_STATS_CFG_BPR | \
104 				   DP_PPDU_STATS_CFG_ENH_STATS)
105 /* PPDU stats mask sent to FW to support BPR and pktlog stats feature */
106 #define DP_PPDU_STATS_CFG_BPR_PKTLOG (DP_PPDU_STATS_CFG_BPR | \
107 				      DP_PPDU_TXLITE_STATS_BITMASK_CFG)
108 
109 #define RNG_ERR		"SRNG setup failed for"
110 /**
111  * default_dscp_tid_map - Default DSCP-TID mapping
112  *
113  * DSCP        TID
114  * 000000      0
115  * 001000      1
116  * 010000      2
117  * 011000      3
118  * 100000      4
119  * 101000      5
120  * 110000      6
121  * 111000      7
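 *
 * For example, DSCP 46 (binary 101110) falls in the 101xxx range above and
 * therefore maps to TID 5 in the table below.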
122  */
123 static uint8_t default_dscp_tid_map[DSCP_TID_MAP_MAX] = {
124 	0, 0, 0, 0, 0, 0, 0, 0,
125 	1, 1, 1, 1, 1, 1, 1, 1,
126 	2, 2, 2, 2, 2, 2, 2, 2,
127 	3, 3, 3, 3, 3, 3, 3, 3,
128 	4, 4, 4, 4, 4, 4, 4, 4,
129 	5, 5, 5, 5, 5, 5, 5, 5,
130 	6, 6, 6, 6, 6, 6, 6, 6,
131 	7, 7, 7, 7, 7, 7, 7, 7,
132 };
133 
134 /*
135  * struct dp_rate_debug - MCS rate debug record
136  *
137  * @mcs_type: print string for a given mcs
138  * @valid: whether this MCS index is valid for the preamble type
139  */
140 struct dp_rate_debug {
141 	char mcs_type[DP_MAX_MCS_STRING_LEN];
142 	uint8_t valid;
143 };
144 
145 #define MCS_VALID 1
146 #define MCS_INVALID 0
147 
148 static const struct dp_rate_debug dp_rate_string[DOT11_MAX][MAX_MCS] = {
149 
150 	{
151 		{"OFDM 48 Mbps", MCS_VALID},
152 		{"OFDM 24 Mbps", MCS_VALID},
153 		{"OFDM 12 Mbps", MCS_VALID},
154 		{"OFDM 6 Mbps ", MCS_VALID},
155 		{"OFDM 54 Mbps", MCS_VALID},
156 		{"OFDM 36 Mbps", MCS_VALID},
157 		{"OFDM 18 Mbps", MCS_VALID},
158 		{"OFDM 9 Mbps ", MCS_VALID},
159 		{"INVALID ", MCS_INVALID},
160 		{"INVALID ", MCS_INVALID},
161 		{"INVALID ", MCS_INVALID},
162 		{"INVALID ", MCS_INVALID},
163 		{"INVALID ", MCS_VALID},
164 	},
165 	{
166 		{"CCK 11 Mbps Long  ", MCS_VALID},
167 		{"CCK 5.5 Mbps Long ", MCS_VALID},
168 		{"CCK 2 Mbps Long   ", MCS_VALID},
169 		{"CCK 1 Mbps Long   ", MCS_VALID},
170 		{"CCK 11 Mbps Short ", MCS_VALID},
171 		{"CCK 5.5 Mbps Short", MCS_VALID},
172 		{"CCK 2 Mbps Short  ", MCS_VALID},
173 		{"INVALID ", MCS_INVALID},
174 		{"INVALID ", MCS_INVALID},
175 		{"INVALID ", MCS_INVALID},
176 		{"INVALID ", MCS_INVALID},
177 		{"INVALID ", MCS_INVALID},
178 		{"INVALID ", MCS_VALID},
179 	},
180 	{
181 		{"HT MCS 0 (BPSK 1/2)  ", MCS_VALID},
182 		{"HT MCS 1 (QPSK 1/2)  ", MCS_VALID},
183 		{"HT MCS 2 (QPSK 3/4)  ", MCS_VALID},
184 		{"HT MCS 3 (16-QAM 1/2)", MCS_VALID},
185 		{"HT MCS 4 (16-QAM 3/4)", MCS_VALID},
186 		{"HT MCS 5 (64-QAM 2/3)", MCS_VALID},
187 		{"HT MCS 6 (64-QAM 3/4)", MCS_VALID},
188 		{"HT MCS 7 (64-QAM 5/6)", MCS_VALID},
189 		{"INVALID ", MCS_INVALID},
190 		{"INVALID ", MCS_INVALID},
191 		{"INVALID ", MCS_INVALID},
192 		{"INVALID ", MCS_INVALID},
193 		{"INVALID ", MCS_VALID},
194 	},
195 	{
196 		{"VHT MCS 0 (BPSK 1/2)     ", MCS_VALID},
197 		{"VHT MCS 1 (QPSK 1/2)     ", MCS_VALID},
198 		{"VHT MCS 2 (QPSK 3/4)     ", MCS_VALID},
199 		{"VHT MCS 3 (16-QAM 1/2)   ", MCS_VALID},
200 		{"VHT MCS 4 (16-QAM 3/4)   ", MCS_VALID},
201 		{"VHT MCS 5 (64-QAM 2/3)   ", MCS_VALID},
202 		{"VHT MCS 6 (64-QAM 3/4)   ", MCS_VALID},
203 		{"VHT MCS 7 (64-QAM 5/6)   ", MCS_VALID},
204 		{"VHT MCS 8 (256-QAM 3/4)  ", MCS_VALID},
205 		{"VHT MCS 9 (256-QAM 5/6)  ", MCS_VALID},
206 		{"VHT MCS 10 (1024-QAM 3/4)", MCS_VALID},
207 		{"VHT MCS 11 (1024-QAM 5/6)", MCS_VALID},
208 		{"INVALID ", MCS_VALID},
209 	},
210 	{
211 		{"HE MCS 0 (BPSK 1/2)     ", MCS_VALID},
212 		{"HE MCS 1 (QPSK 1/2)     ", MCS_VALID},
213 		{"HE MCS 2 (QPSK 3/4)     ", MCS_VALID},
214 		{"HE MCS 3 (16-QAM 1/2)   ", MCS_VALID},
215 		{"HE MCS 4 (16-QAM 3/4)   ", MCS_VALID},
216 		{"HE MCS 5 (64-QAM 2/3)   ", MCS_VALID},
217 		{"HE MCS 6 (64-QAM 3/4)   ", MCS_VALID},
218 		{"HE MCS 7 (64-QAM 5/6)   ", MCS_VALID},
219 		{"HE MCS 8 (256-QAM 3/4)  ", MCS_VALID},
220 		{"HE MCS 9 (256-QAM 5/6)  ", MCS_VALID},
221 		{"HE MCS 10 (1024-QAM 3/4)", MCS_VALID},
222 		{"HE MCS 11 (1024-QAM 5/6)", MCS_VALID},
223 		{"INVALID ", MCS_VALID},
224 	}
225 };
226 
227 /**
228  * @brief Cpu ring map types
229  */
230 enum dp_cpu_ring_map_types {
231 	DP_DEFAULT_MAP,
232 	DP_NSS_FIRST_RADIO_OFFLOADED_MAP,
233 	DP_NSS_SECOND_RADIO_OFFLOADED_MAP,
234 	DP_NSS_ALL_RADIO_OFFLOADED_MAP,
235 	DP_CPU_RING_MAP_MAX
236 };
237 
238 /**
239  * @brief Cpu to tx ring map, indexed by dp_cpu_ring_map_types and interrupt context
240  */
241 static uint8_t dp_cpu_ring_map[DP_CPU_RING_MAP_MAX][WLAN_CFG_INT_NUM_CONTEXTS] = {
242 	{0x0, 0x1, 0x2, 0x0},
243 	{0x1, 0x2, 0x1, 0x2},
244 	{0x0, 0x2, 0x0, 0x2},
245 	{0x2, 0x2, 0x2, 0x2}
246 };
247 
248 /**
249  * @brief Select the type of statistics
250  */
251 enum dp_stats_type {
252 	STATS_FW = 0,
253 	STATS_HOST = 1,
254 	STATS_TYPE_MAX = 2,
255 };
256 
257 /**
258  * @brief General Firmware statistics options
259  *
260  */
261 enum dp_fw_stats {
262 	TXRX_FW_STATS_INVALID	= -1,
263 };
264 
265 /**
266  * dp_stats_mapping_table - Firmware and Host statistics
267  * currently supported
268  */
269 const int dp_stats_mapping_table[][STATS_TYPE_MAX] = {
270 	{HTT_DBG_EXT_STATS_RESET, TXRX_HOST_STATS_INVALID},
271 	{HTT_DBG_EXT_STATS_PDEV_TX, TXRX_HOST_STATS_INVALID},
272 	{HTT_DBG_EXT_STATS_PDEV_RX, TXRX_HOST_STATS_INVALID},
273 	{HTT_DBG_EXT_STATS_PDEV_TX_HWQ, TXRX_HOST_STATS_INVALID},
274 	{HTT_DBG_EXT_STATS_PDEV_TX_SCHED, TXRX_HOST_STATS_INVALID},
275 	{HTT_DBG_EXT_STATS_PDEV_ERROR, TXRX_HOST_STATS_INVALID},
276 	{HTT_DBG_EXT_STATS_PDEV_TQM, TXRX_HOST_STATS_INVALID},
277 	{HTT_DBG_EXT_STATS_TQM_CMDQ, TXRX_HOST_STATS_INVALID},
278 	{HTT_DBG_EXT_STATS_TX_DE_INFO, TXRX_HOST_STATS_INVALID},
279 	{HTT_DBG_EXT_STATS_PDEV_TX_RATE, TXRX_HOST_STATS_INVALID},
280 	{HTT_DBG_EXT_STATS_PDEV_RX_RATE, TXRX_HOST_STATS_INVALID},
281 	{TXRX_FW_STATS_INVALID, TXRX_HOST_STATS_INVALID},
282 	{HTT_DBG_EXT_STATS_TX_SELFGEN_INFO, TXRX_HOST_STATS_INVALID},
283 	{HTT_DBG_EXT_STATS_TX_MU_HWQ, TXRX_HOST_STATS_INVALID},
284 	{HTT_DBG_EXT_STATS_RING_IF_INFO, TXRX_HOST_STATS_INVALID},
285 	{HTT_DBG_EXT_STATS_SRNG_INFO, TXRX_HOST_STATS_INVALID},
286 	{HTT_DBG_EXT_STATS_SFM_INFO, TXRX_HOST_STATS_INVALID},
287 	{HTT_DBG_EXT_STATS_PDEV_TX_MU, TXRX_HOST_STATS_INVALID},
288 	{HTT_DBG_EXT_STATS_ACTIVE_PEERS_LIST, TXRX_HOST_STATS_INVALID},
289 	/* Last ENUM for HTT FW STATS */
290 	{DP_HTT_DBG_EXT_STATS_MAX, TXRX_HOST_STATS_INVALID},
291 	{TXRX_FW_STATS_INVALID, TXRX_CLEAR_STATS},
292 	{TXRX_FW_STATS_INVALID, TXRX_RX_RATE_STATS},
293 	{TXRX_FW_STATS_INVALID, TXRX_TX_RATE_STATS},
294 	{TXRX_FW_STATS_INVALID, TXRX_TX_HOST_STATS},
295 	{TXRX_FW_STATS_INVALID, TXRX_RX_HOST_STATS},
296 	{TXRX_FW_STATS_INVALID, TXRX_AST_STATS},
297 	{TXRX_FW_STATS_INVALID, TXRX_SRNG_PTR_STATS},
298 	{TXRX_FW_STATS_INVALID, TXRX_RX_MON_STATS},
299 	{TXRX_FW_STATS_INVALID, TXRX_REO_QUEUE_STATS},
300 };
301 
302 /* MCL specific functions */
303 #ifdef CONFIG_MCL
304 /**
305  * dp_soc_get_mon_mask_for_interrupt_mode() - get mon mode mask for intr mode
306  * @soc: pointer to dp_soc handle
307  * @intr_ctx_num: interrupt context number for which mon mask is needed
308  *
309  * For MCL, monitor mode rings are processed in a timer context (polled).
310  * This function returns 0, since in interrupt mode (softirq-based RX)
311  * we do not want to process monitor mode rings in a softirq.
312  *
313  * So, when packet log is enabled for SAP/STA/P2P modes, regular interrupt
314  * processing will not process the monitor mode rings; they are serviced
315  * from a separate timer context instead.
316  *
317  * Return: 0
318  */
319 static inline
320 uint32_t dp_soc_get_mon_mask_for_interrupt_mode(struct dp_soc *soc, int intr_ctx_num)
321 {
322 	return 0;
323 }
324 
325 /*
326  * dp_service_mon_rings() - timer handler to reap the monitor rings,
327  * required because ppdu-end interrupts are not delivered in this mode
328  * @arg: SoC Handle
329  *
330  * Return: void
331  *
332  */
333 static void dp_service_mon_rings(void *arg)
334 {
335 	struct dp_soc *soc = (struct dp_soc *)arg;
336 	int ring = 0, work_done, mac_id;
337 	struct dp_pdev *pdev = NULL;
338 
339 	for  (ring = 0 ; ring < MAX_PDEV_CNT; ring++) {
340 		pdev = soc->pdev_list[ring];
341 		if (!pdev)
342 			continue;
343 		for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
344 			int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id,
345 								pdev->pdev_id);
346 			work_done = dp_mon_process(soc, mac_for_pdev,
347 						   QCA_NAPI_BUDGET);
348 
349 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
350 				  FL("Reaped %d descs from Monitor rings"),
351 				  work_done);
352 		}
353 	}
354 
355 	qdf_timer_mod(&soc->mon_reap_timer, DP_INTR_POLL_TIMER_MS);
356 }
357 
358 #ifndef REMOVE_PKT_LOG
359 /**
360  * dp_pkt_log_init() - API to initialize packet log
361  * @ppdev: physical device handle
362  * @scn: HIF context
363  *
364  * Return: none
365  */
366 void dp_pkt_log_init(struct cdp_pdev *ppdev, void *scn)
367 {
368 	struct dp_pdev *handle = (struct dp_pdev *)ppdev;
369 
370 	if (handle->pkt_log_init) {
371 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
372 			  "%s: Packet log already initialized", __func__);
373 		return;
374 	}
375 
376 	pktlog_sethandle(&handle->pl_dev, scn);
377 	pktlog_set_callback_regtype(PKTLOG_LITE_CALLBACK_REGISTRATION);
378 
379 	if (pktlogmod_init(scn)) {
380 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
381 			  "%s: pktlogmod_init failed", __func__);
382 		handle->pkt_log_init = false;
383 	} else {
384 		handle->pkt_log_init = true;
385 	}
386 }
387 
388 /**
389  * dp_pkt_log_con_service() - connect packet log service
390  * @ppdev: physical device handle
391  * @scn: device context
392  *
393  * Return: none
394  */
395 static void dp_pkt_log_con_service(struct cdp_pdev *ppdev, void *scn)
396 {
397 	struct dp_pdev *pdev = (struct dp_pdev *)ppdev;
398 
399 	dp_pkt_log_init((struct cdp_pdev *)pdev, scn);
400 	pktlog_htc_attach();
401 }
402 
403 /**
404  * dp_pktlogmod_exit() - API to cleanup pktlog info
405  * @handle: Pdev handle
406  *
407  * Return: none
408  */
409 static void dp_pktlogmod_exit(struct dp_pdev *handle)
410 {
411 	void *scn = (void *)handle->soc->hif_handle;
412 
413 	if (!scn) {
414 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
415 			  "%s: Invalid hif(scn) handle", __func__);
416 		return;
417 	}
418 
419 	pktlogmod_exit(scn);
420 	handle->pkt_log_init = false;
421 }
422 #endif
423 #else
424 static void dp_pktlogmod_exit(struct dp_pdev *handle) { }
425 
426 /**
427  * dp_soc_get_mon_mask_for_interrupt_mode() - get mon mode mask for intr mode
428  * @soc: pointer to dp_soc handle
429  * @intr_ctx_num: interrupt context number for which mon mask is needed
430  *
431  * Return: mon mask value
432  */
433 static inline
434 uint32_t dp_soc_get_mon_mask_for_interrupt_mode(struct dp_soc *soc, int intr_ctx_num)
435 {
436 	return wlan_cfg_get_rx_mon_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
437 }
438 #endif
439 
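/*
 * dp_peer_add_ast_wifi3() - cdp wrapper to add an AST entry for a peer
 * @soc_hdl: Datapath SOC handle
 * @peer_hdl: peer handle the AST entry is added for
 * @mac_addr: MAC address of the AST entry
 * @type: AST entry type
 * @flags: AST configuration flags
 *
 * Return: status of dp_peer_add_ast()
 */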
440 static int dp_peer_add_ast_wifi3(struct cdp_soc_t *soc_hdl,
441 					struct cdp_peer *peer_hdl,
442 					uint8_t *mac_addr,
443 					enum cdp_txrx_ast_entry_type type,
444 					uint32_t flags)
445 {
446 
447 	return dp_peer_add_ast((struct dp_soc *)soc_hdl,
448 				(struct dp_peer *)peer_hdl,
449 				mac_addr,
450 				type,
451 				flags);
452 }
453 
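/*
 * dp_peer_del_ast_wifi3() - cdp wrapper to delete an AST entry
 * @soc_hdl: Datapath SOC handle
 * @ast_entry_hdl: AST entry to delete
 *
 * Deletes the entry via dp_peer_del_ast() while holding the SOC ast_lock.
 *
 * Return: None
 */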
454 static void dp_peer_del_ast_wifi3(struct cdp_soc_t *soc_hdl,
455 					 void *ast_entry_hdl)
456 {
457 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
458 	qdf_spin_lock_bh(&soc->ast_lock);
459 	dp_peer_del_ast((struct dp_soc *)soc_hdl,
460 			(struct dp_ast_entry *)ast_entry_hdl);
461 	qdf_spin_unlock_bh(&soc->ast_lock);
462 }
463 
464 
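/*
 * dp_peer_update_ast_wifi3() - cdp wrapper to update an existing AST entry
 * @soc_hdl: Datapath SOC handle
 * @peer_hdl: peer the AST entry should be updated to point to
 * @wds_macaddr: MAC address of the AST entry to update
 * @flags: AST configuration flags
 *
 * Return: status of dp_peer_update_ast(), or -1 if no matching entry is found
 */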
465 static int dp_peer_update_ast_wifi3(struct cdp_soc_t *soc_hdl,
466 						struct cdp_peer *peer_hdl,
467 						uint8_t *wds_macaddr,
468 						uint32_t flags)
469 {
470 	int status = -1;
471 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
472 	struct dp_ast_entry  *ast_entry = NULL;
473 
474 	qdf_spin_lock_bh(&soc->ast_lock);
475 	ast_entry = dp_peer_ast_hash_find(soc, wds_macaddr);
476 
477 	if (ast_entry) {
478 		status = dp_peer_update_ast(soc,
479 					    (struct dp_peer *)peer_hdl,
480 					   ast_entry, flags);
481 	}
482 
483 	qdf_spin_unlock_bh(&soc->ast_lock);
484 
485 	return status;
486 }
487 
488 /*
489  * dp_wds_reset_ast_wifi3() - Reset the is_active param for an ast entry
490  * @soc_hdl:		Datapath SOC handle
491  * @wds_macaddr:	WDS entry MAC Address
492  * Return: None
493  */
494 static void dp_wds_reset_ast_wifi3(struct cdp_soc_t *soc_hdl,
495 				   uint8_t *wds_macaddr, void *vdev_handle)
496 {
497 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
498 	struct dp_ast_entry *ast_entry = NULL;
499 
500 	qdf_spin_lock_bh(&soc->ast_lock);
501 	ast_entry = dp_peer_ast_hash_find(soc, wds_macaddr);
502 
503 	if (ast_entry) {
504 		if ((ast_entry->type != CDP_TXRX_AST_TYPE_STATIC) &&
505 		    (ast_entry->type != CDP_TXRX_AST_TYPE_SELF)) {
506 			ast_entry->is_active = TRUE;
507 		}
508 	}
509 
510 	qdf_spin_unlock_bh(&soc->ast_lock);
511 }
512 
513 /*
514  * dp_wds_reset_ast_table_wifi3() - Reset the is_active param for all ast entries
515  * @soc_hdl:		Datapath SOC handle
516  *
517  * Return: None
518  */
519 static void dp_wds_reset_ast_table_wifi3(struct cdp_soc_t  *soc_hdl,
520 					 void *vdev_hdl)
521 {
522 	struct dp_soc *soc = (struct dp_soc *) soc_hdl;
523 	struct dp_pdev *pdev;
524 	struct dp_vdev *vdev;
525 	struct dp_peer *peer;
526 	struct dp_ast_entry *ase, *temp_ase;
527 	int i;
528 
529 	qdf_spin_lock_bh(&soc->ast_lock);
530 
531 	for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) {
532 		pdev = soc->pdev_list[i];
533 		qdf_spin_lock_bh(&pdev->vdev_list_lock);
534 		DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
535 			DP_VDEV_ITERATE_PEER_LIST(vdev, peer) {
536 				DP_PEER_ITERATE_ASE_LIST(peer, ase, temp_ase) {
537 					if ((ase->type ==
538 					     CDP_TXRX_AST_TYPE_STATIC) ||
539 					    (ase->type ==
540 					     CDP_TXRX_AST_TYPE_SELF))
541 						continue;
542 					ase->is_active = TRUE;
543 				}
544 			}
545 		}
546 		qdf_spin_unlock_bh(&pdev->vdev_list_lock);
547 	}
548 
549 	qdf_spin_unlock_bh(&soc->ast_lock);
550 }
551 
552 /*
553  * dp_wds_flush_ast_table_wifi3() - Delete all wds and hmwds ast entries
554  * @soc_hdl:		Datapath SOC handle
555  *
556  * Return: None
557  */
558 static void dp_wds_flush_ast_table_wifi3(struct cdp_soc_t  *soc_hdl)
559 {
560 	struct dp_soc *soc = (struct dp_soc *) soc_hdl;
561 	struct dp_pdev *pdev;
562 	struct dp_vdev *vdev;
563 	struct dp_peer *peer;
564 	struct dp_ast_entry *ase, *temp_ase;
565 	int i;
566 
567 	qdf_spin_lock_bh(&soc->ast_lock);
568 
569 	for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) {
570 		pdev = soc->pdev_list[i];
571 		qdf_spin_lock_bh(&pdev->vdev_list_lock);
572 		DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
573 			DP_VDEV_ITERATE_PEER_LIST(vdev, peer) {
574 				DP_PEER_ITERATE_ASE_LIST(peer, ase, temp_ase) {
575 					if ((ase->type ==
576 					     CDP_TXRX_AST_TYPE_STATIC) ||
577 					    (ase->type ==
578 					     CDP_TXRX_AST_TYPE_SELF))
579 						continue;
580 					dp_peer_del_ast(soc, ase);
581 				}
582 			}
583 		}
584 		qdf_spin_unlock_bh(&pdev->vdev_list_lock);
585 	}
586 
587 	qdf_spin_unlock_bh(&soc->ast_lock);
588 }
589 
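/*
 * dp_peer_ast_hash_find_wifi3() - look up an AST entry by MAC address
 * @soc_hdl: Datapath SOC handle
 * @ast_mac_addr: MAC address to search for
 *
 * Return: opaque AST entry handle, or NULL if no entry is found
 */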
590 static void *dp_peer_ast_hash_find_wifi3(struct cdp_soc_t *soc_hdl,
591 						uint8_t *ast_mac_addr)
592 {
593 	struct dp_ast_entry *ast_entry;
594 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
595 	qdf_spin_lock_bh(&soc->ast_lock);
596 	ast_entry = dp_peer_ast_hash_find(soc, ast_mac_addr);
597 	qdf_spin_unlock_bh(&soc->ast_lock);
598 	return (void *)ast_entry;
599 }
600 
601 static uint8_t dp_peer_ast_get_pdev_id_wifi3(struct cdp_soc_t *soc_hdl,
602 							void *ast_entry_hdl)
603 {
604 	return dp_peer_ast_get_pdev_id((struct dp_soc *)soc_hdl,
605 					(struct dp_ast_entry *)ast_entry_hdl);
606 }
607 
608 static uint8_t dp_peer_ast_get_next_hop_wifi3(struct cdp_soc_t *soc_hdl,
609 							void *ast_entry_hdl)
610 {
611 	return dp_peer_ast_get_next_hop((struct dp_soc *)soc_hdl,
612 					(struct dp_ast_entry *)ast_entry_hdl);
613 }
614 
615 static void dp_peer_ast_set_type_wifi3(
616 					struct cdp_soc_t *soc_hdl,
617 					void *ast_entry_hdl,
618 					enum cdp_txrx_ast_entry_type type)
619 {
620 	dp_peer_ast_set_type((struct dp_soc *)soc_hdl,
621 				(struct dp_ast_entry *)ast_entry_hdl,
622 				type);
623 }
624 
625 static enum cdp_txrx_ast_entry_type dp_peer_ast_get_type_wifi3(
626 					struct cdp_soc_t *soc_hdl,
627 					void *ast_entry_hdl)
628 {
629 	return ((struct dp_ast_entry *)ast_entry_hdl)->type;
630 }
631 
632 /**
633  * dp_srng_find_ring_in_mask() - find which ext_group a ring belongs to
634  * @ring_num: ring num of the ring being queried
635  * @grp_mask: the grp_mask array for the ring type in question.
636  *
637  * The grp_mask array is indexed by group number and the bit fields correspond
638  * to ring numbers.  We are finding which interrupt group a ring belongs to.
639  *
640  * Return: the index in the grp_mask array whose mask contains the ring
641  * number, or -QDF_STATUS_E_NOENT if no entry is found
642  */
643 static int dp_srng_find_ring_in_mask(int ring_num, int *grp_mask)
644 {
645 	int ext_group_num;
646 	int mask = 1 << ring_num;
647 
648 	for (ext_group_num = 0; ext_group_num < WLAN_CFG_INT_NUM_CONTEXTS;
649 	     ext_group_num++) {
650 		if (mask & grp_mask[ext_group_num])
651 			return ext_group_num;
652 	}
653 
654 	return -QDF_STATUS_E_NOENT;
655 }
656 
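/*
 * dp_srng_calculate_msi_group() - find the interrupt group servicing a ring
 * @soc: Datapath SOC handle
 * @ring_type: HAL ring type
 * @ring_num: ring number within that ring type
 *
 * Picks the wlan_cfg group mask that corresponds to the ring type and then
 * looks up the ring number in it via dp_srng_find_ring_in_mask().
 *
 * Return: ext_group index, or -QDF_STATUS_E_NOENT for rings that are not
 * serviced through an ext_group (e.g. SW-to-HW or CE rings)
 */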
657 static int dp_srng_calculate_msi_group(struct dp_soc *soc,
658 				       enum hal_ring_type ring_type,
659 				       int ring_num)
660 {
661 	int *grp_mask;
662 
663 	switch (ring_type) {
664 	case WBM2SW_RELEASE:
665 		/* dp_tx_comp_handler - soc->tx_comp_ring */
666 		if (ring_num < 3)
667 			grp_mask = &soc->wlan_cfg_ctx->int_tx_ring_mask[0];
668 
669 		/* dp_rx_wbm_err_process - soc->rx_rel_ring */
670 		else if (ring_num == 3) {
671 			/* sw treats this as a separate ring type */
672 			grp_mask = &soc->wlan_cfg_ctx->
673 				int_rx_wbm_rel_ring_mask[0];
674 			ring_num = 0;
675 		} else {
676 			qdf_assert(0);
677 			return -QDF_STATUS_E_NOENT;
678 		}
679 	break;
680 
681 	case REO_EXCEPTION:
682 		/* dp_rx_err_process - &soc->reo_exception_ring */
683 		grp_mask = &soc->wlan_cfg_ctx->int_rx_err_ring_mask[0];
684 	break;
685 
686 	case REO_DST:
687 		/* dp_rx_process - soc->reo_dest_ring */
688 		grp_mask = &soc->wlan_cfg_ctx->int_rx_ring_mask[0];
689 	break;
690 
691 	case REO_STATUS:
692 		/* dp_reo_status_ring_handler - soc->reo_status_ring */
693 		grp_mask = &soc->wlan_cfg_ctx->int_reo_status_ring_mask[0];
694 	break;
695 
696 	/* dp_rx_mon_status_srng_process - pdev->rxdma_mon_status_ring*/
697 	case RXDMA_MONITOR_STATUS:
698 	/* dp_rx_mon_dest_process - pdev->rxdma_mon_dst_ring */
699 	case RXDMA_MONITOR_DST:
700 		/* dp_mon_process */
701 		grp_mask = &soc->wlan_cfg_ctx->int_rx_mon_ring_mask[0];
702 	break;
703 	case RXDMA_DST:
704 		/* dp_rxdma_err_process */
705 		grp_mask = &soc->wlan_cfg_ctx->int_rxdma2host_ring_mask[0];
706 	break;
707 
708 	case RXDMA_BUF:
709 		grp_mask = &soc->wlan_cfg_ctx->int_host2rxdma_ring_mask[0];
710 	break;
711 
712 	case RXDMA_MONITOR_BUF:
713 		/* TODO: support low_thresh interrupt */
714 		return -QDF_STATUS_E_NOENT;
715 	break;
716 
717 	case TCL_DATA:
718 	case TCL_CMD:
719 	case REO_CMD:
720 	case SW2WBM_RELEASE:
721 	case WBM_IDLE_LINK:
722 		/* normally empty SW_TO_HW rings */
723 		return -QDF_STATUS_E_NOENT;
724 	break;
725 
726 	case TCL_STATUS:
727 	case REO_REINJECT:
728 		/* misc unused rings */
729 		return -QDF_STATUS_E_NOENT;
730 	break;
731 
732 	case CE_SRC:
733 	case CE_DST:
734 	case CE_DST_STATUS:
735 		/* CE_rings - currently handled by hif */
736 	default:
737 		return -QDF_STATUS_E_NOENT;
738 	break;
739 	}
740 
741 	return dp_srng_find_ring_in_mask(ring_num, grp_mask);
742 }
743 
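/*
 * dp_srng_msi_setup() - populate the MSI parameters for a SRNG
 * @soc: Datapath SOC handle
 * @ring_params: HAL SRNG parameters to populate
 * @ring_type: HAL ring type
 * @ring_num: ring number within that ring type
 *
 * Derives the MSI data/address from the interrupt group the ring belongs to;
 * when the ring is not part of any ext_group, MSI is left disabled
 * (msi_addr/msi_data set to 0).
 *
 * Return: none
 */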
744 static void dp_srng_msi_setup(struct dp_soc *soc, struct hal_srng_params
745 			      *ring_params, int ring_type, int ring_num)
746 {
747 	int msi_group_number;
748 	int msi_data_count;
749 	int ret;
750 	uint32_t msi_data_start, msi_irq_start, addr_low, addr_high;
751 
752 	ret = pld_get_user_msi_assignment(soc->osdev->dev, "DP",
753 					    &msi_data_count, &msi_data_start,
754 					    &msi_irq_start);
755 
756 	if (ret)
757 		return;
758 
759 	msi_group_number = dp_srng_calculate_msi_group(soc, ring_type,
760 						       ring_num);
761 	if (msi_group_number < 0) {
762 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW,
763 			FL("ring not part of an ext_group; ring_type: %d,ring_num %d"),
764 			ring_type, ring_num);
765 		ring_params->msi_addr = 0;
766 		ring_params->msi_data = 0;
767 		return;
768 	}
769 
770 	if (msi_group_number > msi_data_count) {
771 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_WARN,
772 			FL("2 msi_groups will share an msi; msi_group_num %d"),
773 			msi_group_number);
774 
775 		QDF_ASSERT(0);
776 	}
777 
778 	pld_get_msi_address(soc->osdev->dev, &addr_low, &addr_high);
779 
780 	ring_params->msi_addr = addr_low;
781 	ring_params->msi_addr |= (qdf_dma_addr_t)(((uint64_t)addr_high) << 32);
782 	ring_params->msi_data = (msi_group_number % msi_data_count)
783 		+ msi_data_start;
784 	ring_params->flags |= HAL_SRNG_MSI_INTR;
785 }
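
/*
 * Illustrative example (hypothetical mask layout, not taken from the default
 * configuration): if int_rx_ring_mask[] were { 0x1, 0x2, 0x4, 0x8 }, REO_DST
 * ring 2 would map to ext_group 2 (bit 0x4), and dp_srng_msi_setup() would
 * then program msi_data = (2 % msi_data_count) + msi_data_start for it and
 * set HAL_SRNG_MSI_INTR in the ring flags.
 */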
786 
787 /**
788  * dp_print_ast_stats() - Dump AST table contents
789  * @soc: Datapath soc handle
790  *
791  * Return: void
792  */
793 #ifdef FEATURE_AST
794 static void dp_print_ast_stats(struct dp_soc *soc)
795 {
796 	uint8_t i;
797 	uint8_t num_entries = 0;
798 	struct dp_vdev *vdev;
799 	struct dp_pdev *pdev;
800 	struct dp_peer *peer;
801 	struct dp_ast_entry *ase, *tmp_ase;
802 	char type[CDP_TXRX_AST_TYPE_MAX][10] = {
803 			"NONE", "STATIC", "SELF", "WDS", "MEC", "HMWDS"};
804 
805 	DP_PRINT_STATS("AST Stats:");
806 	DP_PRINT_STATS("	Entries Added   = %d", soc->stats.ast.added);
807 	DP_PRINT_STATS("	Entries Deleted = %d", soc->stats.ast.deleted);
808 	DP_PRINT_STATS("	Entries Agedout = %d", soc->stats.ast.aged_out);
809 	DP_PRINT_STATS("AST Table:");
810 	for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) {
811 		pdev = soc->pdev_list[i];
812 		qdf_spin_lock_bh(&pdev->vdev_list_lock);
813 		DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
814 			DP_VDEV_ITERATE_PEER_LIST(vdev, peer) {
815 				DP_PEER_ITERATE_ASE_LIST(peer, ase, tmp_ase) {
816 					DP_PRINT_STATS("%6d mac_addr = %pM"
817 							" peer_mac_addr = %pM"
818 							" type = %s"
819 							" next_hop = %d"
820 							" is_active = %d"
821 							" is_bss = %d"
822 							" ast_idx = %d"
823 							" pdev_id = %d"
824 							" vdev_id = %d",
825 							++num_entries,
826 							ase->mac_addr.raw,
827 							ase->peer->mac_addr.raw,
828 							type[ase->type],
829 							ase->next_hop,
830 							ase->is_active,
831 							ase->is_bss,
832 							ase->ast_idx,
833 							ase->pdev_id,
834 							ase->vdev_id);
835 				}
836 			}
837 		}
838 		qdf_spin_unlock_bh(&pdev->vdev_list_lock);
839 	}
840 }
841 #else
842 static void dp_print_ast_stats(struct dp_soc *soc)
843 {
844 	DP_PRINT_STATS("AST Stats not available. Enable FEATURE_AST");
845 	return;
846 }
847 #endif
848 
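/*
 * dp_print_peer_table() - Dump the peers of a vdev and their basic flags
 * @vdev: Datapath VDEV handle
 *
 * Return: None
 */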
849 static void dp_print_peer_table(struct dp_vdev *vdev)
850 {
851 	struct dp_peer *peer = NULL;
852 
853 	DP_PRINT_STATS("Dumping Peer Table  Stats:");
854 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
855 		if (!peer) {
856 			DP_PRINT_STATS("Invalid Peer");
857 			return;
858 		}
859 		DP_PRINT_STATS("    peer_mac_addr = %pM"
860 			" nawds_enabled = %d"
861 			" bss_peer = %d"
862 			" wapi = %d"
863 			" wds_enabled = %d"
864 			" delete in progress = %d",
865 			peer->mac_addr.raw,
866 			peer->nawds_enabled,
867 			peer->bss_peer,
868 			peer->wapi,
869 			peer->wds_enabled,
870 			peer->delete_in_progress);
871 	}
872 }
873 
874 /*
875  * dp_srng_setup() - Internal function to setup SRNG rings used by data path
876  */
877 static int dp_srng_setup(struct dp_soc *soc, struct dp_srng *srng,
878 	int ring_type, int ring_num, int mac_id, uint32_t num_entries)
879 {
880 	void *hal_soc = soc->hal_soc;
881 	uint32_t entry_size = hal_srng_get_entrysize(hal_soc, ring_type);
882 	/* TODO: See if we should get align size from hal */
883 	uint32_t ring_base_align = 8;
884 	struct hal_srng_params ring_params;
885 	uint32_t max_entries = hal_srng_max_entries(hal_soc, ring_type);
886 
887 	/* TODO: Currently hal layer takes care of endianness related settings.
888 	 * See if these settings need to passed from DP layer
889 	 */
890 	ring_params.flags = 0;
891 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW,
892 		  FL("Ring type: %d, num:%d"), ring_type, ring_num);
893 
894 	num_entries = (num_entries > max_entries) ? max_entries : num_entries;
895 	srng->hal_srng = NULL;
896 	srng->alloc_size = (num_entries * entry_size) + ring_base_align - 1;
897 	srng->num_entries = num_entries;
898 	srng->base_vaddr_unaligned = qdf_mem_alloc_consistent(
899 		soc->osdev, soc->osdev->dev, srng->alloc_size,
900 		&(srng->base_paddr_unaligned));
901 
902 	if (!srng->base_vaddr_unaligned) {
903 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
904 			FL("alloc failed - ring_type: %d, ring_num %d"),
905 			ring_type, ring_num);
906 		return QDF_STATUS_E_NOMEM;
907 	}
908 
909 	ring_params.ring_base_vaddr = srng->base_vaddr_unaligned +
910 		((unsigned long)srng->base_vaddr_unaligned % ring_base_align);
911 	ring_params.ring_base_paddr = srng->base_paddr_unaligned +
912 		((unsigned long)(ring_params.ring_base_vaddr) -
913 		(unsigned long)srng->base_vaddr_unaligned);
914 	ring_params.num_entries = num_entries;
915 
916 	if (soc->intr_mode == DP_INTR_MSI) {
917 		dp_srng_msi_setup(soc, &ring_params, ring_type, ring_num);
918 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
919 			  FL("Using MSI for ring_type: %d, ring_num %d"),
920 			  ring_type, ring_num);
921 
922 	} else {
923 		ring_params.msi_data = 0;
924 		ring_params.msi_addr = 0;
925 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
926 			  FL("Skipping MSI for ring_type: %d, ring_num %d"),
927 			  ring_type, ring_num);
928 	}
929 
930 	/*
931 	 * Setup interrupt timer and batch counter thresholds for
932 	 * interrupt mitigation based on ring type
933 	 */
934 	if (ring_type == REO_DST) {
935 		ring_params.intr_timer_thres_us =
936 			wlan_cfg_get_int_timer_threshold_rx(soc->wlan_cfg_ctx);
937 		ring_params.intr_batch_cntr_thres_entries =
938 			wlan_cfg_get_int_batch_threshold_rx(soc->wlan_cfg_ctx);
939 	} else if (ring_type == WBM2SW_RELEASE && (ring_num < 3)) {
940 		ring_params.intr_timer_thres_us =
941 			wlan_cfg_get_int_timer_threshold_tx(soc->wlan_cfg_ctx);
942 		ring_params.intr_batch_cntr_thres_entries =
943 			wlan_cfg_get_int_batch_threshold_tx(soc->wlan_cfg_ctx);
944 	} else {
945 		ring_params.intr_timer_thres_us =
946 			wlan_cfg_get_int_timer_threshold_other(soc->wlan_cfg_ctx);
947 		ring_params.intr_batch_cntr_thres_entries =
948 			wlan_cfg_get_int_batch_threshold_other(soc->wlan_cfg_ctx);
949 	}
950 
951 	/* Enable low threshold interrupts for rx buffer rings (regular and
952 	 * monitor buffer rings) and for the monitor status ring.
953 	 * TODO: See if this is required for any other ring
954 	 */
955 	if ((ring_type == RXDMA_BUF) || (ring_type == RXDMA_MONITOR_BUF) ||
956 		(ring_type == RXDMA_MONITOR_STATUS)) {
957 		/* TODO: Setting low threshold to 1/8th of ring size
958 		 * see if this needs to be configurable
959 		 */
960 		ring_params.low_threshold = num_entries >> 3;
961 		ring_params.flags |= HAL_SRNG_LOW_THRES_INTR_ENABLE;
962 		ring_params.intr_timer_thres_us =
963 			wlan_cfg_get_int_timer_threshold_rx(soc->wlan_cfg_ctx);
964 		ring_params.intr_batch_cntr_thres_entries = 0;
965 	}
966 
967 	srng->hal_srng = hal_srng_setup(hal_soc, ring_type, ring_num,
968 		mac_id, &ring_params);
969 
970 	if (!srng->hal_srng) {
971 		qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
972 				srng->alloc_size, srng->base_vaddr_unaligned,
973 				srng->base_paddr_unaligned, 0);
974 		return QDF_STATUS_E_FAILURE;
975 	}
976 
977 	return 0;
978 }
979 
980 /**
981  * dp_srng_cleanup - Internal function to cleanup SRNG rings used by data path
982  * Any buffers allocated and attached to ring entries are expected to be freed
983  * before calling this function.
984  */
985 static void dp_srng_cleanup(struct dp_soc *soc, struct dp_srng *srng,
986 	int ring_type, int ring_num)
987 {
988 	if (!srng->hal_srng) {
989 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
990 			FL("Ring type: %d, num:%d not setup"),
991 			ring_type, ring_num);
992 		return;
993 	}
994 
995 	hal_srng_cleanup(soc->hal_soc, srng->hal_srng);
996 
997 	qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
998 				srng->alloc_size,
999 				srng->base_vaddr_unaligned,
1000 				srng->base_paddr_unaligned, 0);
1001 	srng->hal_srng = NULL;
1002 }
1003 
1004 /* TODO: Need this interface from HIF */
1005 void *hif_get_hal_handle(void *hif_handle);
1006 
1007 /*
1008  * dp_service_srngs() - Top level interrupt handler for DP Ring interrupts
1009  * @dp_ctx: DP interrupt context handle
1010  * @dp_budget: Number of frames/descriptors that can be processed in one shot
1011  *
1012  * Return: remaining budget/quota for the soc device
1013  */
1014 static uint32_t dp_service_srngs(void *dp_ctx, uint32_t dp_budget)
1015 {
1016 	struct dp_intr *int_ctx = (struct dp_intr *)dp_ctx;
1017 	struct dp_soc *soc = int_ctx->soc;
1018 	int ring = 0;
1019 	uint32_t work_done  = 0;
1020 	int budget = dp_budget;
1021 	uint8_t tx_mask = int_ctx->tx_ring_mask;
1022 	uint8_t rx_mask = int_ctx->rx_ring_mask;
1023 	uint8_t rx_err_mask = int_ctx->rx_err_ring_mask;
1024 	uint8_t rx_wbm_rel_mask = int_ctx->rx_wbm_rel_ring_mask;
1025 	uint8_t reo_status_mask = int_ctx->reo_status_ring_mask;
1026 	uint32_t remaining_quota = dp_budget;
1027 	struct dp_pdev *pdev = NULL;
1028 	int mac_id;
1029 
1030 	/* Process Tx completion interrupts first to return back buffers */
1031 	while (tx_mask) {
1032 		if (tx_mask & 0x1) {
1033 			work_done = dp_tx_comp_handler(soc,
1034 					soc->tx_comp_ring[ring].hal_srng,
1035 					remaining_quota);
1036 
1037 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
1038 				"tx mask 0x%x ring %d, budget %d, work_done %d",
1039 				tx_mask, ring, budget, work_done);
1040 
1041 			budget -= work_done;
1042 			if (budget <= 0)
1043 				goto budget_done;
1044 
1045 			remaining_quota = budget;
1046 		}
1047 		tx_mask = tx_mask >> 1;
1048 		ring++;
1049 	}
1050 
1051 
1052 	/* Process REO Exception ring interrupt */
1053 	if (rx_err_mask) {
1054 		work_done = dp_rx_err_process(soc,
1055 				soc->reo_exception_ring.hal_srng,
1056 				remaining_quota);
1057 
1058 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
1059 			"REO Exception Ring: work_done %d budget %d",
1060 			work_done, budget);
1061 
1062 		budget -=  work_done;
1063 		if (budget <= 0) {
1064 			goto budget_done;
1065 		}
1066 		remaining_quota = budget;
1067 	}
1068 
1069 	/* Process Rx WBM release ring interrupt */
1070 	if (rx_wbm_rel_mask) {
1071 		work_done = dp_rx_wbm_err_process(soc,
1072 				soc->rx_rel_ring.hal_srng, remaining_quota);
1073 
1074 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
1075 			"WBM Release Ring: work_done %d budget %d",
1076 			work_done, budget);
1077 
1078 		budget -=  work_done;
1079 		if (budget <= 0) {
1080 			goto budget_done;
1081 		}
1082 		remaining_quota = budget;
1083 	}
1084 
1085 	/* Process Rx interrupts */
1086 	if (rx_mask) {
1087 		for (ring = 0; ring < soc->num_reo_dest_rings; ring++) {
1088 			if (rx_mask & (1 << ring)) {
1089 				work_done = dp_rx_process(int_ctx,
1090 					    soc->reo_dest_ring[ring].hal_srng,
1091 					    remaining_quota);
1092 
1093 				QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
1094 					"rx mask 0x%x ring %d, work_done %d budget %d",
1095 					rx_mask, ring, work_done, budget);
1096 
1097 				budget -=  work_done;
1098 				if (budget <= 0)
1099 					goto budget_done;
1100 				remaining_quota = budget;
1101 			}
1102 		}
1103 		for (ring = 0; ring < MAX_RX_MAC_RINGS; ring++) {
1104 			work_done = dp_rxdma_err_process(soc, ring,
1105 						remaining_quota);
1106 			budget -= work_done;
1107 		}
1108 	}
1109 
1110 	if (reo_status_mask)
1111 		dp_reo_status_ring_handler(soc);
1112 
1113 	/* Process LMAC interrupts */
1114 	for  (ring = 0 ; ring < MAX_PDEV_CNT; ring++) {
1115 		pdev = soc->pdev_list[ring];
1116 		if (pdev == NULL)
1117 			continue;
1118 		for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
1119 			int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id,
1120 								pdev->pdev_id);
1121 
1122 			if (int_ctx->rx_mon_ring_mask & (1 << mac_for_pdev)) {
1123 				work_done = dp_mon_process(soc, mac_for_pdev,
1124 						remaining_quota);
1125 				budget -= work_done;
1126 				if (budget <= 0)
1127 					goto budget_done;
1128 				remaining_quota = budget;
1129 			}
1130 
1131 			if (int_ctx->rxdma2host_ring_mask &
1132 					(1 << mac_for_pdev)) {
1133 				work_done = dp_rxdma_err_process(soc,
1134 							mac_for_pdev,
1135 							remaining_quota);
1136 				budget -=  work_done;
1137 				if (budget <= 0)
1138 					goto budget_done;
1139 				remaining_quota = budget;
1140 			}
1141 
1142 			if (int_ctx->host2rxdma_ring_mask &
1143 						(1 << mac_for_pdev)) {
1144 				union dp_rx_desc_list_elem_t *desc_list = NULL;
1145 				union dp_rx_desc_list_elem_t *tail = NULL;
1146 				struct dp_srng *rx_refill_buf_ring =
1147 					&pdev->rx_refill_buf_ring;
1148 
1149 				DP_STATS_INC(pdev, replenish.low_thresh_intrs,
1150 						1);
1151 				dp_rx_buffers_replenish(soc, mac_for_pdev,
1152 					rx_refill_buf_ring,
1153 					&soc->rx_desc_buf[mac_for_pdev], 0,
1154 					&desc_list, &tail);
1155 			}
1156 		}
1157 	}
1158 
1159 	qdf_lro_flush(int_ctx->lro_ctx);
1160 
1161 budget_done:
1162 	return dp_budget - budget;
1163 }
1164 
1165 #ifdef DP_INTR_POLL_BASED
1166 /* dp_interrupt_timer() - timer handler to poll for DP ring interrupts
1167  *
1168  * @arg: SoC Handle
1169  *
1170  * Return: void
1171  *
1172  */
1173 static void dp_interrupt_timer(void *arg)
1174 {
1175 	struct dp_soc *soc = (struct dp_soc *) arg;
1176 	int i;
1177 
1178 	if (qdf_atomic_read(&soc->cmn_init_done)) {
1179 		for (i = 0;
1180 			i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++)
1181 			dp_service_srngs(&soc->intr_ctx[i], 0xffff);
1182 
1183 		qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
1184 	}
1185 }
1186 
1187 /*
1188  * dp_soc_attach_poll() - Register polling-based handlers for DP interrupts
1189  * @txrx_soc: DP SOC handle
1190  *
1191  * Host driver will register for "DP_NUM_INTERRUPT_CONTEXTS" number of NAPI
1192  * contexts. Each NAPI context will have a tx_ring_mask, rx_ring_mask, and
1193  * rx_monitor_ring mask to indicate the rings that are processed by the handler.
1194  *
1195  * Return: 0 for success. nonzero for failure.
1196  */
1197 static QDF_STATUS dp_soc_attach_poll(void *txrx_soc)
1198 {
1199 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
1200 	int i;
1201 
1202 	soc->intr_mode = DP_INTR_POLL;
1203 
1204 	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
1205 		soc->intr_ctx[i].dp_intr_id = i;
1206 		soc->intr_ctx[i].tx_ring_mask =
1207 			wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, i);
1208 		soc->intr_ctx[i].rx_ring_mask =
1209 			wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, i);
1210 		soc->intr_ctx[i].rx_mon_ring_mask =
1211 			wlan_cfg_get_rx_mon_ring_mask(soc->wlan_cfg_ctx, i);
1212 		soc->intr_ctx[i].rx_err_ring_mask =
1213 			wlan_cfg_get_rx_err_ring_mask(soc->wlan_cfg_ctx, i);
1214 		soc->intr_ctx[i].rx_wbm_rel_ring_mask =
1215 			wlan_cfg_get_rx_wbm_rel_ring_mask(soc->wlan_cfg_ctx, i);
1216 		soc->intr_ctx[i].reo_status_ring_mask =
1217 			wlan_cfg_get_reo_status_ring_mask(soc->wlan_cfg_ctx, i);
1218 		soc->intr_ctx[i].rxdma2host_ring_mask =
1219 			wlan_cfg_get_rxdma2host_ring_mask(soc->wlan_cfg_ctx, i);
1220 		soc->intr_ctx[i].soc = soc;
1221 		soc->intr_ctx[i].lro_ctx = qdf_lro_init();
1222 	}
1223 
1224 	qdf_timer_init(soc->osdev, &soc->int_timer,
1225 			dp_interrupt_timer, (void *)soc,
1226 			QDF_TIMER_TYPE_WAKE_APPS);
1227 
1228 	return QDF_STATUS_SUCCESS;
1229 }
1230 #else
1231 static QDF_STATUS dp_soc_attach_poll(void *txrx_soc)
1232 {
1233 	return -QDF_STATUS_E_NOSUPPORT;
1234 }
1235 #endif
1236 
1237 static QDF_STATUS dp_soc_interrupt_attach(void *txrx_soc);
1238 #if defined(CONFIG_MCL)
1239 extern int con_mode_monitor;
1240 /*
1241  * dp_soc_interrupt_attach_wrapper() - Register handlers for DP interrupts
1242  * @txrx_soc: DP SOC handle
1243  *
1244  * Call the appropriate attach function based on the mode of operation.
1245  * This is a WAR for enabling monitor mode.
1246  *
1247  * Return: 0 for success. nonzero for failure.
1248  */
1249 static QDF_STATUS dp_soc_interrupt_attach_wrapper(void *txrx_soc)
1250 {
1251 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
1252 
1253 	if (!(soc->wlan_cfg_ctx->napi_enabled) ||
1254 	     con_mode_monitor == QDF_GLOBAL_MONITOR_MODE) {
1255 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
1256 				  "%s: Poll mode", __func__);
1257 		return dp_soc_attach_poll(txrx_soc);
1258 	} else {
1259 
1260 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
1261 				  "%s: Interrupt  mode", __func__);
1262 		return dp_soc_interrupt_attach(txrx_soc);
1263 	}
1264 }
1265 #else
1266 #if defined(DP_INTR_POLL_BASED) && DP_INTR_POLL_BASED
1267 static QDF_STATUS dp_soc_interrupt_attach_wrapper(void *txrx_soc)
1268 {
1269 	return dp_soc_attach_poll(txrx_soc);
1270 }
1271 #else
1272 static QDF_STATUS dp_soc_interrupt_attach_wrapper(void *txrx_soc)
1273 {
1274 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
1275 
1276 	if (hif_is_polled_mode_enabled(soc->hif_handle))
1277 		return dp_soc_attach_poll(txrx_soc);
1278 	else
1279 		return dp_soc_interrupt_attach(txrx_soc);
1280 }
1281 #endif
1282 #endif
1283 
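/*
 * dp_soc_interrupt_map_calculate_integrated() - build the IRQ id map for one
 * interrupt context when legacy (integrated) interrupts are used
 * @soc: Datapath SOC handle
 * @intr_ctx_num: interrupt context number
 * @irq_id_map: output array of IRQ ids
 * @num_irq_r: output count of IRQ ids filled in
 *
 * One IRQ id is added for every ring enabled in this context's ring masks.
 */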
1284 static void dp_soc_interrupt_map_calculate_integrated(struct dp_soc *soc,
1285 		int intr_ctx_num, int *irq_id_map, int *num_irq_r)
1286 {
1287 	int j;
1288 	int num_irq = 0;
1289 
1290 	int tx_mask =
1291 		wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
1292 	int rx_mask =
1293 		wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
1294 	int rx_mon_mask =
1295 		wlan_cfg_get_rx_mon_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
1296 	int rx_err_ring_mask = wlan_cfg_get_rx_err_ring_mask(
1297 					soc->wlan_cfg_ctx, intr_ctx_num);
1298 	int rx_wbm_rel_ring_mask = wlan_cfg_get_rx_wbm_rel_ring_mask(
1299 					soc->wlan_cfg_ctx, intr_ctx_num);
1300 	int reo_status_ring_mask = wlan_cfg_get_reo_status_ring_mask(
1301 					soc->wlan_cfg_ctx, intr_ctx_num);
1302 	int rxdma2host_ring_mask = wlan_cfg_get_rxdma2host_ring_mask(
1303 					soc->wlan_cfg_ctx, intr_ctx_num);
1304 	int host2rxdma_ring_mask = wlan_cfg_get_host2rxdma_ring_mask(
1305 					soc->wlan_cfg_ctx, intr_ctx_num);
1306 
1307 	for (j = 0; j < HIF_MAX_GRP_IRQ; j++) {
1308 
1309 		if (tx_mask & (1 << j)) {
1310 			irq_id_map[num_irq++] =
1311 				(wbm2host_tx_completions_ring1 - j);
1312 		}
1313 
1314 		if (rx_mask & (1 << j)) {
1315 			irq_id_map[num_irq++] =
1316 				(reo2host_destination_ring1 - j);
1317 		}
1318 
1319 		if (rxdma2host_ring_mask & (1 << j)) {
1320 			irq_id_map[num_irq++] =
1321 				rxdma2host_destination_ring_mac1 -
1322 				wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, j);
1323 		}
1324 
1325 		if (host2rxdma_ring_mask & (1 << j)) {
1326 			irq_id_map[num_irq++] =
1327 				host2rxdma_host_buf_ring_mac1 -
1328 				wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, j);
1329 		}
1330 
1331 		if (rx_mon_mask & (1 << j)) {
1332 			irq_id_map[num_irq++] =
1333 				ppdu_end_interrupts_mac1 -
1334 				wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, j);
1335 			irq_id_map[num_irq++] =
1336 				rxdma2host_monitor_status_ring_mac1 -
1337 				wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, j);
1338 		}
1339 
1340 		if (rx_wbm_rel_ring_mask & (1 << j))
1341 			irq_id_map[num_irq++] = wbm2host_rx_release;
1342 
1343 		if (rx_err_ring_mask & (1 << j))
1344 			irq_id_map[num_irq++] = reo2host_exception;
1345 
1346 		if (reo_status_ring_mask & (1 << j))
1347 			irq_id_map[num_irq++] = reo2host_status;
1348 
1349 	}
1350 	*num_irq_r = num_irq;
1351 }
1352 
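/*
 * dp_soc_interrupt_map_calculate_msi() - build the IRQ id map for one
 * interrupt context when MSI is used
 * @soc: Datapath SOC handle
 * @intr_ctx_num: interrupt context number
 * @irq_id_map: output array of IRQ ids
 * @num_irq_r: output count of IRQ ids filled in
 * @msi_vector_count: number of MSI vectors assigned to DP
 * @msi_vector_start: first MSI vector assigned to DP
 *
 * All rings of a context share a single MSI vector, chosen as
 * (intr_ctx_num % msi_vector_count) + msi_vector_start.
 */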
1353 static void dp_soc_interrupt_map_calculate_msi(struct dp_soc *soc,
1354 		int intr_ctx_num, int *irq_id_map, int *num_irq_r,
1355 		int msi_vector_count, int msi_vector_start)
1356 {
1357 	int tx_mask = wlan_cfg_get_tx_ring_mask(
1358 					soc->wlan_cfg_ctx, intr_ctx_num);
1359 	int rx_mask = wlan_cfg_get_rx_ring_mask(
1360 					soc->wlan_cfg_ctx, intr_ctx_num);
1361 	int rx_mon_mask = wlan_cfg_get_rx_mon_ring_mask(
1362 					soc->wlan_cfg_ctx, intr_ctx_num);
1363 	int rx_err_ring_mask = wlan_cfg_get_rx_err_ring_mask(
1364 					soc->wlan_cfg_ctx, intr_ctx_num);
1365 	int rx_wbm_rel_ring_mask = wlan_cfg_get_rx_wbm_rel_ring_mask(
1366 					soc->wlan_cfg_ctx, intr_ctx_num);
1367 	int reo_status_ring_mask = wlan_cfg_get_reo_status_ring_mask(
1368 					soc->wlan_cfg_ctx, intr_ctx_num);
1369 	int rxdma2host_ring_mask = wlan_cfg_get_rxdma2host_ring_mask(
1370 					soc->wlan_cfg_ctx, intr_ctx_num);
1371 
1372 	unsigned int vector =
1373 		(intr_ctx_num % msi_vector_count) + msi_vector_start;
1374 	int num_irq = 0;
1375 
1376 	soc->intr_mode = DP_INTR_MSI;
1377 
1378 	if (tx_mask | rx_mask | rx_mon_mask | rx_err_ring_mask |
1379 	    rx_wbm_rel_ring_mask | reo_status_ring_mask | rxdma2host_ring_mask)
1380 		irq_id_map[num_irq++] =
1381 			pld_get_msi_irq(soc->osdev->dev, vector);
1382 
1383 	*num_irq_r = num_irq;
1384 }
1385 
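/*
 * dp_soc_interrupt_map_calculate() - build the IRQ id map for one interrupt
 * context, using MSI when pld has assigned MSI vectors to "DP" and the
 * integrated (legacy) map otherwise
 * @soc: Datapath SOC handle
 * @intr_ctx_num: interrupt context number
 * @irq_id_map: output array of IRQ ids
 * @num_irq: output count of IRQ ids filled in
 */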
1386 static void dp_soc_interrupt_map_calculate(struct dp_soc *soc, int intr_ctx_num,
1387 				    int *irq_id_map, int *num_irq)
1388 {
1389 	int msi_vector_count, ret;
1390 	uint32_t msi_base_data, msi_vector_start;
1391 
1392 	ret = pld_get_user_msi_assignment(soc->osdev->dev, "DP",
1393 					    &msi_vector_count,
1394 					    &msi_base_data,
1395 					    &msi_vector_start);
1396 	if (ret)
1397 		return dp_soc_interrupt_map_calculate_integrated(soc,
1398 				intr_ctx_num, irq_id_map, num_irq);
1399 
1400 	else
1401 		dp_soc_interrupt_map_calculate_msi(soc,
1402 				intr_ctx_num, irq_id_map, num_irq,
1403 				msi_vector_count, msi_vector_start);
1404 }
1405 
1406 /*
1407  * dp_soc_interrupt_attach() - Register handlers for DP interrupts
1408  * @txrx_soc: DP SOC handle
1409  *
1410  * Host driver will register for "DP_NUM_INTERRUPT_CONTEXTS" number of NAPI
1411  * contexts. Each NAPI context will have a tx_ring_mask, rx_ring_mask, and
1412  * rx_monitor_ring mask to indicate the rings that are processed by the handler.
1413  *
1414  * Return: 0 for success. nonzero for failure.
1415  */
1416 static QDF_STATUS dp_soc_interrupt_attach(void *txrx_soc)
1417 {
1418 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
1419 
1420 	int i = 0;
1421 	int num_irq = 0;
1422 
1423 	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
1424 		int ret = 0;
1425 
1426 		/* Map of IRQ ids registered with one interrupt context */
1427 		int irq_id_map[HIF_MAX_GRP_IRQ];
1428 
1429 		int tx_mask =
1430 			wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, i);
1431 		int rx_mask =
1432 			wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, i);
1433 		int rx_mon_mask =
1434 			dp_soc_get_mon_mask_for_interrupt_mode(soc, i);
1435 		int rx_err_ring_mask =
1436 			wlan_cfg_get_rx_err_ring_mask(soc->wlan_cfg_ctx, i);
1437 		int rx_wbm_rel_ring_mask =
1438 			wlan_cfg_get_rx_wbm_rel_ring_mask(soc->wlan_cfg_ctx, i);
1439 		int reo_status_ring_mask =
1440 			wlan_cfg_get_reo_status_ring_mask(soc->wlan_cfg_ctx, i);
1441 		int rxdma2host_ring_mask =
1442 			wlan_cfg_get_rxdma2host_ring_mask(soc->wlan_cfg_ctx, i);
1443 		int host2rxdma_ring_mask =
1444 			wlan_cfg_get_host2rxdma_ring_mask(soc->wlan_cfg_ctx, i);
1445 
1446 
1447 		soc->intr_ctx[i].dp_intr_id = i;
1448 		soc->intr_ctx[i].tx_ring_mask = tx_mask;
1449 		soc->intr_ctx[i].rx_ring_mask = rx_mask;
1450 		soc->intr_ctx[i].rx_mon_ring_mask = rx_mon_mask;
1451 		soc->intr_ctx[i].rx_err_ring_mask = rx_err_ring_mask;
1452 		soc->intr_ctx[i].rxdma2host_ring_mask = rxdma2host_ring_mask;
1453 		soc->intr_ctx[i].host2rxdma_ring_mask = host2rxdma_ring_mask;
1454 		soc->intr_ctx[i].rx_wbm_rel_ring_mask = rx_wbm_rel_ring_mask;
1455 		soc->intr_ctx[i].reo_status_ring_mask = reo_status_ring_mask;
1456 
1457 		soc->intr_ctx[i].soc = soc;
1458 
1459 		num_irq = 0;
1460 
1461 		dp_soc_interrupt_map_calculate(soc, i, &irq_id_map[0],
1462 					       &num_irq);
1463 
1464 		ret = hif_register_ext_group(soc->hif_handle,
1465 				num_irq, irq_id_map, dp_service_srngs,
1466 				&soc->intr_ctx[i], "dp_intr",
1467 				HIF_EXEC_NAPI_TYPE, QCA_NAPI_DEF_SCALE_BIN_SHIFT);
1468 
1469 		if (ret) {
1470 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1471 			FL("failed, ret = %d"), ret);
1472 
1473 			return QDF_STATUS_E_FAILURE;
1474 		}
1475 		soc->intr_ctx[i].lro_ctx = qdf_lro_init();
1476 	}
1477 
1478 	hif_configure_ext_group_interrupts(soc->hif_handle);
1479 
1480 	return QDF_STATUS_SUCCESS;
1481 }
1482 
1483 /*
1484  * dp_soc_interrupt_detach() - Deregister any allocations done for interrupts
1485  * @txrx_soc: DP SOC handle
1486  *
1487  * Return: void
1488  */
1489 static void dp_soc_interrupt_detach(void *txrx_soc)
1490 {
1491 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
1492 	int i;
1493 
1494 	if (soc->intr_mode == DP_INTR_POLL) {
1495 		qdf_timer_stop(&soc->int_timer);
1496 		qdf_timer_free(&soc->int_timer);
1497 	} else {
1498 		hif_deregister_exec_group(soc->hif_handle, "dp_intr");
1499 	}
1500 
1501 	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
1502 		soc->intr_ctx[i].tx_ring_mask = 0;
1503 		soc->intr_ctx[i].rx_ring_mask = 0;
1504 		soc->intr_ctx[i].rx_mon_ring_mask = 0;
1505 		soc->intr_ctx[i].rx_err_ring_mask = 0;
1506 		soc->intr_ctx[i].rx_wbm_rel_ring_mask = 0;
1507 		soc->intr_ctx[i].reo_status_ring_mask = 0;
1508 		soc->intr_ctx[i].rxdma2host_ring_mask = 0;
1509 		soc->intr_ctx[i].host2rxdma_ring_mask = 0;
1510 
1511 		qdf_lro_deinit(soc->intr_ctx[i].lro_ctx);
1512 	}
1513 }
1514 
1515 #define AVG_MAX_MPDUS_PER_TID 128
1516 #define AVG_TIDS_PER_CLIENT 2
1517 #define AVG_FLOWS_PER_TID 2
1518 #define AVG_MSDUS_PER_FLOW 128
1519 #define AVG_MSDUS_PER_MPDU 4
1520 
1521 /*
1522  * Allocate and set up the link descriptor pool that will be used by HW for
1523  * various link and queue descriptors, and is managed by WBM
1524  */
1525 static int dp_hw_link_desc_pool_setup(struct dp_soc *soc)
1526 {
1527 	int link_desc_size = hal_get_link_desc_size(soc->hal_soc);
1528 	int link_desc_align = hal_get_link_desc_align(soc->hal_soc);
1529 	uint32_t max_clients = wlan_cfg_get_max_clients(soc->wlan_cfg_ctx);
1530 	uint32_t num_mpdus_per_link_desc =
1531 		hal_num_mpdus_per_link_desc(soc->hal_soc);
1532 	uint32_t num_msdus_per_link_desc =
1533 		hal_num_msdus_per_link_desc(soc->hal_soc);
1534 	uint32_t num_mpdu_links_per_queue_desc =
1535 		hal_num_mpdu_links_per_queue_desc(soc->hal_soc);
1536 	uint32_t max_alloc_size = wlan_cfg_max_alloc_size(soc->wlan_cfg_ctx);
1537 	uint32_t total_link_descs, total_mem_size;
1538 	uint32_t num_mpdu_link_descs, num_mpdu_queue_descs;
1539 	uint32_t num_tx_msdu_link_descs, num_rx_msdu_link_descs;
1540 	uint32_t num_link_desc_banks;
1541 	uint32_t last_bank_size = 0;
1542 	uint32_t entry_size, num_entries;
1543 	int i;
1544 	uint32_t desc_id = 0;
1545 
1546 	/* Only Tx queue descriptors are allocated from the common link descriptor
1547 	 * pool. Rx queue descriptors (REO queue extension descriptors) are not
1548 	 * included here because they are expected to be allocated contiguously
1549 	 * with the REO queue descriptors.
1550 	 */
1551 	num_mpdu_link_descs = (max_clients * AVG_TIDS_PER_CLIENT *
1552 		AVG_MAX_MPDUS_PER_TID) / num_mpdus_per_link_desc;
1553 
1554 	num_mpdu_queue_descs = num_mpdu_link_descs /
1555 		num_mpdu_links_per_queue_desc;
1556 
1557 	num_tx_msdu_link_descs = (max_clients * AVG_TIDS_PER_CLIENT *
1558 		AVG_FLOWS_PER_TID * AVG_MSDUS_PER_FLOW) /
1559 		num_msdus_per_link_desc;
1560 
1561 	num_rx_msdu_link_descs = (max_clients * AVG_TIDS_PER_CLIENT *
1562 		AVG_MAX_MPDUS_PER_TID * AVG_MSDUS_PER_MPDU) / 6;
1563 
1564 	num_entries = num_mpdu_link_descs + num_mpdu_queue_descs +
1565 		num_tx_msdu_link_descs + num_rx_msdu_link_descs;
1566 
1567 	/* Round up to power of 2 */
1568 	total_link_descs = 1;
1569 	while (total_link_descs < num_entries)
1570 		total_link_descs <<= 1;
1571 
1572 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
1573 		FL("total_link_descs: %u, link_desc_size: %d"),
1574 		total_link_descs, link_desc_size);
1575 	total_mem_size =  total_link_descs * link_desc_size;
1576 
1577 	total_mem_size += link_desc_align;
1578 
1579 	if (total_mem_size <= max_alloc_size) {
1580 		num_link_desc_banks = 0;
1581 		last_bank_size = total_mem_size;
1582 	} else {
1583 		num_link_desc_banks = (total_mem_size) /
1584 			(max_alloc_size - link_desc_align);
1585 		last_bank_size = total_mem_size %
1586 			(max_alloc_size - link_desc_align);
1587 	}
1588 
1589 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
1590 		FL("total_mem_size: %d, num_link_desc_banks: %u"),
1591 		total_mem_size, num_link_desc_banks);
1592 
1593 	for (i = 0; i < num_link_desc_banks; i++) {
1594 		soc->link_desc_banks[i].base_vaddr_unaligned =
1595 			qdf_mem_alloc_consistent(soc->osdev, soc->osdev->dev,
1596 			max_alloc_size,
1597 			&(soc->link_desc_banks[i].base_paddr_unaligned));
1598 		soc->link_desc_banks[i].size = max_alloc_size;
1599 
1600 		soc->link_desc_banks[i].base_vaddr = (void *)((unsigned long)(
1601 			soc->link_desc_banks[i].base_vaddr_unaligned) +
1602 			((unsigned long)(
1603 			soc->link_desc_banks[i].base_vaddr_unaligned) %
1604 			link_desc_align));
1605 
1606 		soc->link_desc_banks[i].base_paddr = (unsigned long)(
1607 			soc->link_desc_banks[i].base_paddr_unaligned) +
1608 			((unsigned long)(soc->link_desc_banks[i].base_vaddr) -
1609 			(unsigned long)(
1610 			soc->link_desc_banks[i].base_vaddr_unaligned));
1611 
1612 		if (!soc->link_desc_banks[i].base_vaddr_unaligned) {
1613 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1614 				FL("Link descriptor memory alloc failed"));
1615 			goto fail;
1616 		}
1617 	}
1618 
1619 	if (last_bank_size) {
1620 		/* Allocate the last bank in case the total memory required is
1621 		 * not an exact multiple of max_alloc_size
1622 		 */
1623 		soc->link_desc_banks[i].base_vaddr_unaligned =
1624 			qdf_mem_alloc_consistent(soc->osdev, soc->osdev->dev,
1625 			last_bank_size,
1626 			&(soc->link_desc_banks[i].base_paddr_unaligned));
1627 		soc->link_desc_banks[i].size = last_bank_size;
1628 
1629 		soc->link_desc_banks[i].base_vaddr = (void *)((unsigned long)
1630 			(soc->link_desc_banks[i].base_vaddr_unaligned) +
1631 			((unsigned long)(
1632 			soc->link_desc_banks[i].base_vaddr_unaligned) %
1633 			link_desc_align));
1634 
1635 		soc->link_desc_banks[i].base_paddr =
1636 			(unsigned long)(
1637 			soc->link_desc_banks[i].base_paddr_unaligned) +
1638 			((unsigned long)(soc->link_desc_banks[i].base_vaddr) -
1639 			(unsigned long)(
1640 			soc->link_desc_banks[i].base_vaddr_unaligned));
1641 	}
1642 
1643 
1644 	/* Allocate and setup link descriptor idle list for HW internal use */
1645 	entry_size = hal_srng_get_entrysize(soc->hal_soc, WBM_IDLE_LINK);
1646 	total_mem_size = entry_size * total_link_descs;
1647 
1648 	if (total_mem_size <= max_alloc_size) {
1649 		void *desc;
1650 
1651 		if (dp_srng_setup(soc, &soc->wbm_idle_link_ring,
1652 			WBM_IDLE_LINK, 0, 0, total_link_descs)) {
1653 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1654 				FL("Link desc idle ring setup failed"));
1655 			goto fail;
1656 		}
1657 
1658 		hal_srng_access_start_unlocked(soc->hal_soc,
1659 			soc->wbm_idle_link_ring.hal_srng);
1660 
1661 		for (i = 0; i < MAX_LINK_DESC_BANKS &&
1662 			soc->link_desc_banks[i].base_paddr; i++) {
1663 			uint32_t num_entries = (soc->link_desc_banks[i].size -
1664 				((unsigned long)(
1665 				soc->link_desc_banks[i].base_vaddr) -
1666 				(unsigned long)(
1667 				soc->link_desc_banks[i].base_vaddr_unaligned)))
1668 				/ link_desc_size;
1669 			unsigned long paddr = (unsigned long)(
1670 				soc->link_desc_banks[i].base_paddr);
1671 
1672 			while (num_entries && (desc = hal_srng_src_get_next(
1673 				soc->hal_soc,
1674 				soc->wbm_idle_link_ring.hal_srng))) {
1675 				hal_set_link_desc_addr(desc,
1676 					LINK_DESC_COOKIE(desc_id, i), paddr);
1677 				num_entries--;
1678 				desc_id++;
1679 				paddr += link_desc_size;
1680 			}
1681 		}
1682 		hal_srng_access_end_unlocked(soc->hal_soc,
1683 			soc->wbm_idle_link_ring.hal_srng);
1684 	} else {
1685 		uint32_t num_scatter_bufs;
1686 		uint32_t num_entries_per_buf;
1687 		uint32_t rem_entries;
1688 		uint8_t *scatter_buf_ptr;
1689 		uint16_t scatter_buf_num;
1690 
1691 		soc->wbm_idle_scatter_buf_size =
1692 			hal_idle_list_scatter_buf_size(soc->hal_soc);
1693 		num_entries_per_buf = hal_idle_scatter_buf_num_entries(
1694 			soc->hal_soc, soc->wbm_idle_scatter_buf_size);
1695 		num_scatter_bufs = hal_idle_list_num_scatter_bufs(
1696 					soc->hal_soc, total_mem_size,
1697 					soc->wbm_idle_scatter_buf_size);
1698 
1699 		if (num_scatter_bufs > MAX_IDLE_SCATTER_BUFS) {
1700 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1701 					FL("scatter bufs size out of bounds"));
1702 			goto fail;
1703 		}
1704 
1705 		for (i = 0; i < num_scatter_bufs; i++) {
1706 			soc->wbm_idle_scatter_buf_base_vaddr[i] =
1707 				qdf_mem_alloc_consistent(soc->osdev,
1708 							soc->osdev->dev,
1709 				soc->wbm_idle_scatter_buf_size,
1710 				&(soc->wbm_idle_scatter_buf_base_paddr[i]));
1711 			if (soc->wbm_idle_scatter_buf_base_vaddr[i] == NULL) {
1712 				QDF_TRACE(QDF_MODULE_ID_DP,
1713 						QDF_TRACE_LEVEL_ERROR,
1714 					FL("Scatter list memory alloc failed"));
1715 				goto fail;
1716 			}
1717 		}
1718 
1719 		/* Populate idle list scatter buffers with link descriptor
1720 		 * pointers
1721 		 */
1722 		scatter_buf_num = 0;
1723 		scatter_buf_ptr = (uint8_t *)(
1724 			soc->wbm_idle_scatter_buf_base_vaddr[scatter_buf_num]);
1725 		rem_entries = num_entries_per_buf;
1726 
1727 		for (i = 0; i < MAX_LINK_DESC_BANKS &&
1728 			soc->link_desc_banks[i].base_paddr; i++) {
1729 			uint32_t num_link_descs =
1730 				(soc->link_desc_banks[i].size -
1731 				((unsigned long)(
1732 				soc->link_desc_banks[i].base_vaddr) -
1733 				(unsigned long)(
1734 				soc->link_desc_banks[i].base_vaddr_unaligned)))
1735 				/ link_desc_size;
1736 			unsigned long paddr = (unsigned long)(
1737 				soc->link_desc_banks[i].base_paddr);
1738 
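			/*
			 * Write one idle list entry per link descriptor,
			 * advancing to the next scatter buffer once the
			 * current one has no remaining entries.
			 */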
1739 			while (num_link_descs) {
1740 				hal_set_link_desc_addr((void *)scatter_buf_ptr,
1741 					LINK_DESC_COOKIE(desc_id, i), paddr);
1742 				num_link_descs--;
1743 				desc_id++;
1744 				paddr += link_desc_size;
1745 				rem_entries--;
1746 				if (rem_entries) {
1747 					scatter_buf_ptr += entry_size;
1748 				} else {
1749 					rem_entries = num_entries_per_buf;
1750 					scatter_buf_num++;
1751 
1752 					if (scatter_buf_num >= num_scatter_bufs)
1753 						break;
1754 
1755 					scatter_buf_ptr = (uint8_t *)(
1756 						soc->wbm_idle_scatter_buf_base_vaddr[
1757 						scatter_buf_num]);
1758 				}
1759 			}
1760 		}
1761 		/* Setup link descriptor idle list in HW */
1762 		hal_setup_link_idle_list(soc->hal_soc,
1763 			soc->wbm_idle_scatter_buf_base_paddr,
1764 			soc->wbm_idle_scatter_buf_base_vaddr,
1765 			num_scatter_bufs, soc->wbm_idle_scatter_buf_size,
1766 			(uint32_t)(scatter_buf_ptr -
1767 			(uint8_t *)(soc->wbm_idle_scatter_buf_base_vaddr[
1768 			scatter_buf_num-1])), total_link_descs);
1769 	}
1770 	return 0;
1771 
1772 fail:
1773 	if (soc->wbm_idle_link_ring.hal_srng) {
1774 		dp_srng_cleanup(soc, &soc->wbm_idle_link_ring,
1775 			WBM_IDLE_LINK, 0);
1776 	}
1777 
1778 	for (i = 0; i < MAX_IDLE_SCATTER_BUFS; i++) {
1779 		if (soc->wbm_idle_scatter_buf_base_vaddr[i]) {
1780 			qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
1781 				soc->wbm_idle_scatter_buf_size,
1782 				soc->wbm_idle_scatter_buf_base_vaddr[i],
1783 				soc->wbm_idle_scatter_buf_base_paddr[i], 0);
1784 			soc->wbm_idle_scatter_buf_base_vaddr[i] = NULL;
1785 		}
1786 	}
1787 
1788 	for (i = 0; i < MAX_LINK_DESC_BANKS; i++) {
1789 		if (soc->link_desc_banks[i].base_vaddr_unaligned) {
1790 			qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
1791 				soc->link_desc_banks[i].size,
1792 				soc->link_desc_banks[i].base_vaddr_unaligned,
1793 				soc->link_desc_banks[i].base_paddr_unaligned,
1794 				0);
1795 			soc->link_desc_banks[i].base_vaddr_unaligned = NULL;
1796 		}
1797 	}
1798 	return QDF_STATUS_E_FAILURE;
1799 }
1800 
1801 /*
1802  * Free the link descriptor pool that was set up for HW use
1803  */
1804 static void dp_hw_link_desc_pool_cleanup(struct dp_soc *soc)
1805 {
1806 	int i;
1807 
1808 	if (soc->wbm_idle_link_ring.hal_srng) {
1809 		dp_srng_cleanup(soc, &soc->wbm_idle_link_ring,
1810 			WBM_IDLE_LINK, 0);
1811 	}
1812 
1813 	for (i = 0; i < MAX_IDLE_SCATTER_BUFS; i++) {
1814 		if (soc->wbm_idle_scatter_buf_base_vaddr[i]) {
1815 			qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
1816 				soc->wbm_idle_scatter_buf_size,
1817 				soc->wbm_idle_scatter_buf_base_vaddr[i],
1818 				soc->wbm_idle_scatter_buf_base_paddr[i], 0);
1819 			soc->wbm_idle_scatter_buf_base_vaddr[i] = NULL;
1820 		}
1821 	}
1822 
1823 	for (i = 0; i < MAX_LINK_DESC_BANKS; i++) {
1824 		if (soc->link_desc_banks[i].base_vaddr_unaligned) {
1825 			qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
1826 				soc->link_desc_banks[i].size,
1827 				soc->link_desc_banks[i].base_vaddr_unaligned,
1828 				soc->link_desc_banks[i].base_paddr_unaligned,
1829 				0);
1830 			soc->link_desc_banks[i].base_vaddr_unaligned = NULL;
1831 		}
1832 	}
1833 }
1834 
1835 #define REO_DST_RING_SIZE_QCA6290 1024
1836 #define REO_DST_RING_SIZE_QCA8074 2048
1837 
1838 /*
1839  * dp_wds_aging_timer_fn() - Timer callback function for WDS aging
1840  * @soc: Datapath SOC handle
1841  *
1842  * This is a timer function used to age out stale AST nodes from
1843  * the AST table
1844  */
1845 #ifdef FEATURE_WDS
1846 static void dp_wds_aging_timer_fn(void *soc_hdl)
1847 {
1848 	struct dp_soc *soc = (struct dp_soc *) soc_hdl;
1849 	struct dp_pdev *pdev;
1850 	struct dp_vdev *vdev;
1851 	struct dp_peer *peer;
1852 	struct dp_ast_entry *ase, *temp_ase;
1853 	int i;
1854 
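	/*
	 * Two-pass aging: an entry that is still marked active only has its
	 * is_active flag cleared on this pass; it is deleted only if it is
	 * still inactive when the timer fires again.
	 */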
1855 	qdf_spin_lock_bh(&soc->ast_lock);
1856 
1857 	for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) {
1858 		pdev = soc->pdev_list[i];
1859 		qdf_spin_lock_bh(&pdev->vdev_list_lock);
1860 		DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
1861 			DP_VDEV_ITERATE_PEER_LIST(vdev, peer) {
1862 				DP_PEER_ITERATE_ASE_LIST(peer, ase, temp_ase) {
1863 					/*
1864 					 * Do not expire static ast entries
1865 					 * and HM WDS entries
1866 					 */
1867 					if (ase->type != CDP_TXRX_AST_TYPE_WDS)
1868 						continue;
1869 
1870 					if (ase->is_active) {
1871 						ase->is_active = FALSE;
1872 						continue;
1873 					}
1874 
1875 					DP_STATS_INC(soc, ast.aged_out, 1);
1876 					dp_peer_del_ast(soc, ase);
1877 				}
1878 			}
1879 		}
1880 		qdf_spin_unlock_bh(&pdev->vdev_list_lock);
1881 	}
1882 
1883 	qdf_spin_unlock_bh(&soc->ast_lock);
1884 
1885 	if (qdf_atomic_read(&soc->cmn_init_done))
1886 		qdf_timer_mod(&soc->wds_aging_timer, DP_WDS_AGING_TIMER_DEFAULT_MS);
1887 }
1888 
1889 
1890 /*
1891  * dp_soc_wds_attach() - Setup WDS timer and AST table
1892  * @soc:		Datapath SOC handle
1893  *
1894  * Return: None
1895  */
1896 static void dp_soc_wds_attach(struct dp_soc *soc)
1897 {
1898 	qdf_timer_init(soc->osdev, &soc->wds_aging_timer,
1899 			dp_wds_aging_timer_fn, (void *)soc,
1900 			QDF_TIMER_TYPE_WAKE_APPS);
1901 
1902 	qdf_timer_mod(&soc->wds_aging_timer, DP_WDS_AGING_TIMER_DEFAULT_MS);
1903 }
1904 
1905 /*
1906  * dp_soc_wds_detach() - Detach WDS data structures and timers
1907  * @txrx_soc: DP SOC handle
1908  *
1909  * Return: None
1910  */
1911 static void dp_soc_wds_detach(struct dp_soc *soc)
1912 {
1913 	qdf_timer_stop(&soc->wds_aging_timer);
1914 	qdf_timer_free(&soc->wds_aging_timer);
1915 }
1916 #else
1917 static void dp_soc_wds_attach(struct dp_soc *soc)
1918 {
1919 }
1920 
1921 static void dp_soc_wds_detach(struct dp_soc *soc)
1922 {
1923 }
1924 #endif
1925 
1926 /*
1927  * dp_soc_reset_cpu_ring_map() - Reset cpu ring map
1928  * @soc: Datapath soc handler
1929  *
1930  * This api resets the default cpu ring map
1931  */
1932 
1933 static void dp_soc_reset_cpu_ring_map(struct dp_soc *soc)
1934 {
1935 	uint8_t i;
1936 	int nss_config = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
1937 
1938 	for (i = 0; i < WLAN_CFG_INT_NUM_CONTEXTS; i++) {
1939 		if (nss_config == 1) {
1940 			/*
1941 			 * Setting Tx ring map for one nss offloaded radio
1942 			 */
1943 			soc->tx_ring_map[i] = dp_cpu_ring_map[DP_NSS_FIRST_RADIO_OFFLOADED_MAP][i];
1944 		} else if (nss_config == 2) {
1945 			/*
1946 			 * Setting Tx ring for two nss offloaded radios
1947 			 */
1948 			soc->tx_ring_map[i] = dp_cpu_ring_map[DP_NSS_SECOND_RADIO_OFFLOADED_MAP][i];
1949 		} else {
1950 			/*
1951 			 * Setting Tx ring map for all nss offloaded radios
1952 			 */
1953 			soc->tx_ring_map[i] = dp_cpu_ring_map[DP_NSS_ALL_RADIO_OFFLOADED_MAP][i];
1954 		}
1955 	}
1956 }
1957 
1958 /*
1959  * dp_soc_ring_if_nss_offloaded() - find if ring is offloaded to NSS
1960  * @dp_soc - DP soc handle
1961  * @ring_type - ring type
1962  * @ring_num - ring_num
1963  *
1964  * return 0 or 1
1965  * Return: non-zero if the ring is NSS offloaded, 0 otherwise
1966 static uint8_t dp_soc_ring_if_nss_offloaded(struct dp_soc *soc, enum hal_ring_type ring_type, int ring_num)
1967 {
1968 	uint8_t nss_config = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
1969 	uint8_t status = 0;
1970 
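	/*
	 * nss_config is a bitmap of NSS-offloaded radios; for the per-radio
	 * ring types below, ring_num indexes that bitmap, so a set bit means
	 * the ring is serviced by NSS rather than the host.
	 */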
1971 	switch (ring_type) {
1972 	case WBM2SW_RELEASE:
1973 	case REO_DST:
1974 	case RXDMA_BUF:
1975 		status = ((nss_config) & (1 << ring_num));
1976 		break;
1977 	default:
1978 		break;
1979 	}
1980 
1981 	return status;
1982 }
1983 
1984 /*
1985  * dp_soc_reset_intr_mask() - reset interrupt mask
1986  * @dp_soc - DP Soc handle
1987  *
1988  * Return: Return void
1989  * Return: void
1990 static void dp_soc_reset_intr_mask(struct dp_soc *soc)
1991 {
1992 	uint8_t j;
1993 	int *grp_mask = NULL;
1994 	int group_number, mask, num_ring;
1995 
1996 	/* number of tx ring */
1997 	num_ring = wlan_cfg_num_tcl_data_rings(soc->wlan_cfg_ctx);
1998 
1999 	/*
2000 	 * group mask for tx completion  ring.
2001 	 */
2002 	grp_mask =  &soc->wlan_cfg_ctx->int_tx_ring_mask[0];
2003 
2004 	/* loop and reset the mask for only offloaded ring */
2005 	for (j = 0; j < num_ring; j++) {
2006 		if (!dp_soc_ring_if_nss_offloaded(soc, WBM2SW_RELEASE, j)) {
2007 			continue;
2008 		}
2009 
2010 		/*
2011 		 * Group number corresponding to tx offloaded ring.
2012 		 */
2013 		group_number = dp_srng_find_ring_in_mask(j, grp_mask);
2014 		if (group_number < 0) {
2015 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2016 					FL("ring not part of any group; ring_type: %d,ring_num %d"),
2017 					WBM2SW_RELEASE, j);
2018 			return;
2019 		}
2020 
2021 		/* reset the tx mask for offloaded ring */
2022 		mask = wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, group_number);
2023 		mask &= (~(1 << j));
2024 
2025 		/*
2026 		 * reset the interrupt mask for offloaded ring.
2027 		 */
2028 		wlan_cfg_set_tx_ring_mask(soc->wlan_cfg_ctx, group_number, mask);
2029 	}
2030 
2031 	/* number of rx rings */
2032 	num_ring = wlan_cfg_num_reo_dest_rings(soc->wlan_cfg_ctx);
2033 
2034 	/*
2035 	 * group mask for reo destination ring.
2036 	 */
2037 	grp_mask = &soc->wlan_cfg_ctx->int_rx_ring_mask[0];
2038 
2039 	/* loop and reset the mask for only offloaded ring */
2040 	for (j = 0; j < num_ring; j++) {
2041 		if (!dp_soc_ring_if_nss_offloaded(soc, REO_DST, j)) {
2042 			continue;
2043 		}
2044 
2045 		/*
2046 		 * Group number corresponding to rx offloaded ring.
2047 		 */
2048 		group_number = dp_srng_find_ring_in_mask(j, grp_mask);
2049 		if (group_number < 0) {
2050 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2051 					FL("ring not part of any group; ring_type: %d,ring_num %d"),
2052 					REO_DST, j);
2053 			return;
2054 		}
2055 
2056 		/* reset the rx interrupt mask for the offloaded ring */
2057 		mask =  wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, group_number);
2058 		mask &= (~(1 << j));
2059 
2060 		/*
2061 		 * set the interrupt mask to zero for rx offloaded radio.
2062 		 */
2063 		wlan_cfg_set_rx_ring_mask(soc->wlan_cfg_ctx, group_number, mask);
2064 	}
2065 
2066 	/*
2067 	 * group mask for Rx buffer refill ring
2068 	 */
2069 	grp_mask = &soc->wlan_cfg_ctx->int_host2rxdma_ring_mask[0];
2070 
2071 	/* loop and reset the mask for only offloaded ring */
2072 	for (j = 0; j < MAX_PDEV_CNT; j++) {
2073 		if (!dp_soc_ring_if_nss_offloaded(soc, RXDMA_BUF, j)) {
2074 			continue;
2075 		}
2076 
2077 		/*
2078 		 * Group number corresponding to rx offloaded ring.
2079 		 */
2080 		group_number = dp_srng_find_ring_in_mask(j, grp_mask);
2081 		if (group_number < 0) {
2082 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2083 					FL("ring not part of any group; ring_type: %d,ring_num %d"),
2084 					RXDMA_BUF, j);
2085 			return;
2086 		}
2087 
2088 		/* reset the host2rxdma interrupt mask for the offloaded ring */
2089 		mask =  wlan_cfg_get_host2rxdma_ring_mask(soc->wlan_cfg_ctx,
2090 				group_number);
2091 		mask &= (~(1 << j));
2092 
2093 		/*
2094 		 * set the interrupt mask to zero for rx offloaded radio.
2095 		 */
2096 		wlan_cfg_set_host2rxdma_ring_mask(soc->wlan_cfg_ctx,
2097 			group_number, mask);
2098 	}
2099 }
2100 
2101 #ifdef IPA_OFFLOAD
2102 /**
2103  * dp_reo_remap_config() - configure reo remap register value based
2104  *                         on nss configuration.
2105  *		based on the offload_radio value below, the remap
2106  *		configuration gets applied:
2107  *		0 - both Radios handled by host (remap rings 1, 2, 3 & 4)
2108  *		1 - 1st Radio handled by NSS (remap rings 2, 3 & 4)
2109  *		2 - 2nd Radio handled by NSS (remap rings 1, 2 & 4)
2110  *		3 - both Radios handled by NSS (remap not required)
2111  *		4 - IPA OFFLOAD enabled (remap rings 1,2 & 3)
2112  *
2113  * @remap1: output parameter indicates reo remap 1 register value
2114  * @remap2: output parameter indicates reo remap 2 register value
2115  * Return: bool type, true if remap is configured else false.
2116  */
2117 static bool dp_reo_remap_config(struct dp_soc *soc,
2118 				uint32_t *remap1,
2119 				uint32_t *remap2)
2120 {
2121 
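	/*
	 * Each destination indication is a 3-bit field; the trailing << 8
	 * is assumed to align the eight fields with the register layout
	 * expected by hal_reo_setup(). With IPA offload only REO2SW1-3 are
	 * round-robined so that the remaining ring can be left to IPA.
	 */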
2122 	*remap1 = ((0x1 << 0) | (0x2 << 3) | (0x3 << 6) | (0x1 << 9) |
2123 		(0x2 << 12) | (0x3 << 15) | (0x1 << 18) | (0x2 << 21)) << 8;
2124 
2125 	*remap2 = ((0x3 << 0) | (0x1 << 3) | (0x2 << 6) | (0x3 << 9) |
2126 		(0x1 << 12) | (0x2 << 15) | (0x3 << 18) | (0x1 << 21)) << 8;
2127 
2128 	return true;
2129 }
2130 #else
2131 static bool dp_reo_remap_config(struct dp_soc *soc,
2132 				uint32_t *remap1,
2133 				uint32_t *remap2)
2134 {
2135 	uint8_t offload_radio = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
2136 
2137 	switch (offload_radio) {
2138 	case 0:
2139 		*remap1 = ((0x1 << 0) | (0x2 << 3) | (0x3 << 6) |
2140 			(0x4 << 9) | (0x1 << 12) | (0x2 << 15) |
2141 			(0x3 << 18) | (0x4 << 21)) << 8;
2142 
2143 		*remap2 = ((0x1 << 0) | (0x2 << 3) | (0x3 << 6) |
2144 			(0x4 << 9) | (0x1 << 12) | (0x2 << 15) |
2145 			(0x3 << 18) | (0x4 << 21)) << 8;
2146 		break;
2147 
2148 	case 1:
2149 		*remap1 = ((0x2 << 0) | (0x3 << 3) | (0x4 << 6) |
2150 			(0x2 << 9) | (0x3 << 12) | (0x4 << 15) |
2151 			(0x2 << 18) | (0x3 << 21)) << 8;
2152 
2153 		*remap2 = ((0x4 << 0) | (0x2 << 3) | (0x3 << 6) |
2154 			(0x4 << 9) | (0x2 << 12) | (0x3 << 15) |
2155 			(0x4 << 18) | (0x2 << 21)) << 8;
2156 		break;
2157 
2158 	case 2:
2159 		*remap1 = ((0x1 << 0) | (0x3 << 3) | (0x4 << 6) |
2160 			(0x1 << 9) | (0x3 << 12) | (0x4 << 15) |
2161 			(0x1 << 18) | (0x3 << 21)) << 8;
2162 
2163 		*remap2 = ((0x4 << 0) | (0x1 << 3) | (0x3 << 6) |
2164 			(0x4 << 9) | (0x1 << 12) | (0x3 << 15) |
2165 			(0x4 << 18) | (0x1 << 21)) << 8;
2166 		break;
2167 
2168 	case 3:
2169 		/* return false if both radios are offloaded to NSS */
2170 		return false;
2171 	}
2172 	return true;
2173 }
2174 #endif
2175 
2176 /*
2177  * dp_reo_frag_dst_set() - configure reo register to set the
2178  *                        fragment destination ring
2179  * @soc : Datapath soc
2180  * @frag_dst_ring : output parameter to set fragment destination ring
2181  *
2182  * Based on offload_radio below, the fragment destination ring is selected:
2183  * 0 - TCL
2184  * 1 - SW1
2185  * 2 - SW2
2186  * 3 - SW3
2187  * 4 - SW4
2188  * 5 - Release
2189  * 6 - FW
2190  * 7 - alternate select
2191  *
2192  * return: void
2193  */
2194 static void dp_reo_frag_dst_set(struct dp_soc *soc, uint8_t *frag_dst_ring)
2195 {
2196 	uint8_t offload_radio = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
2197 
2198 	switch (offload_radio) {
2199 	case 0:
2200 		*frag_dst_ring = HAL_SRNG_REO_EXCEPTION;
2201 		break;
2202 	case 3:
2203 		*frag_dst_ring = HAL_SRNG_REO_ALTERNATE_SELECT;
2204 		break;
2205 	default:
2206 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2207 				FL("dp_reo_frag_dst_set invalid offload radio config"));
2208 		break;
2209 	}
2210 }
2211 
2212 /*
2213  * dp_soc_cmn_setup() - Common SoC level initialization
2214  * @soc:		Datapath SOC handle
2215  *
2216  * This is an internal function used to setup common SOC data structures,
2217  * to be called from PDEV attach after receiving HW mode capabilities from FW
2218  */
2219 static int dp_soc_cmn_setup(struct dp_soc *soc)
2220 {
2221 	int i;
2222 	struct hal_reo_params reo_params;
2223 	int tx_ring_size;
2224 	int tx_comp_ring_size;
2225 	int reo_dst_ring_size;
2226 	uint32_t entries;
2227 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
2228 
2229 	if (qdf_atomic_read(&soc->cmn_init_done))
2230 		return 0;
2231 
2232 	if (dp_hw_link_desc_pool_setup(soc))
2233 		goto fail1;
2234 
2235 	soc_cfg_ctx = soc->wlan_cfg_ctx;
2236 	/* Setup SRNG rings */
2237 	/* Common rings */
2238 	if (dp_srng_setup(soc, &soc->wbm_desc_rel_ring, SW2WBM_RELEASE, 0, 0,
2239 		wlan_cfg_get_dp_soc_wbm_release_ring_size(soc_cfg_ctx))) {
2240 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2241 			FL("dp_srng_setup failed for wbm_desc_rel_ring"));
2242 		goto fail1;
2243 	}
2244 
2245 
2246 	soc->num_tcl_data_rings = 0;
2247 	/* Tx data rings */
2248 	if (!wlan_cfg_per_pdev_tx_ring(soc_cfg_ctx)) {
2249 		soc->num_tcl_data_rings =
2250 			wlan_cfg_num_tcl_data_rings(soc_cfg_ctx);
2251 		tx_comp_ring_size =
2252 			wlan_cfg_tx_comp_ring_size(soc_cfg_ctx);
2253 		tx_ring_size =
2254 			wlan_cfg_tx_ring_size(soc_cfg_ctx);
2255 		for (i = 0; i < soc->num_tcl_data_rings; i++) {
2256 			if (dp_srng_setup(soc, &soc->tcl_data_ring[i],
2257 				TCL_DATA, i, 0, tx_ring_size)) {
2258 				QDF_TRACE(QDF_MODULE_ID_DP,
2259 					QDF_TRACE_LEVEL_ERROR,
2260 					FL("dp_srng_setup failed for tcl_data_ring[%d]"), i);
2261 				goto fail1;
2262 			}
2263 			/*
2264 			 * TBD: Set IPA WBM ring size with ini IPA UC tx buffer
2265 			 * count
2266 			 */
2267 			if (dp_srng_setup(soc, &soc->tx_comp_ring[i],
2268 				WBM2SW_RELEASE, i, 0, tx_comp_ring_size)) {
2269 				QDF_TRACE(QDF_MODULE_ID_DP,
2270 					QDF_TRACE_LEVEL_ERROR,
2271 					FL("dp_srng_setup failed for tx_comp_ring[%d]"), i);
2272 				goto fail1;
2273 			}
2274 		}
2275 	} else {
2276 		/* This will be incremented during per pdev ring setup */
2277 		soc->num_tcl_data_rings = 0;
2278 	}
2279 
2280 	if (dp_tx_soc_attach(soc)) {
2281 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2282 				FL("dp_tx_soc_attach failed"));
2283 		goto fail1;
2284 	}
2285 
2286 	entries = wlan_cfg_get_dp_soc_tcl_cmd_ring_size(soc_cfg_ctx);
2287 	/* TCL command and status rings */
2288 	if (dp_srng_setup(soc, &soc->tcl_cmd_ring, TCL_CMD, 0, 0,
2289 			  entries)) {
2290 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2291 			FL("dp_srng_setup failed for tcl_cmd_ring"));
2292 		goto fail1;
2293 	}
2294 
2295 	entries = wlan_cfg_get_dp_soc_tcl_status_ring_size(soc_cfg_ctx);
2296 	if (dp_srng_setup(soc, &soc->tcl_status_ring, TCL_STATUS, 0, 0,
2297 			  entries)) {
2298 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2299 			FL("dp_srng_setup failed for tcl_status_ring"));
2300 		goto fail1;
2301 	}
2302 
2303 	reo_dst_ring_size = wlan_cfg_get_reo_dst_ring_size(soc->wlan_cfg_ctx);
2304 
2305 	/* TBD: call dp_tx_init to setup Tx SW descriptors and MSDU extension
2306 	 * descriptors
2307 	 */
2308 
2309 	/* Rx data rings */
2310 	if (!wlan_cfg_per_pdev_rx_ring(soc_cfg_ctx)) {
2311 		soc->num_reo_dest_rings =
2312 			wlan_cfg_num_reo_dest_rings(soc_cfg_ctx);
2313 		QDF_TRACE(QDF_MODULE_ID_DP,
2314 			QDF_TRACE_LEVEL_INFO,
2315 			FL("num_reo_dest_rings %d"), soc->num_reo_dest_rings);
2316 		for (i = 0; i < soc->num_reo_dest_rings; i++) {
2317 			if (dp_srng_setup(soc, &soc->reo_dest_ring[i], REO_DST,
2318 				i, 0, reo_dst_ring_size)) {
2319 				QDF_TRACE(QDF_MODULE_ID_DP,
2320 					  QDF_TRACE_LEVEL_ERROR,
2321 					  FL(RNG_ERR "reo_dest_ring [%d]"), i);
2322 				goto fail1;
2323 			}
2324 		}
2325 	} else {
2326 		/* This will be incremented during per pdev ring setup */
2327 		soc->num_reo_dest_rings = 0;
2328 	}
2329 
2330 	entries = wlan_cfg_get_dp_soc_rxdma_err_dst_ring_size(soc_cfg_ctx);
2331 	/* LMAC RxDMA to SW Rings configuration */
2332 	if (!wlan_cfg_per_pdev_lmac_ring(soc_cfg_ctx)) {
2333 		/* Only valid for MCL */
2334 		struct dp_pdev *pdev = soc->pdev_list[0];
2335 
2336 		for (i = 0; i < MAX_RX_MAC_RINGS; i++) {
2337 			if (dp_srng_setup(soc, &pdev->rxdma_err_dst_ring[i],
2338 					  RXDMA_DST, 0, i,
2339 					  entries)) {
2340 				QDF_TRACE(QDF_MODULE_ID_DP,
2341 					  QDF_TRACE_LEVEL_ERROR,
2342 					  FL(RNG_ERR "rxdma_err_dst_ring"));
2343 				goto fail1;
2344 			}
2345 		}
2346 	}
2347 	/* TBD: call dp_rx_init to setup Rx SW descriptors */
2348 
2349 	/* REO reinjection ring */
2350 	entries = wlan_cfg_get_dp_soc_reo_reinject_ring_size(soc_cfg_ctx);
2351 	if (dp_srng_setup(soc, &soc->reo_reinject_ring, REO_REINJECT, 0, 0,
2352 			  entries)) {
2353 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2354 			  FL("dp_srng_setup failed for reo_reinject_ring"));
2355 		goto fail1;
2356 	}
2357 
2358 
2359 	/* Rx release ring */
2360 	if (dp_srng_setup(soc, &soc->rx_rel_ring, WBM2SW_RELEASE, 3, 0,
2361 		wlan_cfg_get_dp_soc_rx_release_ring_size(soc_cfg_ctx))) {
2362 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2363 			  FL("dp_srng_setup failed for rx_rel_ring"));
2364 		goto fail1;
2365 	}
2366 
2367 
2368 	/* Rx exception ring */
2369 	entries = wlan_cfg_get_dp_soc_reo_exception_ring_size(soc_cfg_ctx);
2370 	if (dp_srng_setup(soc, &soc->reo_exception_ring,
2371 			  REO_EXCEPTION, 0, MAX_REO_DEST_RINGS, entries)) {
2372 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2373 			  FL("dp_srng_setup failed for reo_exception_ring"));
2374 		goto fail1;
2375 	}
2376 
2377 
2378 	/* REO command and status rings */
2379 	if (dp_srng_setup(soc, &soc->reo_cmd_ring, REO_CMD, 0, 0,
2380 		wlan_cfg_get_dp_soc_reo_cmd_ring_size(soc_cfg_ctx))) {
2381 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2382 			FL("dp_srng_setup failed for reo_cmd_ring"));
2383 		goto fail1;
2384 	}
2385 
2386 	hal_reo_init_cmd_ring(soc->hal_soc, soc->reo_cmd_ring.hal_srng);
2387 	TAILQ_INIT(&soc->rx.reo_cmd_list);
2388 	qdf_spinlock_create(&soc->rx.reo_cmd_lock);
2389 
2390 	if (dp_srng_setup(soc, &soc->reo_status_ring, REO_STATUS, 0, 0,
2391 		wlan_cfg_get_dp_soc_reo_status_ring_size(soc_cfg_ctx))) {
2392 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2393 			FL("dp_srng_setup failed for reo_status_ring"));
2394 		goto fail1;
2395 	}
2396 
2397 	qdf_spinlock_create(&soc->ast_lock);
2398 	dp_soc_wds_attach(soc);
2399 
2400 	/* Reset the cpu ring map if radio is NSS offloaded */
2401 	if (wlan_cfg_get_dp_soc_nss_cfg(soc_cfg_ctx)) {
2402 		dp_soc_reset_cpu_ring_map(soc);
2403 		dp_soc_reset_intr_mask(soc);
2404 	}
2405 
2406 	/* Setup HW REO */
2407 	qdf_mem_zero(&reo_params, sizeof(reo_params));
2408 
2409 	if (wlan_cfg_is_rx_hash_enabled(soc_cfg_ctx)) {
2410 
2411 		/*
2412 		 * Reo ring remap is not required if both radios
2413 		 * are offloaded to NSS
2414 		 */
2415 		if (!dp_reo_remap_config(soc,
2416 					&reo_params.remap1,
2417 					&reo_params.remap2))
2418 			goto out;
2419 
2420 		reo_params.rx_hash_enabled = true;
2421 	}
2422 
2423 	/* setup the global rx defrag waitlist */
2424 	TAILQ_INIT(&soc->rx.defrag.waitlist);
2425 	soc->rx.defrag.timeout_ms =
2426 		wlan_cfg_get_rx_defrag_min_timeout(soc_cfg_ctx);
2427 	soc->rx.flags.defrag_timeout_check =
2428 		wlan_cfg_get_defrag_timeout_check(soc_cfg_ctx);
2429 	qdf_spinlock_create(&soc->rx.defrag.defrag_lock);
2430 
2431 out:
2432 	/*
2433 	 * set the fragment destination ring
2434 	 */
2435 	dp_reo_frag_dst_set(soc, &reo_params.frag_dst_ring);
2436 
2437 	hal_reo_setup(soc->hal_soc, &reo_params);
2438 
2439 	qdf_atomic_set(&soc->cmn_init_done, 1);
2440 	qdf_nbuf_queue_init(&soc->htt_stats.msg);
2441 	return 0;
2442 fail1:
2443 	/*
2444 	 * Cleanup will be done as part of soc_detach, which will
2445 	 * be called on pdev attach failure
2446 	 */
2447 	return QDF_STATUS_E_FAILURE;
2448 }
2449 
2450 static void dp_pdev_detach_wifi3(struct cdp_pdev *txrx_pdev, int force);
2451 
2452 static void dp_lro_hash_setup(struct dp_soc *soc)
2453 {
2454 	struct cdp_lro_hash_config lro_hash;
2455 
2456 	if (!wlan_cfg_is_lro_enabled(soc->wlan_cfg_ctx) &&
2457 		!wlan_cfg_is_rx_hash_enabled(soc->wlan_cfg_ctx)) {
2458 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2459 			 FL("LRO and RX hash are both disabled"));
2460 		return;
2461 	}
2462 
2463 	qdf_mem_zero(&lro_hash, sizeof(lro_hash));
2464 
2465 	if (wlan_cfg_is_lro_enabled(soc->wlan_cfg_ctx)) {
2466 		lro_hash.lro_enable = 1;
2467 		lro_hash.tcp_flag = QDF_TCPHDR_ACK;
2468 		lro_hash.tcp_flag_mask = QDF_TCPHDR_FIN | QDF_TCPHDR_SYN |
2469 			 QDF_TCPHDR_RST | QDF_TCPHDR_ACK | QDF_TCPHDR_URG |
2470 			 QDF_TCPHDR_ECE | QDF_TCPHDR_CWR;
2471 	}
2472 
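	/*
	 * The Toeplitz seeds are regenerated on every setup call, so the
	 * flow-to-ring hashing differs across attaches; the filled-in
	 * config is handed to the control path via the lro_hash_config
	 * callback below.
	 */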
2473 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW, FL("enabled"));
2474 	qdf_get_random_bytes(lro_hash.toeplitz_hash_ipv4,
2475 		 (sizeof(lro_hash.toeplitz_hash_ipv4[0]) *
2476 		 LRO_IPV4_SEED_ARR_SZ));
2477 	qdf_get_random_bytes(lro_hash.toeplitz_hash_ipv6,
2478 		 (sizeof(lro_hash.toeplitz_hash_ipv6[0]) *
2479 		 LRO_IPV6_SEED_ARR_SZ));
2480 
2481 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW,
2482 		 "lro_hash: lro_enable: 0x%x tcp_flag 0x%x tcp_flag_mask 0x%x",
2483 		 lro_hash.lro_enable, lro_hash.tcp_flag,
2484 		 lro_hash.tcp_flag_mask);
2485 
2486 	qdf_trace_hex_dump(QDF_MODULE_ID_DP,
2487 		 QDF_TRACE_LEVEL_ERROR,
2488 		 (void *)lro_hash.toeplitz_hash_ipv4,
2489 		 (sizeof(lro_hash.toeplitz_hash_ipv4[0]) *
2490 		 LRO_IPV4_SEED_ARR_SZ));
2491 
2492 	qdf_trace_hex_dump(QDF_MODULE_ID_DP,
2493 		 QDF_TRACE_LEVEL_ERROR,
2494 		 (void *)lro_hash.toeplitz_hash_ipv6,
2495 		 (sizeof(lro_hash.toeplitz_hash_ipv6[0]) *
2496 		 LRO_IPV6_SEED_ARR_SZ));
2497 
2498 	qdf_assert(soc->cdp_soc.ol_ops->lro_hash_config);
2499 
2500 	if (soc->cdp_soc.ol_ops->lro_hash_config)
2501 		(void)soc->cdp_soc.ol_ops->lro_hash_config
2502 			(soc->ctrl_psoc, &lro_hash);
2503 }
2504 
2505 /*
2506 * dp_rxdma_ring_setup() - configure the RX DMA rings
2507 * @soc: data path SoC handle
2508 * @pdev: Physical device handle
2509 *
2510 * Return: 0 - success, > 0 - failure
2511 */
2512 #ifdef QCA_HOST2FW_RXBUF_RING
2513 static int dp_rxdma_ring_setup(struct dp_soc *soc,
2514 	 struct dp_pdev *pdev)
2515 {
2516 	struct wlan_cfg_dp_pdev_ctxt *pdev_cfg_ctx;
2517 	int max_mac_rings;
2518 	int i;
2519 
2520 	pdev_cfg_ctx = pdev->wlan_cfg_ctx;
2521 	max_mac_rings = wlan_cfg_get_num_mac_rings(pdev_cfg_ctx);
2522 
2523 	for (i = 0; i < max_mac_rings; i++) {
2524 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
2525 			 "%s: pdev_id %d mac_id %d",
2526 			 __func__, pdev->pdev_id, i);
2527 		if (dp_srng_setup(soc, &pdev->rx_mac_buf_ring[i],
2528 			RXDMA_BUF, 1, i,
2529 			wlan_cfg_get_rx_dma_buf_ring_size(pdev_cfg_ctx))) {
2530 			QDF_TRACE(QDF_MODULE_ID_DP,
2531 				 QDF_TRACE_LEVEL_ERROR,
2532 				 FL("failed rx mac ring setup"));
2533 			return QDF_STATUS_E_FAILURE;
2534 		}
2535 	}
2536 	return QDF_STATUS_SUCCESS;
2537 }
2538 #else
2539 static int dp_rxdma_ring_setup(struct dp_soc *soc,
2540 	 struct dp_pdev *pdev)
2541 {
2542 	return QDF_STATUS_SUCCESS;
2543 }
2544 #endif
2545 
2546 /**
2547  * dp_dscp_tid_map_setup(): Initialize the dscp-tid maps
2548  * @pdev - DP_PDEV handle
2549  *
2550  * Return: void
2551  */
2552 static inline void
2553 dp_dscp_tid_map_setup(struct dp_pdev *pdev)
2554 {
2555 	uint8_t map_id;
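
	/* Copy the default map into every SW map, then program the HW maps */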
2556 	for (map_id = 0; map_id < DP_MAX_TID_MAPS; map_id++) {
2557 		qdf_mem_copy(pdev->dscp_tid_map[map_id], default_dscp_tid_map,
2558 				sizeof(default_dscp_tid_map));
2559 	}
2560 	for (map_id = 0; map_id < HAL_MAX_HW_DSCP_TID_MAPS; map_id++) {
2561 		hal_tx_set_dscp_tid_map(pdev->soc->hal_soc,
2562 				pdev->dscp_tid_map[map_id],
2563 				map_id);
2564 	}
2565 }
2566 
2567 #ifdef QCA_SUPPORT_SON
2568 /**
2569  * dp_mark_peer_inact(): Update peer inactivity status
2570  * @peer_handle - datapath peer handle
 * @inactive - true to mark the peer inactive, false to mark it active
2571  *
2572  * Return: void
2573  */
2574 void dp_mark_peer_inact(void *peer_handle, bool inactive)
2575 {
2576 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
2577 	struct dp_pdev *pdev;
2578 	struct dp_soc *soc;
2579 	bool inactive_old;
2580 
2581 	if (!peer)
2582 		return;
2583 
2584 	pdev = peer->vdev->pdev;
2585 	soc = pdev->soc;
2586 
2587 	inactive_old = peer->peer_bs_inact_flag == 1;
2588 	if (!inactive)
2589 		peer->peer_bs_inact = soc->pdev_bs_inact_reload;
2590 	peer->peer_bs_inact_flag = inactive ? 1 : 0;
2591 
2592 	if (inactive_old != inactive) {
2593 		/**
2594 		 * Note: a node lookup can happen in RX datapath context
2595 		 * when a node changes from inactive to active (at most once
2596 		 * per inactivity timeout threshold)
2597 		 */
2598 		if (soc->cdp_soc.ol_ops->record_act_change) {
2599 			soc->cdp_soc.ol_ops->record_act_change(
2600 					(void *)pdev->ctrl_pdev,
2601 					peer->mac_addr.raw, !inactive);
2602 		}
2603 	}
2604 }
2605 
2606 /**
2607  * dp_txrx_peer_find_inact_timeout_handler(): Inactivity timeout function
2608  *
2609  * Periodically checks the inactivity status
2610  */
2611 static os_timer_func(dp_txrx_peer_find_inact_timeout_handler)
2612 {
2613 	struct dp_pdev *pdev;
2614 	struct dp_vdev *vdev;
2615 	struct dp_peer *peer;
2616 	struct dp_soc *soc;
2617 	int i;
2618 
2619 	OS_GET_TIMER_ARG(soc, struct dp_soc *);
2620 
2621 	qdf_spin_lock(&soc->peer_ref_mutex);
2622 
2623 	for (i = 0; i < soc->pdev_count; i++) {
2624 		pdev = soc->pdev_list[i];
2625 		qdf_spin_lock_bh(&pdev->vdev_list_lock);
2626 		TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
2627 			if (vdev->opmode != wlan_op_mode_ap)
2628 				continue;
2629 
2630 			TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
2631 				if (!peer->authorize) {
2632 					/**
2633 					 * Inactivity check only interested in
2634 					 * connected node
2635 					 */
2636 					continue;
2637 				}
2638 				if (peer->peer_bs_inact > soc->pdev_bs_inact_reload) {
2639 					/**
2640 					 * This check ensures we do not wait extra long
2641 					 * due to the potential race condition
2642 					 */
2643 					peer->peer_bs_inact = soc->pdev_bs_inact_reload;
2644 				}
2645 				if (peer->peer_bs_inact > 0) {
2646 					/* Do not let it wrap around */
2647 					peer->peer_bs_inact--;
2648 				}
2649 				if (peer->peer_bs_inact == 0)
2650 					dp_mark_peer_inact(peer, true);
2651 			}
2652 		}
2653 		qdf_spin_unlock_bh(&pdev->vdev_list_lock);
2654 	}
2655 
2656 	qdf_spin_unlock(&soc->peer_ref_mutex);
2657 	qdf_timer_mod(&soc->pdev_bs_inact_timer,
2658 		      soc->pdev_bs_inact_interval * 1000);
2659 }
2660 
2661 
2662 /**
2663  * dp_free_inact_timer(): free inact timer
2664  * @soc - datapath SOC handle
2665  *
2666  * Return: void
2667  */
2668 void dp_free_inact_timer(struct dp_soc *soc)
2669 {
2670 	qdf_timer_free(&soc->pdev_bs_inact_timer);
2671 }
2672 #else
2673 
2674 void dp_mark_peer_inact(void *peer, bool inactive)
2675 {
2676 	return;
2677 }
2678 
2679 void dp_free_inact_timer(struct dp_soc *soc)
2680 {
2681 	return;
2682 }
2683 
2684 #endif
2685 
2686 #ifdef IPA_OFFLOAD
2687 /**
2688  * dp_setup_ipa_rx_refill_buf_ring - Setup second Rx refill buffer ring
2689  * @soc: data path instance
2690  * @pdev: core txrx pdev context
2691  *
2692  * Return: QDF_STATUS_SUCCESS: success
2693  *         QDF_STATUS_E_RESOURCES: Error return
2694  */
2695 static int dp_setup_ipa_rx_refill_buf_ring(struct dp_soc *soc,
2696 					   struct dp_pdev *pdev)
2697 {
2698 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
2699 	int entries;
2700 
2701 	soc_cfg_ctx = soc->wlan_cfg_ctx;
2702 	entries = wlan_cfg_get_dp_soc_rxdma_refill_ring_size(soc_cfg_ctx);
2703 
2704 	/* Setup second Rx refill buffer ring */
2705 	if (dp_srng_setup(soc, &pdev->rx_refill_buf_ring2, RXDMA_BUF,
2706 			  IPA_RX_REFILL_BUF_RING_IDX,
2707 			  pdev->pdev_id,
2708 			  entries)) {
2709 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2710 			FL("dp_srng_setup failed second rx refill ring"));
2711 		return QDF_STATUS_E_FAILURE;
2712 	}
2713 	return QDF_STATUS_SUCCESS;
2714 }
2715 
2716 /**
2717  * dp_cleanup_ipa_rx_refill_buf_ring - Cleanup second Rx refill buffer ring
2718  * @soc: data path instance
2719  * @pdev: core txrx pdev context
2720  *
2721  * Return: void
2722  */
2723 static void dp_cleanup_ipa_rx_refill_buf_ring(struct dp_soc *soc,
2724 					      struct dp_pdev *pdev)
2725 {
2726 	dp_srng_cleanup(soc, &pdev->rx_refill_buf_ring2, RXDMA_BUF,
2727 			IPA_RX_REFILL_BUF_RING_IDX);
2728 }
2729 
2730 #else
2731 static int dp_setup_ipa_rx_refill_buf_ring(struct dp_soc *soc,
2732 					   struct dp_pdev *pdev)
2733 {
2734 	return QDF_STATUS_SUCCESS;
2735 }
2736 
2737 static void dp_cleanup_ipa_rx_refill_buf_ring(struct dp_soc *soc,
2738 					      struct dp_pdev *pdev)
2739 {
2740 }
2741 #endif
2742 
2743 #ifndef QCA_WIFI_QCA6390
2744 static
2745 QDF_STATUS dp_mon_rings_setup(struct dp_soc *soc, struct dp_pdev *pdev)
2746 {
2747 	int mac_id = 0;
2748 	int pdev_id = pdev->pdev_id;
2749 	int entries;
2750 	struct wlan_cfg_dp_pdev_ctxt *pdev_cfg_ctx;
2751 
2752 	pdev_cfg_ctx = pdev->wlan_cfg_ctx;
2753 
2754 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
2755 		int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, pdev_id);
2756 
2757 		entries = wlan_cfg_get_dma_mon_buf_ring_size(pdev_cfg_ctx);
2758 		if (dp_srng_setup(soc, &pdev->rxdma_mon_buf_ring[mac_id],
2759 				  RXDMA_MONITOR_BUF, 0, mac_for_pdev,
2760 				  entries)) {
2761 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2762 				  FL(RNG_ERR "rxdma_mon_buf_ring "));
2763 			return QDF_STATUS_E_NOMEM;
2764 		}
2765 
2766 		entries = wlan_cfg_get_dma_mon_dest_ring_size(pdev_cfg_ctx);
2767 		if (dp_srng_setup(soc, &pdev->rxdma_mon_dst_ring[mac_id],
2768 				  RXDMA_MONITOR_DST, 0, mac_for_pdev,
2769 				  entries)) {
2770 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2771 				  FL(RNG_ERR "rxdma_mon_dst_ring"));
2772 			return QDF_STATUS_E_NOMEM;
2773 		}
2774 
2775 		entries = wlan_cfg_get_dma_mon_stat_ring_size(pdev_cfg_ctx);
2776 		if (dp_srng_setup(soc, &pdev->rxdma_mon_status_ring[mac_id],
2777 				  RXDMA_MONITOR_STATUS, 0, mac_for_pdev,
2778 				  entries)) {
2779 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2780 				  FL(RNG_ERR "rxdma_mon_status_ring"));
2781 			return QDF_STATUS_E_NOMEM;
2782 		}
2783 
2784 		entries = wlan_cfg_get_dma_mon_desc_ring_size(pdev_cfg_ctx);
2785 		if (dp_srng_setup(soc, &pdev->rxdma_mon_desc_ring[mac_id],
2786 				  RXDMA_MONITOR_DESC, 0, mac_for_pdev,
2787 				  entries)) {
2788 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2789 				  FL(RNG_ERR "rxdma_mon_desc_ring"));
2790 			return QDF_STATUS_E_NOMEM;
2791 		}
2792 	}
2793 	return QDF_STATUS_SUCCESS;
2794 }
2795 #else
2796 static QDF_STATUS dp_mon_rings_setup(struct dp_soc *soc, struct dp_pdev *pdev)
2797 {
2798 	return QDF_STATUS_SUCCESS;
2799 }
2800 #endif
2801 
2802 /*
2803 * dp_pdev_attach_wifi3() - attach txrx pdev
2804 * @txrx_soc: Datapath SOC handle
2805 * @ctrl_pdev: Opaque PDEV object
2806 * @htc_handle: HTC handle for host-target interface
2807 * @qdf_osdev: QDF OS device
2808 * @pdev_id: PDEV ID
2809 *
2810 * Return: DP PDEV handle on success, NULL on failure
2811 */
2812 static struct cdp_pdev *dp_pdev_attach_wifi3(struct cdp_soc_t *txrx_soc,
2813 	struct cdp_ctrl_objmgr_pdev *ctrl_pdev,
2814 	HTC_HANDLE htc_handle, qdf_device_t qdf_osdev, uint8_t pdev_id)
2815 {
2816 	int tx_ring_size;
2817 	int tx_comp_ring_size;
2818 	int reo_dst_ring_size;
2819 	int entries;
2820 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
2821 	int nss_cfg;
2822 
2823 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
2824 	struct dp_pdev *pdev = qdf_mem_malloc(sizeof(*pdev));
2825 
2826 	if (!pdev) {
2827 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2828 			FL("DP PDEV memory allocation failed"));
2829 		goto fail0;
2830 	}
2831 
2832 	soc_cfg_ctx = soc->wlan_cfg_ctx;
2833 	pdev->wlan_cfg_ctx = wlan_cfg_pdev_attach(soc->ctrl_psoc);
2834 
2835 	if (!pdev->wlan_cfg_ctx) {
2836 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2837 			FL("pdev cfg_attach failed"));
2838 
2839 		qdf_mem_free(pdev);
2840 		goto fail0;
2841 	}
2842 
2843 	/*
2844 	 * set nss pdev config based on soc config
2845 	 */
2846 	nss_cfg = wlan_cfg_get_dp_soc_nss_cfg(soc_cfg_ctx);
2847 	wlan_cfg_set_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx,
2848 			(nss_cfg & (1 << pdev_id)));
2849 
2850 	pdev->soc = soc;
2851 	pdev->ctrl_pdev = ctrl_pdev;
2852 	pdev->pdev_id = pdev_id;
2853 	soc->pdev_list[pdev_id] = pdev;
2854 	soc->pdev_count++;
2855 
2856 	TAILQ_INIT(&pdev->vdev_list);
2857 	qdf_spinlock_create(&pdev->vdev_list_lock);
2858 	pdev->vdev_count = 0;
2859 
2860 	qdf_spinlock_create(&pdev->tx_mutex);
2861 	qdf_spinlock_create(&pdev->neighbour_peer_mutex);
2862 	TAILQ_INIT(&pdev->neighbour_peers_list);
2863 	pdev->neighbour_peers_added = false;
2864 
2865 	if (dp_soc_cmn_setup(soc)) {
2866 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2867 			FL("dp_soc_cmn_setup failed"));
2868 		goto fail1;
2869 	}
2870 
2871 	/* Setup per PDEV TCL rings if configured */
2872 	if (wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
2873 		tx_ring_size =
2874 			wlan_cfg_tx_ring_size(soc_cfg_ctx);
2875 		tx_comp_ring_size =
2876 			wlan_cfg_tx_comp_ring_size(soc_cfg_ctx);
2877 
2878 		if (dp_srng_setup(soc, &soc->tcl_data_ring[pdev_id], TCL_DATA,
2879 			pdev_id, pdev_id, tx_ring_size)) {
2880 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2881 				FL("dp_srng_setup failed for tcl_data_ring"));
2882 			goto fail1;
2883 		}
2884 		if (dp_srng_setup(soc, &soc->tx_comp_ring[pdev_id],
2885 			WBM2SW_RELEASE, pdev_id, pdev_id, tx_comp_ring_size)) {
2886 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2887 				FL("dp_srng_setup failed for tx_comp_ring"));
2888 			goto fail1;
2889 		}
2890 		soc->num_tcl_data_rings++;
2891 	}
2892 
2893 	/* Tx specific init */
2894 	if (dp_tx_pdev_attach(pdev)) {
2895 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2896 			FL("dp_tx_pdev_attach failed"));
2897 		goto fail1;
2898 	}
2899 
2900 	reo_dst_ring_size = wlan_cfg_get_reo_dst_ring_size(soc->wlan_cfg_ctx);
2901 	/* Setup per PDEV REO rings if configured */
2902 	if (wlan_cfg_per_pdev_rx_ring(soc_cfg_ctx)) {
2903 		if (dp_srng_setup(soc, &soc->reo_dest_ring[pdev_id], REO_DST,
2904 			pdev_id, pdev_id, reo_dst_ring_size)) {
2905 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2906 				FL("dp_srng_setup failed for reo_dest_ring"));
2907 			goto fail1;
2908 		}
2909 		soc->num_reo_dest_rings++;
2910 
2911 	}
2912 	if (dp_srng_setup(soc, &pdev->rx_refill_buf_ring, RXDMA_BUF, 0, pdev_id,
2913 		wlan_cfg_get_dp_soc_rxdma_refill_ring_size(soc_cfg_ctx))) {
2914 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2915 			 FL("dp_srng_setup failed rx refill ring"));
2916 		goto fail1;
2917 	}
2918 
2919 	if (dp_rxdma_ring_setup(soc, pdev)) {
2920 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2921 			 FL("RXDMA ring config failed"));
2922 		goto fail1;
2923 	}
2924 
2925 	if (dp_mon_rings_setup(soc, pdev)) {
2926 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2927 			  FL("MONITOR rings setup failed"));
2928 		goto fail1;
2929 	}
2930 
2931 	entries = wlan_cfg_get_dp_soc_rxdma_err_dst_ring_size(soc_cfg_ctx);
2932 	if (wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx)) {
2933 		if (dp_srng_setup(soc, &pdev->rxdma_err_dst_ring[0], RXDMA_DST,
2934 				  0, pdev_id,
2935 				  entries)) {
2936 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2937 				  FL(RNG_ERR "rxdma_err_dst_ring"));
2938 			goto fail1;
2939 		}
2940 	}
2941 
2942 	if (dp_setup_ipa_rx_refill_buf_ring(soc, pdev))
2943 		goto fail1;
2944 
2945 	if (dp_ipa_ring_resource_setup(soc, pdev))
2946 		goto fail1;
2947 
2948 	if (dp_ipa_uc_attach(soc, pdev) != QDF_STATUS_SUCCESS) {
2949 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2950 			FL("dp_ipa_uc_attach failed"));
2951 		goto fail1;
2952 	}
2953 
2954 	/* Rx specific init */
2955 	if (dp_rx_pdev_attach(pdev)) {
2956 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2957 			FL("dp_rx_pdev_attach failed"));
2958 		goto fail0;
2959 	}
2960 	DP_STATS_INIT(pdev);
2961 
2962 	/* Monitor filter init */
2963 	pdev->mon_filter_mode = MON_FILTER_ALL;
2964 	pdev->fp_mgmt_filter = FILTER_MGMT_ALL;
2965 	pdev->fp_ctrl_filter = FILTER_CTRL_ALL;
2966 	pdev->fp_data_filter = FILTER_DATA_ALL;
2967 	pdev->mo_mgmt_filter = FILTER_MGMT_ALL;
2968 	pdev->mo_ctrl_filter = FILTER_CTRL_ALL;
2969 	pdev->mo_data_filter = FILTER_DATA_ALL;
2970 
2971 	dp_local_peer_id_pool_init(pdev);
2972 
2973 	dp_dscp_tid_map_setup(pdev);
2974 
2975 	/* Rx monitor mode specific init */
2976 	if (dp_rx_pdev_mon_attach(pdev)) {
2977 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
2978 				"dp_rx_pdev_mon_attach failed");
2979 		goto fail1;
2980 	}
2981 
2982 	if (dp_wdi_event_attach(pdev)) {
2983 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
2984 				"dp_wdi_event_attach failed");
2985 		goto fail1;
2986 	}
2987 
2988 	/* set the reo destination during initialization */
2989 	pdev->reo_dest = pdev->pdev_id + 1;
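	/* reo_dest is 1-based: pdev 0 maps to the first REO destination ring */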
2990 
2991 	/*
2992 	 * initialize ppdu tlv list
2993 	 */
2994 	TAILQ_INIT(&pdev->ppdu_info_list);
2995 	pdev->tlv_count = 0;
2996 	pdev->list_depth = 0;
2997 
2998 	return (struct cdp_pdev *)pdev;
2999 
3000 fail1:
3001 	dp_pdev_detach_wifi3((struct cdp_pdev *)pdev, 0);
3002 
3003 fail0:
3004 	return NULL;
3005 }
3006 
3007 /*
3008 * dp_rxdma_ring_cleanup() - cleanup the RX DMA rings
3009 * @soc: data path SoC handle
3010 * @pdev: Physical device handle
3011 *
3012 * Return: void
3013 */
3014 #ifdef QCA_HOST2FW_RXBUF_RING
3015 static void dp_rxdma_ring_cleanup(struct dp_soc *soc,
3016 	 struct dp_pdev *pdev)
3017 {
3018 	int max_mac_rings =
3019 		 wlan_cfg_get_num_mac_rings(pdev->wlan_cfg_ctx);
3020 	int i;
3021 
3022 	max_mac_rings = max_mac_rings < MAX_RX_MAC_RINGS ?
3023 				max_mac_rings : MAX_RX_MAC_RINGS;
3024 	for (i = 0; i < MAX_RX_MAC_RINGS; i++)
3025 		dp_srng_cleanup(soc, &pdev->rx_mac_buf_ring[i],
3026 			 RXDMA_BUF, 1);
3027 
3028 	qdf_timer_free(&soc->mon_reap_timer);
3029 }
3030 #else
3031 static void dp_rxdma_ring_cleanup(struct dp_soc *soc,
3032 	 struct dp_pdev *pdev)
3033 {
3034 }
3035 #endif
3036 
3037 /*
3038  * dp_neighbour_peers_detach() - Detach neighbour peers(nac clients)
3039  * @pdev: device object
3040  *
3041  * Return: void
3042  */
3043 static void dp_neighbour_peers_detach(struct dp_pdev *pdev)
3044 {
3045 	struct dp_neighbour_peer *peer = NULL;
3046 	struct dp_neighbour_peer *temp_peer = NULL;
3047 
3048 	TAILQ_FOREACH_SAFE(peer, &pdev->neighbour_peers_list,
3049 			neighbour_peer_list_elem, temp_peer) {
3050 		/* delete this peer from the list */
3051 		TAILQ_REMOVE(&pdev->neighbour_peers_list,
3052 				peer, neighbour_peer_list_elem);
3053 		qdf_mem_free(peer);
3054 	}
3055 
3056 	qdf_spinlock_destroy(&pdev->neighbour_peer_mutex);
3057 }
3058 
3059 /**
3060 * dp_htt_ppdu_stats_detach() - detach stats resources
3061 * @pdev: Datapath PDEV handle
3062 *
3063 * Return: void
3064 */
3065 static void dp_htt_ppdu_stats_detach(struct dp_pdev *pdev)
3066 {
3067 	struct ppdu_info *ppdu_info, *ppdu_info_next;
3068 
3069 	TAILQ_FOREACH_SAFE(ppdu_info, &pdev->ppdu_info_list,
3070 			ppdu_info_list_elem, ppdu_info_next) {
3071 		if (!ppdu_info)
3072 			break;
3073 		qdf_assert_always(ppdu_info->nbuf);
3074 		qdf_nbuf_free(ppdu_info->nbuf);
3075 		qdf_mem_free(ppdu_info);
3076 	}
3077 }
3078 
3079 #ifndef QCA_WIFI_QCA6390
3080 static
3081 void dp_mon_ring_deinit(struct dp_soc *soc, struct dp_pdev *pdev,
3082 			int mac_id)
3083 {
3084 		dp_srng_cleanup(soc, &pdev->rxdma_mon_buf_ring[mac_id],
3085 				RXDMA_MONITOR_BUF, 0);
3086 		dp_srng_cleanup(soc, &pdev->rxdma_mon_dst_ring[mac_id],
3087 				RXDMA_MONITOR_DST, 0);
3088 
3089 		dp_srng_cleanup(soc, &pdev->rxdma_mon_status_ring[mac_id],
3090 				RXDMA_MONITOR_STATUS, 0);
3091 
3092 		dp_srng_cleanup(soc, &pdev->rxdma_mon_desc_ring[mac_id],
3093 				RXDMA_MONITOR_DESC, 0);
3094 		dp_srng_cleanup(soc, &pdev->rxdma_err_dst_ring[mac_id],
3095 				RXDMA_DST, 0);
3096 }
3097 #else
3098 static void dp_mon_ring_deinit(struct dp_soc *soc, struct dp_pdev *pdev,
3099 			       int mac_id)
3100 {
3101 }
3102 #endif
3103 
3104 /*
3105 * dp_pdev_detach_wifi3() - detach txrx pdev
3106 * @txrx_pdev: Datapath PDEV handle
3107 * @force: Force detach
3108 *
3109 */
3110 static void dp_pdev_detach_wifi3(struct cdp_pdev *txrx_pdev, int force)
3111 {
3112 	struct dp_pdev *pdev = (struct dp_pdev *)txrx_pdev;
3113 	struct dp_soc *soc = pdev->soc;
3114 	qdf_nbuf_t curr_nbuf, next_nbuf;
3115 	int mac_id;
3116 
3117 	dp_wdi_event_detach(pdev);
3118 
3119 	dp_tx_pdev_detach(pdev);
3120 
3121 	if (wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
3122 		dp_srng_cleanup(soc, &soc->tcl_data_ring[pdev->pdev_id],
3123 			TCL_DATA, pdev->pdev_id);
3124 		dp_srng_cleanup(soc, &soc->tx_comp_ring[pdev->pdev_id],
3125 			WBM2SW_RELEASE, pdev->pdev_id);
3126 	}
3127 
3128 	dp_pktlogmod_exit(pdev);
3129 
3130 	dp_rx_pdev_detach(pdev);
3131 	dp_rx_pdev_mon_detach(pdev);
3132 	dp_neighbour_peers_detach(pdev);
3133 	qdf_spinlock_destroy(&pdev->tx_mutex);
3134 	qdf_spinlock_destroy(&pdev->vdev_list_lock);
3135 
3136 	dp_ipa_uc_detach(soc, pdev);
3137 
3138 	dp_cleanup_ipa_rx_refill_buf_ring(soc, pdev);
3139 
3140 	/* Cleanup per PDEV REO rings if configured */
3141 	if (wlan_cfg_per_pdev_rx_ring(soc->wlan_cfg_ctx)) {
3142 		dp_srng_cleanup(soc, &soc->reo_dest_ring[pdev->pdev_id],
3143 			REO_DST, pdev->pdev_id);
3144 	}
3145 
3146 	dp_srng_cleanup(soc, &pdev->rx_refill_buf_ring, RXDMA_BUF, 0);
3147 
3148 	dp_rxdma_ring_cleanup(soc, pdev);
3149 
3150 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
3151 		dp_mon_ring_deinit(soc, pdev, mac_id);
3152 		dp_srng_cleanup(soc, &pdev->rxdma_err_dst_ring[mac_id],
3153 			RXDMA_DST, 0);
3154 	}
3155 
3156 	curr_nbuf = pdev->invalid_peer_head_msdu;
3157 	while (curr_nbuf) {
3158 		next_nbuf = qdf_nbuf_next(curr_nbuf);
3159 		qdf_nbuf_free(curr_nbuf);
3160 		curr_nbuf = next_nbuf;
3161 	}
3162 
3163 	dp_htt_ppdu_stats_detach(pdev);
3164 
3165 	soc->pdev_list[pdev->pdev_id] = NULL;
3166 	soc->pdev_count--;
3167 	wlan_cfg_pdev_detach(pdev->wlan_cfg_ctx);
3168 	qdf_mem_free(pdev->dp_txrx_handle);
3169 	qdf_mem_free(pdev);
3170 }
3171 
3172 /*
3173  * dp_reo_desc_freelist_destroy() - Flush REO descriptors from deferred freelist
3174  * @soc: DP SOC handle
3175  */
3176 static inline void dp_reo_desc_freelist_destroy(struct dp_soc *soc)
3177 {
3178 	struct reo_desc_list_node *desc;
3179 	struct dp_rx_tid *rx_tid;
3180 
3181 	qdf_spin_lock_bh(&soc->reo_desc_freelist_lock);
3182 	while (qdf_list_remove_front(&soc->reo_desc_freelist,
3183 		(qdf_list_node_t **)&desc) == QDF_STATUS_SUCCESS) {
3184 		rx_tid = &desc->rx_tid;
3185 		qdf_mem_unmap_nbytes_single(soc->osdev,
3186 			rx_tid->hw_qdesc_paddr,
3187 			QDF_DMA_BIDIRECTIONAL,
3188 			rx_tid->hw_qdesc_alloc_size);
3189 		qdf_mem_free(rx_tid->hw_qdesc_vaddr_unaligned);
3190 		qdf_mem_free(desc);
3191 	}
3192 	qdf_spin_unlock_bh(&soc->reo_desc_freelist_lock);
3193 	qdf_list_destroy(&soc->reo_desc_freelist);
3194 	qdf_spinlock_destroy(&soc->reo_desc_freelist_lock);
3195 }
3196 
3197 /*
3198  * dp_soc_detach_wifi3() - Detach txrx SOC
3199  * @txrx_soc: DP SOC handle, struct cdp_soc_t is first element of struct dp_soc.
3200  */
3201 static void dp_soc_detach_wifi3(void *txrx_soc)
3202 {
3203 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
3204 	int i;
3205 
3206 	qdf_atomic_set(&soc->cmn_init_done, 0);
3207 
3208 	qdf_flush_work(&soc->htt_stats.work);
3209 	qdf_disable_work(&soc->htt_stats.work);
3210 
3211 	/* Free pending htt stats messages */
3212 	qdf_nbuf_queue_free(&soc->htt_stats.msg);
3213 
3214 	dp_free_inact_timer(soc);
3215 
3216 	for (i = 0; i < MAX_PDEV_CNT; i++) {
3217 		if (soc->pdev_list[i])
3218 			dp_pdev_detach_wifi3(
3219 				(struct cdp_pdev *)soc->pdev_list[i], 1);
3220 	}
3221 
3222 	dp_peer_find_detach(soc);
3223 
3224 	/* TBD: Call Tx and Rx cleanup functions to free buffers and
3225 	 * SW descriptors
3226 	 */
3227 
3228 	/* Free the ring memories */
3229 	/* Common rings */
3230 	dp_srng_cleanup(soc, &soc->wbm_desc_rel_ring, SW2WBM_RELEASE, 0);
3231 
3232 	dp_tx_soc_detach(soc);
3233 	/* Tx data rings */
3234 	if (!wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
3235 		for (i = 0; i < soc->num_tcl_data_rings; i++) {
3236 			dp_srng_cleanup(soc, &soc->tcl_data_ring[i],
3237 				TCL_DATA, i);
3238 			dp_srng_cleanup(soc, &soc->tx_comp_ring[i],
3239 				WBM2SW_RELEASE, i);
3240 		}
3241 	}
3242 
3243 	/* TCL command and status rings */
3244 	dp_srng_cleanup(soc, &soc->tcl_cmd_ring, TCL_CMD, 0);
3245 	dp_srng_cleanup(soc, &soc->tcl_status_ring, TCL_STATUS, 0);
3246 
3247 	/* Rx data rings */
3248 	if (!wlan_cfg_per_pdev_rx_ring(soc->wlan_cfg_ctx)) {
3249 		soc->num_reo_dest_rings =
3250 			wlan_cfg_num_reo_dest_rings(soc->wlan_cfg_ctx);
3251 		for (i = 0; i < soc->num_reo_dest_rings; i++) {
3252 			/* TODO: Get number of rings and ring sizes
3253 			 * from wlan_cfg
3254 			 */
3255 			dp_srng_cleanup(soc, &soc->reo_dest_ring[i],
3256 				REO_DST, i);
3257 		}
3258 	}
3259 	/* REO reinjection ring */
3260 	dp_srng_cleanup(soc, &soc->reo_reinject_ring, REO_REINJECT, 0);
3261 
3262 	/* Rx release ring */
3263 	dp_srng_cleanup(soc, &soc->rx_rel_ring, WBM2SW_RELEASE, 0);
3264 
3265 	/* Rx exception ring */
3266 	/* TODO: Better to store ring_type and ring_num in
3267 	 * dp_srng during setup
3268 	 */
3269 	dp_srng_cleanup(soc, &soc->reo_exception_ring, REO_EXCEPTION, 0);
3270 
3271 	/* REO command and status rings */
3272 	dp_srng_cleanup(soc, &soc->reo_cmd_ring, REO_CMD, 0);
3273 	dp_srng_cleanup(soc, &soc->reo_status_ring, REO_STATUS, 0);
3274 	dp_hw_link_desc_pool_cleanup(soc);
3275 
3276 	qdf_spinlock_destroy(&soc->peer_ref_mutex);
3277 	qdf_spinlock_destroy(&soc->htt_stats.lock);
3278 
3279 	htt_soc_detach(soc->htt_handle);
3280 
3281 	qdf_spinlock_destroy(&soc->rx.defrag.defrag_lock);
3282 
3283 	dp_reo_cmdlist_destroy(soc);
3284 	qdf_spinlock_destroy(&soc->rx.reo_cmd_lock);
3285 	dp_reo_desc_freelist_destroy(soc);
3286 
3287 	wlan_cfg_soc_detach(soc->wlan_cfg_ctx);
3288 
3289 	dp_soc_wds_detach(soc);
3290 	qdf_spinlock_destroy(&soc->ast_lock);
3291 
3292 	qdf_mem_free(soc);
3293 }
3294 
3295 #ifndef QCA_WIFI_QCA6390
3296 static void dp_mon_htt_srng_setup(struct dp_soc *soc,
3297 				  struct dp_pdev *pdev,
3298 				  int mac_id,
3299 				  int mac_for_pdev)
3300 {
3301 	htt_srng_setup(soc->htt_handle, mac_for_pdev,
3302 		       pdev->rxdma_mon_buf_ring[mac_id].hal_srng,
3303 		       RXDMA_MONITOR_BUF);
3304 
3305 	htt_srng_setup(soc->htt_handle, mac_for_pdev,
3306 		       pdev->rxdma_mon_dst_ring[mac_id].hal_srng,
3307 		       RXDMA_MONITOR_DST);
3308 
3309 	htt_srng_setup(soc->htt_handle, mac_for_pdev,
3310 		       pdev->rxdma_mon_status_ring[mac_id].hal_srng,
3311 		       RXDMA_MONITOR_STATUS);
3312 
3313 	htt_srng_setup(soc->htt_handle, mac_for_pdev,
3314 		       pdev->rxdma_mon_desc_ring[mac_id].hal_srng,
3315 		       RXDMA_MONITOR_DESC);
3316 }
3317 #else
3318 static void dp_mon_htt_srng_setup(struct dp_soc *soc,
3319 				  struct dp_pdev *pdev,
3320 				  int mac_id,
3321 				  int mac_for_pdev)
3322 {
3323 }
3324 #endif
3325 /*
3326  * dp_rxdma_ring_config() - configure the RX DMA rings
3327  *
3328  * This function is used to configure the MAC rings.
3329  * On MCL, the host provides buffers in the Host2FW ring;
3330  * FW refills (copies) buffers into the ring and updates
3331  * ring_idx in a register
3332  *
3333  * @soc: data path SoC handle
3334  *
3335  * Return: void
3336  */
3337 #ifdef QCA_HOST2FW_RXBUF_RING
3338 static void dp_rxdma_ring_config(struct dp_soc *soc)
3339 {
3340 	int i;
3341 
3342 	for (i = 0; i < MAX_PDEV_CNT; i++) {
3343 		struct dp_pdev *pdev = soc->pdev_list[i];
3344 
3345 		if (pdev) {
3346 			int mac_id;
3347 			bool dbs_enable = 0;
3348 			int max_mac_rings =
3349 				 wlan_cfg_get_num_mac_rings
3350 				(pdev->wlan_cfg_ctx);
3351 
3352 			htt_srng_setup(soc->htt_handle, 0,
3353 				 pdev->rx_refill_buf_ring.hal_srng,
3354 				 RXDMA_BUF);
3355 
3356 			if (pdev->rx_refill_buf_ring2.hal_srng)
3357 				htt_srng_setup(soc->htt_handle, 0,
3358 					pdev->rx_refill_buf_ring2.hal_srng,
3359 					RXDMA_BUF);
3360 
3361 			if (soc->cdp_soc.ol_ops->
3362 				is_hw_dbs_2x2_capable) {
3363 				dbs_enable = soc->cdp_soc.ol_ops->
3364 					is_hw_dbs_2x2_capable(soc->ctrl_psoc);
3365 			}
3366 
3367 			if (dbs_enable) {
3368 				QDF_TRACE(QDF_MODULE_ID_TXRX,
3369 				QDF_TRACE_LEVEL_ERROR,
3370 				FL("DBS enabled max_mac_rings %d"),
3371 					 max_mac_rings);
3372 			} else {
3373 				max_mac_rings = 1;
3374 				QDF_TRACE(QDF_MODULE_ID_TXRX,
3375 					 QDF_TRACE_LEVEL_ERROR,
3376 					 FL("DBS disabled, max_mac_rings %d"),
3377 					 max_mac_rings);
3378 			}
3379 
3380 			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
3381 					 FL("pdev_id %d max_mac_rings %d"),
3382 					 pdev->pdev_id, max_mac_rings);
3383 
3384 			for (mac_id = 0; mac_id < max_mac_rings; mac_id++) {
3385 				int mac_for_pdev = dp_get_mac_id_for_pdev(
3386 							mac_id, pdev->pdev_id);
3387 
3388 				QDF_TRACE(QDF_MODULE_ID_TXRX,
3389 					 QDF_TRACE_LEVEL_ERROR,
3390 					 FL("mac_id %d"), mac_for_pdev);
3391 				htt_srng_setup(soc->htt_handle, mac_for_pdev,
3392 					 pdev->rx_mac_buf_ring[mac_id]
3393 						.hal_srng,
3394 					 RXDMA_BUF);
3395 				htt_srng_setup(soc->htt_handle, mac_for_pdev,
3396 					pdev->rxdma_err_dst_ring[mac_id]
3397 						.hal_srng,
3398 					RXDMA_DST);
3399 
3400 				/* Configure monitor mode rings */
3401 				dp_mon_htt_srng_setup(soc, pdev, mac_id,
3402 						      mac_for_pdev);
3403 
3404 			}
3405 		}
3406 	}
3407 
3408 	/*
3409 	 * Timer to reap rxdma status rings.
3410 	 * Needed until we enable ppdu end interrupts
3411 	 */
3412 	qdf_timer_init(soc->osdev, &soc->mon_reap_timer,
3413 			dp_service_mon_rings, (void *)soc,
3414 			QDF_TIMER_TYPE_WAKE_APPS);
3415 	soc->reap_timer_init = 1;
3416 }
3417 #else
3418 /* This is only for WIN */
3419 static void dp_rxdma_ring_config(struct dp_soc *soc)
3420 {
3421 	int i;
3422 	int mac_id;
3423 
3424 	for (i = 0; i < MAX_PDEV_CNT; i++) {
3425 		struct dp_pdev *pdev = soc->pdev_list[i];
3426 
3427 		if (pdev == NULL)
3428 			continue;
3429 
3430 		for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
3431 			int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, i);
3432 
3433 			htt_srng_setup(soc->htt_handle, mac_for_pdev,
3434 				pdev->rx_refill_buf_ring.hal_srng, RXDMA_BUF);
3435 
3436 			htt_srng_setup(soc->htt_handle, mac_for_pdev,
3437 				pdev->rxdma_mon_buf_ring[mac_id].hal_srng,
3438 				RXDMA_MONITOR_BUF);
3439 			htt_srng_setup(soc->htt_handle, mac_for_pdev,
3440 				pdev->rxdma_mon_dst_ring[mac_id].hal_srng,
3441 				RXDMA_MONITOR_DST);
3442 			htt_srng_setup(soc->htt_handle, mac_for_pdev,
3443 				pdev->rxdma_mon_status_ring[mac_id].hal_srng,
3444 				RXDMA_MONITOR_STATUS);
3445 			htt_srng_setup(soc->htt_handle, mac_for_pdev,
3446 				pdev->rxdma_mon_desc_ring[mac_id].hal_srng,
3447 				RXDMA_MONITOR_DESC);
3448 			htt_srng_setup(soc->htt_handle, mac_for_pdev,
3449 				pdev->rxdma_err_dst_ring[mac_id].hal_srng,
3450 				RXDMA_DST);
3451 		}
3452 	}
3453 }
3454 #endif
3455 
3456 /*
3457  * dp_soc_attach_target_wifi3() - SOC initialization in the target
3458  * @txrx_soc: Datapath SOC handle
3459  */
3460 static int dp_soc_attach_target_wifi3(struct cdp_soc_t *cdp_soc)
3461 {
3462 	struct dp_soc *soc = (struct dp_soc *)cdp_soc;
3463 
3464 	htt_soc_attach_target(soc->htt_handle);
3465 
3466 	dp_rxdma_ring_config(soc);
3467 
3468 	DP_STATS_INIT(soc);
3469 
3470 	/* initialize work queue for stats processing */
3471 	qdf_create_work(0, &soc->htt_stats.work, htt_t2h_stats_handler, soc);
3472 
3473 	return 0;
3474 }
3475 
3476 /*
3477  * dp_soc_get_nss_cfg_wifi3() - SOC get nss config
3478  * @txrx_soc: Datapath SOC handle
3479  */
3480 static int dp_soc_get_nss_cfg_wifi3(struct cdp_soc_t *cdp_soc)
3481 {
3482 	struct dp_soc *dsoc = (struct dp_soc *)cdp_soc;
3483 	return wlan_cfg_get_dp_soc_nss_cfg(dsoc->wlan_cfg_ctx);
3484 }
3485 /*
3486  * dp_soc_set_nss_cfg_wifi3() - SOC set nss config
3487  * @txrx_soc: Datapath SOC handle
3488  * @nss_cfg: nss config
3489  */
3490 static void dp_soc_set_nss_cfg_wifi3(struct cdp_soc_t *cdp_soc, int config)
3491 {
3492 	struct dp_soc *dsoc = (struct dp_soc *)cdp_soc;
3493 	struct wlan_cfg_dp_soc_ctxt *wlan_cfg_ctx = dsoc->wlan_cfg_ctx;
3494 
3495 	wlan_cfg_set_dp_soc_nss_cfg(wlan_cfg_ctx, config);
3496 
3497 	/*
3498 	 * TODO: masked out based on the per offloaded radio
3499 	 */
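	/*
	 * With full DBDC offload (both radios handled by NSS) the host does
	 * not transmit on these radios, so the host-side Tx descriptor
	 * pools are sized down to zero.
	 */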
3500 	if (config == dp_nss_cfg_dbdc) {
3501 		wlan_cfg_set_num_tx_desc_pool(wlan_cfg_ctx, 0);
3502 		wlan_cfg_set_num_tx_ext_desc_pool(wlan_cfg_ctx, 0);
3503 		wlan_cfg_set_num_tx_desc(wlan_cfg_ctx, 0);
3504 		wlan_cfg_set_num_tx_ext_desc(wlan_cfg_ctx, 0);
3505 	}
3506 
3507 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
3508 		  FL("nss-wifi<0> nss config is enabled"));
3509 }
3510 /*
3511  * dp_vdev_attach_wifi3() - attach txrx vdev
3512  * @txrx_pdev: Datapath PDEV handle
3513  * @vdev_mac_addr: MAC address of the virtual interface
3514  * @vdev_id: VDEV Id
3515  * @wlan_op_mode: VDEV operating mode
3516  *
3517  * Return: DP VDEV handle on success, NULL on failure
3518  */
3519 static struct cdp_vdev *dp_vdev_attach_wifi3(struct cdp_pdev *txrx_pdev,
3520 	uint8_t *vdev_mac_addr, uint8_t vdev_id, enum wlan_op_mode op_mode)
3521 {
3522 	struct dp_pdev *pdev = (struct dp_pdev *)txrx_pdev;
3523 	struct dp_soc *soc = pdev->soc;
3524 	struct dp_vdev *vdev = qdf_mem_malloc(sizeof(*vdev));
3525 
3526 	if (!vdev) {
3527 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3528 			FL("DP VDEV memory allocation failed"));
3529 		goto fail0;
3530 	}
3531 
3532 	vdev->pdev = pdev;
3533 	vdev->vdev_id = vdev_id;
3534 	vdev->opmode = op_mode;
3535 	vdev->osdev = soc->osdev;
3536 
3537 	vdev->osif_rx = NULL;
3538 	vdev->osif_rsim_rx_decap = NULL;
3539 	vdev->osif_get_key = NULL;
3540 	vdev->osif_rx_mon = NULL;
3541 	vdev->osif_tx_free_ext = NULL;
3542 	vdev->osif_vdev = NULL;
3543 
3544 	vdev->delete.pending = 0;
3545 	vdev->safemode = 0;
3546 	vdev->drop_unenc = 1;
3547 	vdev->sec_type = cdp_sec_type_none;
3548 #ifdef notyet
3549 	vdev->filters_num = 0;
3550 #endif
3551 
3552 	qdf_mem_copy(
3553 		&vdev->mac_addr.raw[0], vdev_mac_addr, OL_TXRX_MAC_ADDR_LEN);
3554 
3555 	vdev->tx_encap_type = wlan_cfg_pkt_type(soc->wlan_cfg_ctx);
3556 	vdev->rx_decap_type = wlan_cfg_pkt_type(soc->wlan_cfg_ctx);
3557 	vdev->dscp_tid_map_id = 0;
3558 	vdev->mcast_enhancement_en = 0;
3559 
3560 	/* TODO: Initialize default HTT meta data that will be used in
3561 	 * TCL descriptors for packets transmitted from this VDEV
3562 	 */
3563 
3564 	TAILQ_INIT(&vdev->peer_list);
3565 
3566 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
3567 	/* add this vdev into the pdev's list */
3568 	TAILQ_INSERT_TAIL(&pdev->vdev_list, vdev, vdev_list_elem);
3569 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
3570 	pdev->vdev_count++;
3571 
3572 	dp_tx_vdev_attach(vdev);
3573 
3574 
3575 	if ((soc->intr_mode == DP_INTR_POLL) &&
3576 			wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx) != 0) {
3577 		if (pdev->vdev_count == 1)
3578 			qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
3579 	}
3580 
3581 	dp_lro_hash_setup(soc);
3582 
3583 	/* LRO */
3584 	if (wlan_cfg_is_lro_enabled(soc->wlan_cfg_ctx) &&
3585 		wlan_op_mode_sta == vdev->opmode)
3586 		vdev->lro_enable = true;
3587 
3588 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
3589 		 "LRO: vdev_id %d lro_enable %d", vdev_id, vdev->lro_enable);
3590 
3591 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3592 		"Created vdev %pK (%pM)", vdev, vdev->mac_addr.raw);
3593 	DP_STATS_INIT(vdev);
3594 
3595 	if (wlan_op_mode_sta == vdev->opmode)
3596 		dp_peer_create_wifi3((struct cdp_vdev *)vdev,
3597 							vdev->mac_addr.raw,
3598 							NULL);
3599 
3600 	return (struct cdp_vdev *)vdev;
3601 
3602 fail0:
3603 	return NULL;
3604 }
3605 
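/*
 * Note: for a STA vdev the attach above also creates a self peer with the
 * vdev MAC address (the dp_peer_create_wifi3() call near the end); that
 * peer is recorded as vap_bss_peer and is deleted again from
 * dp_vdev_detach_wifi3() when the vdev is torn down.
 */
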
3606 /**
3607  * dp_vdev_register_wifi3() - Register VDEV operations from osif layer
3608  * @vdev: Datapath VDEV handle
3609  * @osif_vdev: OSIF vdev handle
3610  * @ctrl_vdev: UMAC vdev handle
3611  * @txrx_ops: Tx and Rx operations
3612  *
3613  * Return: void
3614  */
3615 static void dp_vdev_register_wifi3(struct cdp_vdev *vdev_handle,
3616 	void *osif_vdev, struct cdp_ctrl_objmgr_vdev *ctrl_vdev,
3617 	struct ol_txrx_ops *txrx_ops)
3618 {
3619 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
3620 	vdev->osif_vdev = osif_vdev;
3621 	vdev->ctrl_vdev = ctrl_vdev;
3622 	vdev->osif_rx = txrx_ops->rx.rx;
3623 	vdev->osif_rsim_rx_decap = txrx_ops->rx.rsim_rx_decap;
3624 	vdev->osif_get_key = txrx_ops->get_key;
3625 	vdev->osif_rx_mon = txrx_ops->rx.mon;
3626 	vdev->osif_tx_free_ext = txrx_ops->tx.tx_free_ext;
3627 #ifdef notyet
3628 #if ATH_SUPPORT_WAPI
3629 	vdev->osif_check_wai = txrx_ops->rx.wai_check;
3630 #endif
3631 #endif
3632 #ifdef UMAC_SUPPORT_PROXY_ARP
3633 	vdev->osif_proxy_arp = txrx_ops->proxy_arp;
3634 #endif
3635 	vdev->me_convert = txrx_ops->me_convert;
3636 
3637 	/* TODO: Enable the following once Tx code is integrated */
3638 	if (vdev->mesh_vdev)
3639 		txrx_ops->tx.tx = dp_tx_send_mesh;
3640 	else
3641 		txrx_ops->tx.tx = dp_tx_send;
3642 
3643 	txrx_ops->tx.tx_exception = dp_tx_send_exception;
3644 
3645 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW,
3646 		"DP Vdev Register success");
3647 }
3648 
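/*
 * Illustrative usage sketch (hypothetical OS-shim code, not part of this
 * file): a typical bring-up attaches the vdev and then registers the
 * OS-facing callbacks, after which txrx_ops->tx.tx points at dp_tx_send
 * (or dp_tx_send_mesh for mesh vdevs):
 *
 *	struct ol_txrx_ops ops = { ... };
 *	struct cdp_vdev *vh;
 *
 *	vh = dp_vdev_attach_wifi3(txrx_pdev, mac_addr, vdev_id,
 *				  wlan_op_mode_sta);
 *	if (vh)
 *		dp_vdev_register_wifi3(vh, osif_dev, ctrl_vdev, &ops);
 */
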
3649 /**
3650  * dp_vdev_flush_peers() - Forcibly flush peers of vdev
3651  * @vdev: Datapath VDEV handle
3652  *
3653  * Return: void
3654  */
3655 static void dp_vdev_flush_peers(struct dp_vdev *vdev)
3656 {
3657 	struct dp_pdev *pdev = vdev->pdev;
3658 	struct dp_soc *soc = pdev->soc;
3659 	struct dp_peer *peer;
3660 	uint16_t *peer_ids;
3661 	uint8_t i = 0, j = 0;
3662 
3663 	peer_ids = qdf_mem_malloc(soc->max_peers * sizeof(peer_ids[0]));
3664 	if (!peer_ids) {
3665 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3666 			"DP alloc failure - unable to flush peers");
3667 		return;
3668 	}
3669 
3670 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
3671 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
3672 		for (i = 0; i < MAX_NUM_PEER_ID_PER_PEER; i++)
3673 			if (peer->peer_ids[i] != HTT_INVALID_PEER)
3674 				if (j < soc->max_peers)
3675 					peer_ids[j++] = peer->peer_ids[i];
3676 	}
3677 	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
3678 
3679 	for (i = 0; i < j ; i++)
3680 		dp_rx_peer_unmap_handler(soc, peer_ids[i]);
3681 
3682 	qdf_mem_free(peer_ids);
3683 
3684 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
3685 		FL("Flushed peers for vdev object %pK "), vdev);
3686 }
3687 
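/*
 * Note: the flush above snapshots the live peer_ids under peer_ref_mutex
 * and then simulates the missing PEER_UNMAP events from the target by
 * calling dp_rx_peer_unmap_handler() outside the lock, so stranded peer
 * references are released even when the target is in reset.
 */
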
3688 /*
3689  * dp_vdev_detach_wifi3() - Detach txrx vdev
3690  * @txrx_vdev:		Datapath VDEV handle
3691  * @callback:		Callback OL_IF on completion of detach
3692  * @cb_context:	Callback context
3693  *
3694  */
3695 static void dp_vdev_detach_wifi3(struct cdp_vdev *vdev_handle,
3696 	ol_txrx_vdev_delete_cb callback, void *cb_context)
3697 {
3698 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
3699 	struct dp_pdev *pdev = vdev->pdev;
3700 	struct dp_soc *soc = pdev->soc;
3701 	struct dp_neighbour_peer *peer = NULL;
3702 
3703 	/* preconditions */
3704 	qdf_assert(vdev);
3705 
3706 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
3707 	/* remove the vdev from its parent pdev's list */
3708 	TAILQ_REMOVE(&pdev->vdev_list, vdev, vdev_list_elem);
3709 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
3710 
3711 	if (wlan_op_mode_sta == vdev->opmode)
3712 		dp_peer_delete_wifi3(vdev->vap_bss_peer, 0);
3713 
3714 	/*
3715 	 * If Target is hung, flush all peers before detaching vdev
3716 	 * this will free all references held due to missing
3717 	 * unmap commands from Target
3718 	 */
3719 	if (hif_get_target_status(soc->hif_handle) == TARGET_STATUS_RESET)
3720 		dp_vdev_flush_peers(vdev);
3721 
3722 	/*
3723 	 * Use peer_ref_mutex while accessing peer_list, in case
3724 	 * a peer is in the process of being removed from the list.
3725 	 */
3726 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
3727 	/* check that the vdev has no peers allocated */
3728 	if (!TAILQ_EMPTY(&vdev->peer_list)) {
3729 		/* debug print - will be removed later */
3730 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_WARN,
3731 			FL("not deleting vdev object %pK (%pM)"
3732 			" until deletion finishes for all its peers"),
3733 			vdev, vdev->mac_addr.raw);
3734 		/* indicate that the vdev needs to be deleted */
3735 		vdev->delete.pending = 1;
3736 		vdev->delete.callback = callback;
3737 		vdev->delete.context = cb_context;
3738 		qdf_spin_unlock_bh(&soc->peer_ref_mutex);
3739 		return;
3740 	}
3741 	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
3742 
3743 	qdf_spin_lock_bh(&pdev->neighbour_peer_mutex);
3744 	TAILQ_FOREACH(peer, &pdev->neighbour_peers_list,
3745 		      neighbour_peer_list_elem) {
3746 		QDF_ASSERT(peer->vdev != vdev);
3747 	}
3748 	qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex);
3749 
3750 	dp_tx_vdev_detach(vdev);
3751 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
3752 		FL("deleting vdev object %pK (%pM)"), vdev, vdev->mac_addr.raw);
3753 
3754 	qdf_mem_free(vdev);
3755 
3756 	if (callback)
3757 		callback(cb_context);
3758 }
3759 
3760 /*
3761  * dp_peer_delete_ast_entries(): Delete all AST entries for a peer
3762  * @soc - datapath soc handle
3763  * @peer - datapath peer handle
3764  *
3765  * Delete the AST entries belonging to a peer
3766  */
3767 #ifdef FEATURE_AST
3768 static inline void dp_peer_delete_ast_entries(struct dp_soc *soc,
3769 					      struct dp_peer *peer)
3770 {
3771 	struct dp_ast_entry *ast_entry, *temp_ast_entry;
3772 
3773 	qdf_spin_lock_bh(&soc->ast_lock);
3774 	DP_PEER_ITERATE_ASE_LIST(peer, ast_entry, temp_ast_entry)
3775 		dp_peer_del_ast(soc, ast_entry);
3776 
3777 	peer->self_ast_entry = NULL;
3778 	TAILQ_INIT(&peer->ast_entry_list);
3779 	qdf_spin_unlock_bh(&soc->ast_lock);
3780 }
3781 #else
3782 static inline void dp_peer_delete_ast_entries(struct dp_soc *soc,
3783 					      struct dp_peer *peer)
3784 {
3785 }
3786 #endif
3787 
3788 #if ATH_SUPPORT_WRAP
3789 static inline struct dp_peer *dp_peer_can_reuse(struct dp_vdev *vdev,
3790 						uint8_t *peer_mac_addr)
3791 {
3792 	struct dp_peer *peer;
3793 
3794 	peer = dp_peer_find_hash_find(vdev->pdev->soc, peer_mac_addr,
3795 				      0, vdev->vdev_id);
3796 	if (!peer)
3797 		return NULL;
3798 
3799 	if (peer->bss_peer)
3800 		return peer;
3801 
3802 	qdf_atomic_dec(&peer->ref_cnt);
3803 	return NULL;
3804 }
3805 #else
3806 static inline struct dp_peer *dp_peer_can_reuse(struct dp_vdev *vdev,
3807 						uint8_t *peer_mac_addr)
3808 {
3809 	struct dp_peer *peer;
3810 
3811 	peer = dp_peer_find_hash_find(vdev->pdev->soc, peer_mac_addr,
3812 				      0, vdev->vdev_id);
3813 	if (!peer)
3814 		return NULL;
3815 
3816 	if (peer->bss_peer && (peer->vdev->vdev_id == vdev->vdev_id))
3817 		return peer;
3818 
3819 	qdf_atomic_dec(&peer->ref_cnt);
3820 	return NULL;
3821 }
3822 #endif
3823 
3824 /*
3825  * dp_peer_create_wifi3() - attach txrx peer
3826  * @txrx_vdev: Datapath VDEV handle
3827  * @peer_mac_addr: Peer MAC address
3828  *
3829  * Return: DP peer handle on success, NULL on failure
3830  */
3831 static void *dp_peer_create_wifi3(struct cdp_vdev *vdev_handle,
3832 		uint8_t *peer_mac_addr, struct cdp_ctrl_objmgr_peer *ctrl_peer)
3833 {
3834 	struct dp_peer *peer;
3835 	int i;
3836 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
3837 	struct dp_pdev *pdev;
3838 	struct dp_soc *soc;
3839 	struct dp_ast_entry *ast_entry;
3840 	enum cdp_txrx_ast_entry_type ast_type = CDP_TXRX_AST_TYPE_STATIC;
3841 
3842 	/* preconditions */
3843 	qdf_assert(vdev);
3844 	qdf_assert(peer_mac_addr);
3845 
3846 	pdev = vdev->pdev;
3847 	soc = pdev->soc;
3848 
3849 	/*
3850 	 * If a peer entry with given MAC address already exists,
3851 	 * reuse the peer and reset the state of peer.
3852 	 */
3853 	peer = dp_peer_can_reuse(vdev, peer_mac_addr);
3854 
3855 	if (peer) {
3856 		peer->delete_in_progress = false;
3857 
3858 		dp_peer_delete_ast_entries(soc, peer);
3859 
3860 		if ((vdev->opmode == wlan_op_mode_sta) &&
3861 		    !qdf_mem_cmp(peer_mac_addr, &vdev->mac_addr.raw[0],
3862 		     DP_MAC_ADDR_LEN)) {
3863 			ast_type = CDP_TXRX_AST_TYPE_SELF;
3864 		}
3865 
3866 		dp_peer_add_ast(soc, peer, peer_mac_addr, ast_type, 0);
3867 
3868 		/*
3869 		 * Control path maintains a node count which is incremented
3870 		 * for every new peer create command. Since a new peer is not
3871 		 * being created and the earlier reference is reused here, a
3872 		 * peer_unref_delete event is sent to the control path to
3873 		 * increment the count back.
3874 		 */
3875 		if (soc->cdp_soc.ol_ops->peer_unref_delete) {
3876 			soc->cdp_soc.ol_ops->peer_unref_delete(pdev->ctrl_pdev,
3877 				vdev->vdev_id, peer->mac_addr.raw);
3878 		}
3879 		peer->ctrl_peer = ctrl_peer;
3880 
3881 		dp_local_peer_id_alloc(pdev, peer);
3882 		DP_STATS_INIT(peer);
3883 
3884 		return (void *)peer;
3885 	} else {
3886 		/*
3887 		 * When a STA roams from RPTR AP to ROOT AP and vice versa, we
3888 		 * need to remove the AST entry which was earlier added as a WDS
3889 		 * entry.
3890 		 * If an AST entry exists but no peer entry exists with the given
3891 		 * MAC address, we can deduce that it is a WDS entry
3892 		 */
3893 		qdf_spin_lock_bh(&soc->ast_lock);
3894 		ast_entry = dp_peer_ast_hash_find(soc, peer_mac_addr);
3895 		if (ast_entry)
3896 			dp_peer_del_ast(soc, ast_entry);
3897 		qdf_spin_unlock_bh(&soc->ast_lock);
3898 	}
3899 
3900 #ifdef notyet
3901 	peer = (struct dp_peer *)qdf_mempool_alloc(soc->osdev,
3902 		soc->mempool_ol_ath_peer);
3903 #else
3904 	peer = (struct dp_peer *)qdf_mem_malloc(sizeof(*peer));
3905 #endif
3906 
3907 	if (!peer)
3908 		return NULL; /* failure */
3909 
3910 	qdf_mem_zero(peer, sizeof(struct dp_peer));
3911 
3912 	TAILQ_INIT(&peer->ast_entry_list);
3913 
3914 	/* store provided params */
3915 	peer->vdev = vdev;
3916 	peer->ctrl_peer = ctrl_peer;
3917 
3918 	if ((vdev->opmode == wlan_op_mode_sta) &&
3919 	    !qdf_mem_cmp(peer_mac_addr, &vdev->mac_addr.raw[0],
3920 			 DP_MAC_ADDR_LEN)) {
3921 		ast_type = CDP_TXRX_AST_TYPE_SELF;
3922 	}
3923 
3924 	dp_peer_add_ast(soc, peer, peer_mac_addr, ast_type, 0);
3925 
3926 	qdf_spinlock_create(&peer->peer_info_lock);
3927 
3928 	qdf_mem_copy(
3929 		&peer->mac_addr.raw[0], peer_mac_addr, OL_TXRX_MAC_ADDR_LEN);
3930 
3931 	/* TODO: See if rx_opt_proc is really required */
3932 	peer->rx_opt_proc = soc->rx_opt_proc;
3933 
3934 	/* initialize the peer_id */
3935 	for (i = 0; i < MAX_NUM_PEER_ID_PER_PEER; i++)
3936 		peer->peer_ids[i] = HTT_INVALID_PEER;
3937 
3938 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
3939 
3940 	qdf_atomic_init(&peer->ref_cnt);
3941 
3942 	/* keep one reference for attach */
3943 	qdf_atomic_inc(&peer->ref_cnt);
3944 
3945 	/* add this peer into the vdev's list */
3946 	if (wlan_op_mode_sta == vdev->opmode)
3947 		TAILQ_INSERT_HEAD(&vdev->peer_list, peer, peer_list_elem);
3948 	else
3949 		TAILQ_INSERT_TAIL(&vdev->peer_list, peer, peer_list_elem);
3950 
3951 	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
3952 
3953 	/* TODO: See if hash based search is required */
3954 	dp_peer_find_hash_add(soc, peer);
3955 
3956 	/* Initialize the peer state */
3957 	peer->state = OL_TXRX_PEER_STATE_DISC;
3958 
3959 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
3960 		"vdev %pK created peer %pK (%pM) ref_cnt: %d",
3961 		vdev, peer, peer->mac_addr.raw,
3962 		qdf_atomic_read(&peer->ref_cnt));
3963 	/*
3964 	 * Mark this peer as the bss peer if its MAC address matches the vdev MAC
3965 	 */
3966 	if (memcmp(peer->mac_addr.raw, vdev->mac_addr.raw, 6) == 0) {
3967 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
3968 			"vdev bss_peer!!!!");
3969 		peer->bss_peer = 1;
3970 		vdev->vap_bss_peer = peer;
3971 	}
3972 	for (i = 0; i < DP_MAX_TIDS; i++)
3973 		qdf_spinlock_create(&peer->rx_tid[i].tid_lock);
3974 
3975 	dp_local_peer_id_alloc(pdev, peer);
3976 	DP_STATS_INIT(peer);
3977 	return (void *)peer;
3978 }
3979 
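/*
 * Note: the reference taken above for attach is released by
 * dp_peer_delete_wifi3(), while the reference added when the target's
 * PEER_MAP message arrives is released on PEER_UNMAP;
 * dp_peer_unref_delete() frees the peer only when the count reaches zero.
 */
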
3980 /*
3981  * dp_peer_setup_wifi3() - initialize the peer
3982  * @vdev_hdl: virtual device object
3983  * @peer: Peer object
3984  *
3985  * Return: void
3986  */
3987 static void dp_peer_setup_wifi3(struct cdp_vdev *vdev_hdl, void *peer_hdl)
3988 {
3989 	struct dp_peer *peer = (struct dp_peer *)peer_hdl;
3990 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_hdl;
3991 	struct dp_pdev *pdev;
3992 	struct dp_soc *soc;
3993 	bool hash_based = 0;
3994 	enum cdp_host_reo_dest_ring reo_dest;
3995 
3996 	/* preconditions */
3997 	qdf_assert(vdev);
3998 	qdf_assert(peer);
3999 
4000 	pdev = vdev->pdev;
4001 	soc = pdev->soc;
4002 
4003 	peer->last_assoc_rcvd = 0;
4004 	peer->last_disassoc_rcvd = 0;
4005 	peer->last_deauth_rcvd = 0;
4006 
4007 	/*
4008 	 * hash based steering is disabled for Radios which are offloaded
4009 	 * to NSS
4010 	 */
4011 	if (!wlan_cfg_get_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx))
4012 		hash_based = wlan_cfg_is_rx_hash_enabled(soc->wlan_cfg_ctx);
4013 
4014 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
4015 		FL("hash based steering for pdev: %d is %d"),
4016 		pdev->pdev_id, hash_based);
4017 
4018 	/*
4019 	 * The assignment below ensures that the proper reo_dest ring is chosen
4020 	 * for cases where the Toeplitz hash cannot be generated (e.g. non-TCP/UDP)
4021 	 */
4022 	reo_dest = pdev->reo_dest;
4023 
4024 	if (soc->cdp_soc.ol_ops->peer_set_default_routing) {
4025 		/* TODO: Check the destination ring number to be passed to FW */
4026 		soc->cdp_soc.ol_ops->peer_set_default_routing(
4027 				pdev->ctrl_pdev, peer->mac_addr.raw,
4028 				peer->vdev->vdev_id, hash_based, reo_dest);
4029 	}
4030 
4031 	dp_peer_rx_init(pdev, peer);
4032 	return;
4033 }
4034 
4035 /*
4036  * dp_set_vdev_tx_encap_type() - set the encap type of the vdev
4037  * @vdev_handle: virtual device object
4038  * @htt_pkt_type: type of pkt
4039  *
4040  * Return: void
4041  */
4042 static void dp_set_vdev_tx_encap_type(struct cdp_vdev *vdev_handle,
4043 	 enum htt_cmn_pkt_type val)
4044 {
4045 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
4046 	vdev->tx_encap_type = val;
4047 }
4048 
4049 /*
4050  * dp_set_vdev_rx_decap_type() - set the decap type of the vdev
4051  * @vdev_handle: virtual device object
4052  * @htt_pkt_type: type of pkt
4053  *
4054  * Return: void
4055  */
4056 static void dp_set_vdev_rx_decap_type(struct cdp_vdev *vdev_handle,
4057 	 enum htt_cmn_pkt_type val)
4058 {
4059 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
4060 	vdev->rx_decap_type = val;
4061 }
4062 
4063 /*
4064  * dp_set_pdev_reo_dest() - set the reo destination ring for this pdev
4065  * @pdev_handle: physical device object
4066  * @val: reo destination ring index (1 - 4)
4067  *
4068  * Return: void
4069  */
4070 static void dp_set_pdev_reo_dest(struct cdp_pdev *pdev_handle,
4071 	 enum cdp_host_reo_dest_ring val)
4072 {
4073 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
4074 
4075 	if (pdev)
4076 		pdev->reo_dest = val;
4077 }
4078 
4079 /*
4080  * dp_get_pdev_reo_dest() - get the reo destination for this pdev
4081  * @pdev_handle: physical device object
4082  *
4083  * Return: reo destination ring index
4084  */
4085 static enum cdp_host_reo_dest_ring
4086 dp_get_pdev_reo_dest(struct cdp_pdev *pdev_handle)
4087 {
4088 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
4089 
4090 	if (pdev)
4091 		return pdev->reo_dest;
4092 	else
4093 		return cdp_host_reo_dest_ring_unknown;
4094 }
4095 
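/*
 * Illustrative usage sketch (hypothetical caller, not part of this file),
 * pairing the setter and getter above; per the setter's documentation the
 * valid ring indices are 1 - 4:
 *
 *	dp_set_pdev_reo_dest(pdev_handle, val);
 *	if (dp_get_pdev_reo_dest(pdev_handle) ==
 *	    cdp_host_reo_dest_ring_unknown)
 *		pdev_handle was NULL;
 */
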
4096 #ifdef QCA_SUPPORT_SON
4097 static void dp_son_peer_authorize(struct dp_peer *peer)
4098 {
4099 	struct dp_soc *soc;
4100 	soc = peer->vdev->pdev->soc;
4101 	peer->peer_bs_inact_flag = 0;
4102 	peer->peer_bs_inact = soc->pdev_bs_inact_reload;
4103 	return;
4104 }
4105 #else
4106 static void dp_son_peer_authorize(struct dp_peer *peer)
4107 {
4108 	return;
4109 }
4110 #endif
4111 /*
4112  * dp_set_filter_neighbour_peers() - set filter neighbour peers for smart mesh
4113  * @pdev_handle: device object
4114  * @val: value to be set
4115  *
4116  * Return: 0 on success
4117  */
4118 static int dp_set_filter_neighbour_peers(struct cdp_pdev *pdev_handle,
4119 	 uint32_t val)
4120 {
4121 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
4122 
4123 	/* Enable/Disable smart mesh filtering. This flag will be checked
4124 	 * during rx processing to check if packets are from NAC clients.
4125 	 */
4126 	pdev->filter_neighbour_peers = val;
4127 	return 0;
4128 }
4129 
4130 /*
4131  * dp_update_filter_neighbour_peers() - set neighbour peers(nac clients)
4132  * address for smart mesh filtering
4133  * @vdev_handle: virtual device object
4134  * @cmd: Add/Del command
4135  * @macaddr: nac client mac address
4136  *
4137  * Return: 1 on success, 0 on failure
4138  */
4139 static int dp_update_filter_neighbour_peers(struct cdp_vdev *vdev_handle,
4140 					    uint32_t cmd, uint8_t *macaddr)
4141 {
4142 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
4143 	struct dp_pdev *pdev = vdev->pdev;
4144 	struct dp_neighbour_peer *peer = NULL;
4145 
4146 	if (!macaddr)
4147 		goto fail0;
4148 
4149 	/* Store address of NAC (neighbour peer) which will be checked
4150 	 * against TA of received packets.
4151 	 */
4152 	if (cmd == DP_NAC_PARAM_ADD) {
4153 		peer = (struct dp_neighbour_peer *) qdf_mem_malloc(
4154 				sizeof(*peer));
4155 
4156 		if (!peer) {
4157 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
4158 				FL("DP neighbour peer node memory allocation failed"));
4159 			goto fail0;
4160 		}
4161 
4162 		qdf_mem_copy(&peer->neighbour_peers_macaddr.raw[0],
4163 			macaddr, DP_MAC_ADDR_LEN);
4164 		peer->vdev = vdev;
4165 
4166 		qdf_spin_lock_bh(&pdev->neighbour_peer_mutex);
4167 
4168 		/* add this neighbour peer into the list */
4169 		TAILQ_INSERT_TAIL(&pdev->neighbour_peers_list, peer,
4170 				neighbour_peer_list_elem);
4171 		qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex);
4172 
4173 		/* first neighbour */
4174 		if (!pdev->neighbour_peers_added) {
4175 			if (!pdev->mcopy_mode && !pdev->enhanced_stats_en)
4176 				dp_ppdu_ring_cfg(pdev);
4177 			pdev->neighbour_peers_added = true;
4178 		}
4179 		return 1;
4180 
4181 	} else if (cmd == DP_NAC_PARAM_DEL) {
4182 		qdf_spin_lock_bh(&pdev->neighbour_peer_mutex);
4183 		TAILQ_FOREACH(peer, &pdev->neighbour_peers_list,
4184 				neighbour_peer_list_elem) {
4185 			if (!qdf_mem_cmp(&peer->neighbour_peers_macaddr.raw[0],
4186 				macaddr, DP_MAC_ADDR_LEN)) {
4187 				/* delete this peer from the list */
4188 				TAILQ_REMOVE(&pdev->neighbour_peers_list,
4189 					peer, neighbour_peer_list_elem);
4190 				qdf_mem_free(peer);
4191 				break;
4192 			}
4193 		}
4194 		/* last neighbour deleted */
4195 		if (TAILQ_EMPTY(&pdev->neighbour_peers_list))
4196 			pdev->neighbour_peers_added = false;
4197 
4198 		qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex);
4199 
4200 		if (!pdev->mcopy_mode && !pdev->neighbour_peers_added &&
4201 		    !pdev->enhanced_stats_en)
4202 			dp_ppdu_ring_reset(pdev);
4203 		return 1;
4204 
4205 	}
4206 
4207 fail0:
4208 	return 0;
4209 }
4210 
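/*
 * Illustrative usage sketch (hypothetical caller, not part of this file):
 * smart-mesh/NAC clients are tracked by TA address, so the control path
 * adds and later removes entries roughly as:
 *
 *	dp_update_filter_neighbour_peers(vdev_handle, DP_NAC_PARAM_ADD,
 *					 nac_mac);
 *	...
 *	dp_update_filter_neighbour_peers(vdev_handle, DP_NAC_PARAM_DEL,
 *					 nac_mac);
 *
 * Adding the first entry also configures the PPDU status ring (skipped
 * when mcopy mode or enhanced stats is active), and removing the last one
 * resets it under the same conditions.
 */
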
4211 /*
4212  * dp_get_sec_type() - Get the security type
4213  * @peer:		Datapath peer handle
4214  * @sec_idx:    Security id (mcast, ucast)
4215  *
4216  * Return: sec_type (security type of the peer)
4217  */
4218 static int dp_get_sec_type(struct cdp_peer *peer, uint8_t sec_idx)
4219 {
4220 	struct dp_peer *dpeer = (struct dp_peer *)peer;
4221 
4222 	return dpeer->security[sec_idx].sec_type;
4223 }
4224 
4225 /*
4226  * dp_peer_authorize() - authorize txrx peer
4227  * @peer_handle:		Datapath peer handle
4228  * @authorize: flag indicating whether the peer is to be authorized
4229  *
4230  */
4231 static void dp_peer_authorize(struct cdp_peer *peer_handle, uint32_t authorize)
4232 {
4233 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
4234 	struct dp_soc *soc;
4235 
4236 	if (peer != NULL) {
4237 		soc = peer->vdev->pdev->soc;
4238 		qdf_spin_lock_bh(&soc->peer_ref_mutex);
4239 		dp_son_peer_authorize(peer);
4240 		peer->authorize = authorize ? 1 : 0;
4241 		qdf_spin_unlock_bh(&soc->peer_ref_mutex);
4242 	}
4243 }
4244 
4245 #ifdef QCA_SUPPORT_SON
4246 /*
4247  * dp_txrx_update_inact_threshold() - Update inact timer threshold
4248  * @pdev_handle: Device handle
4249  * @new_threshold : updated threshold value
4250  *
4251  */
4252 static void
4253 dp_txrx_update_inact_threshold(struct cdp_pdev *pdev_handle,
4254 			       u_int16_t new_threshold)
4255 {
4256 	struct dp_vdev *vdev;
4257 	struct dp_peer *peer;
4258 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
4259 	struct dp_soc *soc = pdev->soc;
4260 	u_int16_t old_threshold = soc->pdev_bs_inact_reload;
4261 
4262 	if (old_threshold == new_threshold)
4263 		return;
4264 
4265 	soc->pdev_bs_inact_reload = new_threshold;
4266 
4267 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
4268 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
4269 	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
4270 		if (vdev->opmode != wlan_op_mode_ap)
4271 			continue;
4272 
4273 		TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
4274 			if (!peer->authorize)
4275 				continue;
4276 
4277 			if (old_threshold - peer->peer_bs_inact >=
4278 					new_threshold) {
4279 				dp_mark_peer_inact((void *)peer, true);
4280 				peer->peer_bs_inact = 0;
4281 			} else {
4282 				peer->peer_bs_inact = new_threshold -
4283 					(old_threshold - peer->peer_bs_inact);
4284 			}
4285 		}
4286 	}
4287 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
4288 	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
4289 }
4290 
4291 /**
4292  * dp_txrx_reset_inact_count(): Reset inact count
4293  * @pdev_handle - device handle
4294  *
4295  * Return: void
4296  */
4297 static void
4298 dp_txrx_reset_inact_count(struct cdp_pdev *pdev_handle)
4299 {
4300 	struct dp_vdev *vdev = NULL;
4301 	struct dp_peer *peer = NULL;
4302 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
4303 	struct dp_soc *soc = pdev->soc;
4304 
4305 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
4306 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
4307 	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
4308 		if (vdev->opmode != wlan_op_mode_ap)
4309 			continue;
4310 
4311 		TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
4312 			if (!peer->authorize)
4313 				continue;
4314 
4315 			peer->peer_bs_inact = soc->pdev_bs_inact_reload;
4316 		}
4317 	}
4318 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
4319 	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
4320 }
4321 
4322 /**
4323  * dp_set_inact_params(): set inactivity params
4324  * @pdev_handle - device handle
4325  * @inact_check_interval - inactivity interval
4326  * @inact_normal - Inactivity normal
4327  * @inact_overload - Inactivity overload
4328  *
4329  * Return: bool
4330  */
4331 bool dp_set_inact_params(struct cdp_pdev *pdev_handle,
4332 			 u_int16_t inact_check_interval,
4333 			 u_int16_t inact_normal, u_int16_t inact_overload)
4334 {
4335 	struct dp_soc *soc;
4336 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
4337 
4338 	if (!pdev)
4339 		return false;
4340 
4341 	soc = pdev->soc;
4342 	if (!soc)
4343 		return false;
4344 
4345 	soc->pdev_bs_inact_interval = inact_check_interval;
4346 	soc->pdev_bs_inact_normal = inact_normal;
4347 	soc->pdev_bs_inact_overload = inact_overload;
4348 
4349 	dp_txrx_update_inact_threshold((struct cdp_pdev *)pdev,
4350 					soc->pdev_bs_inact_normal);
4351 
4352 	return true;
4353 }
4354 
4355 /**
4356  * dp_start_inact_timer(): Inactivity timer start
4357  * @pdev_handle - device handle
4358  * @enable - Inactivity timer start/stop
4359  *
4360  * Return: bool
4361  */
4362 bool dp_start_inact_timer(struct cdp_pdev *pdev_handle, bool enable)
4363 {
4364 	struct dp_soc *soc;
4365 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
4366 
4367 	if (!pdev)
4368 		return false;
4369 
4370 	soc = pdev->soc;
4371 	if (!soc)
4372 		return false;
4373 
4374 	if (enable) {
4375 		dp_txrx_reset_inact_count((struct cdp_pdev *)pdev);
4376 		qdf_timer_mod(&soc->pdev_bs_inact_timer,
4377 			      soc->pdev_bs_inact_interval * 1000);
4378 	} else {
4379 		qdf_timer_stop(&soc->pdev_bs_inact_timer);
4380 	}
4381 
4382 	return true;
4383 }
4384 
4385 /**
4386  * dp_set_overload(): Set inactivity overload
4387  * @pdev_handle - device handle
4388  * @overload - overload status
4389  *
4390  * Return: void
4391  */
4392 void dp_set_overload(struct cdp_pdev *pdev_handle, bool overload)
4393 {
4394 	struct dp_soc *soc;
4395 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
4396 
4397 	if (!pdev)
4398 		return;
4399 
4400 	soc = pdev->soc;
4401 	if (!soc)
4402 		return;
4403 
4404 	dp_txrx_update_inact_threshold((struct cdp_pdev *)pdev,
4405 			overload ? soc->pdev_bs_inact_overload :
4406 			soc->pdev_bs_inact_normal);
4407 }
4408 
4409 /**
4410  * dp_peer_is_inact(): check whether peer is inactive
4411  * @peer_handle - datapath peer handle
4412  *
4413  * Return: bool
4414  */
4415 bool dp_peer_is_inact(void *peer_handle)
4416 {
4417 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
4418 
4419 	if (!peer)
4420 		return false;
4421 
4422 	return peer->peer_bs_inact_flag == 1;
4423 }
4424 
4425 /**
4426  * dp_init_inact_timer: initialize the inact timer
4427  * @soc - SOC handle
4428  *
4429  * Return: void
4430  */
4431 void dp_init_inact_timer(struct dp_soc *soc)
4432 {
4433 	qdf_timer_init(soc->osdev, &soc->pdev_bs_inact_timer,
4434 		dp_txrx_peer_find_inact_timeout_handler,
4435 		(void *)soc, QDF_TIMER_TYPE_WAKE_APPS);
4436 }
4437 
4438 #else
4439 
4440 bool dp_set_inact_params(struct cdp_pdev *pdev, u_int16_t inact_check_interval,
4441 			 u_int16_t inact_normal, u_int16_t inact_overload)
4442 {
4443 	return false;
4444 }
4445 
4446 bool dp_start_inact_timer(struct cdp_pdev *pdev, bool enable)
4447 {
4448 	return false;
4449 }
4450 
4451 void dp_set_overload(struct cdp_pdev *pdev, bool overload)
4452 {
4453 	return;
4454 }
4455 
4456 void dp_init_inact_timer(struct dp_soc *soc)
4457 {
4458 	return;
4459 }
4460 
4461 bool dp_peer_is_inact(void *peer)
4462 {
4463 	return false;
4464 }
4465 #endif
4466 
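/*
 * Illustrative usage sketch (hypothetical caller, not part of this file):
 * with QCA_SUPPORT_SON the inactivity machinery above is driven roughly
 * as below; the stub versions simply return false or do nothing:
 *
 *	dp_init_inact_timer(soc);
 *	dp_set_inact_params(pdev_handle, check_interval, inact_normal,
 *			    inact_overload);
 *	dp_start_inact_timer(pdev_handle, true);
 *	...
 *	dp_set_overload(pdev_handle, true);
 *	if (dp_peer_is_inact(peer_handle))
 *		peer has been marked inactive;
 */
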
4467 /*
4468  * dp_peer_unref_delete() - unref and delete peer
4469  * @peer_handle:		Datapath peer handle
4470  *
4471  */
4472 void dp_peer_unref_delete(void *peer_handle)
4473 {
4474 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
4475 	struct dp_peer *bss_peer = NULL;
4476 	struct dp_vdev *vdev = peer->vdev;
4477 	struct dp_pdev *pdev = vdev->pdev;
4478 	struct dp_soc *soc = pdev->soc;
4479 	struct dp_peer *tmppeer;
4480 	int found = 0;
4481 	uint16_t peer_id;
4482 	uint16_t vdev_id;
4483 
4484 	/*
4485 	 * Hold the lock all the way from checking if the peer ref count
4486 	 * is zero until the peer references are removed from the hash
4487 	 * table and vdev list (if the peer ref count is zero).
4488 	 * This protects against a new HL tx operation starting to use the
4489 	 * peer object just after this function concludes that it is done being used.
4490 	 * Furthermore, the lock needs to be held while checking whether the
4491 	 * vdev's list of peers is empty, to make sure that list is not modified
4492 	 * concurrently with the empty check.
4493 	 */
4494 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
4495 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
4496 		  "%s: peer %pK ref_cnt(before decrement): %d", __func__,
4497 		  peer, qdf_atomic_read(&peer->ref_cnt));
4498 	if (qdf_atomic_dec_and_test(&peer->ref_cnt)) {
4499 		peer_id = peer->peer_ids[0];
4500 		vdev_id = vdev->vdev_id;
4501 
4502 		/*
4503 		 * Make sure that the reference to the peer in
4504 		 * peer object map is removed
4505 		 */
4506 		if (peer_id != HTT_INVALID_PEER)
4507 			soc->peer_id_to_obj_map[peer_id] = NULL;
4508 
4509 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
4510 			"Deleting peer %pK (%pM)", peer, peer->mac_addr.raw);
4511 
4512 		/* remove the reference to the peer from the hash table */
4513 		dp_peer_find_hash_remove(soc, peer);
4514 
4515 		qdf_spin_lock_bh(&soc->ast_lock);
4516 		if (peer->self_ast_entry) {
4517 			dp_peer_del_ast(soc, peer->self_ast_entry);
4518 			peer->self_ast_entry = NULL;
4519 		}
4520 		qdf_spin_unlock_bh(&soc->ast_lock);
4521 
4522 		TAILQ_FOREACH(tmppeer, &peer->vdev->peer_list, peer_list_elem) {
4523 			if (tmppeer == peer) {
4524 				found = 1;
4525 				break;
4526 			}
4527 		}
4528 		if (found) {
4529 			TAILQ_REMOVE(&peer->vdev->peer_list, peer,
4530 				peer_list_elem);
4531 		} else {
4532 			/*Ignoring the remove operation as peer not found*/
4533 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_WARN,
4534 				"peer %pK not found in vdev (%pK)->peer_list:%pK",
4535 				peer, vdev, &peer->vdev->peer_list);
4536 		}
4537 
4538 		/* cleanup the peer data */
4539 		dp_peer_cleanup(vdev, peer);
4540 
4541 		/* check whether the parent vdev has no peers left */
4542 		if (TAILQ_EMPTY(&vdev->peer_list)) {
4543 			/*
4544 			 * Now that there are no references to the peer, we can
4545 			 * release the peer reference lock.
4546 			 */
4547 			qdf_spin_unlock_bh(&soc->peer_ref_mutex);
4548 			/*
4549 			 * Check if the parent vdev was waiting for its peers
4550 			 * to be deleted, in order for it to be deleted too.
4551 			 */
4552 			if (vdev->delete.pending) {
4553 				ol_txrx_vdev_delete_cb vdev_delete_cb =
4554 					vdev->delete.callback;
4555 				void *vdev_delete_context =
4556 					vdev->delete.context;
4557 
4558 				QDF_TRACE(QDF_MODULE_ID_DP,
4559 					QDF_TRACE_LEVEL_INFO_HIGH,
4560 					FL("deleting vdev object %pK (%pM)"
4561 					" - its last peer is done"),
4562 					vdev, vdev->mac_addr.raw);
4563 				/* all peers are gone, go ahead and delete it */
4564 				dp_tx_flow_pool_unmap_handler(pdev, vdev_id,
4565 								FLOW_TYPE_VDEV,
4566 								vdev_id);
4567 				dp_tx_vdev_detach(vdev);
4568 				QDF_TRACE(QDF_MODULE_ID_DP,
4569 					QDF_TRACE_LEVEL_INFO_HIGH,
4570 					FL("deleting vdev object %pK (%pM)"),
4571 					vdev, vdev->mac_addr.raw);
4572 
4573 				qdf_mem_free(vdev);
4574 				vdev = NULL;
4575 				if (vdev_delete_cb)
4576 					vdev_delete_cb(vdev_delete_context);
4577 			}
4578 		} else {
4579 			qdf_spin_unlock_bh(&soc->peer_ref_mutex);
4580 		}
4581 
4582 		if (vdev) {
4583 			if (vdev->vap_bss_peer == peer) {
4584 				vdev->vap_bss_peer = NULL;
4585 			}
4586 		}
4587 
4588 		if (soc->cdp_soc.ol_ops->peer_unref_delete) {
4589 			soc->cdp_soc.ol_ops->peer_unref_delete(pdev->ctrl_pdev,
4590 					vdev_id, peer->mac_addr.raw);
4591 		}
4592 
4593 		if (!vdev || !vdev->vap_bss_peer) {
4594 			goto free_peer;
4595 		}
4596 
4597 #ifdef notyet
4598 		qdf_mempool_free(soc->osdev, soc->mempool_ol_ath_peer, peer);
4599 #else
4600 		bss_peer = vdev->vap_bss_peer;
4601 		DP_UPDATE_STATS(vdev, peer);
4602 
4603 free_peer:
4604 		qdf_mem_free(peer);
4605 
4606 #endif
4607 	} else {
4608 		qdf_spin_unlock_bh(&soc->peer_ref_mutex);
4609 	}
4610 }
4611 
4612 /*
4613  * dp_peer_delete_wifi3() - Detach txrx peer
4614  * @peer_handle: Datapath peer handle
4615  * @bitmap: bitmap indicating special handling of request.
4616  *
4617  */
4618 static void dp_peer_delete_wifi3(void *peer_handle, uint32_t bitmap)
4619 {
4620 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
4621 
4622 	/* redirect the peer's rx delivery function to point to a
4623 	 * discard func
4624 	 */
4625 
4626 	peer->rx_opt_proc = dp_rx_discard;
4627 	peer->ctrl_peer = NULL;
4628 
4629 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
4630 		FL("peer %pK (%pM)"),  peer, peer->mac_addr.raw);
4631 
4632 	dp_local_peer_id_free(peer->vdev->pdev, peer);
4633 	qdf_spinlock_destroy(&peer->peer_info_lock);
4634 
4635 	/*
4636 	 * Remove the reference added during peer_attach.
4637 	 * The peer will still be left allocated until the
4638 	 * PEER_UNMAP message arrives to remove the other
4639 	 * reference, added by the PEER_MAP message.
4640 	 */
4641 	dp_peer_unref_delete(peer_handle);
4642 }
4643 
4644 /*
4645  * dp_get_vdev_mac_addr_wifi3() - Get vdev MAC address
4646  * @pvdev: Datapath VDEV handle
4647  * Return: pointer to the vdev MAC address
4648  */
4649 static uint8 *dp_get_vdev_mac_addr_wifi3(struct cdp_vdev *pvdev)
4650 {
4651 	struct dp_vdev *vdev = (struct dp_vdev *)pvdev;
4652 	return vdev->mac_addr.raw;
4653 }
4654 
4655 /*
4656  * dp_vdev_set_wds() - Enable/Disable WDS on the vdev
4657  * @vdev_handle: DP VDEV handle
4658  * @val: 1 to enable WDS, 0 to disable
4659  *
4660  * Return: 0 on success
4661  */
4662 static int dp_vdev_set_wds(void *vdev_handle, uint32_t val)
4663 {
4664 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
4665 
4666 	vdev->wds_enabled = val;
4667 	return 0;
4668 }
4669 
4670 /*
4671  * dp_get_vdev_from_vdev_id_wifi3() - Get vdev handle from vdev_id
4672  * @dev: Datapath PDEV handle; @vdev_id: vdev id
4673  * Return: DP VDEV handle on success, NULL on failure
4674  */
4675 static struct cdp_vdev *dp_get_vdev_from_vdev_id_wifi3(struct cdp_pdev *dev,
4676 						uint8_t vdev_id)
4677 {
4678 	struct dp_pdev *pdev = (struct dp_pdev *)dev;
4679 	struct dp_vdev *vdev = NULL;
4680 
4681 	if (qdf_unlikely(!pdev))
4682 		return NULL;
4683 
4684 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
4685 	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
4686 		if (vdev->vdev_id == vdev_id)
4687 			break;
4688 	}
4689 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
4690 
4691 	return (struct cdp_vdev *)vdev;
4692 }
4693 
4694 static int dp_get_opmode(struct cdp_vdev *vdev_handle)
4695 {
4696 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
4697 
4698 	return vdev->opmode;
4699 }
4700 
4701 static struct cdp_cfg *dp_get_ctrl_pdev_from_vdev_wifi3(struct cdp_vdev *pvdev)
4702 {
4703 	struct dp_vdev *vdev = (struct dp_vdev *)pvdev;
4704 	struct dp_pdev *pdev = vdev->pdev;
4705 
4706 	return (struct cdp_cfg *)pdev->wlan_cfg_ctx;
4707 }
4708 
4709 /**
4710  * dp_reset_monitor_mode() - Disable monitor mode
4711  * @pdev_handle: Datapath PDEV handle
4712  *
4713  * Return: 0 on success, not 0 on failure
4714  */
4715 static int dp_reset_monitor_mode(struct cdp_pdev *pdev_handle)
4716 {
4717 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
4718 	struct htt_rx_ring_tlv_filter htt_tlv_filter;
4719 	struct dp_soc *soc = pdev->soc;
4720 	uint8_t pdev_id;
4721 	int mac_id;
4722 
4723 	pdev_id = pdev->pdev_id;
4724 	soc = pdev->soc;
4725 
4726 	qdf_spin_lock_bh(&pdev->mon_lock);
4727 
4728 	qdf_mem_set(&(htt_tlv_filter), sizeof(htt_tlv_filter), 0x0);
4729 
4730 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
4731 		int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, pdev_id);
4732 
4733 		htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
4734 			pdev->rxdma_mon_buf_ring[mac_id].hal_srng,
4735 			RXDMA_MONITOR_BUF, RX_BUFFER_SIZE, &htt_tlv_filter);
4736 
4737 		htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
4738 			pdev->rxdma_mon_status_ring[mac_id].hal_srng,
4739 			RXDMA_MONITOR_STATUS, RX_BUFFER_SIZE, &htt_tlv_filter);
4740 	}
4741 
4742 	pdev->monitor_vdev = NULL;
4743 
4744 	qdf_spin_unlock_bh(&pdev->mon_lock);
4745 
4746 	return 0;
4747 }
4748 
4749 /**
4750  * dp_set_nac() - set peer_nac
4751  * @peer_handle: Datapath PEER handle
4752  *
4753  * Return: void
4754  */
4755 static void dp_set_nac(struct cdp_peer *peer_handle)
4756 {
4757 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
4758 
4759 	peer->nac = 1;
4760 }
4761 
4762 /**
4763  * dp_get_tx_pending() - read pending tx
4764  * @pdev_handle: Datapath PDEV handle
4765  *
4766  * Return: outstanding tx
4767  */
4768 static int dp_get_tx_pending(struct cdp_pdev *pdev_handle)
4769 {
4770 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
4771 
4772 	return qdf_atomic_read(&pdev->num_tx_outstanding);
4773 }
4774 
4775 /**
4776  * dp_get_peer_mac_from_peer_id() - get peer mac
4777  * @pdev_handle: Datapath PDEV handle
4778  * @peer_id: Peer ID
4779  * @peer_mac: MAC addr of PEER
4780  *
4781  * Return: void
4782  */
4783 static void dp_get_peer_mac_from_peer_id(struct cdp_pdev *pdev_handle,
4784 	uint32_t peer_id, uint8_t *peer_mac)
4785 {
4786 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
4787 	struct dp_peer *peer;
4788 
4789 	if (pdev && peer_mac) {
4790 		peer = dp_peer_find_by_id(pdev->soc, (uint16_t)peer_id);
4791 		if (peer && peer->mac_addr.raw) {
4792 			qdf_mem_copy(peer_mac, peer->mac_addr.raw,
4793 					DP_MAC_ADDR_LEN);
4794 		}
4795 	}
4796 }
4797 
4798 /**
4799  * dp_vdev_set_monitor_mode() - Set DP VDEV to monitor mode
4800  * @vdev_handle: Datapath VDEV handle
4801  * @smart_monitor: Flag to denote if it is smart monitor mode
4802  *
4803  * Return: 0 on success, not 0 on failure
4804  */
4805 static int dp_vdev_set_monitor_mode(struct cdp_vdev *vdev_handle,
4806 		uint8_t smart_monitor)
4807 {
4808 	/* Many monitor VAPs can exist in a system, but only one can be up at
4809 	 * any time
4810 	 */
4811 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
4812 	struct dp_pdev *pdev;
4813 	struct htt_rx_ring_tlv_filter htt_tlv_filter;
4814 	struct dp_soc *soc;
4815 	uint8_t pdev_id;
4816 	int mac_id;
4817 
4818 	qdf_assert(vdev);
4819 
4820 	pdev = vdev->pdev;
4821 	pdev_id = pdev->pdev_id;
4822 	soc = pdev->soc;
4823 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_WARN,
4824 		"pdev=%pK, pdev_id=%d, soc=%pK vdev=%pK",
4825 		pdev, pdev_id, soc, vdev);
4826 
4827 	/*Check if current pdev's monitor_vdev exists */
4828 	if (pdev->monitor_vdev) {
4829 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
4830 			"vdev=%pK", vdev);
4831 		qdf_assert(vdev);
4832 	}
4833 
4834 	pdev->monitor_vdev = vdev;
4835 
4836 	/* If smart monitor mode, do not configure monitor ring */
4837 	if (smart_monitor)
4838 		return QDF_STATUS_SUCCESS;
4839 
4840 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
4841 		"MODE[%x] FP[%02x|%02x|%02x] MO[%02x|%02x|%02x]",
4842 		pdev->mon_filter_mode, pdev->fp_mgmt_filter,
4843 		pdev->fp_ctrl_filter, pdev->fp_data_filter,
4844 		pdev->mo_mgmt_filter, pdev->mo_ctrl_filter,
4845 		pdev->mo_data_filter);
4846 
4847 	qdf_mem_set(&(htt_tlv_filter), sizeof(htt_tlv_filter), 0x0);
4848 
4849 	htt_tlv_filter.mpdu_start = 1;
4850 	htt_tlv_filter.msdu_start = 1;
4851 	htt_tlv_filter.packet = 1;
4852 	htt_tlv_filter.msdu_end = 1;
4853 	htt_tlv_filter.mpdu_end = 1;
4854 	htt_tlv_filter.packet_header = 1;
4855 	htt_tlv_filter.attention = 1;
4856 	htt_tlv_filter.ppdu_start = 0;
4857 	htt_tlv_filter.ppdu_end = 0;
4858 	htt_tlv_filter.ppdu_end_user_stats = 0;
4859 	htt_tlv_filter.ppdu_end_user_stats_ext = 0;
4860 	htt_tlv_filter.ppdu_end_status_done = 0;
4861 	htt_tlv_filter.header_per_msdu = 1;
4862 	htt_tlv_filter.enable_fp =
4863 		(pdev->mon_filter_mode & MON_FILTER_PASS) ? 1 : 0;
4864 	htt_tlv_filter.enable_md = 0;
4865 	htt_tlv_filter.enable_mo =
4866 		(pdev->mon_filter_mode & MON_FILTER_OTHER) ? 1 : 0;
4867 	htt_tlv_filter.fp_mgmt_filter = pdev->fp_mgmt_filter;
4868 	htt_tlv_filter.fp_ctrl_filter = pdev->fp_ctrl_filter;
4869 	htt_tlv_filter.fp_data_filter = pdev->fp_data_filter;
4870 	htt_tlv_filter.mo_mgmt_filter = pdev->mo_mgmt_filter;
4871 	htt_tlv_filter.mo_ctrl_filter = pdev->mo_ctrl_filter;
4872 	htt_tlv_filter.mo_data_filter = pdev->mo_data_filter;
4873 
4874 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
4875 		int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, pdev_id);
4876 
4877 		htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
4878 			pdev->rxdma_mon_buf_ring[mac_id].hal_srng,
4879 			RXDMA_MONITOR_BUF, RX_BUFFER_SIZE, &htt_tlv_filter);
4880 	}
4881 
4882 	qdf_mem_set(&(htt_tlv_filter), sizeof(htt_tlv_filter), 0x0);
4883 
4884 	htt_tlv_filter.mpdu_start = 1;
4885 	htt_tlv_filter.msdu_start = 0;
4886 	htt_tlv_filter.packet = 0;
4887 	htt_tlv_filter.msdu_end = 0;
4888 	htt_tlv_filter.mpdu_end = 0;
4889 	htt_tlv_filter.attention = 0;
4890 	htt_tlv_filter.ppdu_start = 1;
4891 	htt_tlv_filter.ppdu_end = 1;
4892 	htt_tlv_filter.ppdu_end_user_stats = 1;
4893 	htt_tlv_filter.ppdu_end_user_stats_ext = 1;
4894 	htt_tlv_filter.ppdu_end_status_done = 1;
4895 	htt_tlv_filter.enable_fp = 1;
4896 	htt_tlv_filter.enable_md = 0;
4897 	htt_tlv_filter.enable_mo = 1;
4898 	if (pdev->mcopy_mode) {
4899 		htt_tlv_filter.packet_header = 1;
4900 	}
4901 	htt_tlv_filter.fp_mgmt_filter = FILTER_MGMT_ALL;
4902 	htt_tlv_filter.fp_ctrl_filter = FILTER_CTRL_ALL;
4903 	htt_tlv_filter.fp_data_filter = FILTER_DATA_ALL;
4904 	htt_tlv_filter.mo_mgmt_filter = FILTER_MGMT_ALL;
4905 	htt_tlv_filter.mo_ctrl_filter = FILTER_CTRL_ALL;
4906 	htt_tlv_filter.mo_data_filter = FILTER_DATA_ALL;
4907 
4908 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
4909 		int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id,
4910 						pdev->pdev_id);
4911 
4912 		htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
4913 			pdev->rxdma_mon_status_ring[mac_id].hal_srng,
4914 			RXDMA_MONITOR_STATUS, RX_BUFFER_SIZE, &htt_tlv_filter);
4915 	}
4916 
4917 	return QDF_STATUS_SUCCESS;
4918 }
4919 
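/*
 * Note: the first ring configuration above programs the RXDMA_MONITOR_BUF
 * ring with per-MSDU packet TLVs gated by the FP/MO filter settings (full
 * packet capture), while the second programs the RXDMA_MONITOR_STATUS
 * ring with PPDU-level TLVs only; in mcopy mode the packet header TLV is
 * additionally enabled on the status ring.
 */
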
4920 /**
4921  * dp_pdev_set_advance_monitor_filter() - Set DP PDEV monitor filter
4922  * @pdev_handle: Datapath PDEV handle
4923  * @filter_val: Filter settings to be applied in monitor mode
4924  * Return: 0 on success, not 0 on failure
4925  */
4926 static int dp_pdev_set_advance_monitor_filter(struct cdp_pdev *pdev_handle,
4927 	struct cdp_monitor_filter *filter_val)
4928 {
4929 	/* Many monitor VAPs can exist in a system, but only one can be up at
4930 	 * any time
4931 	 */
4932 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
4933 	struct dp_vdev *vdev = pdev->monitor_vdev;
4934 	struct htt_rx_ring_tlv_filter htt_tlv_filter;
4935 	struct dp_soc *soc;
4936 	uint8_t pdev_id;
4937 	int mac_id;
4938 
4939 	pdev_id = pdev->pdev_id;
4940 	soc = pdev->soc;
4941 
4942 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_WARN,
4943 		"pdev=%pK, pdev_id=%d, soc=%pK vdev=%pK",
4944 		pdev, pdev_id, soc, vdev);
4945 
4946 	/*Check if current pdev's monitor_vdev exists */
4947 	if (!pdev->monitor_vdev) {
4948 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
4949 			"vdev=%pK", vdev);
4950 		qdf_assert(vdev);
4951 	}
4952 
4953 	/* update filter mode, type in pdev structure */
4954 	pdev->mon_filter_mode = filter_val->mode;
4955 	pdev->fp_mgmt_filter = filter_val->fp_mgmt;
4956 	pdev->fp_ctrl_filter = filter_val->fp_ctrl;
4957 	pdev->fp_data_filter = filter_val->fp_data;
4958 	pdev->mo_mgmt_filter = filter_val->mo_mgmt;
4959 	pdev->mo_ctrl_filter = filter_val->mo_ctrl;
4960 	pdev->mo_data_filter = filter_val->mo_data;
4961 
4962 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
4963 		"MODE[%x] FP[%02x|%02x|%02x] MO[%02x|%02x|%02x]",
4964 		pdev->mon_filter_mode, pdev->fp_mgmt_filter,
4965 		pdev->fp_ctrl_filter, pdev->fp_data_filter,
4966 		pdev->mo_mgmt_filter, pdev->mo_ctrl_filter,
4967 		pdev->mo_data_filter);
4968 
4969 	qdf_mem_set(&(htt_tlv_filter), sizeof(htt_tlv_filter), 0x0);
4970 
4971 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
4972 		int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, pdev_id);
4973 
4974 		htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
4975 			pdev->rxdma_mon_buf_ring[mac_id].hal_srng,
4976 			RXDMA_MONITOR_BUF, RX_BUFFER_SIZE, &htt_tlv_filter);
4977 
4978 		htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
4979 			pdev->rxdma_mon_status_ring[mac_id].hal_srng,
4980 			RXDMA_MONITOR_STATUS, RX_BUFFER_SIZE, &htt_tlv_filter);
4981 	}
4982 
4983 	htt_tlv_filter.mpdu_start = 1;
4984 	htt_tlv_filter.msdu_start = 1;
4985 	htt_tlv_filter.packet = 1;
4986 	htt_tlv_filter.msdu_end = 1;
4987 	htt_tlv_filter.mpdu_end = 1;
4988 	htt_tlv_filter.packet_header = 1;
4989 	htt_tlv_filter.attention = 1;
4990 	htt_tlv_filter.ppdu_start = 0;
4991 	htt_tlv_filter.ppdu_end = 0;
4992 	htt_tlv_filter.ppdu_end_user_stats = 0;
4993 	htt_tlv_filter.ppdu_end_user_stats_ext = 0;
4994 	htt_tlv_filter.ppdu_end_status_done = 0;
4995 	htt_tlv_filter.header_per_msdu = 1;
4996 	htt_tlv_filter.enable_fp =
4997 		(pdev->mon_filter_mode & MON_FILTER_PASS) ? 1 : 0;
4998 	htt_tlv_filter.enable_md = 0;
4999 	htt_tlv_filter.enable_mo =
5000 		(pdev->mon_filter_mode & MON_FILTER_OTHER) ? 1 : 0;
5001 	htt_tlv_filter.fp_mgmt_filter = pdev->fp_mgmt_filter;
5002 	htt_tlv_filter.fp_ctrl_filter = pdev->fp_ctrl_filter;
5003 	htt_tlv_filter.fp_data_filter = pdev->fp_data_filter;
5004 	htt_tlv_filter.mo_mgmt_filter = pdev->mo_mgmt_filter;
5005 	htt_tlv_filter.mo_ctrl_filter = pdev->mo_ctrl_filter;
5006 	htt_tlv_filter.mo_data_filter = pdev->mo_data_filter;
5007 
5008 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
5009 		int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, pdev_id);
5010 
5011 		htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
5012 			pdev->rxdma_mon_buf_ring[mac_id].hal_srng,
5013 			RXDMA_MONITOR_BUF, RX_BUFFER_SIZE, &htt_tlv_filter);
5014 	}
5015 
5016 	qdf_mem_set(&(htt_tlv_filter), sizeof(htt_tlv_filter), 0x0);
5017 
5018 	htt_tlv_filter.mpdu_start = 1;
5019 	htt_tlv_filter.msdu_start = 0;
5020 	htt_tlv_filter.packet = 0;
5021 	htt_tlv_filter.msdu_end = 0;
5022 	htt_tlv_filter.mpdu_end = 0;
5023 	htt_tlv_filter.attention = 0;
5024 	htt_tlv_filter.ppdu_start = 1;
5025 	htt_tlv_filter.ppdu_end = 1;
5026 	htt_tlv_filter.ppdu_end_user_stats = 1;
5027 	htt_tlv_filter.ppdu_end_user_stats_ext = 1;
5028 	htt_tlv_filter.ppdu_end_status_done = 1;
5029 	htt_tlv_filter.enable_fp = 1;
5030 	htt_tlv_filter.enable_md = 0;
5031 	htt_tlv_filter.enable_mo = 1;
5032 	if (pdev->mcopy_mode) {
5033 		htt_tlv_filter.packet_header = 1;
5034 	}
5035 	htt_tlv_filter.fp_mgmt_filter = FILTER_MGMT_ALL;
5036 	htt_tlv_filter.fp_ctrl_filter = FILTER_CTRL_ALL;
5037 	htt_tlv_filter.fp_data_filter = FILTER_DATA_ALL;
5038 	htt_tlv_filter.mo_mgmt_filter = FILTER_MGMT_ALL;
5039 	htt_tlv_filter.mo_ctrl_filter = FILTER_CTRL_ALL;
5040 	htt_tlv_filter.mo_data_filter = FILTER_DATA_ALL;
5041 
5042 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
5043 		int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id,
5044 						pdev->pdev_id);
5045 
5046 		htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
5047 			pdev->rxdma_mon_status_ring[mac_id].hal_srng,
5048 			RXDMA_MONITOR_STATUS, RX_BUFFER_SIZE, &htt_tlv_filter);
5049 	}
5050 
5051 	return QDF_STATUS_SUCCESS;
5052 }
5053 
5054 /**
5055  * dp_get_pdev_id_frm_pdev() - get pdev_id
5056  * @pdev_handle: Datapath PDEV handle
5057  *
5058  * Return: pdev_id
5059  */
5060 static
5061 uint8_t dp_get_pdev_id_frm_pdev(struct cdp_pdev *pdev_handle)
5062 {
5063 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
5064 
5065 	return pdev->pdev_id;
5066 }
5067 
5068 /**
5069  * dp_pdev_set_chan_noise_floor() - set channel noise floor
5070  * @pdev_handle: Datapath PDEV handle
5071  * @chan_noise_floor: Channel Noise Floor
5072  *
5073  * Return: void
5074  */
5075 static
5076 void dp_pdev_set_chan_noise_floor(struct cdp_pdev *pdev_handle,
5077 				  int16_t chan_noise_floor)
5078 {
5079 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
5080 
5081 	pdev->chan_noise_floor = chan_noise_floor;
5082 }
5083 
5084 /**
5085  * dp_vdev_get_filter_ucast_data() - get DP VDEV monitor ucast filter
5086  * @vdev_handle: Datapath VDEV handle
5087  * Return: true if the unicast data filter flag is set
5088  */
5089 static bool dp_vdev_get_filter_ucast_data(struct cdp_vdev *vdev_handle)
5090 {
5091 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
5092 	struct dp_pdev *pdev;
5093 
5094 	pdev = vdev->pdev;
5095 
5096 	if ((pdev->fp_data_filter & FILTER_DATA_UCAST) ||
5097 	    (pdev->mo_data_filter & FILTER_DATA_UCAST))
5098 		return true;
5099 
5100 	return false;
5101 }
5102 
5103 /**
5104  * dp_vdev_get_filter_mcast_data() - get DP VDEV monitor mcast filter
5105  * @vdev_handle: Datapath VDEV handle
5106  * Return: true if the multicast data filter flag is set
5107  */
5108 static bool dp_vdev_get_filter_mcast_data(struct cdp_vdev *vdev_handle)
5109 {
5110 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
5111 	struct dp_pdev *pdev;
5112 
5113 	pdev = vdev->pdev;
5114 
5115 	if ((pdev->fp_data_filter & FILTER_DATA_MCAST) ||
5116 	    (pdev->mo_data_filter & FILTER_DATA_MCAST))
5117 		return true;
5118 
5119 	return false;
5120 }
5121 
5122 /**
5123  * dp_vdev_get_filter_non_data() - get DP VDEV monitor non_data filter
5124  * @vdev_handle: Datapath VDEV handle
5125  * Return: true if the non-data (mgmt and ctrl) filter flags are set
5126  */
5127 static bool dp_vdev_get_filter_non_data(struct cdp_vdev *vdev_handle)
5128 {
5129 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
5130 	struct dp_pdev *pdev;
5131 
5132 	pdev = vdev->pdev;
5133 
5134 	if ((pdev->fp_mgmt_filter & FILTER_MGMT_ALL) ||
5135 	    (pdev->mo_mgmt_filter & FILTER_MGMT_ALL)) {
5136 		if ((pdev->fp_ctrl_filter & FILTER_CTRL_ALL) ||
5137 		    (pdev->mo_ctrl_filter & FILTER_CTRL_ALL)) {
5138 			return true;
5139 		}
5140 	}
5141 
5142 	return false;
5143 }
5144 
5145 #ifdef MESH_MODE_SUPPORT
5146 void dp_peer_set_mesh_mode(struct cdp_vdev *vdev_hdl, uint32_t val)
5147 {
5148 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_hdl;
5149 
5150 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
5151 		FL("val %d"), val);
5152 	vdev->mesh_vdev = val;
5153 }
5154 
5155 /*
5156  * dp_peer_set_mesh_rx_filter() - to set the mesh rx filter
5157  * @vdev_hdl: virtual device object
5158  * @val: value to be set
5159  *
5160  * Return: void
5161  */
5162 void dp_peer_set_mesh_rx_filter(struct cdp_vdev *vdev_hdl, uint32_t val)
5163 {
5164 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_hdl;
5165 
5166 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
5167 		FL("val %d"), val);
5168 	vdev->mesh_rx_filter = val;
5169 }
5170 #endif
5171 
5172 /*
5173  * dp_aggregate_pdev_ctrl_frames_stats() - function to aggregate peer stats
5174  * Current scope is BAR received count
5175  *
5176  * @pdev_handle: DP_PDEV handle
5177  *
5178  * Return: void
5179  */
5180 #define STATS_PROC_TIMEOUT        (HZ/1000)
5181 
5182 static void
5183 dp_aggregate_pdev_ctrl_frames_stats(struct dp_pdev *pdev)
5184 {
5185 	struct dp_vdev *vdev;
5186 	struct dp_peer *peer;
5187 	uint32_t waitcnt;
5188 
5189 	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
5190 		TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
5191 			if (!peer) {
5192 				QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
5193 					FL("DP Invalid Peer reference"));
5194 				return;
5195 			}
5196 
5197 			if (peer->delete_in_progress) {
5198 				QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
5199 					FL("DP Peer deletion in progress"));
5200 				continue;
5201 			}
5202 
5203 			qdf_atomic_inc(&peer->ref_cnt);
5204 			waitcnt = 0;
5205 			dp_peer_rxtid_stats(peer, dp_rx_bar_stats_cb, pdev);
5206 			while (!(qdf_atomic_read(&(pdev->stats_cmd_complete)))
5207 				&& waitcnt < 10) {
5208 				schedule_timeout_interruptible(
5209 						STATS_PROC_TIMEOUT);
5210 				waitcnt++;
5211 			}
5212 			qdf_atomic_set(&(pdev->stats_cmd_complete), 0);
5213 			dp_peer_unref_delete(peer);
5214 		}
5215 	}
5216 }
5217 
5218 /**
5219  * dp_rx_bar_stats_cb(): BAR received stats callback
5220  * @soc: SOC handle
5221  * @cb_ctxt: Call back context
5222  * @reo_status: Reo status
5223  *
5224  * return: void
5225  */
5226 void dp_rx_bar_stats_cb(struct dp_soc *soc, void *cb_ctxt,
5227 	union hal_reo_status *reo_status)
5228 {
5229 	struct dp_pdev *pdev = (struct dp_pdev *)cb_ctxt;
5230 	struct hal_reo_queue_status *queue_status = &(reo_status->queue_status);
5231 
5232 	if (queue_status->header.status != HAL_REO_CMD_SUCCESS) {
5233 		DP_TRACE_STATS(FATAL, "REO stats failure %d \n",
5234 			queue_status->header.status);
5235 		qdf_atomic_set(&(pdev->stats_cmd_complete), 1);
5236 		return;
5237 	}
5238 
5239 	pdev->stats.rx.bar_recv_cnt += queue_status->bar_rcvd_cnt;
5240 	qdf_atomic_set(&(pdev->stats_cmd_complete), 1);
5241 
5242 }
5243 
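/*
 * Note: this callback pairs with dp_aggregate_pdev_ctrl_frames_stats()
 * above, which issues the REO queue stats command per peer through
 * dp_peer_rxtid_stats() and then polls pdev->stats_cmd_complete (up to
 * 10 * STATS_PROC_TIMEOUT) until this callback sets it.
 */
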
5244 /**
5245  * dp_aggregate_vdev_stats(): Consolidate stats at VDEV level
5246  * @vdev: DP VDEV handle
5247  * @vdev_stats: buffer to hold the aggregated vdev stats
5248  * return: void
5249  */
5250 void dp_aggregate_vdev_stats(struct dp_vdev *vdev,
5251 			     struct cdp_vdev_stats *vdev_stats)
5252 {
5253 	struct dp_peer *peer = NULL;
5254 	struct dp_soc *soc = vdev->pdev->soc;
5255 
5256 	qdf_mem_copy(vdev_stats, &vdev->stats, sizeof(vdev->stats));
5257 
5258 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem)
5259 		dp_update_vdev_stats(vdev_stats, peer);
5260 
5261 	if (soc->cdp_soc.ol_ops->update_dp_stats)
5262 		soc->cdp_soc.ol_ops->update_dp_stats(vdev->pdev->ctrl_pdev,
5263 			&vdev->stats, (uint16_t) vdev->vdev_id,
5264 			UPDATE_VDEV_STATS);
5265 
5266 }
5267 
5268 /**
5269  * dp_aggregate_pdev_stats(): Consolidate stats at PDEV level
5270  * @pdev: DP PDEV handle
5271  *
5272  * return: void
5273  */
5274 static inline void dp_aggregate_pdev_stats(struct dp_pdev *pdev)
5275 {
5276 	struct dp_vdev *vdev = NULL;
5277 	struct dp_soc *soc = pdev->soc;
5278 	struct cdp_vdev_stats *vdev_stats =
5279 			qdf_mem_malloc(sizeof(struct cdp_vdev_stats));
5280 
5281 	if (!vdev_stats) {
5282 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
5283 			  "DP alloc failure - unable to allocate vdev stats");
5284 		return;
5285 	}
5286 
5287 	qdf_mem_set(&(pdev->stats.tx), sizeof(pdev->stats.tx), 0x0);
5288 	qdf_mem_set(&(pdev->stats.rx), sizeof(pdev->stats.rx), 0x0);
5289 	qdf_mem_set(&(pdev->stats.tx_i), sizeof(pdev->stats.tx_i), 0x0);
5290 
5291 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
5292 	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
5293 
5294 		dp_aggregate_vdev_stats(vdev, vdev_stats);
5295 		dp_update_pdev_stats(pdev, vdev_stats);
5296 
5297 		DP_STATS_AGGR_PKT(pdev, vdev, tx_i.nawds_mcast);
5298 
5299 		DP_STATS_AGGR_PKT(pdev, vdev, tx_i.rcvd);
5300 		DP_STATS_AGGR_PKT(pdev, vdev, tx_i.processed);
5301 		DP_STATS_AGGR_PKT(pdev, vdev, tx_i.reinject_pkts);
5302 		DP_STATS_AGGR_PKT(pdev, vdev, tx_i.inspect_pkts);
5303 		DP_STATS_AGGR_PKT(pdev, vdev, tx_i.raw.raw_pkt);
5304 		DP_STATS_AGGR(pdev, vdev, tx_i.raw.dma_map_error);
5305 		DP_STATS_AGGR_PKT(pdev, vdev, tx_i.tso.tso_pkt);
5306 		DP_STATS_AGGR(pdev, vdev, tx_i.tso.dropped_host.num);
5307 		DP_STATS_AGGR(pdev, vdev, tx_i.tso.dropped_target);
5308 		DP_STATS_AGGR(pdev, vdev, tx_i.sg.dropped_host.num);
5309 		DP_STATS_AGGR(pdev, vdev, tx_i.sg.dropped_target);
5310 		DP_STATS_AGGR_PKT(pdev, vdev, tx_i.sg.sg_pkt);
5311 		DP_STATS_AGGR_PKT(pdev, vdev, tx_i.mcast_en.mcast_pkt);
5312 		DP_STATS_AGGR(pdev, vdev,
5313 				tx_i.mcast_en.dropped_map_error);
5314 		DP_STATS_AGGR(pdev, vdev,
5315 				tx_i.mcast_en.dropped_self_mac);
5316 		DP_STATS_AGGR(pdev, vdev,
5317 				tx_i.mcast_en.dropped_send_fail);
5318 		DP_STATS_AGGR(pdev, vdev, tx_i.mcast_en.ucast);
5319 		DP_STATS_AGGR(pdev, vdev, tx_i.dropped.dma_error);
5320 		DP_STATS_AGGR(pdev, vdev, tx_i.dropped.ring_full);
5321 		DP_STATS_AGGR(pdev, vdev, tx_i.dropped.enqueue_fail);
5322 		DP_STATS_AGGR(pdev, vdev, tx_i.dropped.desc_na.num);
5323 		DP_STATS_AGGR(pdev, vdev, tx_i.dropped.res_full);
5324 		DP_STATS_AGGR(pdev, vdev, tx_i.cce_classified);
5325 		DP_STATS_AGGR(pdev, vdev, tx_i.cce_classified_raw);
5326 		DP_STATS_AGGR(pdev, vdev, tx_i.mesh.exception_fw);
5327 		DP_STATS_AGGR(pdev, vdev, tx_i.mesh.completion_fw);
5328 
5329 		pdev->stats.tx_i.dropped.dropped_pkt.num =
5330 			pdev->stats.tx_i.dropped.dma_error +
5331 			pdev->stats.tx_i.dropped.ring_full +
5332 			pdev->stats.tx_i.dropped.enqueue_fail +
5333 			pdev->stats.tx_i.dropped.desc_na.num +
5334 			pdev->stats.tx_i.dropped.res_full;
5335 
5336 		pdev->stats.tx.last_ack_rssi =
5337 			vdev->stats.tx.last_ack_rssi;
5338 		pdev->stats.tx_i.tso.num_seg =
5339 			vdev->stats.tx_i.tso.num_seg;
5340 	}
5341 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
5342 	qdf_mem_free(vdev_stats);
5343 
5344 	if (soc->cdp_soc.ol_ops->update_dp_stats)
5345 		soc->cdp_soc.ol_ops->update_dp_stats(pdev->ctrl_pdev,
5346 				&pdev->stats, pdev->pdev_id, UPDATE_PDEV_STATS);
5347 
5348 }
5349 
5350 /**
5351  * dp_vdev_getstats() - get vdev packet level stats
5352  * @vdev_handle: Datapath VDEV handle
5353  * @stats: cdp network device stats structure
5354  *
5355  * Return: void
5356  */
5357 static void dp_vdev_getstats(void *vdev_handle,
5358 		struct cdp_dev_stats *stats)
5359 {
5360 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
5361 	struct cdp_vdev_stats *vdev_stats =
5362 			qdf_mem_malloc(sizeof(struct cdp_vdev_stats));
5363 
5364 	if (!vdev_stats) {
5365 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
5366 			  "DP alloc failure - unable to allocate vdev stats");
5367 		return;
5368 	}
5369 
5370 	dp_aggregate_vdev_stats(vdev, vdev_stats);
5371 
5372 	stats->tx_packets = vdev_stats->tx_i.rcvd.num;
5373 	stats->tx_bytes = vdev_stats->tx_i.rcvd.bytes;
5374 
5375 	stats->tx_errors = vdev_stats->tx.tx_failed +
5376 		vdev_stats->tx_i.dropped.dropped_pkt.num;
5377 	stats->tx_dropped = stats->tx_errors;
5378 
5379 	stats->rx_packets = vdev_stats->rx.unicast.num +
5380 		vdev_stats->rx.multicast.num +
5381 		vdev_stats->rx.bcast.num;
5382 	stats->rx_bytes = vdev_stats->rx.unicast.bytes +
5383 		vdev_stats->rx.multicast.bytes +
5384 		vdev_stats->rx.bcast.bytes;
5385 
5386 }
5387 
5388 
5389 /**
5390  * dp_pdev_getstats() - get pdev packet level stats
5391  * @pdev_handle: Datapath PDEV handle
5392  * @stats: cdp network device stats structure
5393  *
5394  * Return: void
5395  */
5396 static void dp_pdev_getstats(void *pdev_handle,
5397 		struct cdp_dev_stats *stats)
5398 {
5399 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
5400 
5401 	dp_aggregate_pdev_stats(pdev);
5402 
5403 	stats->tx_packets = pdev->stats.tx_i.rcvd.num;
5404 	stats->tx_bytes = pdev->stats.tx_i.rcvd.bytes;
5405 
5406 	stats->tx_errors = pdev->stats.tx.tx_failed +
5407 		pdev->stats.tx_i.dropped.dropped_pkt.num;
5408 	stats->tx_dropped = stats->tx_errors;
5409 
5410 	stats->rx_packets = pdev->stats.rx.unicast.num +
5411 		pdev->stats.rx.multicast.num +
5412 		pdev->stats.rx.bcast.num;
5413 	stats->rx_bytes = pdev->stats.rx.unicast.bytes +
5414 		pdev->stats.rx.multicast.bytes +
5415 		pdev->stats.rx.bcast.bytes;
5416 }
5417 
5418 /**
5419  * dp_get_device_stats() - get interface level packet stats
5420  * @handle: device handle
5421  * @stats: cdp network device stats structure
5422  * @type: device type pdev/vdev
5423  *
5424  * Return: void
5425  */
5426 static void dp_get_device_stats(void *handle,
5427 		struct cdp_dev_stats *stats, uint8_t type)
5428 {
5429 	switch (type) {
5430 	case UPDATE_VDEV_STATS:
5431 		dp_vdev_getstats(handle, stats);
5432 		break;
5433 	case UPDATE_PDEV_STATS:
5434 		dp_pdev_getstats(handle, stats);
5435 		break;
5436 	default:
5437 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
5438 			"apstats cannot be updated for this input type %d",
5439 			type);
5440 		break;
5441 	}
5442 
5443 }
5444 
5445 
5446 /**
5447  * dp_print_pdev_tx_stats(): Print Pdev level TX stats
5448  * @pdev: DP_PDEV Handle
5449  *
5450  * Return:void
5451  */
5452 static inline void
5453 dp_print_pdev_tx_stats(struct dp_pdev *pdev)
5454 {
5455 	uint8_t index = 0;
5456 	DP_PRINT_STATS("PDEV Tx Stats:\n");
5457 	DP_PRINT_STATS("Received From Stack:");
5458 	DP_PRINT_STATS("	Packets = %d",
5459 			pdev->stats.tx_i.rcvd.num);
5460 	DP_PRINT_STATS("	Bytes = %llu",
5461 			pdev->stats.tx_i.rcvd.bytes);
5462 	DP_PRINT_STATS("Processed:");
5463 	DP_PRINT_STATS("	Packets = %d",
5464 			pdev->stats.tx_i.processed.num);
5465 	DP_PRINT_STATS("	Bytes = %llu",
5466 			pdev->stats.tx_i.processed.bytes);
5467 	DP_PRINT_STATS("Total Completions:");
5468 	DP_PRINT_STATS("	Packets = %u",
5469 			pdev->stats.tx.comp_pkt.num);
5470 	DP_PRINT_STATS("	Bytes = %llu",
5471 			pdev->stats.tx.comp_pkt.bytes);
5472 	DP_PRINT_STATS("Successful Completions:");
5473 	DP_PRINT_STATS("	Packets = %u",
5474 			pdev->stats.tx.tx_success.num);
5475 	DP_PRINT_STATS("	Bytes = %llu",
5476 			pdev->stats.tx.tx_success.bytes);
5477 	DP_PRINT_STATS("Dropped:");
5478 	DP_PRINT_STATS("	Total = %d",
5479 			pdev->stats.tx_i.dropped.dropped_pkt.num);
5480 	DP_PRINT_STATS("	Dma_map_error = %d",
5481 			pdev->stats.tx_i.dropped.dma_error);
5482 	DP_PRINT_STATS("	Ring Full = %d",
5483 			pdev->stats.tx_i.dropped.ring_full);
5484 	DP_PRINT_STATS("	Descriptor Not available = %d",
5485 			pdev->stats.tx_i.dropped.desc_na.num);
5486 	DP_PRINT_STATS("	HW enqueue failed = %d",
5487 			pdev->stats.tx_i.dropped.enqueue_fail);
5488 	DP_PRINT_STATS("	Resources Full = %d",
5489 			pdev->stats.tx_i.dropped.res_full);
5490 	DP_PRINT_STATS("	FW removed = %d",
5491 			pdev->stats.tx.dropped.fw_rem);
5492 	DP_PRINT_STATS("	FW removed transmitted = %d",
5493 			pdev->stats.tx.dropped.fw_rem_tx);
5494 	DP_PRINT_STATS("	FW removed untransmitted = %d",
5495 			pdev->stats.tx.dropped.fw_rem_notx);
5496 	DP_PRINT_STATS("	FW removed untransmitted fw_reason1 = %d",
5497 			pdev->stats.tx.dropped.fw_reason1);
5498 	DP_PRINT_STATS("	FW removed untransmitted fw_reason2 = %d",
5499 			pdev->stats.tx.dropped.fw_reason2);
5500 	DP_PRINT_STATS("	FW removed untransmitted fw_reason3 = %d",
5501 			pdev->stats.tx.dropped.fw_reason3);
5502 	DP_PRINT_STATS("	Aged Out from msdu/mpdu queues = %d",
5503 			pdev->stats.tx.dropped.age_out);
5504 	DP_PRINT_STATS("	Multicast:");
5505 	DP_PRINT_STATS("	Packets: %u",
5506 		       pdev->stats.tx.mcast.num);
5507 	DP_PRINT_STATS("	Bytes: %llu",
5508 		       pdev->stats.tx.mcast.bytes);
5509 	DP_PRINT_STATS("Scatter Gather:");
5510 	DP_PRINT_STATS("	Packets = %d",
5511 			pdev->stats.tx_i.sg.sg_pkt.num);
5512 	DP_PRINT_STATS("	Bytes = %llu",
5513 			pdev->stats.tx_i.sg.sg_pkt.bytes);
5514 	DP_PRINT_STATS("	Dropped By Host = %d",
5515 			pdev->stats.tx_i.sg.dropped_host.num);
5516 	DP_PRINT_STATS("	Dropped By Target = %d",
5517 			pdev->stats.tx_i.sg.dropped_target);
5518 	DP_PRINT_STATS("TSO:");
5519 	DP_PRINT_STATS("	Number of Segments = %d",
5520 			pdev->stats.tx_i.tso.num_seg);
5521 	DP_PRINT_STATS("	Packets = %d",
5522 			pdev->stats.tx_i.tso.tso_pkt.num);
5523 	DP_PRINT_STATS("	Bytes = %llu",
5524 			pdev->stats.tx_i.tso.tso_pkt.bytes);
5525 	DP_PRINT_STATS("	Dropped By Host = %d",
5526 			pdev->stats.tx_i.tso.dropped_host.num);
5527 	DP_PRINT_STATS("Mcast Enhancement:");
5528 	DP_PRINT_STATS("	Packets = %d",
5529 			pdev->stats.tx_i.mcast_en.mcast_pkt.num);
5530 	DP_PRINT_STATS("	Bytes = %llu",
5531 			pdev->stats.tx_i.mcast_en.mcast_pkt.bytes);
5532 	DP_PRINT_STATS("	Dropped: Map Errors = %d",
5533 			pdev->stats.tx_i.mcast_en.dropped_map_error);
5534 	DP_PRINT_STATS("	Dropped: Self Mac = %d",
5535 			pdev->stats.tx_i.mcast_en.dropped_self_mac);
5536 	DP_PRINT_STATS("	Dropped: Send Fail = %d",
5537 			pdev->stats.tx_i.mcast_en.dropped_send_fail);
5538 	DP_PRINT_STATS("	Unicast sent = %d",
5539 			pdev->stats.tx_i.mcast_en.ucast);
5540 	DP_PRINT_STATS("Raw:");
5541 	DP_PRINT_STATS("	Packets = %d",
5542 			pdev->stats.tx_i.raw.raw_pkt.num);
5543 	DP_PRINT_STATS("	Bytes = %llu",
5544 			pdev->stats.tx_i.raw.raw_pkt.bytes);
5545 	DP_PRINT_STATS("	DMA map error = %d",
5546 			pdev->stats.tx_i.raw.dma_map_error);
5547 	DP_PRINT_STATS("Reinjected:");
5548 	DP_PRINT_STATS("	Packets = %d",
5549 			pdev->stats.tx_i.reinject_pkts.num);
5550 	DP_PRINT_STATS("	Bytes = %llu\n",
5551 			pdev->stats.tx_i.reinject_pkts.bytes);
5552 	DP_PRINT_STATS("Inspected:");
5553 	DP_PRINT_STATS("	Packets = %d",
5554 			pdev->stats.tx_i.inspect_pkts.num);
5555 	DP_PRINT_STATS("	Bytes = %llu",
5556 			pdev->stats.tx_i.inspect_pkts.bytes);
5557 	DP_PRINT_STATS("Nawds Multicast:");
5558 	DP_PRINT_STATS("	Packets = %d",
5559 			pdev->stats.tx_i.nawds_mcast.num);
5560 	DP_PRINT_STATS("	Bytes = %llu",
5561 			pdev->stats.tx_i.nawds_mcast.bytes);
5562 	DP_PRINT_STATS("CCE Classified:");
5563 	DP_PRINT_STATS("	CCE Classified Packets: %u",
5564 			pdev->stats.tx_i.cce_classified);
5565 	DP_PRINT_STATS("	RAW CCE Classified Packets: %u",
5566 			pdev->stats.tx_i.cce_classified_raw);
5567 	DP_PRINT_STATS("Mesh stats:");
5568 	DP_PRINT_STATS("	frames to firmware: %u",
5569 			pdev->stats.tx_i.mesh.exception_fw);
5570 	DP_PRINT_STATS("	completions from fw: %u",
5571 			pdev->stats.tx_i.mesh.completion_fw);
5572 	DP_PRINT_STATS("PPDU stats counter");
5573 	for (index = 0; index < CDP_PPDU_STATS_MAX_TAG; index++) {
5574 		DP_PRINT_STATS("	Tag[%d] = %llu", index,
5575 				pdev->stats.ppdu_stats_counter[index]);
5576 	}
5577 }
5578 
5579 /**
5580  * dp_print_pdev_rx_stats(): Print Pdev level RX stats
5581  * @pdev: DP_PDEV Handle
5582  *
5583  * Return: void
5584  */
5585 static inline void
5586 dp_print_pdev_rx_stats(struct dp_pdev *pdev)
5587 {
5588 	DP_PRINT_STATS("PDEV Rx Stats:\n");
5589 	DP_PRINT_STATS("Received From HW (Per Rx Ring):");
5590 	DP_PRINT_STATS("	Packets = %d %d %d %d",
5591 			pdev->stats.rx.rcvd_reo[0].num,
5592 			pdev->stats.rx.rcvd_reo[1].num,
5593 			pdev->stats.rx.rcvd_reo[2].num,
5594 			pdev->stats.rx.rcvd_reo[3].num);
5595 	DP_PRINT_STATS("	Bytes = %llu %llu %llu %llu",
5596 			pdev->stats.rx.rcvd_reo[0].bytes,
5597 			pdev->stats.rx.rcvd_reo[1].bytes,
5598 			pdev->stats.rx.rcvd_reo[2].bytes,
5599 			pdev->stats.rx.rcvd_reo[3].bytes);
5600 	DP_PRINT_STATS("Replenished:");
5601 	DP_PRINT_STATS("	Packets = %d",
5602 			pdev->stats.replenish.pkts.num);
5603 	DP_PRINT_STATS("	Bytes = %llu",
5604 			pdev->stats.replenish.pkts.bytes);
5605 	DP_PRINT_STATS("	Buffers Added To Freelist = %d",
5606 			pdev->stats.buf_freelist);
5607 	DP_PRINT_STATS("	Low threshold intr = %d",
5608 			pdev->stats.replenish.low_thresh_intrs);
5609 	DP_PRINT_STATS("Dropped:");
5610 	DP_PRINT_STATS("	msdu_not_done = %d",
5611 			pdev->stats.dropped.msdu_not_done);
5612 	DP_PRINT_STATS("        mon_rx_drop = %d",
5613 			pdev->stats.dropped.mon_rx_drop);
5614 	DP_PRINT_STATS("Sent To Stack:");
5615 	DP_PRINT_STATS("	Packets = %d",
5616 			pdev->stats.rx.to_stack.num);
5617 	DP_PRINT_STATS("	Bytes = %llu",
5618 			pdev->stats.rx.to_stack.bytes);
5619 	DP_PRINT_STATS("Multicast/Broadcast:");
5620 	DP_PRINT_STATS("	Packets = %d",
5621 			(pdev->stats.rx.multicast.num +
5622 			pdev->stats.rx.bcast.num));
5623 	DP_PRINT_STATS("	Bytes = %llu",
5624 			(pdev->stats.rx.multicast.bytes +
5625 			pdev->stats.rx.bcast.bytes));
5626 	DP_PRINT_STATS("Errors:");
5627 	DP_PRINT_STATS("	Rxdma Ring Un-initialized = %d",
5628 			pdev->stats.replenish.rxdma_err);
5629 	DP_PRINT_STATS("	Desc Alloc Failed = %d",
5630 			pdev->stats.err.desc_alloc_fail);
5631 	DP_PRINT_STATS("	IP checksum error = %d",
5632 		       pdev->stats.err.ip_csum_err);
5633 	DP_PRINT_STATS("	TCP/UDP checksum error = %d",
5634 		       pdev->stats.err.tcp_udp_csum_err);
5635 
5636 	/* Get bar_recv_cnt */
5637 	dp_aggregate_pdev_ctrl_frames_stats(pdev);
5638 	DP_PRINT_STATS("BAR Received Count: = %d",
5639 			pdev->stats.rx.bar_recv_cnt);
5640 
5641 }
5642 
5643 /**
5644  * dp_print_pdev_rx_mon_stats(): Print Pdev level RX monitor stats
5645  * @pdev: DP_PDEV Handle
5646  *
5647  * Return: void
5648  */
5649 static inline void
5650 dp_print_pdev_rx_mon_stats(struct dp_pdev *pdev)
5651 {
5652 	struct cdp_pdev_mon_stats *rx_mon_stats;
5653 
5654 	rx_mon_stats = &pdev->rx_mon_stats;
5655 
5656 	DP_PRINT_STATS("PDEV Rx Monitor Stats:\n");
5657 
5658 	dp_rx_mon_print_dbg_ppdu_stats(rx_mon_stats);
5659 
5660 	DP_PRINT_STATS("status_ppdu_done_cnt = %d",
5661 		       rx_mon_stats->status_ppdu_done);
5662 	DP_PRINT_STATS("dest_ppdu_done_cnt = %d",
5663 		       rx_mon_stats->dest_ppdu_done);
5664 	DP_PRINT_STATS("dest_mpdu_done_cnt = %d",
5665 		       rx_mon_stats->dest_mpdu_done);
5666 	DP_PRINT_STATS("dest_mpdu_drop_cnt = %d",
5667 		       rx_mon_stats->dest_mpdu_drop);
5668 }
5669 
5670 /**
5671  * dp_print_soc_tx_stats(): Print SOC level Tx stats
5672  * @soc: DP_SOC Handle
5673  *
5674  * Return: void
5675  */
5676 static inline void
5677 dp_print_soc_tx_stats(struct dp_soc *soc)
5678 {
5679 	uint8_t desc_pool_id;
5680 	soc->stats.tx.desc_in_use = 0;
5681 
5682 	DP_PRINT_STATS("SOC Tx Stats:\n");
5683 
5684 	for (desc_pool_id = 0;
5685 	     desc_pool_id < wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
5686 	     desc_pool_id++)
5687 		soc->stats.tx.desc_in_use +=
5688 			soc->tx_desc[desc_pool_id].num_allocated;
5689 
5690 	DP_PRINT_STATS("Tx Descriptors In Use = %d",
5691 			soc->stats.tx.desc_in_use);
5692 	DP_PRINT_STATS("Invalid peer:");
5693 	DP_PRINT_STATS("	Packets = %d",
5694 			soc->stats.tx.tx_invalid_peer.num);
5695 	DP_PRINT_STATS("	Bytes = %llu",
5696 			soc->stats.tx.tx_invalid_peer.bytes);
5697 	DP_PRINT_STATS("Packets dropped due to TCL ring full = %d %d %d",
5698 			soc->stats.tx.tcl_ring_full[0],
5699 			soc->stats.tx.tcl_ring_full[1],
5700 			soc->stats.tx.tcl_ring_full[2]);
5701 
5702 }
5703 /**
5704  * dp_print_soc_rx_stats: Print SOC level Rx stats
5705  * @soc: DP_SOC Handle
5706  *
5707  * Return:void
5708  */
5709 static inline void
5710 dp_print_soc_rx_stats(struct dp_soc *soc)
5711 {
5712 	uint32_t i;
5713 	char reo_error[DP_REO_ERR_LENGTH];
5714 	char rxdma_error[DP_RXDMA_ERR_LENGTH];
5715 	uint8_t index = 0;
5716 
5717 	DP_PRINT_STATS("SOC Rx Stats:\n");
5718 	DP_PRINT_STATS("Errors:\n");
5719 	DP_PRINT_STATS("Rx Decrypt Errors = %d",
5720 			(soc->stats.rx.err.rxdma_error[HAL_RXDMA_ERR_DECRYPT] +
5721 			soc->stats.rx.err.rxdma_error[HAL_RXDMA_ERR_TKIP_MIC]));
5722 	DP_PRINT_STATS("Invalid RBM = %d",
5723 			soc->stats.rx.err.invalid_rbm);
5724 	DP_PRINT_STATS("Invalid Vdev = %d",
5725 			soc->stats.rx.err.invalid_vdev);
5726 	DP_PRINT_STATS("Invalid Pdev = %d",
5727 			soc->stats.rx.err.invalid_pdev);
5728 	DP_PRINT_STATS("Invalid Peer = %d",
5729 			soc->stats.rx.err.rx_invalid_peer.num);
5730 	DP_PRINT_STATS("HAL Ring Access Fail = %d",
5731 			soc->stats.rx.err.hal_ring_access_fail);
5732 
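	/* Build a space-separated list of the per-error-code counters so all
	 * RXDMA (and, below, REO) error codes print on a single line each.
	 */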
5733 	for (i = 0; i < HAL_RXDMA_ERR_MAX; i++) {
5734 		index += qdf_snprint(&rxdma_error[index],
5735 				DP_RXDMA_ERR_LENGTH - index,
5736 				" %d", soc->stats.rx.err.rxdma_error[i]);
5737 	}
5738 	DP_PRINT_STATS("RXDMA Error (0-31):%s",
5739 			rxdma_error);
5740 
5741 	index = 0;
5742 	for (i = 0; i < HAL_REO_ERR_MAX; i++) {
5743 		index += qdf_snprint(&reo_error[index],
5744 				DP_REO_ERR_LENGTH - index,
5745 				" %d", soc->stats.rx.err.reo_error[i]);
5746 	}
5747 	DP_PRINT_STATS("REO Error(0-14):%s",
5748 			reo_error);
5749 }
5750 
5751 
5752 /**
5753  * dp_print_ring_stat_from_hal(): Print hal level ring stats
5754  * @soc: DP_SOC handle
5755  * @srng: DP_SRNG handle
5756  * @ring_name: SRNG name
5757  *
5758  * Return: void
5759  */
5760 static inline void
5761 dp_print_ring_stat_from_hal(struct dp_soc *soc,  struct dp_srng *srng,
5762 	char *ring_name)
5763 {
5764 	uint32_t tailp;
5765 	uint32_t headp;
5766 
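	/* Rings that were never initialized have no hal_srng handle; skip them */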
5767 	if (srng->hal_srng != NULL) {
5768 		hal_api_get_tphp(soc->hal_soc, srng->hal_srng, &tailp, &headp);
5769 		DP_PRINT_STATS("%s : Head pointer = %d  Tail Pointer = %d\n",
5770 				ring_name, headp, tailp);
5771 	}
5772 }
5773 
5774 /**
5775  * dp_print_ring_stats(): Print tail and head pointer
5776  * @pdev: DP_PDEV handle
5777  *
5778  * Return:void
5779  */
5780 static inline void
5781 dp_print_ring_stats(struct dp_pdev *pdev)
5782 {
5783 	uint32_t i;
5784 	char ring_name[STR_MAXLEN + 1];
5785 	int mac_id;
5786 
5787 	dp_print_ring_stat_from_hal(pdev->soc,
5788 			&pdev->soc->reo_exception_ring,
5789 			"Reo Exception Ring");
5790 	dp_print_ring_stat_from_hal(pdev->soc,
5791 			&pdev->soc->reo_reinject_ring,
5792 			"Reo Inject Ring");
5793 	dp_print_ring_stat_from_hal(pdev->soc,
5794 			&pdev->soc->reo_cmd_ring,
5795 			"Reo Command Ring");
5796 	dp_print_ring_stat_from_hal(pdev->soc,
5797 			&pdev->soc->reo_status_ring,
5798 			"Reo Status Ring");
5799 	dp_print_ring_stat_from_hal(pdev->soc,
5800 			&pdev->soc->rx_rel_ring,
5801 			"Rx Release ring");
5802 	dp_print_ring_stat_from_hal(pdev->soc,
5803 			&pdev->soc->tcl_cmd_ring,
5804 			"Tcl command Ring");
5805 	dp_print_ring_stat_from_hal(pdev->soc,
5806 			&pdev->soc->tcl_status_ring,
5807 			"Tcl Status Ring");
5808 	dp_print_ring_stat_from_hal(pdev->soc,
5809 			&pdev->soc->wbm_desc_rel_ring,
5810 			"Wbm Desc Rel Ring");
5811 	for (i = 0; i < MAX_REO_DEST_RINGS; i++) {
5812 		snprintf(ring_name, STR_MAXLEN, "Reo Dest Ring %d", i);
5813 		dp_print_ring_stat_from_hal(pdev->soc,
5814 				&pdev->soc->reo_dest_ring[i],
5815 				ring_name);
5816 	}
5817 	for (i = 0; i < pdev->soc->num_tcl_data_rings; i++) {
5818 		snprintf(ring_name, STR_MAXLEN, "Tcl Data Ring %d", i);
5819 		dp_print_ring_stat_from_hal(pdev->soc,
5820 				&pdev->soc->tcl_data_ring[i],
5821 				ring_name);
5822 	}
5823 	for (i = 0; i < MAX_TCL_DATA_RINGS; i++) {
5824 		snprintf(ring_name, STR_MAXLEN, "Tx Comp Ring %d", i);
5825 		dp_print_ring_stat_from_hal(pdev->soc,
5826 				&pdev->soc->tx_comp_ring[i],
5827 				ring_name);
5828 	}
5829 	dp_print_ring_stat_from_hal(pdev->soc,
5830 			&pdev->rx_refill_buf_ring,
5831 			"Rx Refill Buf Ring");
5832 
5833 	dp_print_ring_stat_from_hal(pdev->soc,
5834 			&pdev->rx_refill_buf_ring2,
5835 			"Second Rx Refill Buf Ring");
5836 
5837 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
5838 		dp_print_ring_stat_from_hal(pdev->soc,
5839 				&pdev->rxdma_mon_buf_ring[mac_id],
5840 				"Rxdma Mon Buf Ring");
5841 		dp_print_ring_stat_from_hal(pdev->soc,
5842 				&pdev->rxdma_mon_dst_ring[mac_id],
5843 				"Rxdma Mon Dst Ring");
5844 		dp_print_ring_stat_from_hal(pdev->soc,
5845 				&pdev->rxdma_mon_status_ring[mac_id],
5846 				"Rxdma Mon Status Ring");
5847 		dp_print_ring_stat_from_hal(pdev->soc,
5848 				&pdev->rxdma_mon_desc_ring[mac_id],
5849 				"Rxdma mon desc Ring");
5850 	}
5851 
5852 	for (i = 0; i < NUM_RXDMA_RINGS_PER_PDEV; i++) {
5853 		snprintf(ring_name, STR_MAXLEN, "Rxdma err dst ring %d", i);
5854 		dp_print_ring_stat_from_hal(pdev->soc,
5855 			&pdev->rxdma_err_dst_ring[i],
5856 			ring_name);
5857 	}
5858 
5859 	for (i = 0; i < MAX_RX_MAC_RINGS; i++) {
5860 		snprintf(ring_name, STR_MAXLEN, "Rx mac buf ring %d", i);
5861 		dp_print_ring_stat_from_hal(pdev->soc,
5862 				&pdev->rx_mac_buf_ring[i],
5863 				ring_name);
5864 	}
5865 }
5866 
5867 /**
5868  * dp_txrx_host_stats_clr(): Reinitialize the txrx stats
5869  * @vdev: DP_VDEV handle
5870  *
5871  * Return:void
5872  */
5873 static inline void
5874 dp_txrx_host_stats_clr(struct dp_vdev *vdev)
5875 {
5876 	struct dp_peer *peer = NULL;
5877 	struct dp_soc *soc = (struct dp_soc *)vdev->pdev->soc;
5878 
5879 	DP_STATS_CLR(vdev->pdev);
5880 	DP_STATS_CLR(vdev->pdev->soc);
5881 	DP_STATS_CLR(vdev);
5882 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
5883 		if (!peer)
5884 			return;
5885 		DP_STATS_CLR(peer);
5886 
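		/* Push the cleared peer stats to the control path (assumption:
		 * this keeps its cached copy in sync with the reset).
		 */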
5887 		if (soc->cdp_soc.ol_ops->update_dp_stats) {
5888 			soc->cdp_soc.ol_ops->update_dp_stats(
5889 					vdev->pdev->ctrl_pdev,
5890 					&peer->stats,
5891 					peer->peer_ids[0],
5892 					UPDATE_PEER_STATS);
5893 		}
5894 
5895 	}
5896 
5897 	if (soc->cdp_soc.ol_ops->update_dp_stats)
5898 		soc->cdp_soc.ol_ops->update_dp_stats(vdev->pdev->ctrl_pdev,
5899 				&vdev->stats, (uint16_t)vdev->vdev_id,
5900 				UPDATE_VDEV_STATS);
5901 }
5902 
5903 /**
5904  * dp_print_common_rates_info(): Print common rate for tx or rx
5905  * @pkt_type_array: rate type array containing rate info
5906  *
5907  * Return:void
5908  */
5909 static inline void
5910 dp_print_common_rates_info(struct cdp_pkt_type *pkt_type_array)
5911 {
5912 	uint8_t mcs, pkt_type;
5913 
5914 	for (pkt_type = 0; pkt_type < DOT11_MAX; pkt_type++) {
5915 		for (mcs = 0; mcs < MAX_MCS; mcs++) {
5916 			if (!dp_rate_string[pkt_type][mcs].valid)
5917 				continue;
5918 
5919 			DP_PRINT_STATS("	%s = %d",
5920 				       dp_rate_string[pkt_type][mcs].mcs_type,
5921 				       pkt_type_array[pkt_type].mcs_count[mcs]);
5922 		}
5923 
5924 		DP_PRINT_STATS("\n");
5925 	}
5926 }
5927 
5928 /**
5929  * dp_print_rx_rates(): Print Rx rate stats
5930  * @vdev: DP_VDEV handle
5931  *
5932  * Return:void
5933  */
5934 static inline void
5935 dp_print_rx_rates(struct dp_vdev *vdev)
5936 {
5937 	struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;
5938 	uint8_t i;
5939 	uint8_t index = 0;
5940 	char nss[DP_NSS_LENGTH];
5941 
5942 	DP_PRINT_STATS("Rx Rate Info:\n");
5943 	dp_print_common_rates_info(pdev->stats.rx.pkt_type);
5944 
5945 
5946 	index = 0;
5947 	for (i = 0; i < SS_COUNT; i++) {
5948 		index += qdf_snprint(&nss[index], DP_NSS_LENGTH - index,
5949 				" %d", pdev->stats.rx.nss[i]);
5950 	}
5951 	DP_PRINT_STATS("NSS(1-8) = %s",
5952 			nss);
5953 
5954 	DP_PRINT_STATS("SGI ="
5955 			" 0.8us %d,"
5956 			" 0.4us %d,"
5957 			" 1.6us %d,"
5958 			" 3.2us %d,",
5959 			pdev->stats.rx.sgi_count[0],
5960 			pdev->stats.rx.sgi_count[1],
5961 			pdev->stats.rx.sgi_count[2],
5962 			pdev->stats.rx.sgi_count[3]);
5963 	DP_PRINT_STATS("BW Counts = 20MHZ %d, 40MHZ %d, 80MHZ %d, 160MHZ %d",
5964 			pdev->stats.rx.bw[0], pdev->stats.rx.bw[1],
5965 			pdev->stats.rx.bw[2], pdev->stats.rx.bw[3]);
5966 	DP_PRINT_STATS("Reception Type ="
5967 			" SU: %d,"
5968 			" MU_MIMO:%d,"
5969 			" MU_OFDMA:%d,"
5970 			" MU_OFDMA_MIMO:%d\n",
5971 			pdev->stats.rx.reception_type[0],
5972 			pdev->stats.rx.reception_type[1],
5973 			pdev->stats.rx.reception_type[2],
5974 			pdev->stats.rx.reception_type[3]);
5975 	DP_PRINT_STATS("Aggregation:\n");
5976 	DP_PRINT_STATS("Number of Msdu's Part of Ampdus = %d",
5977 			pdev->stats.rx.ampdu_cnt);
5978 	DP_PRINT_STATS("Number of Msdu's With No Mpdu Level Aggregation : %d",
5979 			pdev->stats.rx.non_ampdu_cnt);
5980 	DP_PRINT_STATS("Number of Msdu's Part of Amsdu: %d",
5981 			pdev->stats.rx.amsdu_cnt);
5982 	DP_PRINT_STATS("Number of Msdu's With No Msdu Level Aggregation: %d",
5983 			pdev->stats.rx.non_amsdu_cnt);
5984 }
5985 
5986 /**
5987  * dp_print_tx_rates(): Print tx rates
5988  * @vdev: DP_VDEV handle
5989  *
5990  * Return:void
5991  */
5992 static inline void
5993 dp_print_tx_rates(struct dp_vdev *vdev)
5994 {
5995 	struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;
5996 	uint8_t index;
5997 	char nss[DP_NSS_LENGTH];
5998 	int nss_index;
5999 
6000 	DP_PRINT_STATS("Tx Rate Info:\n");
6001 	dp_print_common_rates_info(pdev->stats.tx.pkt_type);
6002 
6003 	DP_PRINT_STATS("SGI ="
6004 			" 0.8us %d"
6005 			" 0.4us %d"
6006 			" 1.6us %d"
6007 			" 3.2us %d",
6008 			pdev->stats.tx.sgi_count[0],
6009 			pdev->stats.tx.sgi_count[1],
6010 			pdev->stats.tx.sgi_count[2],
6011 			pdev->stats.tx.sgi_count[3]);
6012 
6013 	DP_PRINT_STATS("BW Counts = 20MHZ %d, 40MHZ %d, 80MHZ %d, 160MHZ %d",
6014 			pdev->stats.tx.bw[0], pdev->stats.tx.bw[1],
6015 			pdev->stats.tx.bw[2], pdev->stats.tx.bw[3]);
6016 
6017 	index = 0;
6018 	for (nss_index = 0; nss_index < SS_COUNT; nss_index++) {
6019 		index += qdf_snprint(&nss[index], DP_NSS_LENGTH - index,
6020 				" %d", pdev->stats.tx.nss[nss_index]);
6021 	}
6022 
6023 	DP_PRINT_STATS("NSS(1-8) = %s", nss);
6024 	DP_PRINT_STATS("OFDMA = %d", pdev->stats.tx.ofdma);
6025 	DP_PRINT_STATS("STBC = %d", pdev->stats.tx.stbc);
6026 	DP_PRINT_STATS("LDPC = %d", pdev->stats.tx.ldpc);
6027 	DP_PRINT_STATS("Retries = %d", pdev->stats.tx.retries);
6028 	DP_PRINT_STATS("Last ack rssi = %d\n", pdev->stats.tx.last_ack_rssi);
6029 
6030 	DP_PRINT_STATS("Aggregation:\n");
6031 	DP_PRINT_STATS("Number of Msdu's Part of Amsdu = %d",
6032 			pdev->stats.tx.amsdu_cnt);
6033 	DP_PRINT_STATS("Number of Msdu's With No Msdu Level Aggregation = %d",
6034 			pdev->stats.tx.non_amsdu_cnt);
6035 }
6036 
6037 /**
6038  * dp_print_peer_stats(): print peer stats
6039  * @peer: DP_PEER handle
6040  *
6041  * return: void
6042  */
6043 static inline void dp_print_peer_stats(struct dp_peer *peer)
6044 {
6045 	uint8_t i;
6046 	uint32_t index;
6047 	char nss[DP_NSS_LENGTH];
6048 	DP_PRINT_STATS("Node Tx Stats:\n");
6049 	DP_PRINT_STATS("Total Packet Completions = %d",
6050 			peer->stats.tx.comp_pkt.num);
6051 	DP_PRINT_STATS("Total Bytes Completions = %llu",
6052 			peer->stats.tx.comp_pkt.bytes);
6053 	DP_PRINT_STATS("Success Packets = %d",
6054 			peer->stats.tx.tx_success.num);
6055 	DP_PRINT_STATS("Success Bytes = %llu",
6056 			peer->stats.tx.tx_success.bytes);
6057 	DP_PRINT_STATS("Unicast Success Packets = %d",
6058 			peer->stats.tx.ucast.num);
6059 	DP_PRINT_STATS("Unicast Success Bytes = %llu",
6060 			peer->stats.tx.ucast.bytes);
6061 	DP_PRINT_STATS("Multicast Success Packets = %d",
6062 			peer->stats.tx.mcast.num);
6063 	DP_PRINT_STATS("Multicast Success Bytes = %llu",
6064 			peer->stats.tx.mcast.bytes);
6065 	DP_PRINT_STATS("Broadcast Success Packets = %d",
6066 			peer->stats.tx.bcast.num);
6067 	DP_PRINT_STATS("Broadcast Success Bytes = %llu",
6068 			peer->stats.tx.bcast.bytes);
6069 	DP_PRINT_STATS("Packets Failed = %d",
6070 			peer->stats.tx.tx_failed);
6071 	DP_PRINT_STATS("Packets In OFDMA = %d",
6072 			peer->stats.tx.ofdma);
6073 	DP_PRINT_STATS("Packets In STBC = %d",
6074 			peer->stats.tx.stbc);
6075 	DP_PRINT_STATS("Packets In LDPC = %d",
6076 			peer->stats.tx.ldpc);
6077 	DP_PRINT_STATS("Packet Retries = %d",
6078 			peer->stats.tx.retries);
6079 	DP_PRINT_STATS("MSDU's Part of AMSDU = %d",
6080 			peer->stats.tx.amsdu_cnt);
6081 	DP_PRINT_STATS("Last Packet RSSI = %d",
6082 			peer->stats.tx.last_ack_rssi);
6083 	DP_PRINT_STATS("Dropped At FW: Removed = %d",
6084 			peer->stats.tx.dropped.fw_rem);
6085 	DP_PRINT_STATS("Dropped At FW: Removed transmitted = %d",
6086 			peer->stats.tx.dropped.fw_rem_tx);
6087 	DP_PRINT_STATS("Dropped At FW: Removed Untransmitted = %d",
6088 			peer->stats.tx.dropped.fw_rem_notx);
6089 	DP_PRINT_STATS("Dropped : Age Out = %d",
6090 			peer->stats.tx.dropped.age_out);
6091 	DP_PRINT_STATS("NAWDS : ");
6092 	DP_PRINT_STATS("	Nawds multicast Drop Tx Packet = %d",
6093 			peer->stats.tx.nawds_mcast_drop);
6094 	DP_PRINT_STATS("	Nawds multicast  Tx Packet Count = %d",
6095 			peer->stats.tx.nawds_mcast.num);
6096 	DP_PRINT_STATS("	Nawds multicast  Tx Packet Bytes = %llu",
6097 			peer->stats.tx.nawds_mcast.bytes);
6098 
6099 	DP_PRINT_STATS("Rate Info:");
6100 	dp_print_common_rates_info(peer->stats.tx.pkt_type);
6101 
6102 
6103 	DP_PRINT_STATS("SGI = "
6104 			" 0.8us %d"
6105 			" 0.4us %d"
6106 			" 1.6us %d"
6107 			" 3.2us %d",
6108 			peer->stats.tx.sgi_count[0],
6109 			peer->stats.tx.sgi_count[1],
6110 			peer->stats.tx.sgi_count[2],
6111 			peer->stats.tx.sgi_count[3]);
6112 	DP_PRINT_STATS("Excess Retries per AC ");
6113 	DP_PRINT_STATS("	 Best effort = %d",
6114 			peer->stats.tx.excess_retries_per_ac[0]);
6115 	DP_PRINT_STATS("	 Background= %d",
6116 			peer->stats.tx.excess_retries_per_ac[1]);
6117 	DP_PRINT_STATS("	 Video = %d",
6118 			peer->stats.tx.excess_retries_per_ac[2]);
6119 	DP_PRINT_STATS("	 Voice = %d",
6120 			peer->stats.tx.excess_retries_per_ac[3]);
6121 	DP_PRINT_STATS("BW Counts = 20MHZ %d 40MHZ %d 80MHZ %d 160MHZ %d\n",
6122 			peer->stats.tx.bw[2], peer->stats.tx.bw[3],
6123 			peer->stats.tx.bw[4], peer->stats.tx.bw[5]);
6124 
6125 	index = 0;
6126 	for (i = 0; i < SS_COUNT; i++) {
6127 		index += qdf_snprint(&nss[index], DP_NSS_LENGTH - index,
6128 				" %d", peer->stats.tx.nss[i]);
6129 	}
6130 	DP_PRINT_STATS("NSS(1-8) = %s",
6131 			nss);
6132 
6133 	DP_PRINT_STATS("Aggregation:");
6134 	DP_PRINT_STATS("	Number of Msdu's Part of Amsdu = %d",
6135 			peer->stats.tx.amsdu_cnt);
6136 	DP_PRINT_STATS("	Number of Msdu's With No Msdu Level Aggregation = %d\n",
6137 			peer->stats.tx.non_amsdu_cnt);
6138 
6139 	DP_PRINT_STATS("Node Rx Stats:");
6140 	DP_PRINT_STATS("Packets Sent To Stack = %d",
6141 			peer->stats.rx.to_stack.num);
6142 	DP_PRINT_STATS("Bytes Sent To Stack = %llu",
6143 			peer->stats.rx.to_stack.bytes);
6144 	for (i = 0; i <  CDP_MAX_RX_RINGS; i++) {
6145 		DP_PRINT_STATS("Ring Id = %d", i);
6146 		DP_PRINT_STATS("	Packets Received = %d",
6147 				peer->stats.rx.rcvd_reo[i].num);
6148 		DP_PRINT_STATS("	Bytes Received = %llu",
6149 				peer->stats.rx.rcvd_reo[i].bytes);
6150 	}
6151 	DP_PRINT_STATS("Multicast Packets Received = %d",
6152 			peer->stats.rx.multicast.num);
6153 	DP_PRINT_STATS("Multicast Bytes Received = %llu",
6154 			peer->stats.rx.multicast.bytes);
6155 	DP_PRINT_STATS("Broadcast Packets Received = %d",
6156 			peer->stats.rx.bcast.num);
6157 	DP_PRINT_STATS("Broadcast Bytes Received = %llu",
6158 			peer->stats.rx.bcast.bytes);
6159 	DP_PRINT_STATS("Intra BSS Packets Received = %d",
6160 			peer->stats.rx.intra_bss.pkts.num);
6161 	DP_PRINT_STATS("Intra BSS Bytes Received = %llu",
6162 			peer->stats.rx.intra_bss.pkts.bytes);
6163 	DP_PRINT_STATS("Raw Packets Received = %d",
6164 			peer->stats.rx.raw.num);
6165 	DP_PRINT_STATS("Raw Bytes Received = %llu",
6166 			peer->stats.rx.raw.bytes);
6167 	DP_PRINT_STATS("Errors: MIC Errors = %d",
6168 			peer->stats.rx.err.mic_err);
6169 	DP_PRINT_STATS("Errors: Decryption Errors = %d",
6170 			peer->stats.rx.err.decrypt_err);
6171 	DP_PRINT_STATS("Msdu's Received With No Mpdu Level Aggregation = %d",
6172 			peer->stats.rx.non_ampdu_cnt);
6173 	DP_PRINT_STATS("Msdu's Received As Part of Ampdu = %d",
6174 			peer->stats.rx.ampdu_cnt);
6175 	DP_PRINT_STATS("Msdu's Received Not Part of Amsdu's = %d",
6176 			peer->stats.rx.non_amsdu_cnt);
6177 	DP_PRINT_STATS("MSDUs Received As Part of Amsdu = %d",
6178 			peer->stats.rx.amsdu_cnt);
6179 	DP_PRINT_STATS("NAWDS : ");
6180 	DP_PRINT_STATS("	Nawds multicast Drop Rx Packet = %d",
6181 			peer->stats.rx.nawds_mcast_drop);
6182 	DP_PRINT_STATS("SGI ="
6183 			" 0.8us %d"
6184 			" 0.4us %d"
6185 			" 1.6us %d"
6186 			" 3.2us %d",
6187 			peer->stats.rx.sgi_count[0],
6188 			peer->stats.rx.sgi_count[1],
6189 			peer->stats.rx.sgi_count[2],
6190 			peer->stats.rx.sgi_count[3]);
6191 	DP_PRINT_STATS("BW Counts = 20MHZ %d 40MHZ %d 80MHZ %d 160MHZ %d",
6192 			peer->stats.rx.bw[0], peer->stats.rx.bw[1],
6193 			peer->stats.rx.bw[2], peer->stats.rx.bw[3]);
6194 	DP_PRINT_STATS("Reception Type ="
6195 			" SU %d,"
6196 			" MU_MIMO %d,"
6197 			" MU_OFDMA %d,"
6198 			" MU_OFDMA_MIMO %d",
6199 			peer->stats.rx.reception_type[0],
6200 			peer->stats.rx.reception_type[1],
6201 			peer->stats.rx.reception_type[2],
6202 			peer->stats.rx.reception_type[3]);
6203 
6204 	dp_print_common_rates_info(peer->stats.rx.pkt_type);
6205 
6206 	index = 0;
6207 	for (i = 0; i < SS_COUNT; i++) {
6208 		index += qdf_snprint(&nss[index], DP_NSS_LENGTH - index,
6209 				" %d", peer->stats.rx.nss[i]);
6210 	}
6211 	DP_PRINT_STATS("NSS(1-8) = %s",
6212 			nss);
6213 
6214 	DP_PRINT_STATS("Aggregation:");
6215 	DP_PRINT_STATS("	Msdu's Part of Ampdu = %d",
6216 			peer->stats.rx.ampdu_cnt);
6217 	DP_PRINT_STATS("	Msdu's With No Mpdu Level Aggregation = %d",
6218 			peer->stats.rx.non_ampdu_cnt);
6219 	DP_PRINT_STATS("	Msdu's Part of Amsdu = %d",
6220 			peer->stats.rx.amsdu_cnt);
6221 	DP_PRINT_STATS("	Msdu's With No Msdu Level Aggregation = %d",
6222 			peer->stats.rx.non_amsdu_cnt);
6223 }
6224 
6225 /*
6226  * dp_get_host_peer_stats()- function to print peer stats
6227  * @pdev_handle: DP_PDEV handle
6228  * @mac_addr: mac address of the peer
6229  *
6230  * Return: void
6231  */
6232 static void
6233 dp_get_host_peer_stats(struct cdp_pdev *pdev_handle, char *mac_addr)
6234 {
6235 	struct dp_peer *peer;
6236 	uint8_t local_id;
6237 
6238 	peer = (struct dp_peer *)dp_find_peer_by_addr(pdev_handle, mac_addr,
6239 			&local_id);
6240 
6241 	if (!peer) {
6242 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
6243 			  "%s: Invalid peer\n", __func__);
6244 		return;
6245 	}
6246 
6247 	dp_print_peer_stats(peer);
6248 	dp_peer_rxtid_stats(peer, dp_rx_tid_stats_cb, NULL);
6249 }
6250 
6251 /**
6252  * dp_print_host_stats()- Function to print the stats aggregated at host
6253  * @vdev_handle: DP_VDEV handle
6254  * @req: host stats request
6255  *
6256  * Available Stat types
6257  * TXRX_CLEAR_STATS  : Clear the stats
6258  * TXRX_RX_RATE_STATS: Print Rx Rate Info
6259  * TXRX_TX_RATE_STATS: Print Tx Rate Info
6260  * TXRX_TX_HOST_STATS: Print Tx Stats
6261  * TXRX_RX_HOST_STATS: Print Rx Stats
6262  * TXRX_AST_STATS: Print AST Stats
6263  * TXRX_SRNG_PTR_STATS: Print SRNG ring pointer stats
 * TXRX_RX_MON_STATS: Print monitor mode Rx stats
 * TXRX_REO_QUEUE_STATS: Print peer REO queue stats
6264  *
6265  * Return: 0 on success, print error message in case of failure
6266  */
6267 static int
6268 dp_print_host_stats(struct cdp_vdev *vdev_handle,
6269 		    struct cdp_txrx_stats_req *req)
6270 {
6271 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
6272 	struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;
6273 	enum cdp_host_txrx_stats type =
6274 			dp_stats_mapping_table[req->stats][STATS_HOST];
6275 
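	/* Refresh the pdev-level aggregates once so every stats type handled
	 * below prints up-to-date numbers.
	 */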
6276 	dp_aggregate_pdev_stats(pdev);
6277 
6278 	switch (type) {
6279 	case TXRX_CLEAR_STATS:
6280 		dp_txrx_host_stats_clr(vdev);
6281 		break;
6282 	case TXRX_RX_RATE_STATS:
6283 		dp_print_rx_rates(vdev);
6284 		break;
6285 	case TXRX_TX_RATE_STATS:
6286 		dp_print_tx_rates(vdev);
6287 		break;
6288 	case TXRX_TX_HOST_STATS:
6289 		dp_print_pdev_tx_stats(pdev);
6290 		dp_print_soc_tx_stats(pdev->soc);
6291 		break;
6292 	case TXRX_RX_HOST_STATS:
6293 		dp_print_pdev_rx_stats(pdev);
6294 		dp_print_soc_rx_stats(pdev->soc);
6295 		break;
6296 	case TXRX_AST_STATS:
6297 		dp_print_ast_stats(pdev->soc);
6298 		dp_print_peer_table(vdev);
6299 		break;
6300 	case TXRX_SRNG_PTR_STATS:
6301 		dp_print_ring_stats(pdev);
6302 		break;
6303 	case TXRX_RX_MON_STATS:
6304 		dp_print_pdev_rx_mon_stats(pdev);
6305 		break;
6306 	case TXRX_REO_QUEUE_STATS:
6307 		dp_get_host_peer_stats((struct cdp_pdev *)pdev, req->peer_addr);
6308 		break;
6309 	default:
6310 		DP_TRACE(FATAL, "Wrong Input For TxRx Host Stats");
6311 		break;
6312 	}
6313 	return 0;
6314 }
6315 
6316 /*
6317  * dp_ppdu_ring_reset()- Reset PPDU Stats ring
6318  * @pdev: DP_PDEV handle
6319  *
6320  * Return: void
6321  */
6322 static void
6323 dp_ppdu_ring_reset(struct dp_pdev *pdev)
6324 {
6325 	struct htt_rx_ring_tlv_filter htt_tlv_filter;
6326 	int mac_id;
6327 
6328 	qdf_mem_set(&(htt_tlv_filter), sizeof(htt_tlv_filter), 0x0);
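	/* An all-zero TLV filter unsubscribes the monitor status ring from
	 * every TLV, which stops PPDU stats delivery from the target.
	 */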
6329 
6330 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
6331 		int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id,
6332 							pdev->pdev_id);
6333 
6334 		htt_h2t_rx_ring_cfg(pdev->soc->htt_handle, mac_for_pdev,
6335 			pdev->rxdma_mon_status_ring[mac_id].hal_srng,
6336 			RXDMA_MONITOR_STATUS, RX_BUFFER_SIZE, &htt_tlv_filter);
6337 	}
6338 }
6339 
6340 /*
6341  * dp_ppdu_ring_cfg()- Configure PPDU Stats ring
6342  * @pdev: DP_PDEV handle
6343  *
6344  * Return: void
6345  */
6346 static void
6347 dp_ppdu_ring_cfg(struct dp_pdev *pdev)
6348 {
6349 	struct htt_rx_ring_tlv_filter htt_tlv_filter = {0};
6350 	int mac_id;
6351 
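	/* Subscribe only to the TLVs needed for PPDU stats on the monitor
	 * status ring: MPDU start plus the PPDU start/end TLVs.
	 */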
6352 	htt_tlv_filter.mpdu_start = 1;
6353 	htt_tlv_filter.msdu_start = 0;
6354 	htt_tlv_filter.packet = 0;
6355 	htt_tlv_filter.msdu_end = 0;
6356 	htt_tlv_filter.mpdu_end = 0;
6357 	htt_tlv_filter.attention = 0;
6358 	htt_tlv_filter.ppdu_start = 1;
6359 	htt_tlv_filter.ppdu_end = 1;
6360 	htt_tlv_filter.ppdu_end_user_stats = 1;
6361 	htt_tlv_filter.ppdu_end_user_stats_ext = 1;
6362 	htt_tlv_filter.ppdu_end_status_done = 1;
6363 	htt_tlv_filter.enable_fp = 1;
6364 	htt_tlv_filter.enable_md = 0;
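	/* M-copy mode additionally needs packet headers and monitor-other
	 * (MO) filtered frames.
	 */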
6365 	if (pdev->mcopy_mode) {
6366 		htt_tlv_filter.packet_header = 1;
6367 		htt_tlv_filter.enable_mo = 1;
6368 	}
6369 	htt_tlv_filter.fp_mgmt_filter = FILTER_MGMT_ALL;
6370 	htt_tlv_filter.fp_ctrl_filter = FILTER_CTRL_ALL;
6371 	htt_tlv_filter.fp_data_filter = FILTER_DATA_ALL;
6372 	htt_tlv_filter.mo_mgmt_filter = FILTER_MGMT_ALL;
6373 	htt_tlv_filter.mo_ctrl_filter = FILTER_CTRL_ALL;
6374 	htt_tlv_filter.mo_data_filter = FILTER_DATA_ALL;
6375 
6376 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
6377 		int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id,
6378 						pdev->pdev_id);
6379 
6380 		htt_h2t_rx_ring_cfg(pdev->soc->htt_handle, mac_for_pdev,
6381 			pdev->rxdma_mon_status_ring[mac_id].hal_srng,
6382 			RXDMA_MONITOR_STATUS, RX_BUFFER_SIZE, &htt_tlv_filter);
6383 	}
6384 }
6385 
6386 /*
6387  * is_ppdu_txrx_capture_enabled() - API to check whether the pktlog and
6388  *                              debug sniffer (tx sniffer/m_copy) modes are
 *                              all disabled.
6389  * @pdev: dp pdev handle.
6390  *
6391  * Return: true if none of the capture modes are enabled, false otherwise
6392  */
6393 static inline bool is_ppdu_txrx_capture_enabled(struct dp_pdev *pdev)
6394 {
6395 	if (!pdev->pktlog_ppdu_stats && !pdev->tx_sniffer_enable &&
6396 	    !pdev->mcopy_mode)
6397 		return true;
6398 	else
6399 		return false;
6400 }
6401 
6402 /*
6403  * dp_set_bpr_enable() - API to enable/disable the BPR feature
6404  * @pdev_handle: DP_PDEV handle.
6405  * @val: Provided value.
6406  *
6407  * Return: void
6408  */
6409 static void
6410 dp_set_bpr_enable(struct cdp_pdev *pdev_handle, int val)
6411 {
6412 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
6413 
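	/* Pick the PPDU stats mask to send to the FW based on which other
	 * consumers (enhanced stats, sniffer, m_copy, pktlog) are still active.
	 */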
6414 	switch (val) {
6415 	case CDP_BPR_DISABLE:
6416 		pdev->bpr_enable = CDP_BPR_DISABLE;
6417 		if (!pdev->pktlog_ppdu_stats && !pdev->enhanced_stats_en &&
6418 		    !pdev->tx_sniffer_enable && !pdev->mcopy_mode) {
6419 			dp_h2t_cfg_stats_msg_send(pdev, 0, pdev->pdev_id);
6420 		} else if (pdev->enhanced_stats_en &&
6421 			   !pdev->tx_sniffer_enable && !pdev->mcopy_mode &&
6422 			   !pdev->pktlog_ppdu_stats) {
6423 			dp_h2t_cfg_stats_msg_send(pdev,
6424 						  DP_PPDU_STATS_CFG_ENH_STATS,
6425 						  pdev->pdev_id);
6426 		}
6427 		break;
6428 	case CDP_BPR_ENABLE:
6429 		pdev->bpr_enable = CDP_BPR_ENABLE;
6430 		if (!pdev->enhanced_stats_en && !pdev->tx_sniffer_enable &&
6431 		    !pdev->mcopy_mode && !pdev->pktlog_ppdu_stats) {
6432 			dp_h2t_cfg_stats_msg_send(pdev,
6433 						  DP_PPDU_STATS_CFG_BPR,
6434 						  pdev->pdev_id);
6435 		} else if (pdev->enhanced_stats_en &&
6436 			   !pdev->tx_sniffer_enable && !pdev->mcopy_mode &&
6437 			   !pdev->pktlog_ppdu_stats) {
6438 			dp_h2t_cfg_stats_msg_send(pdev,
6439 						  DP_PPDU_STATS_CFG_BPR_ENH,
6440 						  pdev->pdev_id);
6441 		} else if (pdev->pktlog_ppdu_stats) {
6442 			dp_h2t_cfg_stats_msg_send(pdev,
6443 						  DP_PPDU_STATS_CFG_BPR_PKTLOG,
6444 						  pdev->pdev_id);
6445 		}
6446 		break;
6447 	default:
6448 		break;
6449 	}
6450 }
6451 
6452 /*
6453  * dp_config_debug_sniffer()- API to enable/disable debug sniffer
6454  * @pdev_handle: DP_PDEV handle
6455  * @val: user provided value
6456  *
6457  * Return: void
6458  */
6459 static void
6460 dp_config_debug_sniffer(struct cdp_pdev *pdev_handle, int val)
6461 {
6462 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
6463 
6464 	switch (val) {
6465 	case 0:
6466 		pdev->tx_sniffer_enable = 0;
6467 		pdev->mcopy_mode = 0;
6468 
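		/* Sniffer disabled: fall back to whichever PPDU stats mask the
		 * remaining consumers (enhanced stats / BPR) still require.
		 */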
6469 		if (!pdev->pktlog_ppdu_stats && !pdev->enhanced_stats_en &&
6470 		    !pdev->bpr_enable) {
6471 			dp_h2t_cfg_stats_msg_send(pdev, 0, pdev->pdev_id);
6472 			dp_ppdu_ring_reset(pdev);
6473 		} else if (pdev->enhanced_stats_en && !pdev->bpr_enable) {
6474 			dp_h2t_cfg_stats_msg_send(pdev,
6475 				DP_PPDU_STATS_CFG_ENH_STATS, pdev->pdev_id);
6476 		} else if (!pdev->enhanced_stats_en && pdev->bpr_enable) {
6477 			dp_h2t_cfg_stats_msg_send(pdev,
6478 						  DP_PPDU_STATS_CFG_BPR_ENH,
6479 						  pdev->pdev_id);
6480 		} else {
6481 			dp_h2t_cfg_stats_msg_send(pdev,
6482 						  DP_PPDU_STATS_CFG_BPR,
6483 						  pdev->pdev_id);
6484 		}
6485 		break;
6486 
6487 	case 1:
6488 		pdev->tx_sniffer_enable = 1;
6489 		pdev->mcopy_mode = 0;
6490 
6491 		if (!pdev->pktlog_ppdu_stats)
6492 			dp_h2t_cfg_stats_msg_send(pdev,
6493 				DP_PPDU_STATS_CFG_SNIFFER, pdev->pdev_id);
6494 		break;
6495 	case 2:
6496 		pdev->mcopy_mode = 1;
6497 		pdev->tx_sniffer_enable = 0;
6498 		dp_ppdu_ring_cfg(pdev);
6499 
6500 		if (!pdev->pktlog_ppdu_stats)
6501 			dp_h2t_cfg_stats_msg_send(pdev,
6502 				DP_PPDU_STATS_CFG_SNIFFER, pdev->pdev_id);
6503 		break;
6504 	default:
6505 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
6506 			"Invalid value");
6507 		break;
6508 	}
6509 }
6510 
6511 /*
6512  * dp_enable_enhanced_stats() - API to enable enhanced statistics
6513  * @pdev_handle: DP_PDEV handle
6514  *
6515  * Return: void
6516  */
6517 static void
6518 dp_enable_enhanced_stats(struct cdp_pdev *pdev_handle)
6519 {
6520 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
6521 	pdev->enhanced_stats_en = 1;
6522 
6523 	if (!pdev->mcopy_mode && !pdev->neighbour_peers_added)
6524 		dp_ppdu_ring_cfg(pdev);
6525 
6526 	if (is_ppdu_txrx_capture_enabled(pdev) && !pdev->bpr_enable) {
6527 		dp_h2t_cfg_stats_msg_send(pdev, DP_PPDU_STATS_CFG_ENH_STATS,
					  pdev->pdev_id);
6528 	} else if (is_ppdu_txrx_capture_enabled(pdev) && pdev->bpr_enable) {
6529 		dp_h2t_cfg_stats_msg_send(pdev,
6530 					  DP_PPDU_STATS_CFG_BPR_ENH,
6531 					  pdev->pdev_id);
6532 	}
6533 }
6534 
6535 /*
6536  * dp_disable_enhanced_stats() - API to disable enhanced statistics
6537  * @pdev_handle: DP_PDEV handle
6538  *
6539  * Return: void
6540  */
6541 static void
6542 dp_disable_enhanced_stats(struct cdp_pdev *pdev_handle)
6543 {
6544 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
6545 
6546 	pdev->enhanced_stats_en = 0;
6547 
6548 	if (is_ppdu_txrx_capture_enabled(pdev) && !pdev->bpr_enable) {
6549 		dp_h2t_cfg_stats_msg_send(pdev, 0, pdev->pdev_id);
6550 	} else if (is_ppdu_txrx_capture_enabled(pdev) && pdev->bpr_enable) {
6551 		dp_h2t_cfg_stats_msg_send(pdev,
6552 					  DP_PPDU_STATS_CFG_BPR,
6553 					  pdev->pdev_id);
6554 	}
6555 
6556 	if (!pdev->mcopy_mode && !pdev->neighbour_peers_added)
6557 		dp_ppdu_ring_reset(pdev);
6558 }
6559 
6560 /*
6561  * dp_get_fw_peer_stats()- function to request FW peer stats
6562  * @pdev_handle: DP_PDEV handle
6563  * @mac_addr: mac address of the peer
6564  * @cap: Type of htt stats requested
6565  *
6566  * Currently supports only MAC-address-based requests:
6567  *	1: HTT_PEER_STATS_REQ_MODE_NO_QUERY
6568  *	2: HTT_PEER_STATS_REQ_MODE_QUERY_TQM
6569  *	3: HTT_PEER_STATS_REQ_MODE_FLUSH_TQM
6570  *
6571  * Return: void
6572  */
6573 static void
6574 dp_get_fw_peer_stats(struct cdp_pdev *pdev_handle, uint8_t *mac_addr,
6575 		uint32_t cap)
6576 {
6577 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
6578 	int i;
6579 	uint32_t config_param0 = 0;
6580 	uint32_t config_param1 = 0;
6581 	uint32_t config_param2 = 0;
6582 	uint32_t config_param3 = 0;
6583 
6584 	HTT_DBG_EXT_STATS_PEER_INFO_IS_MAC_ADDR_SET(config_param0, 1);
6585 	config_param0 |= (1 << (cap + 1));
6586 
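	/* Request every peer-stats TLV by setting all bits in the TLV bitmask */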
6587 	for (i = 0; i < HTT_PEER_STATS_MAX_TLV; i++) {
6588 		config_param1 |= (1 << i);
6589 	}
6590 
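	/* Pack the 6-byte MAC address: bytes 0-3 into config_param2 and
	 * bytes 4-5 into config_param3.
	 */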
6591 	config_param2 |= (mac_addr[0] & 0x000000ff);
6592 	config_param2 |= ((mac_addr[1] << 8) & 0x0000ff00);
6593 	config_param2 |= ((mac_addr[2] << 16) & 0x00ff0000);
6594 	config_param2 |= ((mac_addr[3] << 24) & 0xff000000);
6595 
6596 	config_param3 |= (mac_addr[4] & 0x000000ff);
6597 	config_param3 |= ((mac_addr[5] << 8) & 0x0000ff00);
6598 
6599 	dp_h2t_ext_stats_msg_send(pdev, HTT_DBG_EXT_STATS_PEER_INFO,
6600 			config_param0, config_param1, config_param2,
6601 			config_param3, 0, 0, 0);
6602 
6603 }
6604 
6605 /* This struct definition will be removed from here
6606  * once it gets added in FW headers */
6607 struct httstats_cmd_req {
6608 	uint32_t	config_param0;
6609 	uint32_t	config_param1;
6610 	uint32_t	config_param2;
6611 	uint32_t	config_param3;
6612 	int		cookie;
6613 	u_int8_t	stats_id;
6614 };
6615 
6616 /*
6617  * dp_get_htt_stats: function to process the httstats request
6618  * @pdev_handle: DP pdev handle
6619  * @data: pointer to request data
6620  * @data_len: length for request data
6621  *
6622  * return: void
6623  */
6624 static void
6625 dp_get_htt_stats(struct cdp_pdev *pdev_handle, void *data, uint32_t data_len)
6626 {
6627 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
6628 	struct httstats_cmd_req *req = (struct httstats_cmd_req *)data;
6629 
6630 	QDF_ASSERT(data_len == sizeof(struct httstats_cmd_req));
6631 	dp_h2t_ext_stats_msg_send(pdev, req->stats_id,
6632 				req->config_param0, req->config_param1,
6633 				req->config_param2, req->config_param3,
6634 				req->cookie, 0, 0);
6635 }
6636 
6637 /*
6638  * dp_set_pdev_param: function to set parameters in pdev
6639  * @pdev_handle: DP pdev handle
6640  * @param: parameter type to be set
6641  * @val: value of parameter to be set
6642  *
6643  * return: void
6644  */
6645 static void dp_set_pdev_param(struct cdp_pdev *pdev_handle,
6646 		enum cdp_pdev_param_type param, uint8_t val)
6647 {
6648 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
6649 	switch (param) {
6650 	case CDP_CONFIG_DEBUG_SNIFFER:
6651 		dp_config_debug_sniffer(pdev_handle, val);
6652 		break;
6653 	case CDP_CONFIG_BPR_ENABLE:
6654 		dp_set_bpr_enable(pdev_handle, val);
6655 		break;
6656 	case CDP_CONFIG_PRIMARY_RADIO:
6657 		pdev->is_primary = val;
6658 		break;
6659 	default:
6660 		break;
6661 	}
6662 }
6663 
6664 /*
6665  * dp_set_vdev_param: function to set parameters in vdev
 * @vdev_handle: DP vdev handle
6666  * @param: parameter type to be set
6667  * @val: value of parameter to be set
6668  *
6669  * return: void
6670  */
6671 static void dp_set_vdev_param(struct cdp_vdev *vdev_handle,
6672 		enum cdp_vdev_param_type param, uint32_t val)
6673 {
6674 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
6675 	switch (param) {
6676 	case CDP_ENABLE_WDS:
6677 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
6678 			  "wds_enable %d for vdev(%p) id(%d)\n",
6679 			  val, vdev, vdev->vdev_id);
6680 		vdev->wds_enabled = val;
6681 		break;
6682 	case CDP_ENABLE_NAWDS:
6683 		vdev->nawds_enabled = val;
6684 		break;
6685 	case CDP_ENABLE_MCAST_EN:
6686 		vdev->mcast_enhancement_en = val;
6687 		break;
6688 	case CDP_ENABLE_PROXYSTA:
6689 		vdev->proxysta_vdev = val;
6690 		break;
6691 	case CDP_UPDATE_TDLS_FLAGS:
6692 		vdev->tdls_link_connected = val;
6693 		break;
6694 	case CDP_CFG_WDS_AGING_TIMER:
6695 		if (val == 0)
6696 			qdf_timer_stop(&vdev->pdev->soc->wds_aging_timer);
6697 		else if (val != vdev->wds_aging_timer_val)
6698 			qdf_timer_mod(&vdev->pdev->soc->wds_aging_timer, val);
6699 
6700 		vdev->wds_aging_timer_val = val;
6701 		break;
6702 	case CDP_ENABLE_AP_BRIDGE:
6703 		if (wlan_op_mode_sta != vdev->opmode)
6704 			vdev->ap_bridge_enabled = val;
6705 		else
6706 			vdev->ap_bridge_enabled = false;
6707 		break;
6708 	case CDP_ENABLE_CIPHER:
6709 		vdev->sec_type = val;
6710 		break;
6711 	case CDP_ENABLE_QWRAP_ISOLATION:
6712 		vdev->isolation_vdev = val;
6713 		break;
6714 	default:
6715 		break;
6716 	}
6717 
6718 	dp_tx_vdev_update_search_flags(vdev);
6719 }
6720 
6721 /**
6722  * dp_peer_set_nawds: set nawds bit in peer
6723  * @peer_handle: pointer to peer
6724  * @value: enable/disable nawds
6725  *
6726  * return: void
6727  */
6728 static void dp_peer_set_nawds(struct cdp_peer *peer_handle, uint8_t value)
6729 {
6730 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
6731 	peer->nawds_enabled = value;
6732 }
6733 
6734 /*
6735  * dp_set_vdev_dscp_tid_map_wifi3(): Update Map ID selected for particular vdev
6736  * @vdev_handle: DP_VDEV handle
6737  * @map_id:ID of map that needs to be updated
6738  *
6739  * Return: void
6740  */
6741 static void dp_set_vdev_dscp_tid_map_wifi3(struct cdp_vdev *vdev_handle,
6742 		uint8_t map_id)
6743 {
6744 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
6745 	vdev->dscp_tid_map_id = map_id;
6747 }
6748 
6749 /* dp_txrx_get_peer_stats - will return cdp_peer_stats
6750  * @peer_handle: DP_PEER handle
6751  *
6752  * return : cdp_peer_stats pointer
6753  */
6754 static struct cdp_peer_stats*
6755 		dp_txrx_get_peer_stats(struct cdp_peer *peer_handle)
6756 {
6757 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
6758 
6759 	qdf_assert(peer);
6760 
6761 	return &peer->stats;
6762 }
6763 
6764 /* dp_txrx_reset_peer_stats - reset cdp_peer_stats for particular peer
6765  * @peer_handle: DP_PEER handle
6766  *
6767  * return : void
6768  */
6769 static void dp_txrx_reset_peer_stats(struct cdp_peer *peer_handle)
6770 {
6771 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
6772 
6773 	qdf_assert(peer);
6774 
6775 	qdf_mem_set(&peer->stats, sizeof(peer->stats), 0);
6776 }
6777 
6778 /* dp_txrx_get_vdev_stats - Update buffer with cdp_vdev_stats
6779  * @vdev_handle: DP_VDEV handle
6780  * @buf: buffer for vdev stats
 * @is_aggregate: true to include per-peer stats, false for raw vdev stats only
6781  *
6782  * return : int
6783  */
6784 static int  dp_txrx_get_vdev_stats(struct cdp_vdev *vdev_handle, void *buf,
6785 				   bool is_aggregate)
6786 {
6787 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
6788 	struct cdp_vdev_stats *vdev_stats = (struct cdp_vdev_stats *)buf;
6789 
6790 	if (is_aggregate)
6791 		dp_aggregate_vdev_stats(vdev, buf);
6792 	else
6793 		qdf_mem_copy(vdev_stats, &vdev->stats, sizeof(vdev->stats));
6794 
6795 	return 0;
6796 }
6797 
6798 /*
6799  * dp_txrx_stats_publish(): publish pdev stats into a buffer
6800  * @pdev_handle: DP_PDEV handle
6801  * @buf: to hold pdev_stats
6802  *
6803  * Return: int
6804  */
6805 static int
6806 dp_txrx_stats_publish(struct cdp_pdev *pdev_handle, void *buf)
6807 {
6808 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
6809 	struct cdp_pdev_stats *buffer = (struct cdp_pdev_stats *) buf;
6810 	struct cdp_txrx_stats_req req = {0,};
6811 
6812 	dp_aggregate_pdev_stats(pdev);
6813 	req.stats = HTT_DBG_EXT_STATS_PDEV_TX;
6814 	req.cookie_val = 1;
6815 	dp_h2t_ext_stats_msg_send(pdev, req.stats, req.param0,
6816 				req.param1, req.param2, req.param3, 0,
6817 				req.cookie_val, 0);
6818 
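	/* Give the FW time to respond so the stats are folded into pdev->stats
	 * before they are copied out below.
	 */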
6819 	msleep(DP_MAX_SLEEP_TIME);
6820 
6821 	req.stats = HTT_DBG_EXT_STATS_PDEV_RX;
6822 	req.cookie_val = 1;
6823 	dp_h2t_ext_stats_msg_send(pdev, req.stats, req.param0,
6824 				req.param1, req.param2, req.param3, 0,
6825 				req.cookie_val, 0);
6826 
6827 	msleep(DP_MAX_SLEEP_TIME);
6828 	qdf_mem_copy(buffer, &pdev->stats, sizeof(pdev->stats));
6829 
6830 	return TXRX_STATS_LEVEL;
6831 }
6832 
6833 /**
6834  * dp_set_pdev_dscp_tid_map_wifi3(): update dscp tid map in pdev
6835  * @pdev_handle: DP_PDEV handle
6836  * @map_id: ID of map that needs to be updated
6837  * @tos: index value in map
6838  * @tid: tid value passed by the user
6839  *
6840  * Return: void
6841  */
6842 static void dp_set_pdev_dscp_tid_map_wifi3(struct cdp_pdev *pdev_handle,
6843 		uint8_t map_id, uint8_t tos, uint8_t tid)
6844 {
6845 	uint8_t dscp;
6846 	struct dp_pdev *pdev = (struct dp_pdev *) pdev_handle;
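	/* DSCP occupies the upper six bits of the IP TOS field */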
6847 	dscp = (tos >> DP_IP_DSCP_SHIFT) & DP_IP_DSCP_MASK;
6848 	pdev->dscp_tid_map[map_id][dscp] = tid;
6849 	if (map_id < HAL_MAX_HW_DSCP_TID_MAPS)
6850 		hal_tx_update_dscp_tid(pdev->soc->hal_soc, tid,
6851 			map_id, dscp);
6852 	return;
6854 
6855 /**
6856  * dp_fw_stats_process(): Process TxRx FW stats request
6857  * @vdev_handle: DP VDEV handle
6858  * @req: stats request
6859  *
6860  * return: int
6861  */
6862 static int dp_fw_stats_process(struct cdp_vdev *vdev_handle,
6863 		struct cdp_txrx_stats_req *req)
6864 {
6865 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
6866 	struct dp_pdev *pdev = NULL;
6867 	uint32_t stats = req->stats;
6868 	uint8_t mac_id = req->mac_id;
6869 
6870 	if (!vdev) {
6871 		DP_TRACE(NONE, "VDEV not found");
6872 		return 1;
6873 	}
6874 	pdev = vdev->pdev;
6875 
6876 	/*
6877 	 * For the HTT_DBG_EXT_STATS_RESET command, the FW needs config
6878 	 * param0 to param3 to be set according to the rule below:
6879 	 *
6880 	 * PARAM:
6881 	 *   - config_param0 : start_offset (stats type)
6882 	 *   - config_param1 : stats bmask from start offset
6883 	 *   - config_param2 : stats bmask from start offset + 32
6884 	 *   - config_param3 : stats bmask from start offset + 64
6885 	 */
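	/* CDP_TXRX_STATS_0 carries the stats-reset request described above:
	 * start at the PDEV TX stats type and set every bitmask bit.
	 */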
6886 	if (req->stats == CDP_TXRX_STATS_0) {
6887 		req->param0 = HTT_DBG_EXT_STATS_PDEV_TX;
6888 		req->param1 = 0xFFFFFFFF;
6889 		req->param2 = 0xFFFFFFFF;
6890 		req->param3 = 0xFFFFFFFF;
6891 	}
6892 
6893 	return dp_h2t_ext_stats_msg_send(pdev, stats, req->param0,
6894 				req->param1, req->param2, req->param3,
6895 				0, 0, mac_id);
6896 }
6897 
6898 /**
6899  * dp_txrx_stats_request - function to map to firmware and host stats
6900  * @vdev: virtual handle
6901  * @req: stats request
6902  *
6903  * Return: QDF_STATUS
6904  */
6905 static
6906 QDF_STATUS dp_txrx_stats_request(struct cdp_vdev *vdev,
6907 				 struct cdp_txrx_stats_req *req)
6908 {
6909 	int host_stats;
6910 	int fw_stats;
6911 	enum cdp_stats stats;
6912 	int num_stats;
6913 
6914 	if (!vdev || !req) {
6915 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
6916 				"Invalid vdev/req instance");
6917 		return QDF_STATUS_E_INVAL;
6918 	}
6919 
6920 	stats = req->stats;
6921 	if (stats >= CDP_TXRX_MAX_STATS)
6922 		return QDF_STATUS_E_INVAL;
6923 
6924 	/*
6925 	 * DP_CURR_FW_STATS_AVAIL: no of FW stats currently available
6926 	 *			has to be updated if new FW HTT stats added
6927 	 */
6928 	if (stats > CDP_TXRX_STATS_HTT_MAX)
6929 		stats = stats + DP_CURR_FW_STATS_AVAIL - DP_HTT_DBG_EXT_STATS_MAX;
6930 
6931 	num_stats  = QDF_ARRAY_SIZE(dp_stats_mapping_table);
6932 
6933 	if (stats >= num_stats) {
6934 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
6935 			  "%s: Invalid stats option: %d", __func__, stats);
6936 		return QDF_STATUS_E_INVAL;
6937 	}
6938 
6939 	req->stats = stats;
6940 	fw_stats = dp_stats_mapping_table[stats][STATS_FW];
6941 	host_stats = dp_stats_mapping_table[stats][STATS_HOST];
6942 
6943 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
6944 		 "stats: %u fw_stats_type: %d host_stats: %d",
6945 		  stats, fw_stats, host_stats);
6946 
6947 	if (fw_stats != TXRX_FW_STATS_INVALID) {
6948 		/* update request with FW stats type */
6949 		req->stats = fw_stats;
6950 		return dp_fw_stats_process(vdev, req);
6951 	}
6952 
6953 	if ((host_stats != TXRX_HOST_STATS_INVALID) &&
6954 			(host_stats <= TXRX_HOST_STATS_MAX))
6955 		return dp_print_host_stats(vdev, req);
6956 	else
6957 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
6958 				"Wrong Input for TxRx Stats");
6959 
6960 	return QDF_STATUS_SUCCESS;
6961 }
6962 
6963 /*
6964  * dp_print_napi_stats(): NAPI stats
6965  * @soc - soc handle
6966  */
6967 static void dp_print_napi_stats(struct dp_soc *soc)
6968 {
6969 	hif_print_napi_stats(soc->hif_handle);
6970 }
6971 
6972 /*
6973  * dp_print_per_ring_stats(): Packet count per ring
6974  * @soc - soc handle
6975  */
6976 static void dp_print_per_ring_stats(struct dp_soc *soc)
6977 {
6978 	uint8_t ring;
6979 	uint16_t core;
6980 	uint64_t total_packets;
6981 
6982 	DP_TRACE(FATAL, "Reo packets per ring:");
6983 	for (ring = 0; ring < MAX_REO_DEST_RINGS; ring++) {
6984 		total_packets = 0;
6985 		DP_TRACE(FATAL, "Packets on ring %u:", ring);
6986 		for (core = 0; core < NR_CPUS; core++) {
6987 			DP_TRACE(FATAL, "Packets arriving on core %u: %llu",
6988 				core, soc->stats.rx.ring_packets[core][ring]);
6989 			total_packets += soc->stats.rx.ring_packets[core][ring];
6990 		}
6991 		DP_TRACE(FATAL, "Total packets on ring %u: %llu",
6992 			ring, total_packets);
6993 	}
6994 }
6995 
6996 /*
6997  * dp_txrx_path_stats() - Function to display dump stats
6998  * @soc - soc handle
6999  *
7000  * return: none
7001  */
7002 static void dp_txrx_path_stats(struct dp_soc *soc)
7003 {
7004 	uint8_t error_code;
7005 	uint8_t loop_pdev;
7006 	struct dp_pdev *pdev;
7007 	uint8_t i;
7008 
7009 	for (loop_pdev = 0; loop_pdev < soc->pdev_count; loop_pdev++) {
7010 
7011 		pdev = soc->pdev_list[loop_pdev];
7012 		dp_aggregate_pdev_stats(pdev);
7013 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
7014 			"Tx path Statistics:");
7015 
7016 		DP_TRACE(FATAL, "from stack: %u msdus (%llu bytes)",
7017 			pdev->stats.tx_i.rcvd.num,
7018 			pdev->stats.tx_i.rcvd.bytes);
7019 		DP_TRACE(FATAL, "processed from host: %u msdus (%llu bytes)",
7020 			pdev->stats.tx_i.processed.num,
7021 			pdev->stats.tx_i.processed.bytes);
7022 		DP_TRACE(FATAL, "successfully transmitted: %u msdus (%llu bytes)",
7023 			pdev->stats.tx.tx_success.num,
7024 			pdev->stats.tx.tx_success.bytes);
7025 
7026 		DP_TRACE(FATAL, "Dropped in host:");
7027 		DP_TRACE(FATAL, "Total packets dropped: %u,",
7028 			pdev->stats.tx_i.dropped.dropped_pkt.num);
7029 		DP_TRACE(FATAL, "Descriptor not available: %u",
7030 			pdev->stats.tx_i.dropped.desc_na.num);
7031 		DP_TRACE(FATAL, "Ring full: %u",
7032 			pdev->stats.tx_i.dropped.ring_full);
7033 		DP_TRACE(FATAL, "Enqueue fail: %u",
7034 			pdev->stats.tx_i.dropped.enqueue_fail);
7035 		DP_TRACE(FATAL, "DMA Error: %u",
7036 			pdev->stats.tx_i.dropped.dma_error);
7037 
7038 		DP_TRACE(FATAL, "Dropped in hardware:");
7039 		DP_TRACE(FATAL, "total packets dropped: %u",
7040 			pdev->stats.tx.tx_failed);
7041 		DP_TRACE(FATAL, "mpdu age out: %u",
7042 			pdev->stats.tx.dropped.age_out);
7043 		DP_TRACE(FATAL, "firmware removed: %u",
7044 			pdev->stats.tx.dropped.fw_rem);
7045 		DP_TRACE(FATAL, "firmware removed tx: %u",
7046 			pdev->stats.tx.dropped.fw_rem_tx);
7047 		DP_TRACE(FATAL, "firmware removed notx %u",
7048 			pdev->stats.tx.dropped.fw_rem_notx);
7049 		DP_TRACE(FATAL, "peer_invalid: %u",
7050 			pdev->soc->stats.tx.tx_invalid_peer.num);
7051 
7052 
7053 		DP_TRACE(FATAL, "Tx packets sent per interrupt:");
7054 		DP_TRACE(FATAL, "Single Packet: %u",
7055 			pdev->stats.tx_comp_histogram.pkts_1);
7056 		DP_TRACE(FATAL, "2-20 Packets:  %u",
7057 			pdev->stats.tx_comp_histogram.pkts_2_20);
7058 		DP_TRACE(FATAL, "21-40 Packets: %u",
7059 			pdev->stats.tx_comp_histogram.pkts_21_40);
7060 		DP_TRACE(FATAL, "41-60 Packets: %u",
7061 			pdev->stats.tx_comp_histogram.pkts_41_60);
7062 		DP_TRACE(FATAL, "61-80 Packets: %u",
7063 			pdev->stats.tx_comp_histogram.pkts_61_80);
7064 		DP_TRACE(FATAL, "81-100 Packets: %u",
7065 			pdev->stats.tx_comp_histogram.pkts_81_100);
7066 		DP_TRACE(FATAL, "101-200 Packets: %u",
7067 			pdev->stats.tx_comp_histogram.pkts_101_200);
7068 		DP_TRACE(FATAL, "   201+ Packets: %u",
7069 			pdev->stats.tx_comp_histogram.pkts_201_plus);
7070 
7071 		DP_TRACE(FATAL, "Rx path statistics");
7072 
7073 		DP_TRACE(FATAL, "delivered %u msdus ( %llu bytes),",
7074 			pdev->stats.rx.to_stack.num,
7075 			pdev->stats.rx.to_stack.bytes);
7076 		for (i = 0; i <  CDP_MAX_RX_RINGS; i++)
7077 			DP_TRACE(FATAL, "received on reo[%d] %u msdus ( %llu bytes),",
7078 					i, pdev->stats.rx.rcvd_reo[i].num,
7079 					pdev->stats.rx.rcvd_reo[i].bytes);
7080 		DP_TRACE(FATAL, "intra-bss packets %u msdus ( %llu bytes),",
7081 			pdev->stats.rx.intra_bss.pkts.num,
7082 			pdev->stats.rx.intra_bss.pkts.bytes);
7083 		DP_TRACE(FATAL, "intra-bss fails %u msdus ( %llu bytes),",
7084 			pdev->stats.rx.intra_bss.fail.num,
7085 			pdev->stats.rx.intra_bss.fail.bytes);
7086 		DP_TRACE(FATAL, "raw packets %u msdus ( %llu bytes),",
7087 			pdev->stats.rx.raw.num,
7088 			pdev->stats.rx.raw.bytes);
7089 		DP_TRACE(FATAL, "dropped: mic errors %u msdus",
7090 			pdev->stats.rx.err.mic_err);
7091 		DP_TRACE(FATAL, "peer invalid %u",
7092 			pdev->soc->stats.rx.err.rx_invalid_peer.num);
7093 
7094 		DP_TRACE(FATAL, "Reo Statistics");
7095 		DP_TRACE(FATAL, "rbm error: %u msdus",
7096 			pdev->soc->stats.rx.err.invalid_rbm);
7097 		DP_TRACE(FATAL, "hal ring access fail: %u msdus",
7098 			pdev->soc->stats.rx.err.hal_ring_access_fail);
7099 
7100 		for (error_code = 0; error_code < HAL_REO_ERR_MAX;
7101 				error_code++) {
7102 			if (!pdev->soc->stats.rx.err.reo_error[error_code])
7103 				continue;
7104 			DP_TRACE(FATAL, "Reo error number (%u): %u msdus",
7105 				error_code,
7106 				pdev->soc->stats.rx.err.reo_error[error_code]);
7107 		}
7108 
7109 		for (error_code = 0; error_code < HAL_RXDMA_ERR_MAX;
7110 				error_code++) {
7111 			if (!pdev->soc->stats.rx.err.rxdma_error[error_code])
7112 				continue;
7113 			DP_TRACE(FATAL, "Rxdma error number (%u): %u msdus",
7114 				error_code,
7115 				pdev->soc->stats.rx.err
7116 				.rxdma_error[error_code]);
7117 		}
7118 
7119 		DP_TRACE(FATAL, "Rx packets reaped per interrupt:");
7120 		DP_TRACE(FATAL, "Single Packet: %u",
7121 			 pdev->stats.rx_ind_histogram.pkts_1);
7122 		DP_TRACE(FATAL, "2-20 Packets:  %u",
7123 			 pdev->stats.rx_ind_histogram.pkts_2_20);
7124 		DP_TRACE(FATAL, "21-40 Packets: %u",
7125 			 pdev->stats.rx_ind_histogram.pkts_21_40);
7126 		DP_TRACE(FATAL, "41-60 Packets: %u",
7127 			 pdev->stats.rx_ind_histogram.pkts_41_60);
7128 		DP_TRACE(FATAL, "61-80 Packets: %u",
7129 			 pdev->stats.rx_ind_histogram.pkts_61_80);
7130 		DP_TRACE(FATAL, "81-100 Packets: %u",
7131 			 pdev->stats.rx_ind_histogram.pkts_81_100);
7132 		DP_TRACE(FATAL, "101-200 Packets: %u",
7133 			 pdev->stats.rx_ind_histogram.pkts_101_200);
7134 		DP_TRACE(FATAL, "   201+ Packets: %u",
7135 			 pdev->stats.rx_ind_histogram.pkts_201_plus);
7136 
7137 		DP_TRACE_STATS(ERROR, "%s: tso_enable: %u lro_enable: %u rx_hash: %u napi_enable: %u",
7138 			__func__,
7139 			pdev->soc->wlan_cfg_ctx->tso_enabled,
7140 			pdev->soc->wlan_cfg_ctx->lro_enabled,
7141 			pdev->soc->wlan_cfg_ctx->rx_hash,
7142 			pdev->soc->wlan_cfg_ctx->napi_enabled);
7143 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
7144 		DP_TRACE_STATS(ERROR, "%s: Tx flow stop queue: %u tx flow start queue offset: %u",
7145 			__func__,
7146 			pdev->soc->wlan_cfg_ctx->tx_flow_stop_queue_threshold,
7147 			pdev->soc->wlan_cfg_ctx->tx_flow_start_queue_offset);
7148 #endif
7149 	}
7150 }
7151 
7152 /*
7153  * dp_txrx_dump_stats() - Dump statistics
7154  * @psoc: DP soc handle
 * @value: statistics option
 * @level: verbosity level of the dump
 *
 * Return: QDF_STATUS
7155  */
7156 static QDF_STATUS dp_txrx_dump_stats(void *psoc, uint16_t value,
7157 				     enum qdf_stats_verbosity_level level)
7158 {
7159 	struct dp_soc *soc =
7160 		(struct dp_soc *)psoc;
7161 	QDF_STATUS status = QDF_STATUS_SUCCESS;
7162 
7163 	if (!soc) {
7164 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
7165 			"%s: soc is NULL", __func__);
7166 		return QDF_STATUS_E_INVAL;
7167 	}
7168 
7169 	switch (value) {
7170 	case CDP_TXRX_PATH_STATS:
7171 		dp_txrx_path_stats(soc);
7172 		break;
7173 
7174 	case CDP_RX_RING_STATS:
7175 		dp_print_per_ring_stats(soc);
7176 		break;
7177 
7178 	case CDP_TXRX_TSO_STATS:
7179 		/* TODO: NOT IMPLEMENTED */
7180 		break;
7181 
7182 	case CDP_DUMP_TX_FLOW_POOL_INFO:
7183 		cdp_dump_flow_pool_info((struct cdp_soc_t *)soc);
7184 		break;
7185 
7186 	case CDP_DP_NAPI_STATS:
7187 		dp_print_napi_stats(soc);
7188 		break;
7189 
7190 	case CDP_TXRX_DESC_STATS:
7191 		/* TODO: NOT IMPLEMENTED */
7192 		break;
7193 
7194 	default:
7195 		status = QDF_STATUS_E_INVAL;
7196 		break;
7197 	}
7198 
7199 	return status;
7200 
7201 }
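
/*
 * A minimal usage sketch (not part of the driver): dp_txrx_dump_stats() is
 * registered below as the .display_stats cdp op, so a debug path could invoke
 * it as shown. The soc pointer and the QDF_STATS_VERBOSITY_LEVEL_HIGH
 * enumerator are assumptions for illustration.
 *
 *	if (dp_txrx_dump_stats((void *)soc, CDP_TXRX_PATH_STATS,
 *			       QDF_STATS_VERBOSITY_LEVEL_HIGH) !=
 *	    QDF_STATUS_SUCCESS)
 *		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
 *			  "unsupported stats dump option");
 */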
7202 
7203 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
7204 /**
7205  * dp_update_flow_control_parameters() - API to store datapath tx
7206  *                            flow control parameters
7207  * @soc: soc handle
7208  * @params: ini parameter handle
7209  *
7210  * Return: void
7211  */
7212 static inline
7213 void dp_update_flow_control_parameters(struct dp_soc *soc,
7214 				struct cdp_config_params *params)
7215 {
7216 	soc->wlan_cfg_ctx->tx_flow_stop_queue_threshold =
7217 					params->tx_flow_stop_queue_threshold;
7218 	soc->wlan_cfg_ctx->tx_flow_start_queue_offset =
7219 					params->tx_flow_start_queue_offset;
7220 }
7221 #else
7222 static inline
7223 void dp_update_flow_control_parameters(struct dp_soc *soc,
7224 				struct cdp_config_params *params)
7225 {
7226 }
7227 #endif
7228 
7229 /**
7230  * dp_update_config_parameters() - API to store datapath
7231  *                            config parameters
7232  * @psoc: soc handle
7233  * @params: ini parameter handle
7234  *
7235  * Return: status
7236  */
7237 static
7238 QDF_STATUS dp_update_config_parameters(struct cdp_soc *psoc,
7239 				struct cdp_config_params *params)
7240 {
7241 	struct dp_soc *soc = (struct dp_soc *)psoc;
7242 
7243 	if (!(soc)) {
7244 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
7245 				"%s: Invalid handle", __func__);
7246 		return QDF_STATUS_E_INVAL;
7247 	}
7248 
7249 	soc->wlan_cfg_ctx->tso_enabled = params->tso_enable;
7250 	soc->wlan_cfg_ctx->lro_enabled = params->lro_enable;
7251 	soc->wlan_cfg_ctx->rx_hash = params->flow_steering_enable;
7252 	soc->wlan_cfg_ctx->tcp_udp_checksumoffload =
7253 				params->tcp_udp_checksumoffload;
7254 	soc->wlan_cfg_ctx->napi_enabled = params->napi_enable;
7255 	dp_update_flow_control_parameters(soc, params);
7256 
7257 	return QDF_STATUS_SUCCESS;
7258 }
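
/*
 * A minimal usage sketch (not part of the driver): populating
 * cdp_config_params before calling dp_update_config_parameters(). Only the
 * fields consumed above are shown; tx_flow_stop_queue_threshold and
 * tx_flow_start_queue_offset are used only when QCA_LL_TX_FLOW_CONTROL_V2 is
 * enabled. The soc pointer and the concrete values are assumptions.
 *
 *	struct cdp_config_params params = {0};
 *
 *	params.tso_enable = 1;
 *	params.lro_enable = 1;
 *	params.flow_steering_enable = 1;
 *	params.tcp_udp_checksumoffload = 1;
 *	params.napi_enable = 1;
 *	params.tx_flow_stop_queue_threshold = 15;
 *	params.tx_flow_start_queue_offset = 10;
 *
 *	if (dp_update_config_parameters((struct cdp_soc *)soc, &params) !=
 *	    QDF_STATUS_SUCCESS)
 *		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
 *			  "failed to update DP config parameters");
 */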
7259 
7260 /**
7261  * dp_txrx_set_wds_rx_policy() - API to set the WDS rx policy on
7262  *                            the peers of a vdev
7263  * @vdev_handle: datapath vdev handle
7264  * @val: WDS rx policy bitmap (WDS_POLICY_RX_UCAST_4ADDR/MCAST_4ADDR)
7265  *
7266  * Return: void
7267  */
7268 #ifdef WDS_VENDOR_EXTENSION
7269 void
7270 dp_txrx_set_wds_rx_policy(
7271 		struct cdp_vdev *vdev_handle,
7272 		u_int32_t val)
7273 {
7274 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
7275 	struct dp_peer *peer;
7276 	if (vdev->opmode == wlan_op_mode_ap) {
7277 		/* for ap, set it on bss_peer */
7278 		TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
7279 			if (peer->bss_peer) {
7280 				peer->wds_ecm.wds_rx_filter = 1;
7281 				peer->wds_ecm.wds_rx_ucast_4addr = (val & WDS_POLICY_RX_UCAST_4ADDR) ? 1:0;
7282 				peer->wds_ecm.wds_rx_mcast_4addr = (val & WDS_POLICY_RX_MCAST_4ADDR) ? 1:0;
7283 				break;
7284 			}
7285 		}
7286 	} else if (vdev->opmode == wlan_op_mode_sta) {
7287 		peer = TAILQ_FIRST(&vdev->peer_list);
7288 		peer->wds_ecm.wds_rx_filter = 1;
7289 		peer->wds_ecm.wds_rx_ucast_4addr = (val & WDS_POLICY_RX_UCAST_4ADDR) ? 1:0;
7290 		peer->wds_ecm.wds_rx_mcast_4addr = (val & WDS_POLICY_RX_MCAST_4ADDR) ? 1:0;
7291 	}
7292 }
7293 
7294 /**
7295  * dp_txrx_peer_wds_tx_policy_update() - API to set tx wds policy
7296  *
7297  * @peer_handle - datapath peer handle
7298  * @wds_tx_ucast: policy for unicast transmission
7299  * @wds_tx_mcast: policy for multicast transmission
7300  *
7301  * Return: void
7302  */
7303 void
7304 dp_txrx_peer_wds_tx_policy_update(struct cdp_peer *peer_handle,
7305 		int wds_tx_ucast, int wds_tx_mcast)
7306 {
7307 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
7308 	if (wds_tx_ucast || wds_tx_mcast) {
7309 		peer->wds_enabled = 1;
7310 		peer->wds_ecm.wds_tx_ucast_4addr = wds_tx_ucast;
7311 		peer->wds_ecm.wds_tx_mcast_4addr = wds_tx_mcast;
7312 	} else {
7313 		peer->wds_enabled = 0;
7314 		peer->wds_ecm.wds_tx_ucast_4addr = 0;
7315 		peer->wds_ecm.wds_tx_mcast_4addr = 0;
7316 	}
7317 
7318 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
7319 			FL("Policy Update set to:"
7320 			   " peer->wds_enabled %d"
7321 			   " peer->wds_ecm.wds_tx_ucast_4addr %d"
7322 			   " peer->wds_ecm.wds_tx_mcast_4addr %d"),
7323 			peer->wds_enabled, peer->wds_ecm.wds_tx_ucast_4addr,
7324 			peer->wds_ecm.wds_tx_mcast_4addr);
7325 	return;
7326 }
7327 #endif
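
/*
 * A minimal usage sketch (not part of the driver): under WDS_VENDOR_EXTENSION
 * a control-path caller could program both directions of the WDS policy with
 * the two helpers above. The vdev_handle/peer_handle variables and the chosen
 * policy bits are assumptions for illustration.
 *
 *	dp_txrx_set_wds_rx_policy(vdev_handle,
 *				  WDS_POLICY_RX_UCAST_4ADDR |
 *				  WDS_POLICY_RX_MCAST_4ADDR);
 *	dp_txrx_peer_wds_tx_policy_update(peer_handle, 1, 1);
 */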
7328 
7329 static struct cdp_wds_ops dp_ops_wds = {
7330 	.vdev_set_wds = dp_vdev_set_wds,
7331 #ifdef WDS_VENDOR_EXTENSION
7332 	.txrx_set_wds_rx_policy = dp_txrx_set_wds_rx_policy,
7333 	.txrx_wds_peer_tx_policy_update = dp_txrx_peer_wds_tx_policy_update,
7334 #endif
7335 };
7336 
7337 /*
7338  * dp_txrx_data_tx_cb_set(): set the callback for non standard tx
7339  * @vdev_handle: datapath vdev handle
7340  * @callback: callback function
7341  * @ctxt: callback context
7342  *
7343  */
7344 static void
7345 dp_txrx_data_tx_cb_set(struct cdp_vdev *vdev_handle,
7346 		       ol_txrx_data_tx_cb callback, void *ctxt)
7347 {
7348 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
7349 
7350 	vdev->tx_non_std_data_callback.func = callback;
7351 	vdev->tx_non_std_data_callback.ctxt = ctxt;
7352 }
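
/*
 * A minimal usage sketch (not part of the driver): registering a non-standard
 * tx completion handler on a vdev. The handler below assumes the usual
 * ol_txrx_data_tx_cb shape of (ctxt, nbuf, had_error); the handler name,
 * context pointer and ownership handling of tx_frm are assumptions.
 *
 *	static void my_tx_complete(void *ctxt, qdf_nbuf_t tx_frm, int had_error)
 *	{
 *		if (had_error)
 *			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
 *				  "non-std tx frame completed with error");
 *	}
 *
 *	dp_txrx_data_tx_cb_set(vdev_handle, my_tx_complete, my_ctxt);
 */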
7353 
7354 /**
7355  * dp_pdev_get_dp_txrx_handle() - get dp handle from pdev
7356  * @pdev_hdl: datapath pdev handle
7357  *
7358  * Return: opaque pointer to dp txrx handle
7359  */
7360 static void *dp_pdev_get_dp_txrx_handle(struct cdp_pdev *pdev_hdl)
7361 {
7362 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_hdl;
7363 
7364 	return pdev->dp_txrx_handle;
7365 }
7366 
7367 /**
7368  * dp_pdev_set_dp_txrx_handle() - set dp handle in pdev
7369  * @pdev_hdl: datapath pdev handle
7370  * @dp_txrx_hdl: opaque pointer for dp_txrx_handle
7371  *
7372  * Return: void
7373  */
7374 static void
7375 dp_pdev_set_dp_txrx_handle(struct cdp_pdev *pdev_hdl, void *dp_txrx_hdl)
7376 {
7377 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_hdl;
7378 
7379 	pdev->dp_txrx_handle = dp_txrx_hdl;
7380 }
7381 
7382 /**
7383  * dp_soc_get_dp_txrx_handle() - get context for external-dp from dp soc
7384  * @soc_handle: datapath soc handle
7385  *
7386  * Return: opaque pointer to external dp (non-core DP)
7387  */
7388 static void *dp_soc_get_dp_txrx_handle(struct cdp_soc *soc_handle)
7389 {
7390 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
7391 
7392 	return soc->external_txrx_handle;
7393 }
7394 
7395 /**
7396  * dp_soc_set_dp_txrx_handle() - set external dp handle in soc
7397  * @soc_handle: datapath soc handle
7398  * @txrx_handle: opaque pointer to external dp (non-core DP)
7399  *
7400  * Return: void
7401  */
7402 static void
7403 dp_soc_set_dp_txrx_handle(struct cdp_soc *soc_handle, void *txrx_handle)
7404 {
7405 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
7406 
7407 	soc->external_txrx_handle = txrx_handle;
7408 }
7409 
7410 #ifdef FEATURE_AST
7411 static void dp_peer_teardown_wifi3(struct cdp_vdev *vdev_hdl, void *peer_hdl)
7412 {
7413 	struct dp_vdev *vdev = (struct dp_vdev *) vdev_hdl;
7414 	struct dp_peer *peer = (struct dp_peer *) peer_hdl;
7415 	struct dp_soc *soc = (struct dp_soc *) vdev->pdev->soc;
7416 
7417 	/*
7418 	 * For BSS peer, a new peer is not created on alloc_node if a peer
7419 	 * with the same address already exists; instead the refcnt of the
7420 	 * existing peer is increased. Correspondingly, in the delete path
7421 	 * only the refcnt is decreased, and the peer is deleted only when
7422 	 * all references are released. So delete_in_progress should not be
7423 	 * set for bss_peer, unless only 2 references remain (the peer map
7424 	 * reference and the peer hash table reference).
7425 	 */
7426 	if (peer->bss_peer && (qdf_atomic_read(&peer->ref_cnt) > 2)) {
7427 		return;
7428 	}
7429 
7430 	peer->delete_in_progress = true;
7431 	dp_peer_delete_ast_entries(soc, peer);
7432 }
7433 #endif
7434 
7435 #ifdef ATH_SUPPORT_NAC_RSSI
7436 /**
7437  * dp_vdev_get_neighbour_rssi(): Get the stored RSSI for a configured NAC
7438  * @vdev_hdl: DP vdev handle
 * @mac_addr: neighbour peer mac address
7439  * @rssi: pointer where the rssi value is returned
7440  *
7441  * Return: QDF_STATUS_SUCCESS if the neighbour is found, error status otherwise
7442  */
7443 QDF_STATUS  dp_vdev_get_neighbour_rssi(struct cdp_vdev *vdev_hdl,
7444 				       char *mac_addr,
7445 				       uint8_t *rssi)
7446 {
7447 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_hdl;
7448 	struct dp_pdev *pdev = vdev->pdev;
7449 	struct dp_neighbour_peer *peer = NULL;
7450 	QDF_STATUS status = QDF_STATUS_E_FAILURE;
7451 
7452 	*rssi = 0;
7453 	qdf_spin_lock_bh(&pdev->neighbour_peer_mutex);
7454 	TAILQ_FOREACH(peer, &pdev->neighbour_peers_list,
7455 		      neighbour_peer_list_elem) {
7456 		if (qdf_mem_cmp(&peer->neighbour_peers_macaddr.raw[0],
7457 				mac_addr, DP_MAC_ADDR_LEN) == 0) {
7458 			*rssi = peer->rssi;
7459 			status = QDF_STATUS_SUCCESS;
7460 			break;
7461 		}
7462 	}
7463 	qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex);
7464 	return status;
7465 }
7466 
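
/*
 * A minimal usage sketch (not part of the driver): reading back the stored
 * RSSI for a configured neighbour (NAC). The vdev_hdl handle and client_mac
 * buffer are assumptions for illustration.
 *
 *	uint8_t rssi;
 *
 *	if (dp_vdev_get_neighbour_rssi(vdev_hdl, client_mac, &rssi) ==
 *	    QDF_STATUS_SUCCESS)
 *		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
 *			  "NAC rssi: %u", rssi);
 */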
7467 static QDF_STATUS dp_config_for_nac_rssi(struct cdp_vdev *vdev_handle,
7468 		enum cdp_nac_param_cmd cmd, char *bssid, char *client_macaddr,
7469 		uint8_t chan_num)
7470 {
7471 
7472 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
7473 	struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;
7474 	struct dp_soc *soc = (struct dp_soc *) vdev->pdev->soc;
7475 
7476 	pdev->nac_rssi_filtering = 1;
7477 	/* Store address of NAC (neighbour peer) which will be checked
7478 	 * against TA of received packets.
7479 	 */
7480 
7481 	if (cmd == CDP_NAC_PARAM_ADD) {
7482 		dp_update_filter_neighbour_peers(vdev_handle, DP_NAC_PARAM_ADD,
7483 						 client_macaddr);
7484 	} else if (cmd == CDP_NAC_PARAM_DEL) {
7485 		dp_update_filter_neighbour_peers(vdev_handle,
7486 						 DP_NAC_PARAM_DEL,
7487 						 client_macaddr);
7488 	}
7489 
7490 	if (soc->cdp_soc.ol_ops->config_bssid_in_fw_for_nac_rssi)
7491 		soc->cdp_soc.ol_ops->config_bssid_in_fw_for_nac_rssi
7492 			((void *)vdev->pdev->ctrl_pdev,
7493 			 vdev->vdev_id, cmd, bssid);
7494 
7495 	return QDF_STATUS_SUCCESS;
7496 }
7497 #endif
7498 
7499 static QDF_STATUS dp_peer_map_attach_wifi3(struct cdp_soc_t  *soc_hdl,
7500 		uint32_t max_peers)
7501 {
7502 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
7503 
7504 	soc->max_peers = max_peers;
7505 
7506 	qdf_print("%s max_peers %u\n", __func__, max_peers);
7507 
7508 	if (dp_peer_find_attach(soc))
7509 		return QDF_STATUS_E_FAILURE;
7510 
7511 	return QDF_STATUS_SUCCESS;
7512 }
7513 
7514 /**
7515  * dp_pdev_set_ctrl_pdev() - set ctrl pdev handle in dp pdev
7516  * @dp_pdev: dp pdev handle
7517  * @ctrl_pdev: UMAC ctrl pdev handle
7518  *
7519  * Return: void
7520  */
7521 static void dp_pdev_set_ctrl_pdev(struct cdp_pdev *dp_pdev,
7522 				  struct cdp_ctrl_objmgr_pdev *ctrl_pdev)
7523 {
7524 	struct dp_pdev *pdev = (struct dp_pdev *)dp_pdev;
7525 
7526 	pdev->ctrl_pdev = ctrl_pdev;
7527 }
7528 
7529 static struct cdp_cmn_ops dp_ops_cmn = {
7530 	.txrx_soc_attach_target = dp_soc_attach_target_wifi3,
7531 	.txrx_vdev_attach = dp_vdev_attach_wifi3,
7532 	.txrx_vdev_detach = dp_vdev_detach_wifi3,
7533 	.txrx_pdev_attach = dp_pdev_attach_wifi3,
7534 	.txrx_pdev_detach = dp_pdev_detach_wifi3,
7535 	.txrx_peer_create = dp_peer_create_wifi3,
7536 	.txrx_peer_setup = dp_peer_setup_wifi3,
7537 #ifdef FEATURE_AST
7538 	.txrx_peer_teardown = dp_peer_teardown_wifi3,
7539 #else
7540 	.txrx_peer_teardown = NULL,
7541 #endif
7542 	.txrx_peer_add_ast = dp_peer_add_ast_wifi3,
7543 	.txrx_peer_del_ast = dp_peer_del_ast_wifi3,
7544 	.txrx_peer_update_ast = dp_peer_update_ast_wifi3,
7545 	.txrx_peer_ast_hash_find = dp_peer_ast_hash_find_wifi3,
7546 	.txrx_peer_ast_get_pdev_id = dp_peer_ast_get_pdev_id_wifi3,
7547 	.txrx_peer_ast_get_next_hop = dp_peer_ast_get_next_hop_wifi3,
7548 	.txrx_peer_ast_set_type = dp_peer_ast_set_type_wifi3,
7549 	.txrx_peer_ast_get_type = dp_peer_ast_get_type_wifi3,
7550 	.txrx_peer_delete = dp_peer_delete_wifi3,
7551 	.txrx_vdev_register = dp_vdev_register_wifi3,
7552 	.txrx_soc_detach = dp_soc_detach_wifi3,
7553 	.txrx_get_vdev_mac_addr = dp_get_vdev_mac_addr_wifi3,
7554 	.txrx_get_vdev_from_vdev_id = dp_get_vdev_from_vdev_id_wifi3,
7555 	.txrx_get_ctrl_pdev_from_vdev = dp_get_ctrl_pdev_from_vdev_wifi3,
7556 	.txrx_ath_getstats = dp_get_device_stats,
7557 	.addba_requestprocess = dp_addba_requestprocess_wifi3,
7558 	.addba_responsesetup = dp_addba_responsesetup_wifi3,
7559 	.addba_resp_tx_completion = dp_addba_resp_tx_completion_wifi3,
7560 	.delba_process = dp_delba_process_wifi3,
7561 	.set_addba_response = dp_set_addba_response,
7562 	.get_peer_mac_addr_frm_id = dp_get_peer_mac_addr_frm_id,
7563 	.flush_cache_rx_queue = NULL,
7564 	/* TODO: APIs for dscp-tid need to be added */
7565 	.set_vdev_dscp_tid_map = dp_set_vdev_dscp_tid_map_wifi3,
7566 	.set_pdev_dscp_tid_map = dp_set_pdev_dscp_tid_map_wifi3,
7567 	.txrx_stats_request = dp_txrx_stats_request,
7568 	.txrx_set_monitor_mode = dp_vdev_set_monitor_mode,
7569 	.txrx_get_pdev_id_frm_pdev = dp_get_pdev_id_frm_pdev,
7570 	.txrx_pdev_set_chan_noise_floor = dp_pdev_set_chan_noise_floor,
7571 	.txrx_set_nac = dp_set_nac,
7572 	.txrx_get_tx_pending = dp_get_tx_pending,
7573 	.txrx_set_pdev_tx_capture = dp_config_debug_sniffer,
7574 	.txrx_get_peer_mac_from_peer_id = dp_get_peer_mac_from_peer_id,
7575 	.display_stats = dp_txrx_dump_stats,
7576 	.txrx_soc_set_nss_cfg = dp_soc_set_nss_cfg_wifi3,
7577 	.txrx_soc_get_nss_cfg = dp_soc_get_nss_cfg_wifi3,
7578 	.txrx_intr_attach = dp_soc_interrupt_attach_wrapper,
7579 	.txrx_intr_detach = dp_soc_interrupt_detach,
7580 	.set_pn_check = dp_set_pn_check_wifi3,
7581 	.update_config_parameters = dp_update_config_parameters,
7582 	/* TODO: Add other functions */
7583 	.txrx_data_tx_cb_set = dp_txrx_data_tx_cb_set,
7584 	.get_dp_txrx_handle = dp_pdev_get_dp_txrx_handle,
7585 	.set_dp_txrx_handle = dp_pdev_set_dp_txrx_handle,
7586 	.get_soc_dp_txrx_handle = dp_soc_get_dp_txrx_handle,
7587 	.set_soc_dp_txrx_handle = dp_soc_set_dp_txrx_handle,
7588 	.tx_send = dp_tx_send,
7589 	.txrx_peer_reset_ast = dp_wds_reset_ast_wifi3,
7590 	.txrx_peer_reset_ast_table = dp_wds_reset_ast_table_wifi3,
7591 	.txrx_peer_flush_ast_table = dp_wds_flush_ast_table_wifi3,
7592 	.txrx_peer_map_attach = dp_peer_map_attach_wifi3,
7593 	.txrx_pdev_set_ctrl_pdev = dp_pdev_set_ctrl_pdev,
7594 	.delba_tx_completion = dp_delba_tx_completion_wifi3,
7595 };
7596 
7597 static struct cdp_ctrl_ops dp_ops_ctrl = {
7598 	.txrx_peer_authorize = dp_peer_authorize,
7599 #ifdef QCA_SUPPORT_SON
7600 	.txrx_set_inact_params = dp_set_inact_params,
7601 	.txrx_start_inact_timer = dp_start_inact_timer,
7602 	.txrx_set_overload = dp_set_overload,
7603 	.txrx_peer_is_inact = dp_peer_is_inact,
7604 	.txrx_mark_peer_inact = dp_mark_peer_inact,
7605 #endif
7606 	.txrx_set_vdev_rx_decap_type = dp_set_vdev_rx_decap_type,
7607 	.txrx_set_tx_encap_type = dp_set_vdev_tx_encap_type,
7608 #ifdef MESH_MODE_SUPPORT
7609 	.txrx_set_mesh_mode  = dp_peer_set_mesh_mode,
7610 	.txrx_set_mesh_rx_filter = dp_peer_set_mesh_rx_filter,
7611 #endif
7612 	.txrx_set_vdev_param = dp_set_vdev_param,
7613 	.txrx_peer_set_nawds = dp_peer_set_nawds,
7614 	.txrx_set_pdev_reo_dest = dp_set_pdev_reo_dest,
7615 	.txrx_get_pdev_reo_dest = dp_get_pdev_reo_dest,
7616 	.txrx_set_filter_neighbour_peers = dp_set_filter_neighbour_peers,
7617 	.txrx_update_filter_neighbour_peers =
7618 		dp_update_filter_neighbour_peers,
7619 	.txrx_get_sec_type = dp_get_sec_type,
7620 	/* TODO: Add other functions */
7621 	.txrx_wdi_event_sub = dp_wdi_event_sub,
7622 	.txrx_wdi_event_unsub = dp_wdi_event_unsub,
7623 #ifdef WDI_EVENT_ENABLE
7624 	.txrx_get_pldev = dp_get_pldev,
7625 #endif
7626 	.txrx_set_pdev_param = dp_set_pdev_param,
7627 #ifdef ATH_SUPPORT_NAC_RSSI
7628 	.txrx_vdev_config_for_nac_rssi = dp_config_for_nac_rssi,
7629 	.txrx_vdev_get_neighbour_rssi = dp_vdev_get_neighbour_rssi,
7630 #endif
7631 	.set_key = dp_set_michael_key,
7632 };
7633 
7634 static struct cdp_me_ops dp_ops_me = {
7635 #ifdef ATH_SUPPORT_IQUE
7636 	.tx_me_alloc_descriptor = dp_tx_me_alloc_descriptor,
7637 	.tx_me_free_descriptor = dp_tx_me_free_descriptor,
7638 	.tx_me_convert_ucast = dp_tx_me_send_convert_ucast,
7639 #endif
7640 };
7641 
7642 static struct cdp_mon_ops dp_ops_mon = {
7643 	.txrx_monitor_set_filter_ucast_data = NULL,
7644 	.txrx_monitor_set_filter_mcast_data = NULL,
7645 	.txrx_monitor_set_filter_non_data = NULL,
7646 	.txrx_monitor_get_filter_ucast_data = dp_vdev_get_filter_ucast_data,
7647 	.txrx_monitor_get_filter_mcast_data = dp_vdev_get_filter_mcast_data,
7648 	.txrx_monitor_get_filter_non_data = dp_vdev_get_filter_non_data,
7649 	.txrx_reset_monitor_mode = dp_reset_monitor_mode,
7650 	/* Added support for HK advance filter */
7651 	.txrx_set_advance_monitor_filter = dp_pdev_set_advance_monitor_filter,
7652 };
7653 
7654 static struct cdp_host_stats_ops dp_ops_host_stats = {
7655 	.txrx_per_peer_stats = dp_get_host_peer_stats,
7656 	.get_fw_peer_stats = dp_get_fw_peer_stats,
7657 	.get_htt_stats = dp_get_htt_stats,
7658 	.txrx_enable_enhanced_stats = dp_enable_enhanced_stats,
7659 	.txrx_disable_enhanced_stats = dp_disable_enhanced_stats,
7660 	.txrx_stats_publish = dp_txrx_stats_publish,
7661 	.txrx_get_vdev_stats  = dp_txrx_get_vdev_stats,
7662 	.txrx_get_peer_stats = dp_txrx_get_peer_stats,
7663 	.txrx_reset_peer_stats = dp_txrx_reset_peer_stats,
7664 	/* TODO */
7665 };
7666 
7667 static struct cdp_raw_ops dp_ops_raw = {
7668 	/* TODO */
7669 };
7670 
7671 #ifdef CONFIG_WIN
7672 static struct cdp_pflow_ops dp_ops_pflow = {
7673 	/* TODO */
7674 };
7675 #endif /* CONFIG_WIN */
7676 
7677 #ifdef FEATURE_RUNTIME_PM
7678 /**
7679  * dp_runtime_suspend() - ensure DP is ready to runtime suspend
7680  * @opaque_pdev: DP pdev context
7681  *
7682  * DP is ready to runtime suspend if there are no pending TX packets.
7683  *
7684  * Return: QDF_STATUS
7685  */
7686 static QDF_STATUS dp_runtime_suspend(struct cdp_pdev *opaque_pdev)
7687 {
7688 	struct dp_pdev *pdev = (struct dp_pdev *)opaque_pdev;
7689 	struct dp_soc *soc = pdev->soc;
7690 
7691 	/* Call DP TX flow control API to check if there are any
7692 	   pending packets */
7693 
7694 	if (soc->intr_mode == DP_INTR_POLL)
7695 		qdf_timer_stop(&soc->int_timer);
7696 
7697 	return QDF_STATUS_SUCCESS;
7698 }
7699 
7700 /**
7701  * dp_runtime_resume() - ensure DP is ready to runtime resume
7702  * @opaque_pdev: DP pdev context
7703  *
7704  * Resume DP for runtime PM.
7705  *
7706  * Return: QDF_STATUS
7707  */
7708 static QDF_STATUS dp_runtime_resume(struct cdp_pdev *opaque_pdev)
7709 {
7710 	struct dp_pdev *pdev = (struct dp_pdev *)opaque_pdev;
7711 	struct dp_soc *soc = pdev->soc;
7712 	void *hal_srng;
7713 	int i;
7714 
7715 	if (soc->intr_mode == DP_INTR_POLL)
7716 		qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
7717 
7718 	for (i = 0; i < MAX_TCL_DATA_RINGS; i++) {
7719 		hal_srng = soc->tcl_data_ring[i].hal_srng;
7720 		if (hal_srng) {
7721 			/* We actually only need to acquire the lock */
7722 			hal_srng_access_start(soc->hal_soc, hal_srng);
7723 			/* Update SRC ring head pointer for HW to send
7724 			   all pending packets */
7725 			hal_srng_access_end(soc->hal_soc, hal_srng);
7726 		}
7727 	}
7728 
7729 	return QDF_STATUS_SUCCESS;
7730 }
7731 #endif /* FEATURE_RUNTIME_PM */
7732 
7733 static QDF_STATUS dp_bus_suspend(struct cdp_pdev *opaque_pdev)
7734 {
7735 	struct dp_pdev *pdev = (struct dp_pdev *)opaque_pdev;
7736 	struct dp_soc *soc = pdev->soc;
7737 
7738 	if (soc->intr_mode == DP_INTR_POLL)
7739 		qdf_timer_stop(&soc->int_timer);
7740 
7741 	return QDF_STATUS_SUCCESS;
7742 }
7743 
7744 static QDF_STATUS dp_bus_resume(struct cdp_pdev *opaque_pdev)
7745 {
7746 	struct dp_pdev *pdev = (struct dp_pdev *)opaque_pdev;
7747 	struct dp_soc *soc = pdev->soc;
7748 
7749 	if (soc->intr_mode == DP_INTR_POLL)
7750 		qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
7751 
7752 	return QDF_STATUS_SUCCESS;
7753 }
7754 
7755 #ifndef CONFIG_WIN
7756 static struct cdp_misc_ops dp_ops_misc = {
7757 	.tx_non_std = dp_tx_non_std,
7758 	.get_opmode = dp_get_opmode,
7759 #ifdef FEATURE_RUNTIME_PM
7760 	.runtime_suspend = dp_runtime_suspend,
7761 	.runtime_resume = dp_runtime_resume,
7762 #endif /* FEATURE_RUNTIME_PM */
7763 	.pkt_log_init = dp_pkt_log_init,
7764 	.pkt_log_con_service = dp_pkt_log_con_service,
7765 };
7766 
7767 static struct cdp_flowctl_ops dp_ops_flowctl = {
7768 	/* WIFI 3.0 DP implement as required. */
7769 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
7770 	.flow_pool_map_handler = dp_tx_flow_pool_map,
7771 	.flow_pool_unmap_handler = dp_tx_flow_pool_unmap,
7772 	.register_pause_cb = dp_txrx_register_pause_cb,
7773 	.dump_flow_pool_info = dp_tx_dump_flow_pool_info,
7774 #endif /* QCA_LL_TX_FLOW_CONTROL_V2 */
7775 };
7776 
7777 static struct cdp_lflowctl_ops dp_ops_l_flowctl = {
7778 	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
7779 };
7780 
7781 #ifdef IPA_OFFLOAD
7782 static struct cdp_ipa_ops dp_ops_ipa = {
7783 	.ipa_get_resource = dp_ipa_get_resource,
7784 	.ipa_set_doorbell_paddr = dp_ipa_set_doorbell_paddr,
7785 	.ipa_op_response = dp_ipa_op_response,
7786 	.ipa_register_op_cb = dp_ipa_register_op_cb,
7787 	.ipa_get_stat = dp_ipa_get_stat,
7788 	.ipa_tx_data_frame = dp_tx_send_ipa_data_frame,
7789 	.ipa_enable_autonomy = dp_ipa_enable_autonomy,
7790 	.ipa_disable_autonomy = dp_ipa_disable_autonomy,
7791 	.ipa_setup = dp_ipa_setup,
7792 	.ipa_cleanup = dp_ipa_cleanup,
7793 	.ipa_setup_iface = dp_ipa_setup_iface,
7794 	.ipa_cleanup_iface = dp_ipa_cleanup_iface,
7795 	.ipa_enable_pipes = dp_ipa_enable_pipes,
7796 	.ipa_disable_pipes = dp_ipa_disable_pipes,
7797 	.ipa_set_perf_level = dp_ipa_set_perf_level
7798 };
7799 #endif
7800 
7801 static struct cdp_bus_ops dp_ops_bus = {
7802 	.bus_suspend = dp_bus_suspend,
7803 	.bus_resume = dp_bus_resume
7804 };
7805 
7806 static struct cdp_ocb_ops dp_ops_ocb = {
7807 	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
7808 };
7809 
7810 
7811 static struct cdp_throttle_ops dp_ops_throttle = {
7812 	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
7813 };
7814 
7815 static struct cdp_mob_stats_ops dp_ops_mob_stats = {
7816 	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
7817 };
7818 
7819 static struct cdp_cfg_ops dp_ops_cfg = {
7820 	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
7821 };
7822 
7823 /*
7824  * dp_wrapper_peer_get_ref_by_addr() - wrapper function to get a peer by address
7825  * @dev: physical device instance
7826  * @peer_mac_addr: peer mac address
7827  * @local_id: local id for the peer
7828  * @debug_id: to track enum peer access
7829  *
7830  * Return: peer instance pointer
7831  */
7832 static inline void *
7833 dp_wrapper_peer_get_ref_by_addr(struct cdp_pdev *dev, u8 *peer_mac_addr,
7834 				u8 *local_id,
7835 				enum peer_debug_id_type debug_id)
7836 {
7837 	/*
7838 	 * Currently this function does not implement the "get ref"
7839 	 * functionality and is mapped to dp_find_peer_by_addr which does not
7840 	 * increment the peer ref count. So the peer state is uncertain after
7841 	 * calling this API. The functionality needs to be implemented.
7842 	 * Accordingly the corresponding release_ref function is NULL.
7843 	 */
7844 	return dp_find_peer_by_addr(dev, peer_mac_addr, local_id);
7845 }
7846 
7847 static struct cdp_peer_ops dp_ops_peer = {
7848 	.register_peer = dp_register_peer,
7849 	.clear_peer = dp_clear_peer,
7850 	.find_peer_by_addr = dp_find_peer_by_addr,
7851 	.find_peer_by_addr_and_vdev = dp_find_peer_by_addr_and_vdev,
7852 	.peer_get_ref_by_addr = dp_wrapper_peer_get_ref_by_addr,
7853 	.peer_release_ref = NULL,
7854 	.local_peer_id = dp_local_peer_id,
7855 	.peer_find_by_local_id = dp_peer_find_by_local_id,
7856 	.peer_state_update = dp_peer_state_update,
7857 	.get_vdevid = dp_get_vdevid,
7858 	.get_vdev_by_sta_id = dp_get_vdev_by_sta_id,
7859 	.peer_get_peer_mac_addr = dp_peer_get_peer_mac_addr,
7860 	.get_vdev_for_peer = dp_get_vdev_for_peer,
7861 	.get_peer_state = dp_get_peer_state,
7862 	.get_last_mgmt_timestamp = dp_get_last_mgmt_timestamp,
7863 	.update_last_mgmt_timestamp = dp_update_last_mgmt_timestamp,
7864 };
7865 #endif
7866 
7867 static struct cdp_ops dp_txrx_ops = {
7868 	.cmn_drv_ops = &dp_ops_cmn,
7869 	.ctrl_ops = &dp_ops_ctrl,
7870 	.me_ops = &dp_ops_me,
7871 	.mon_ops = &dp_ops_mon,
7872 	.host_stats_ops = &dp_ops_host_stats,
7873 	.wds_ops = &dp_ops_wds,
7874 	.raw_ops = &dp_ops_raw,
7875 #ifdef CONFIG_WIN
7876 	.pflow_ops = &dp_ops_pflow,
7877 #endif /* CONFIG_WIN */
7878 #ifndef CONFIG_WIN
7879 	.misc_ops = &dp_ops_misc,
7880 	.cfg_ops = &dp_ops_cfg,
7881 	.flowctl_ops = &dp_ops_flowctl,
7882 	.l_flowctl_ops = &dp_ops_l_flowctl,
7883 #ifdef IPA_OFFLOAD
7884 	.ipa_ops = &dp_ops_ipa,
7885 #endif
7886 	.bus_ops = &dp_ops_bus,
7887 	.ocb_ops = &dp_ops_ocb,
7888 	.peer_ops = &dp_ops_peer,
7889 	.throttle_ops = &dp_ops_throttle,
7890 	.mob_stats_ops = &dp_ops_mob_stats,
7891 #endif
7892 };
7893 
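
/*
 * A minimal usage sketch (not part of the driver): once dp_soc_attach_wifi3()
 * installs dp_txrx_ops on the soc, upper layers normally reach these handlers
 * through the cdp_soc_t ops tables rather than calling the static functions
 * directly. The soc_hdl and vdev_handle variables and the CDP_TXRX_STATS_1 id
 * are assumptions for illustration.
 *
 *	struct cdp_txrx_stats_req req = {0};
 *
 *	req.stats = CDP_TXRX_STATS_1;
 *	soc_hdl->ops->cmn_drv_ops->txrx_stats_request(vdev_handle, &req);
 */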
7894 /*
7895  * dp_soc_set_txrx_ring_map() - fill the default tx/rx CPU ring map
7896  * @soc: DP soc handle
7897  *
7898  * Return: Void
7899  */
7900 static void dp_soc_set_txrx_ring_map(struct dp_soc *soc)
7901 {
7902 	uint32_t i;
7903 	for (i = 0; i < WLAN_CFG_INT_NUM_CONTEXTS; i++) {
7904 		soc->tx_ring_map[i] = dp_cpu_ring_map[DP_DEFAULT_MAP][i];
7905 	}
7906 }
7907 
7908 #ifdef QCA_WIFI_QCA8074
7909 /**
7910  * dp_soc_attach_wifi3() - Attach txrx SOC
7911  * @ctrl_psoc:	Opaque SOC handle from control plane
7912  * @hif_handle:	Opaque HIF handle
7913  * @htc_handle:	Opaque HTC handle
7914  * @qdf_osdev:	QDF device
7915  * @ol_ops:	Offload Operations
7916  * @device_id:	Device ID
7917  *
7918  * Return: DP SOC handle on success, NULL on failure
7919  */
7920 void *dp_soc_attach_wifi3(void *ctrl_psoc, void *hif_handle,
7921 			  HTC_HANDLE htc_handle, qdf_device_t qdf_osdev,
7922 			  struct ol_if_ops *ol_ops, uint16_t device_id)
7923 {
7924 	struct dp_soc *soc = qdf_mem_malloc(sizeof(*soc));
7925 	int target_type;
7926 
7927 	if (!soc) {
7928 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
7929 			FL("DP SOC memory allocation failed"));
7930 		goto fail0;
7931 	}
7932 
7933 	soc->device_id = device_id;
7934 	soc->cdp_soc.ops = &dp_txrx_ops;
7935 	soc->cdp_soc.ol_ops = ol_ops;
7936 	soc->ctrl_psoc = ctrl_psoc;
7937 	soc->osdev = qdf_osdev;
7938 	soc->hif_handle = hif_handle;
7939 
7940 	soc->hal_soc = hif_get_hal_handle(hif_handle);
7941 	soc->htt_handle = htt_soc_attach(soc, ctrl_psoc, htc_handle,
7942 		soc->hal_soc, qdf_osdev);
7943 	if (!soc->htt_handle) {
7944 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
7945 			FL("HTT attach failed"));
7946 		goto fail1;
7947 	}
7948 
7949 	soc->wlan_cfg_ctx = wlan_cfg_soc_attach(soc->ctrl_psoc);
7950 	if (!soc->wlan_cfg_ctx) {
7951 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
7952 			FL("wlan_cfg_soc_attach failed"));
7953 		goto fail2;
7954 	}
7955 	target_type = hal_get_target_type(soc->hal_soc);
7956 	switch (target_type) {
7957 	case TARGET_TYPE_QCA6290:
7958 #ifdef QCA_WIFI_QCA6390
7959 	case TARGET_TYPE_QCA6390:
7960 #endif
7961 		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
7962 					       REO_DST_RING_SIZE_QCA6290);
7963 		break;
7964 	case TARGET_TYPE_QCA8074:
7965 	case TARGET_TYPE_QCA8074V2:
7966 		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
7967 					       REO_DST_RING_SIZE_QCA8074);
7968 		break;
7969 	default:
7970 		qdf_print("%s: Unknown tgt type %d\n", __func__, target_type);
7971 		qdf_assert_always(0);
7972 		break;
7973 	}
7974 
7975 	wlan_cfg_set_rx_hash(soc->wlan_cfg_ctx,
7976 			     cfg_get(ctrl_psoc, CFG_DP_RX_HASH));
7977 	soc->cce_disable = false;
7978 
7979 	if (soc->cdp_soc.ol_ops->get_dp_cfg_param) {
7980 		int ret = soc->cdp_soc.ol_ops->get_dp_cfg_param(soc->ctrl_psoc,
7981 				CDP_CFG_MAX_PEER_ID);
7982 
7983 		if (ret != -EINVAL) {
7984 			wlan_cfg_set_max_peer_id(soc->wlan_cfg_ctx, ret);
7985 		}
7986 
7987 		ret = soc->cdp_soc.ol_ops->get_dp_cfg_param(soc->ctrl_psoc,
7988 				CDP_CFG_CCE_DISABLE);
7989 		if (ret == 1)
7990 			soc->cce_disable = true;
7991 	}
7992 
7993 	qdf_spinlock_create(&soc->peer_ref_mutex);
7994 
7995 	qdf_spinlock_create(&soc->reo_desc_freelist_lock);
7996 	qdf_list_create(&soc->reo_desc_freelist, REO_DESC_FREELIST_SIZE);
7997 
7998 	/* fill the tx/rx cpu ring map */
7999 	dp_soc_set_txrx_ring_map(soc);
8000 
8001 	qdf_spinlock_create(&soc->htt_stats.lock);
8002 	/* initialize work queue for stats processing */
8003 	qdf_create_work(0, &soc->htt_stats.work, htt_t2h_stats_handler, soc);
8004 
8005 	/* Initialize inactivity timer for wifison */
8006 	dp_init_inact_timer(soc);
8007 
8008 	return (void *)soc;
8009 
8010 fail2:
8011 	htt_soc_detach(soc->htt_handle);
8012 fail1:
8013 	qdf_mem_free(soc);
8014 fail0:
8015 	return NULL;
8016 }
8017 #endif
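
/*
 * A minimal usage sketch (not part of the driver): a typical attach sequence
 * as seen from the layer that owns the control psoc and HIF/HTC handles. All
 * handles and the device_id below are assumptions; the only hard requirement
 * visible above is that a NULL return means the soc was not created.
 *
 *	void *dp_soc;
 *
 *	dp_soc = dp_soc_attach_wifi3(ctrl_psoc, hif_handle, htc_handle,
 *				     qdf_osdev, ol_ops, device_id);
 *	if (!dp_soc)
 *		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
 *			  "DP soc attach failed");
 */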
8018 
8019 /*
8020  * dp_get_pdev_for_mac_id() -  Return pdev for mac_id
8021  *
8022  * @soc: handle to DP soc
8023  * @mac_id: MAC id
8024  *
8025  * Return: Return pdev corresponding to MAC
8026  */
8027 void *dp_get_pdev_for_mac_id(struct dp_soc *soc, uint32_t mac_id)
8028 {
8029 	if (wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx))
8030 		return soc->pdev_list[mac_id];
8031 
8032 	/* Typically for MCL, as there is only 1 PDEV */
8033 	return soc->pdev_list[0];
8034 }
8035 
8036 /*
8037  * dp_is_hw_dbs_enable() - Procedure to check if DBS is supported
8038  * @soc:		DP SoC context
8039  * @max_mac_rings:	No of MAC rings; reduced to 1 if DBS is not enabled
8040  *
8041  * Return: None
8042  */
8043 static
8044 void dp_is_hw_dbs_enable(struct dp_soc *soc,
8045 				int *max_mac_rings)
8046 {
8047 	bool dbs_enable = false;
8048 	if (soc->cdp_soc.ol_ops->is_hw_dbs_2x2_capable)
8049 		dbs_enable = soc->cdp_soc.ol_ops->
8050 		is_hw_dbs_2x2_capable(soc->ctrl_psoc);
8051 
8052 	*max_mac_rings = (dbs_enable)?(*max_mac_rings):1;
8053 }
8054 
8055 /*
8056  * dp_set_pktlog_wifi3() - enable/disable packet log for a WDI event
8057  * @pdev: Datapath PDEV handle
8058  * @event: which event's notifications are being subscribed to
8059  * @enable: WDI event subscribe or not. (True or False)
8060  *
8061  * Return: 0 on success
8062  */
8063 #ifdef WDI_EVENT_ENABLE
8064 int dp_set_pktlog_wifi3(struct dp_pdev *pdev, uint32_t event,
8065 	bool enable)
8066 {
8067 	struct dp_soc *soc = pdev->soc;
8068 	struct htt_rx_ring_tlv_filter htt_tlv_filter = {0};
8069 	int max_mac_rings = wlan_cfg_get_num_mac_rings
8070 					(pdev->wlan_cfg_ctx);
8071 	uint8_t mac_id = 0;
8072 
8073 	dp_is_hw_dbs_enable(soc, &max_mac_rings);
8074 
8075 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
8076 			FL("Max_mac_rings %d "),
8077 			max_mac_rings);
8078 
8079 	if (enable) {
8080 		switch (event) {
8081 		case WDI_EVENT_RX_DESC:
8082 			if (pdev->monitor_vdev) {
8083 				/* Nothing needs to be done if monitor mode is
8084 				 * enabled
8085 				 */
8086 				return 0;
8087 			}
8088 			if (pdev->rx_pktlog_mode != DP_RX_PKTLOG_FULL) {
8089 				pdev->rx_pktlog_mode = DP_RX_PKTLOG_FULL;
8090 				htt_tlv_filter.mpdu_start = 1;
8091 				htt_tlv_filter.msdu_start = 1;
8092 				htt_tlv_filter.msdu_end = 1;
8093 				htt_tlv_filter.mpdu_end = 1;
8094 				htt_tlv_filter.packet_header = 1;
8095 				htt_tlv_filter.attention = 1;
8096 				htt_tlv_filter.ppdu_start = 1;
8097 				htt_tlv_filter.ppdu_end = 1;
8098 				htt_tlv_filter.ppdu_end_user_stats = 1;
8099 				htt_tlv_filter.ppdu_end_user_stats_ext = 1;
8100 				htt_tlv_filter.ppdu_end_status_done = 1;
8101 				htt_tlv_filter.enable_fp = 1;
8102 				htt_tlv_filter.fp_mgmt_filter = FILTER_MGMT_ALL;
8103 				htt_tlv_filter.fp_ctrl_filter = FILTER_CTRL_ALL;
8104 				htt_tlv_filter.fp_data_filter = FILTER_DATA_ALL;
8105 				htt_tlv_filter.mo_mgmt_filter = FILTER_MGMT_ALL;
8106 				htt_tlv_filter.mo_ctrl_filter = FILTER_CTRL_ALL;
8107 				htt_tlv_filter.mo_data_filter = FILTER_DATA_ALL;
8108 
8109 				for (mac_id = 0; mac_id < max_mac_rings;
8110 								mac_id++) {
8111 					int mac_for_pdev =
8112 						dp_get_mac_id_for_pdev(mac_id,
8113 								pdev->pdev_id);
8114 
8115 					htt_h2t_rx_ring_cfg(soc->htt_handle,
8116 					 mac_for_pdev,
8117 					 pdev->rxdma_mon_status_ring[mac_id]
8118 					 .hal_srng,
8119 					 RXDMA_MONITOR_STATUS,
8120 					 RX_BUFFER_SIZE,
8121 					 &htt_tlv_filter);
8122 
8123 				}
8124 
8125 				if (soc->reap_timer_init)
8126 					qdf_timer_mod(&soc->mon_reap_timer,
8127 					DP_INTR_POLL_TIMER_MS);
8128 			}
8129 			break;
8130 
8131 		case WDI_EVENT_LITE_RX:
8132 			if (pdev->monitor_vdev) {
8133 				/* Nothing needs to be done if monitor mode is
8134 				 * enabled
8135 				 */
8136 				return 0;
8137 			}
8138 
8139 			if (pdev->rx_pktlog_mode != DP_RX_PKTLOG_LITE) {
8140 				pdev->rx_pktlog_mode = DP_RX_PKTLOG_LITE;
8141 
8142 				htt_tlv_filter.ppdu_start = 1;
8143 				htt_tlv_filter.ppdu_end = 1;
8144 				htt_tlv_filter.ppdu_end_user_stats = 1;
8145 				htt_tlv_filter.ppdu_end_user_stats_ext = 1;
8146 				htt_tlv_filter.ppdu_end_status_done = 1;
8147 				htt_tlv_filter.mpdu_start = 1;
8148 				htt_tlv_filter.enable_fp = 1;
8149 				htt_tlv_filter.fp_mgmt_filter = FILTER_MGMT_ALL;
8150 				htt_tlv_filter.fp_ctrl_filter = FILTER_CTRL_ALL;
8151 				htt_tlv_filter.fp_data_filter = FILTER_DATA_ALL;
8152 				htt_tlv_filter.mo_mgmt_filter = FILTER_MGMT_ALL;
8153 				htt_tlv_filter.mo_ctrl_filter = FILTER_CTRL_ALL;
8154 				htt_tlv_filter.mo_data_filter = FILTER_DATA_ALL;
8155 
8156 				for (mac_id = 0; mac_id < max_mac_rings;
8157 								mac_id++) {
8158 					int mac_for_pdev =
8159 						dp_get_mac_id_for_pdev(mac_id,
8160 								pdev->pdev_id);
8161 
8162 					htt_h2t_rx_ring_cfg(soc->htt_handle,
8163 					mac_for_pdev,
8164 					pdev->rxdma_mon_status_ring[mac_id]
8165 					.hal_srng,
8166 					RXDMA_MONITOR_STATUS,
8167 					RX_BUFFER_SIZE_PKTLOG_LITE,
8168 					&htt_tlv_filter);
8169 				}
8170 
8171 				if (soc->reap_timer_init)
8172 					qdf_timer_mod(&soc->mon_reap_timer,
8173 					DP_INTR_POLL_TIMER_MS);
8174 			}
8175 			break;
8176 
8177 		case WDI_EVENT_LITE_T2H:
8178 			if (pdev->monitor_vdev) {
8179 				/* Nothing needs to be done if monitor mode is
8180 				 * enabled
8181 				 */
8182 				return 0;
8183 			}
8184 
8185 			for (mac_id = 0; mac_id < max_mac_rings; mac_id++) {
8186 				int mac_for_pdev = dp_get_mac_id_for_pdev(
8187 							mac_id,	pdev->pdev_id);
8188 
8189 				pdev->pktlog_ppdu_stats = true;
8190 				dp_h2t_cfg_stats_msg_send(pdev,
8191 					DP_PPDU_TXLITE_STATS_BITMASK_CFG,
8192 					mac_for_pdev);
8193 			}
8194 			break;
8195 
8196 		default:
8197 			/* Nothing needs to be done for other pktlog types */
8198 			break;
8199 		}
8200 	} else {
8201 		switch (event) {
8202 		case WDI_EVENT_RX_DESC:
8203 		case WDI_EVENT_LITE_RX:
8204 			if (pdev->monitor_vdev) {
8205 				/* Nothing needs to be done if monitor mode is
8206 				 * enabled
8207 				 */
8208 				return 0;
8209 			}
8210 			if (pdev->rx_pktlog_mode != DP_RX_PKTLOG_DISABLED) {
8211 				pdev->rx_pktlog_mode = DP_RX_PKTLOG_DISABLED;
8212 
8213 				for (mac_id = 0; mac_id < max_mac_rings;
8214 								mac_id++) {
8215 					int mac_for_pdev =
8216 						dp_get_mac_id_for_pdev(mac_id,
8217 								pdev->pdev_id);
8218 
8219 					htt_h2t_rx_ring_cfg(soc->htt_handle,
8220 					  mac_for_pdev,
8221 					  pdev->rxdma_mon_status_ring[mac_id]
8222 					  .hal_srng,
8223 					  RXDMA_MONITOR_STATUS,
8224 					  RX_BUFFER_SIZE,
8225 					  &htt_tlv_filter);
8226 				}
8227 
8228 				if (soc->reap_timer_init)
8229 					qdf_timer_stop(&soc->mon_reap_timer);
8230 			}
8231 			break;
8232 		case WDI_EVENT_LITE_T2H:
8233 			if (pdev->monitor_vdev) {
8234 				/* Nothing needs to be done if monitor mode is
8235 				 * enabled
8236 				 */
8237 				return 0;
8238 			}
8239 			/* To disable HTT_H2T_MSG_TYPE_PPDU_STATS_CFG in FW,
8240 			 * a value of 0 is passed. Once these macros are
8241 			 * defined in the htt header file, the proper macros
8242 			 * will be used.
8243 			 */
8243 			for (mac_id = 0; mac_id < max_mac_rings; mac_id++) {
8244 				int mac_for_pdev =
8245 						dp_get_mac_id_for_pdev(mac_id,
8246 								pdev->pdev_id);
8247 
8248 				pdev->pktlog_ppdu_stats = false;
8249 				if (!pdev->enhanced_stats_en && !pdev->tx_sniffer_enable && !pdev->mcopy_mode) {
8250 					dp_h2t_cfg_stats_msg_send(pdev, 0,
8251 								mac_for_pdev);
8252 				} else if (pdev->tx_sniffer_enable || pdev->mcopy_mode) {
8253 					dp_h2t_cfg_stats_msg_send(pdev, DP_PPDU_STATS_CFG_SNIFFER,
8254 								mac_for_pdev);
8255 				} else if (pdev->enhanced_stats_en) {
8256 					dp_h2t_cfg_stats_msg_send(pdev, DP_PPDU_STATS_CFG_ENH_STATS,
8257 								mac_for_pdev);
8258 				}
8259 			}
8260 
8261 			break;
8262 		default:
8263 			/* Nothing needs to be done for other pktlog types */
8264 			break;
8265 		}
8266 	}
8267 	return 0;
8268 }
8269 #endif
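
/*
 * A minimal usage sketch (not part of the driver): under WDI_EVENT_ENABLE the
 * pktlog layer could enable lite rx logging on a pdev roughly as below and
 * later disable it by passing enable = false. The pdev pointer is an
 * assumption for illustration.
 *
 *	if (dp_set_pktlog_wifi3(pdev, WDI_EVENT_LITE_RX, true))
 *		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
 *			  "failed to enable lite rx pktlog");
 *
 *	dp_set_pktlog_wifi3(pdev, WDI_EVENT_LITE_RX, false);
 */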
8270