xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/dp_main.c (revision 39bb395e2e4be142fb2649c48310a959d4f07122)
1 /*
2  * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include <qdf_types.h>
20 #include <qdf_lock.h>
21 #include <qdf_net_types.h>
22 #include <qdf_lro.h>
23 #include <qdf_module.h>
24 #include <hal_hw_headers.h>
25 #include <hal_api.h>
26 #include <hif.h>
27 #include <htt.h>
28 #include <wdi_event.h>
29 #include <queue.h>
30 #include "dp_htt.h"
31 #include "dp_types.h"
32 #include "dp_internal.h"
33 #include "dp_tx.h"
34 #include "dp_tx_desc.h"
35 #include "dp_rx.h"
36 #include <cdp_txrx_handle.h>
37 #include <wlan_cfg.h>
38 #include "cdp_txrx_cmn_struct.h"
39 #include "cdp_txrx_stats_struct.h"
40 #include "cdp_txrx_cmn_reg.h"
41 #include <qdf_util.h>
42 #include "dp_peer.h"
43 #include "dp_rx_mon.h"
44 #include "htt_stats.h"
45 #include "qdf_mem.h"   /* qdf_mem_malloc,free */
46 #include "cfg_ucfg_api.h"
47 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
48 #include "cdp_txrx_flow_ctrl_v2.h"
49 #else
50 static inline void
51 cdp_dump_flow_pool_info(struct cdp_soc_t *soc)
52 {
53 	return;
54 }
55 #endif
56 #include "dp_ipa.h"
57 
58 #include "dp_cal_client_api.h"
59 
60 #ifdef CONFIG_MCL
61 #ifndef REMOVE_PKT_LOG
62 #include <pktlog_ac_api.h>
63 #include <pktlog_ac.h>
64 #endif
65 #endif
66 static void dp_pktlogmod_exit(struct dp_pdev *handle);
67 static void *dp_peer_create_wifi3(struct cdp_vdev *vdev_handle,
68 				uint8_t *peer_mac_addr,
69 				struct cdp_ctrl_objmgr_peer *ctrl_peer);
70 static void dp_peer_delete_wifi3(void *peer_handle, uint32_t bitmap);
71 static void dp_ppdu_ring_reset(struct dp_pdev *pdev);
72 static void dp_ppdu_ring_cfg(struct dp_pdev *pdev);
73 
74 #define DP_INTR_POLL_TIMER_MS	10
75 #define DP_WDS_AGING_TIMER_DEFAULT_MS	120000
76 #define DP_MCS_LENGTH (6*MAX_MCS)
77 #define DP_NSS_LENGTH (6*SS_COUNT)
78 #define DP_RXDMA_ERR_LENGTH (6*HAL_RXDMA_ERR_MAX)
79 #define DP_REO_ERR_LENGTH (6*HAL_REO_ERR_MAX)
80 #define DP_MAX_MCS_STRING_LEN 30
81 #define DP_CURR_FW_STATS_AVAIL 19
82 #define DP_HTT_DBG_EXT_STATS_MAX 256
83 #define DP_MAX_SLEEP_TIME 100
84 
85 #ifdef IPA_OFFLOAD
86 /* Exclude IPA rings from the interrupt context */
87 #define TX_RING_MASK_VAL	0xb
88 #define RX_RING_MASK_VAL	0x7
89 #else
90 #define TX_RING_MASK_VAL	0xF
91 #define RX_RING_MASK_VAL	0xF
92 #endif
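
/*
 * Sketch of the mask layout (an assumption about ring ownership, added for
 * clarity): each bit in these masks enables one ring in the host interrupt
 * context. 0xb (0b1011) leaves TX ring 2 and 0x7 (0b0111) leaves RX (REO)
 * ring 3 out of the host interrupt path, since those rings are owned by IPA
 * when offload is enabled.
 */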
93 
94 #define STR_MAXLEN	64
95 
96 #define DP_PPDU_STATS_CFG_ALL 0xFFFF
97 
98 /* PPDU stats mask sent to FW to enable enhanced stats */
99 #define DP_PPDU_STATS_CFG_ENH_STATS 0xE67
100 /* PPDU stats mask sent to FW to support debug sniffer feature */
101 #define DP_PPDU_STATS_CFG_SNIFFER 0x2FFF
102 /* PPDU stats mask sent to FW to support BPR feature*/
103 #define DP_PPDU_STATS_CFG_BPR 0x2000
104 /* PPDU stats mask sent to FW to support BPR and enhanced stats feature */
105 #define DP_PPDU_STATS_CFG_BPR_ENH (DP_PPDU_STATS_CFG_BPR | \
106 				   DP_PPDU_STATS_CFG_ENH_STATS)
107 /* PPDU stats mask sent to FW to support BPR and pktlog stats feature */
108 #define DP_PPDU_STATS_CFG_BPR_PKTLOG (DP_PPDU_STATS_CFG_BPR | \
109 				      DP_PPDU_TXLITE_STATS_BITMASK_CFG)
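
/*
 * For reference, the combined BPR + enhanced-stats mask above expands, using
 * the values defined in this file, to:
 *
 *	DP_PPDU_STATS_CFG_BPR_ENH = 0x2000 | 0xE67 = 0x2E67
 *
 * DP_PPDU_STATS_CFG_BPR_PKTLOG depends on DP_PPDU_TXLITE_STATS_BITMASK_CFG,
 * which is not defined in this file.
 */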
110 
111 #define RNG_ERR		"SRNG setup failed for"
112 /**
113  * default_dscp_tid_map - Default DSCP-TID mapping
114  *
115  * DSCP        TID
116  * 000000      0
117  * 001000      1
118  * 010000      2
119  * 011000      3
120  * 100000      4
121  * 101000      5
122  * 110000      6
123  * 111000      7
124  */
125 static uint8_t default_dscp_tid_map[DSCP_TID_MAP_MAX] = {
126 	0, 0, 0, 0, 0, 0, 0, 0,
127 	1, 1, 1, 1, 1, 1, 1, 1,
128 	2, 2, 2, 2, 2, 2, 2, 2,
129 	3, 3, 3, 3, 3, 3, 3, 3,
130 	4, 4, 4, 4, 4, 4, 4, 4,
131 	5, 5, 5, 5, 5, 5, 5, 5,
132 	6, 6, 6, 6, 6, 6, 6, 6,
133 	7, 7, 7, 7, 7, 7, 7, 7,
134 };
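
/*
 * Illustrative sketch only (dp_example_dscp_to_tid() is a hypothetical helper,
 * not part of the datapath): it shows how a 6-bit DSCP value taken from the
 * upper bits of the IP TOS/traffic-class field indexes the map above to pick
 * a TID. The real map is programmed to hardware through the HTT/HAL setup
 * paths, not through a per-packet lookup like this.
 */
static inline uint8_t dp_example_dscp_to_tid(uint8_t ip_tos)
{
	uint8_t dscp = (ip_tos >> 2) & 0x3f;	/* upper 6 bits of TOS */

	/* e.g. DSCP 46 (Expedited Forwarding) falls in the 101xxx rows -> TID 5 */
	return default_dscp_tid_map[dscp];
}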
135 
136 /*
137  * struct dp_rate_debug
138  *
139  * @mcs_type: print string for a given mcs
140  * @valid: whether this MCS index is a valid rate
141  */
142 struct dp_rate_debug {
143 	char mcs_type[DP_MAX_MCS_STRING_LEN];
144 	uint8_t valid;
145 };
146 
147 #define MCS_VALID 1
148 #define MCS_INVALID 0
149 
150 static const struct dp_rate_debug dp_rate_string[DOT11_MAX][MAX_MCS] = {
151 
152 	{
153 		{"OFDM 48 Mbps", MCS_VALID},
154 		{"OFDM 24 Mbps", MCS_VALID},
155 		{"OFDM 12 Mbps", MCS_VALID},
156 		{"OFDM 6 Mbps ", MCS_VALID},
157 		{"OFDM 54 Mbps", MCS_VALID},
158 		{"OFDM 36 Mbps", MCS_VALID},
159 		{"OFDM 18 Mbps", MCS_VALID},
160 		{"OFDM 9 Mbps ", MCS_VALID},
161 		{"INVALID ", MCS_INVALID},
162 		{"INVALID ", MCS_INVALID},
163 		{"INVALID ", MCS_INVALID},
164 		{"INVALID ", MCS_INVALID},
165 		{"INVALID ", MCS_VALID},
166 	},
167 	{
168 		{"CCK 11 Mbps Long  ", MCS_VALID},
169 		{"CCK 5.5 Mbps Long ", MCS_VALID},
170 		{"CCK 2 Mbps Long   ", MCS_VALID},
171 		{"CCK 1 Mbps Long   ", MCS_VALID},
172 		{"CCK 11 Mbps Short ", MCS_VALID},
173 		{"CCK 5.5 Mbps Short", MCS_VALID},
174 		{"CCK 2 Mbps Short  ", MCS_VALID},
175 		{"INVALID ", MCS_INVALID},
176 		{"INVALID ", MCS_INVALID},
177 		{"INVALID ", MCS_INVALID},
178 		{"INVALID ", MCS_INVALID},
179 		{"INVALID ", MCS_INVALID},
180 		{"INVALID ", MCS_VALID},
181 	},
182 	{
183 		{"HT MCS 0 (BPSK 1/2)  ", MCS_VALID},
184 		{"HT MCS 1 (QPSK 1/2)  ", MCS_VALID},
185 		{"HT MCS 2 (QPSK 3/4)  ", MCS_VALID},
186 		{"HT MCS 3 (16-QAM 1/2)", MCS_VALID},
187 		{"HT MCS 4 (16-QAM 3/4)", MCS_VALID},
188 		{"HT MCS 5 (64-QAM 2/3)", MCS_VALID},
189 		{"HT MCS 6 (64-QAM 3/4)", MCS_VALID},
190 		{"HT MCS 7 (64-QAM 5/6)", MCS_VALID},
191 		{"INVALID ", MCS_INVALID},
192 		{"INVALID ", MCS_INVALID},
193 		{"INVALID ", MCS_INVALID},
194 		{"INVALID ", MCS_INVALID},
195 		{"INVALID ", MCS_VALID},
196 	},
197 	{
198 		{"VHT MCS 0 (BPSK 1/2)     ", MCS_VALID},
199 		{"VHT MCS 1 (QPSK 1/2)     ", MCS_VALID},
200 		{"VHT MCS 2 (QPSK 3/4)     ", MCS_VALID},
201 		{"VHT MCS 3 (16-QAM 1/2)   ", MCS_VALID},
202 		{"VHT MCS 4 (16-QAM 3/4)   ", MCS_VALID},
203 		{"VHT MCS 5 (64-QAM 2/3)   ", MCS_VALID},
204 		{"VHT MCS 6 (64-QAM 3/4)   ", MCS_VALID},
205 		{"VHT MCS 7 (64-QAM 5/6)   ", MCS_VALID},
206 		{"VHT MCS 8 (256-QAM 3/4)  ", MCS_VALID},
207 		{"VHT MCS 9 (256-QAM 5/6)  ", MCS_VALID},
208 		{"VHT MCS 10 (1024-QAM 3/4)", MCS_VALID},
209 		{"VHT MCS 11 (1024-QAM 5/6)", MCS_VALID},
210 		{"INVALID ", MCS_VALID},
211 	},
212 	{
213 		{"HE MCS 0 (BPSK 1/2)     ", MCS_VALID},
214 		{"HE MCS 1 (QPSK 1/2)     ", MCS_VALID},
215 		{"HE MCS 2 (QPSK 3/4)     ", MCS_VALID},
216 		{"HE MCS 3 (16-QAM 1/2)   ", MCS_VALID},
217 		{"HE MCS 4 (16-QAM 3/4)   ", MCS_VALID},
218 		{"HE MCS 5 (64-QAM 2/3)   ", MCS_VALID},
219 		{"HE MCS 6 (64-QAM 3/4)   ", MCS_VALID},
220 		{"HE MCS 7 (64-QAM 5/6)   ", MCS_VALID},
221 		{"HE MCS 8 (256-QAM 3/4)  ", MCS_VALID},
222 		{"HE MCS 9 (256-QAM 5/6)  ", MCS_VALID},
223 		{"HE MCS 10 (1024-QAM 3/4)", MCS_VALID},
224 		{"HE MCS 11 (1024-QAM 5/6)", MCS_VALID},
225 		{"INVALID ", MCS_VALID},
226 	}
227 };
228 
229 /**
230  * @brief Cpu ring map types
231  */
232 enum dp_cpu_ring_map_types {
233 	DP_DEFAULT_MAP,
234 	DP_NSS_FIRST_RADIO_OFFLOADED_MAP,
235 	DP_NSS_SECOND_RADIO_OFFLOADED_MAP,
236 	DP_NSS_ALL_RADIO_OFFLOADED_MAP,
237 	DP_CPU_RING_MAP_MAX
238 };
239 
240 /**
241  * @brief Cpu to tx ring map
242  */
243 static uint8_t dp_cpu_ring_map[DP_CPU_RING_MAP_MAX][WLAN_CFG_INT_NUM_CONTEXTS] = {
244 	{0x0, 0x1, 0x2, 0x0},
245 	{0x1, 0x2, 0x1, 0x2},
246 	{0x0, 0x2, 0x0, 0x2},
247 	{0x2, 0x2, 0x2, 0x2}
248 };
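
/*
 * Usage sketch (an assumption about how the table above is consumed during
 * soc setup, not a definitive reference): for a given offload configuration,
 * interrupt/CPU context i is steered to TCL ring dp_cpu_ring_map[map][i],
 * e.g.
 *
 *	tx_ring = dp_cpu_ring_map[DP_NSS_FIRST_RADIO_OFFLOADED_MAP][i];
 *
 * so that rings reserved for the NSS offload engine stay out of the host map.
 */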
249 
250 /**
251  * @brief Select the type of statistics
252  */
253 enum dp_stats_type {
254 	STATS_FW = 0,
255 	STATS_HOST = 1,
256 	STATS_TYPE_MAX = 2,
257 };
258 
259 /**
260  * @brief General Firmware statistics options
261  *
262  */
263 enum dp_fw_stats {
264 	TXRX_FW_STATS_INVALID	= -1,
265 };
266 
267 /**
268  * dp_stats_mapping_table - Firmware and Host statistics
269  * currently supported
270  */
271 const int dp_stats_mapping_table[][STATS_TYPE_MAX] = {
272 	{HTT_DBG_EXT_STATS_RESET, TXRX_HOST_STATS_INVALID},
273 	{HTT_DBG_EXT_STATS_PDEV_TX, TXRX_HOST_STATS_INVALID},
274 	{HTT_DBG_EXT_STATS_PDEV_RX, TXRX_HOST_STATS_INVALID},
275 	{HTT_DBG_EXT_STATS_PDEV_TX_HWQ, TXRX_HOST_STATS_INVALID},
276 	{HTT_DBG_EXT_STATS_PDEV_TX_SCHED, TXRX_HOST_STATS_INVALID},
277 	{HTT_DBG_EXT_STATS_PDEV_ERROR, TXRX_HOST_STATS_INVALID},
278 	{HTT_DBG_EXT_STATS_PDEV_TQM, TXRX_HOST_STATS_INVALID},
279 	{HTT_DBG_EXT_STATS_TQM_CMDQ, TXRX_HOST_STATS_INVALID},
280 	{HTT_DBG_EXT_STATS_TX_DE_INFO, TXRX_HOST_STATS_INVALID},
281 	{HTT_DBG_EXT_STATS_PDEV_TX_RATE, TXRX_HOST_STATS_INVALID},
282 	{HTT_DBG_EXT_STATS_PDEV_RX_RATE, TXRX_HOST_STATS_INVALID},
283 	{TXRX_FW_STATS_INVALID, TXRX_HOST_STATS_INVALID},
284 	{HTT_DBG_EXT_STATS_TX_SELFGEN_INFO, TXRX_HOST_STATS_INVALID},
285 	{HTT_DBG_EXT_STATS_TX_MU_HWQ, TXRX_HOST_STATS_INVALID},
286 	{HTT_DBG_EXT_STATS_RING_IF_INFO, TXRX_HOST_STATS_INVALID},
287 	{HTT_DBG_EXT_STATS_SRNG_INFO, TXRX_HOST_STATS_INVALID},
288 	{HTT_DBG_EXT_STATS_SFM_INFO, TXRX_HOST_STATS_INVALID},
289 	{HTT_DBG_EXT_STATS_PDEV_TX_MU, TXRX_HOST_STATS_INVALID},
290 	{HTT_DBG_EXT_STATS_ACTIVE_PEERS_LIST, TXRX_HOST_STATS_INVALID},
291 	/* Last ENUM for HTT FW STATS */
292 	{DP_HTT_DBG_EXT_STATS_MAX, TXRX_HOST_STATS_INVALID},
293 	{TXRX_FW_STATS_INVALID, TXRX_CLEAR_STATS},
294 	{TXRX_FW_STATS_INVALID, TXRX_RX_RATE_STATS},
295 	{TXRX_FW_STATS_INVALID, TXRX_TX_RATE_STATS},
296 	{TXRX_FW_STATS_INVALID, TXRX_TX_HOST_STATS},
297 	{TXRX_FW_STATS_INVALID, TXRX_RX_HOST_STATS},
298 	{TXRX_FW_STATS_INVALID, TXRX_AST_STATS},
299 	{TXRX_FW_STATS_INVALID, TXRX_SRNG_PTR_STATS},
300 	{TXRX_FW_STATS_INVALID, TXRX_RX_MON_STATS},
301 	{TXRX_FW_STATS_INVALID, TXRX_REO_QUEUE_STATS},
302 };
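
/*
 * Lookup sketch (assumption): a stats request value indexes the rows above
 * and the column selects the firmware or host handler id, e.g.
 *
 *	int fw_stats_id   = dp_stats_mapping_table[value][STATS_FW];
 *	int host_stats_id = dp_stats_mapping_table[value][STATS_HOST];
 *
 * TXRX_FW_STATS_INVALID / TXRX_HOST_STATS_INVALID mark combinations that are
 * not supported for that request.
 */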
303 
304 /* MCL specific functions */
305 #ifdef CONFIG_MCL
306 /**
307  * dp_soc_get_mon_mask_for_interrupt_mode() - get mon mode mask for intr mode
308  * @soc: pointer to dp_soc handle
309  * @intr_ctx_num: interrupt context number for which mon mask is needed
310  *
311  * For MCL, monitor mode rings are processed in timer contexts (polled).
312  * This function returns 0, since in interrupt mode (softirq based RX)
313  * we do not want to process monitor mode rings in a softirq.
314  *
315  * So, when packet log is enabled for SAP/STA/P2P modes, regular interrupt
316  * processing will not process monitor mode rings; that is done in a
317  * separate timer context instead.
318  *
319  * Return: 0
320  */
321 static inline
322 uint32_t dp_soc_get_mon_mask_for_interrupt_mode(struct dp_soc *soc, int intr_ctx_num)
323 {
324 	return 0;
325 }
326 
327 /*
328  * dp_service_mon_rings() - timer to reap monitor rings,
329  * required as we are not getting ppdu end interrupts
330  * @arg: SoC Handle
331  *
332  * Return: none
333  *
334  */
335 static void dp_service_mon_rings(void *arg)
336 {
337 	struct dp_soc *soc = (struct dp_soc *)arg;
338 	int ring = 0, work_done, mac_id;
339 	struct dp_pdev *pdev = NULL;
340 
341 	for  (ring = 0 ; ring < MAX_PDEV_CNT; ring++) {
342 		pdev = soc->pdev_list[ring];
343 		if (!pdev)
344 			continue;
345 		for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
346 			int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id,
347 								pdev->pdev_id);
348 			work_done = dp_mon_process(soc, mac_for_pdev,
349 						   QCA_NAPI_BUDGET);
350 
351 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
352 				  FL("Reaped %d descs from Monitor rings"),
353 				  work_done);
354 		}
355 	}
356 
357 	qdf_timer_mod(&soc->mon_reap_timer, DP_INTR_POLL_TIMER_MS);
358 }
359 
360 #ifndef REMOVE_PKT_LOG
361 /**
362  * dp_pkt_log_init() - API to initialize packet log
363  * @ppdev: physical device handle
364  * @scn: HIF context
365  *
366  * Return: none
367  */
368 void dp_pkt_log_init(struct cdp_pdev *ppdev, void *scn)
369 {
370 	struct dp_pdev *handle = (struct dp_pdev *)ppdev;
371 
372 	if (handle->pkt_log_init) {
373 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
374 			  "%s: Packet log already initialized", __func__);
375 		return;
376 	}
377 
378 	pktlog_sethandle(&handle->pl_dev, scn);
379 	pktlog_set_callback_regtype(PKTLOG_LITE_CALLBACK_REGISTRATION);
380 
381 	if (pktlogmod_init(scn)) {
382 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
383 			  "%s: pktlogmod_init failed", __func__);
384 		handle->pkt_log_init = false;
385 	} else {
386 		handle->pkt_log_init = true;
387 	}
388 }
389 
390 /**
391  * dp_pkt_log_con_service() - connect packet log service
392  * @ppdev: physical device handle
393  * @scn: device context
394  *
395  * Return: none
396  */
397 static void dp_pkt_log_con_service(struct cdp_pdev *ppdev, void *scn)
398 {
399 	struct dp_pdev *pdev = (struct dp_pdev *)ppdev;
400 
401 	dp_pkt_log_init((struct cdp_pdev *)pdev, scn);
402 	pktlog_htc_attach();
403 }
404 
405 /**
406  * dp_pktlogmod_exit() - API to cleanup pktlog info
407  * @handle: Pdev handle
408  *
409  * Return: none
410  */
411 static void dp_pktlogmod_exit(struct dp_pdev *handle)
412 {
413 	void *scn = (void *)handle->soc->hif_handle;
414 
415 	if (!scn) {
416 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
417 			  "%s: Invalid hif(scn) handle", __func__);
418 		return;
419 	}
420 
421 	pktlogmod_exit(scn);
422 	handle->pkt_log_init = false;
423 }
424 #endif
425 #else
426 static void dp_pktlogmod_exit(struct dp_pdev *handle) { }
427 
428 /**
429  * dp_soc_get_mon_mask_for_interrupt_mode() - get mon mode mask for intr mode
430  * @soc: pointer to dp_soc handle
431  * @intr_ctx_num: interrupt context number for which mon mask is needed
432  *
433  * Return: mon mask value
434  */
435 static inline
436 uint32_t dp_soc_get_mon_mask_for_interrupt_mode(struct dp_soc *soc, int intr_ctx_num)
437 {
438 	return wlan_cfg_get_rx_mon_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
439 }
440 #endif
441 
442 /**
443  * dp_get_dp_vdev_from_cdp_vdev() - get dp_vdev from cdp_vdev by type-casting
444  * @cdp_opaque_vdev: pointer to cdp_vdev
445  *
446  * Return: pointer to dp_vdev
447  */
448 static
449 struct dp_vdev * dp_get_dp_vdev_from_cdp_vdev(struct cdp_vdev *cdp_opaque_vdev)
450 {
451 	return (struct dp_vdev *)cdp_opaque_vdev;
452 }
453 
454 
455 static int dp_peer_add_ast_wifi3(struct cdp_soc_t *soc_hdl,
456 					struct cdp_peer *peer_hdl,
457 					uint8_t *mac_addr,
458 					enum cdp_txrx_ast_entry_type type,
459 					uint32_t flags)
460 {
461 
462 	return dp_peer_add_ast((struct dp_soc *)soc_hdl,
463 				(struct dp_peer *)peer_hdl,
464 				mac_addr,
465 				type,
466 				flags);
467 }
468 
469 static void dp_peer_del_ast_wifi3(struct cdp_soc_t *soc_hdl,
470 					 void *ast_entry_hdl)
471 {
472 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
473 	qdf_spin_lock_bh(&soc->ast_lock);
474 	dp_peer_del_ast((struct dp_soc *)soc_hdl,
475 			(struct dp_ast_entry *)ast_entry_hdl);
476 	qdf_spin_unlock_bh(&soc->ast_lock);
477 }
478 
479 
480 static int dp_peer_update_ast_wifi3(struct cdp_soc_t *soc_hdl,
481 						struct cdp_peer *peer_hdl,
482 						uint8_t *wds_macaddr,
483 						uint32_t flags)
484 {
485 	int status = -1;
486 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
487 	struct dp_ast_entry  *ast_entry = NULL;
488 
489 	qdf_spin_lock_bh(&soc->ast_lock);
490 	ast_entry = dp_peer_ast_hash_find(soc, wds_macaddr);
491 
492 	if (ast_entry) {
493 		status = dp_peer_update_ast(soc,
494 					    (struct dp_peer *)peer_hdl,
495 					   ast_entry, flags);
496 	}
497 
498 	qdf_spin_unlock_bh(&soc->ast_lock);
499 
500 	return status;
501 }
502 
503 /*
504  * dp_wds_reset_ast_wifi3() - Reset the is_active flag for an AST entry
505  * @soc_hdl:		Datapath SOC handle
506  * @wds_macaddr:	WDS entry MAC Address
507  * Return: None
508  */
509 static void dp_wds_reset_ast_wifi3(struct cdp_soc_t *soc_hdl,
510 				   uint8_t *wds_macaddr, void *vdev_handle)
511 {
512 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
513 	struct dp_ast_entry *ast_entry = NULL;
514 
515 	qdf_spin_lock_bh(&soc->ast_lock);
516 	ast_entry = dp_peer_ast_hash_find(soc, wds_macaddr);
517 
518 	if (ast_entry) {
519 		if ((ast_entry->type != CDP_TXRX_AST_TYPE_STATIC) &&
520 		    (ast_entry->type != CDP_TXRX_AST_TYPE_SELF)) {
521 			ast_entry->is_active = TRUE;
522 		}
523 	}
524 
525 	qdf_spin_unlock_bh(&soc->ast_lock);
526 }
527 
528 /*
529  * dp_wds_reset_ast_table_wifi3() - Reset the is_active flag for all AST entries
530  * @soc_hdl:		Datapath SOC handle
531  *
532  * Return: None
533  */
534 static void dp_wds_reset_ast_table_wifi3(struct cdp_soc_t  *soc_hdl,
535 					 void *vdev_hdl)
536 {
537 	struct dp_soc *soc = (struct dp_soc *) soc_hdl;
538 	struct dp_pdev *pdev;
539 	struct dp_vdev *vdev;
540 	struct dp_peer *peer;
541 	struct dp_ast_entry *ase, *temp_ase;
542 	int i;
543 
544 	qdf_spin_lock_bh(&soc->ast_lock);
545 
546 	for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) {
547 		pdev = soc->pdev_list[i];
548 		qdf_spin_lock_bh(&pdev->vdev_list_lock);
549 		DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
550 			DP_VDEV_ITERATE_PEER_LIST(vdev, peer) {
551 				DP_PEER_ITERATE_ASE_LIST(peer, ase, temp_ase) {
552 					if ((ase->type ==
553 					     CDP_TXRX_AST_TYPE_STATIC) ||
554 					    (ase->type ==
555 					     CDP_TXRX_AST_TYPE_SELF))
556 						continue;
557 					ase->is_active = TRUE;
558 				}
559 			}
560 		}
561 		qdf_spin_unlock_bh(&pdev->vdev_list_lock);
562 	}
563 
564 	qdf_spin_unlock_bh(&soc->ast_lock);
565 }
566 
567 /*
568  * dp_wds_flush_ast_table_wifi3() - Delete all WDS and HMWDS AST entries
569  * @soc_hdl:		Datapath SOC handle
570  *
571  * Return: None
572  */
573 static void dp_wds_flush_ast_table_wifi3(struct cdp_soc_t  *soc_hdl)
574 {
575 	struct dp_soc *soc = (struct dp_soc *) soc_hdl;
576 	struct dp_pdev *pdev;
577 	struct dp_vdev *vdev;
578 	struct dp_peer *peer;
579 	struct dp_ast_entry *ase, *temp_ase;
580 	int i;
581 
582 	qdf_spin_lock_bh(&soc->ast_lock);
583 
584 	for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) {
585 		pdev = soc->pdev_list[i];
586 		qdf_spin_lock_bh(&pdev->vdev_list_lock);
587 		DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
588 			DP_VDEV_ITERATE_PEER_LIST(vdev, peer) {
589 				DP_PEER_ITERATE_ASE_LIST(peer, ase, temp_ase) {
590 					if ((ase->type ==
591 					     CDP_TXRX_AST_TYPE_STATIC) ||
592 					    (ase->type ==
593 					     CDP_TXRX_AST_TYPE_SELF))
594 						continue;
595 					dp_peer_del_ast(soc, ase);
596 				}
597 			}
598 		}
599 		qdf_spin_unlock_bh(&pdev->vdev_list_lock);
600 	}
601 
602 	qdf_spin_unlock_bh(&soc->ast_lock);
603 }
604 
605 static void *dp_peer_ast_hash_find_wifi3(struct cdp_soc_t *soc_hdl,
606 						uint8_t *ast_mac_addr)
607 {
608 	struct dp_ast_entry *ast_entry;
609 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
610 	qdf_spin_lock_bh(&soc->ast_lock);
611 	ast_entry = dp_peer_ast_hash_find(soc, ast_mac_addr);
612 	qdf_spin_unlock_bh(&soc->ast_lock);
613 	return (void *)ast_entry;
614 }
615 
616 static uint8_t dp_peer_ast_get_pdev_id_wifi3(struct cdp_soc_t *soc_hdl,
617 							void *ast_entry_hdl)
618 {
619 	return dp_peer_ast_get_pdev_id((struct dp_soc *)soc_hdl,
620 					(struct dp_ast_entry *)ast_entry_hdl);
621 }
622 
623 static uint8_t dp_peer_ast_get_next_hop_wifi3(struct cdp_soc_t *soc_hdl,
624 							void *ast_entry_hdl)
625 {
626 	return dp_peer_ast_get_next_hop((struct dp_soc *)soc_hdl,
627 					(struct dp_ast_entry *)ast_entry_hdl);
628 }
629 
630 static void dp_peer_ast_set_type_wifi3(
631 					struct cdp_soc_t *soc_hdl,
632 					void *ast_entry_hdl,
633 					enum cdp_txrx_ast_entry_type type)
634 {
635 	dp_peer_ast_set_type((struct dp_soc *)soc_hdl,
636 				(struct dp_ast_entry *)ast_entry_hdl,
637 				type);
638 }
639 
640 static enum cdp_txrx_ast_entry_type dp_peer_ast_get_type_wifi3(
641 					struct cdp_soc_t *soc_hdl,
642 					void *ast_entry_hdl)
643 {
644 	return ((struct dp_ast_entry *)ast_entry_hdl)->type;
645 }
646 
647 /**
648  * dp_srng_find_ring_in_mask() - find which ext_group a ring belongs to
649  * @ring_num: ring num of the ring being queried
650  * @grp_mask: the grp_mask array for the ring type in question.
651  *
652  * The grp_mask array is indexed by group number and the bit fields correspond
653  * to ring numbers.  We are finding which interrupt group a ring belongs to.
654  *
655  * Return: the index in the grp_mask array with the ring number.
656  * -QDF_STATUS_E_NOENT if no entry is found
657  */
658 static int dp_srng_find_ring_in_mask(int ring_num, int *grp_mask)
659 {
660 	int ext_group_num;
661 	int mask = 1 << ring_num;
662 
663 	for (ext_group_num = 0; ext_group_num < WLAN_CFG_INT_NUM_CONTEXTS;
664 	     ext_group_num++) {
665 		if (mask & grp_mask[ext_group_num])
666 			return ext_group_num;
667 	}
668 
669 	return -QDF_STATUS_E_NOENT;
670 }
671 
672 static int dp_srng_calculate_msi_group(struct dp_soc *soc,
673 				       enum hal_ring_type ring_type,
674 				       int ring_num)
675 {
676 	int *grp_mask;
677 
678 	switch (ring_type) {
679 	case WBM2SW_RELEASE:
680 		/* dp_tx_comp_handler - soc->tx_comp_ring */
681 		if (ring_num < 3)
682 			grp_mask = &soc->wlan_cfg_ctx->int_tx_ring_mask[0];
683 
684 		/* dp_rx_wbm_err_process - soc->rx_rel_ring */
685 		else if (ring_num == 3) {
686 			/* sw treats this as a separate ring type */
687 			grp_mask = &soc->wlan_cfg_ctx->
688 				int_rx_wbm_rel_ring_mask[0];
689 			ring_num = 0;
690 		} else {
691 			qdf_assert(0);
692 			return -QDF_STATUS_E_NOENT;
693 		}
694 	break;
695 
696 	case REO_EXCEPTION:
697 		/* dp_rx_err_process - &soc->reo_exception_ring */
698 		grp_mask = &soc->wlan_cfg_ctx->int_rx_err_ring_mask[0];
699 	break;
700 
701 	case REO_DST:
702 		/* dp_rx_process - soc->reo_dest_ring */
703 		grp_mask = &soc->wlan_cfg_ctx->int_rx_ring_mask[0];
704 	break;
705 
706 	case REO_STATUS:
707 		/* dp_reo_status_ring_handler - soc->reo_status_ring */
708 		grp_mask = &soc->wlan_cfg_ctx->int_reo_status_ring_mask[0];
709 	break;
710 
711 	/* dp_rx_mon_status_srng_process - pdev->rxdma_mon_status_ring*/
712 	case RXDMA_MONITOR_STATUS:
713 	/* dp_rx_mon_dest_process - pdev->rxdma_mon_dst_ring */
714 	case RXDMA_MONITOR_DST:
715 		/* dp_mon_process */
716 		grp_mask = &soc->wlan_cfg_ctx->int_rx_mon_ring_mask[0];
717 	break;
718 	case RXDMA_DST:
719 		/* dp_rxdma_err_process */
720 		grp_mask = &soc->wlan_cfg_ctx->int_rxdma2host_ring_mask[0];
721 	break;
722 
723 	case RXDMA_BUF:
724 		grp_mask = &soc->wlan_cfg_ctx->int_host2rxdma_ring_mask[0];
725 	break;
726 
727 	case RXDMA_MONITOR_BUF:
728 		/* TODO: support low_thresh interrupt */
729 		return -QDF_STATUS_E_NOENT;
730 	break;
731 
732 	case TCL_DATA:
733 	case TCL_CMD:
734 	case REO_CMD:
735 	case SW2WBM_RELEASE:
736 	case WBM_IDLE_LINK:
737 		/* normally empty SW_TO_HW rings */
738 		return -QDF_STATUS_E_NOENT;
739 	break;
740 
741 	case TCL_STATUS:
742 	case REO_REINJECT:
743 		/* misc unused rings */
744 		return -QDF_STATUS_E_NOENT;
745 	break;
746 
747 	case CE_SRC:
748 	case CE_DST:
749 	case CE_DST_STATUS:
750 		/* CE_rings - currently handled by hif */
751 	default:
752 		return -QDF_STATUS_E_NOENT;
753 	break;
754 	}
755 
756 	return dp_srng_find_ring_in_mask(ring_num, grp_mask);
757 }
758 
759 static void dp_srng_msi_setup(struct dp_soc *soc, struct hal_srng_params
760 			      *ring_params, int ring_type, int ring_num)
761 {
762 	int msi_group_number;
763 	int msi_data_count;
764 	int ret;
765 	uint32_t msi_data_start, msi_irq_start, addr_low, addr_high;
766 
767 	ret = pld_get_user_msi_assignment(soc->osdev->dev, "DP",
768 					    &msi_data_count, &msi_data_start,
769 					    &msi_irq_start);
770 
771 	if (ret)
772 		return;
773 
774 	msi_group_number = dp_srng_calculate_msi_group(soc, ring_type,
775 						       ring_num);
776 	if (msi_group_number < 0) {
777 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW,
778 			FL("ring not part of an ext_group; ring_type: %d,ring_num %d"),
779 			ring_type, ring_num);
780 		ring_params->msi_addr = 0;
781 		ring_params->msi_data = 0;
782 		return;
783 	}
784 
785 	if (msi_group_number > msi_data_count) {
786 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_WARN,
787 			FL("2 msi_groups will share an msi; msi_group_num %d"),
788 			msi_group_number);
789 
790 		QDF_ASSERT(0);
791 	}
792 
793 	pld_get_msi_address(soc->osdev->dev, &addr_low, &addr_high);
794 
795 	ring_params->msi_addr = addr_low;
796 	ring_params->msi_addr |= (qdf_dma_addr_t)(((uint64_t)addr_high) << 32);
797 	ring_params->msi_data = (msi_group_number % msi_data_count)
798 		+ msi_data_start;
799 	ring_params->flags |= HAL_SRNG_MSI_INTR;
800 }
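
/*
 * Worked example for the msi_data computation above (the platform values are
 * assumptions): with msi_data_start = 32 and msi_data_count = 8, a ring whose
 * msi_group_number is 10 gets msi_data = (10 % 8) + 32 = 34, i.e. it wraps
 * onto a vector already used by group 2, which is the case the warning above
 * flags.
 */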
801 
802 /**
803  * dp_print_ast_stats() - Dump AST table contents
804  * @soc: Datapath soc handle
805  *
806  * Return: void
807  */
808 #ifdef FEATURE_AST
809 static void dp_print_ast_stats(struct dp_soc *soc)
810 {
811 	uint8_t i;
812 	uint8_t num_entries = 0;
813 	struct dp_vdev *vdev;
814 	struct dp_pdev *pdev;
815 	struct dp_peer *peer;
816 	struct dp_ast_entry *ase, *tmp_ase;
817 	char type[CDP_TXRX_AST_TYPE_MAX][10] = {
818 			"NONE", "STATIC", "SELF", "WDS", "MEC", "HMWDS"};
819 
820 	DP_PRINT_STATS("AST Stats:");
821 	DP_PRINT_STATS("	Entries Added   = %d", soc->stats.ast.added);
822 	DP_PRINT_STATS("	Entries Deleted = %d", soc->stats.ast.deleted);
823 	DP_PRINT_STATS("	Entries Agedout = %d", soc->stats.ast.aged_out);
824 	DP_PRINT_STATS("AST Table:");
825 	for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) {
826 		pdev = soc->pdev_list[i];
827 		qdf_spin_lock_bh(&pdev->vdev_list_lock);
828 		DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
829 			DP_VDEV_ITERATE_PEER_LIST(vdev, peer) {
830 				DP_PEER_ITERATE_ASE_LIST(peer, ase, tmp_ase) {
831 					DP_PRINT_STATS("%6d mac_addr = %pM"
832 							" peer_mac_addr = %pM"
833 							" type = %s"
834 							" next_hop = %d"
835 							" is_active = %d"
836 							" is_bss = %d"
837 							" ast_idx = %d"
838 							" pdev_id = %d"
839 							" vdev_id = %d",
840 							++num_entries,
841 							ase->mac_addr.raw,
842 							ase->peer->mac_addr.raw,
843 							type[ase->type],
844 							ase->next_hop,
845 							ase->is_active,
846 							ase->is_bss,
847 							ase->ast_idx,
848 							ase->pdev_id,
849 							ase->vdev_id);
850 				}
851 			}
852 		}
853 		qdf_spin_unlock_bh(&pdev->vdev_list_lock);
854 	}
855 }
856 #else
857 static void dp_print_ast_stats(struct dp_soc *soc)
858 {
859 	DP_PRINT_STATS("AST Stats not available. Enable FEATURE_AST");
860 	return;
861 }
862 #endif
863 
864 static void dp_print_peer_table(struct dp_vdev *vdev)
865 {
866 	struct dp_peer *peer = NULL;
867 
868 	DP_PRINT_STATS("Dumping Peer Table  Stats:");
869 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
870 		if (!peer) {
871 			DP_PRINT_STATS("Invalid Peer");
872 			return;
873 		}
874 		DP_PRINT_STATS("    peer_mac_addr = %pM"
875 			" nawds_enabled = %d"
876 			" bss_peer = %d"
877 			" wapi = %d"
878 			" wds_enabled = %d"
879 			" delete in progress = %d",
880 			peer->mac_addr.raw,
881 			peer->nawds_enabled,
882 			peer->bss_peer,
883 			peer->wapi,
884 			peer->wds_enabled,
885 			peer->delete_in_progress);
886 	}
887 }
888 
889 /*
890  * dp_srng_setup - Internal function to setup SRNG rings used by data path
891  */
892 static int dp_srng_setup(struct dp_soc *soc, struct dp_srng *srng,
893 	int ring_type, int ring_num, int mac_id, uint32_t num_entries)
894 {
895 	void *hal_soc = soc->hal_soc;
896 	uint32_t entry_size = hal_srng_get_entrysize(hal_soc, ring_type);
897 	/* TODO: See if we should get align size from hal */
898 	uint32_t ring_base_align = 8;
899 	struct hal_srng_params ring_params;
900 	uint32_t max_entries = hal_srng_max_entries(hal_soc, ring_type);
901 
902 	/* TODO: Currently hal layer takes care of endianness related settings.
903 	 * See if these settings need to passed from DP layer
904 	 */
905 	ring_params.flags = 0;
906 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW,
907 		  FL("Ring type: %d, num:%d"), ring_type, ring_num);
908 
909 	num_entries = (num_entries > max_entries) ? max_entries : num_entries;
910 	srng->hal_srng = NULL;
911 	srng->alloc_size = (num_entries * entry_size) + ring_base_align - 1;
912 	srng->num_entries = num_entries;
913 	srng->base_vaddr_unaligned = qdf_mem_alloc_consistent(
914 		soc->osdev, soc->osdev->dev, srng->alloc_size,
915 		&(srng->base_paddr_unaligned));
916 
917 	if (!srng->base_vaddr_unaligned) {
918 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
919 			FL("alloc failed - ring_type: %d, ring_num %d"),
920 			ring_type, ring_num);
921 		return QDF_STATUS_E_NOMEM;
922 	}
923 
924 	ring_params.ring_base_vaddr = srng->base_vaddr_unaligned +
925 		((unsigned long)srng->base_vaddr_unaligned % ring_base_align);
926 	ring_params.ring_base_paddr = srng->base_paddr_unaligned +
927 		((unsigned long)(ring_params.ring_base_vaddr) -
928 		(unsigned long)srng->base_vaddr_unaligned);
929 	ring_params.num_entries = num_entries;
930 
931 	if (soc->intr_mode == DP_INTR_MSI) {
932 		dp_srng_msi_setup(soc, &ring_params, ring_type, ring_num);
933 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
934 			  FL("Using MSI for ring_type: %d, ring_num %d"),
935 			  ring_type, ring_num);
936 
937 	} else {
938 		ring_params.msi_data = 0;
939 		ring_params.msi_addr = 0;
940 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
941 			  FL("Skipping MSI for ring_type: %d, ring_num %d"),
942 			  ring_type, ring_num);
943 	}
944 
945 	/*
946 	 * Setup interrupt timer and batch counter thresholds for
947 	 * interrupt mitigation based on ring type
948 	 */
949 	if (ring_type == REO_DST) {
950 		ring_params.intr_timer_thres_us =
951 			wlan_cfg_get_int_timer_threshold_rx(soc->wlan_cfg_ctx);
952 		ring_params.intr_batch_cntr_thres_entries =
953 			wlan_cfg_get_int_batch_threshold_rx(soc->wlan_cfg_ctx);
954 	} else if (ring_type == WBM2SW_RELEASE && (ring_num < 3)) {
955 		ring_params.intr_timer_thres_us =
956 			wlan_cfg_get_int_timer_threshold_tx(soc->wlan_cfg_ctx);
957 		ring_params.intr_batch_cntr_thres_entries =
958 			wlan_cfg_get_int_batch_threshold_tx(soc->wlan_cfg_ctx);
959 	} else {
960 		ring_params.intr_timer_thres_us =
961 			wlan_cfg_get_int_timer_threshold_other(soc->wlan_cfg_ctx);
962 		ring_params.intr_batch_cntr_thres_entries =
963 			wlan_cfg_get_int_batch_threshold_other(soc->wlan_cfg_ctx);
964 	}
965 
966 	/* Enable low threshold interrupts for rx buffer rings (regular and
967 	 * monitor buffer rings).
968 	 * TODO: See if this is required for any other ring
969 	 */
970 	if ((ring_type == RXDMA_BUF) || (ring_type == RXDMA_MONITOR_BUF) ||
971 		(ring_type == RXDMA_MONITOR_STATUS)) {
972 		/* TODO: Setting low threshold to 1/8th of ring size
973 		 * see if this needs to be configurable
974 		 */
975 		ring_params.low_threshold = num_entries >> 3;
976 		ring_params.flags |= HAL_SRNG_LOW_THRES_INTR_ENABLE;
977 		ring_params.intr_timer_thres_us =
978 			wlan_cfg_get_int_timer_threshold_rx(soc->wlan_cfg_ctx);
979 		ring_params.intr_batch_cntr_thres_entries = 0;
980 	}
981 
982 	srng->hal_srng = hal_srng_setup(hal_soc, ring_type, ring_num,
983 		mac_id, &ring_params);
984 
985 	if (!srng->hal_srng) {
986 		qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
987 				srng->alloc_size,
988 				srng->base_vaddr_unaligned,
989 				srng->base_paddr_unaligned, 0);
990 	}
991 
992 	return 0;
993 }
994 
995 /**
996  * dp_srng_cleanup - Internal function to cleanup SRNG rings used by data path
997  * Any buffers allocated and attached to ring entries are expected to be freed
998  * before calling this function.
999  */
1000 static void dp_srng_cleanup(struct dp_soc *soc, struct dp_srng *srng,
1001 	int ring_type, int ring_num)
1002 {
1003 	if (!srng->hal_srng) {
1004 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1005 			FL("Ring type: %d, num:%d not setup"),
1006 			ring_type, ring_num);
1007 		return;
1008 	}
1009 
1010 	hal_srng_cleanup(soc->hal_soc, srng->hal_srng);
1011 
1012 	qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
1013 				srng->alloc_size,
1014 				srng->base_vaddr_unaligned,
1015 				srng->base_paddr_unaligned, 0);
1016 	srng->hal_srng = NULL;
1017 }
1018 
1019 /* TODO: Need this interface from HIF */
1020 void *hif_get_hal_handle(void *hif_handle);
1021 
1022 /*
1023  * dp_service_srngs() - Top level interrupt handler for DP Ring interrupts
1024  * @dp_ctx: DP interrupt context handle (struct dp_intr)
1025  * @budget: Number of frames/descriptors that can be processed in one shot
1026  *
1027  * Return: remaining budget/quota for the soc device
1028  */
1029 static uint32_t dp_service_srngs(void *dp_ctx, uint32_t dp_budget)
1030 {
1031 	struct dp_intr *int_ctx = (struct dp_intr *)dp_ctx;
1032 	struct dp_soc *soc = int_ctx->soc;
1033 	int ring = 0;
1034 	uint32_t work_done  = 0;
1035 	int budget = dp_budget;
1036 	uint8_t tx_mask = int_ctx->tx_ring_mask;
1037 	uint8_t rx_mask = int_ctx->rx_ring_mask;
1038 	uint8_t rx_err_mask = int_ctx->rx_err_ring_mask;
1039 	uint8_t rx_wbm_rel_mask = int_ctx->rx_wbm_rel_ring_mask;
1040 	uint8_t reo_status_mask = int_ctx->reo_status_ring_mask;
1041 	uint32_t remaining_quota = dp_budget;
1042 	struct dp_pdev *pdev = NULL;
1043 	int mac_id;
1044 
1045 	/* Process Tx completion interrupts first to return back buffers */
1046 	while (tx_mask) {
1047 		if (tx_mask & 0x1) {
1048 			work_done = dp_tx_comp_handler(soc,
1049 					soc->tx_comp_ring[ring].hal_srng,
1050 					remaining_quota);
1051 
1052 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
1053 				"tx mask 0x%x ring %d, budget %d, work_done %d",
1054 				tx_mask, ring, budget, work_done);
1055 
1056 			budget -= work_done;
1057 			if (budget <= 0)
1058 				goto budget_done;
1059 
1060 			remaining_quota = budget;
1061 		}
1062 		tx_mask = tx_mask >> 1;
1063 		ring++;
1064 	}
1065 
1066 
1067 	/* Process REO Exception ring interrupt */
1068 	if (rx_err_mask) {
1069 		work_done = dp_rx_err_process(soc,
1070 				soc->reo_exception_ring.hal_srng,
1071 				remaining_quota);
1072 
1073 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
1074 			"REO Exception Ring: work_done %d budget %d",
1075 			work_done, budget);
1076 
1077 		budget -=  work_done;
1078 		if (budget <= 0) {
1079 			goto budget_done;
1080 		}
1081 		remaining_quota = budget;
1082 	}
1083 
1084 	/* Process Rx WBM release ring interrupt */
1085 	if (rx_wbm_rel_mask) {
1086 		work_done = dp_rx_wbm_err_process(soc,
1087 				soc->rx_rel_ring.hal_srng, remaining_quota);
1088 
1089 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
1090 			"WBM Release Ring: work_done %d budget %d",
1091 			work_done, budget);
1092 
1093 		budget -=  work_done;
1094 		if (budget <= 0) {
1095 			goto budget_done;
1096 		}
1097 		remaining_quota = budget;
1098 	}
1099 
1100 	/* Process Rx interrupts */
1101 	if (rx_mask) {
1102 		for (ring = 0; ring < soc->num_reo_dest_rings; ring++) {
1103 			if (rx_mask & (1 << ring)) {
1104 				work_done = dp_rx_process(int_ctx,
1105 					    soc->reo_dest_ring[ring].hal_srng,
1106 					    ring,
1107 					    remaining_quota);
1108 
1109 				QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
1110 					"rx mask 0x%x ring %d, work_done %d budget %d",
1111 					rx_mask, ring, work_done, budget);
1112 
1113 				budget -=  work_done;
1114 				if (budget <= 0)
1115 					goto budget_done;
1116 				remaining_quota = budget;
1117 			}
1118 		}
1119 		for (ring = 0; ring < MAX_RX_MAC_RINGS; ring++) {
1120 			work_done = dp_rxdma_err_process(soc, ring,
1121 						remaining_quota);
1122 			budget -= work_done;
1123 		}
1124 	}
1125 
1126 	if (reo_status_mask)
1127 		dp_reo_status_ring_handler(soc);
1128 
1129 	/* Process LMAC interrupts */
1130 	for  (ring = 0 ; ring < MAX_PDEV_CNT; ring++) {
1131 		pdev = soc->pdev_list[ring];
1132 		if (pdev == NULL)
1133 			continue;
1134 		for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
1135 			int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id,
1136 								pdev->pdev_id);
1137 
1138 			if (int_ctx->rx_mon_ring_mask & (1 << mac_for_pdev)) {
1139 				work_done = dp_mon_process(soc, mac_for_pdev,
1140 						remaining_quota);
1141 				budget -= work_done;
1142 				if (budget <= 0)
1143 					goto budget_done;
1144 				remaining_quota = budget;
1145 			}
1146 
1147 			if (int_ctx->rxdma2host_ring_mask &
1148 					(1 << mac_for_pdev)) {
1149 				work_done = dp_rxdma_err_process(soc,
1150 							mac_for_pdev,
1151 							remaining_quota);
1152 				budget -=  work_done;
1153 				if (budget <= 0)
1154 					goto budget_done;
1155 				remaining_quota = budget;
1156 			}
1157 
1158 			if (int_ctx->host2rxdma_ring_mask &
1159 						(1 << mac_for_pdev)) {
1160 				union dp_rx_desc_list_elem_t *desc_list = NULL;
1161 				union dp_rx_desc_list_elem_t *tail = NULL;
1162 				struct dp_srng *rx_refill_buf_ring =
1163 					&pdev->rx_refill_buf_ring;
1164 
1165 				DP_STATS_INC(pdev, replenish.low_thresh_intrs,
1166 						1);
1167 				dp_rx_buffers_replenish(soc, mac_for_pdev,
1168 					rx_refill_buf_ring,
1169 					&soc->rx_desc_buf[mac_for_pdev], 0,
1170 					&desc_list, &tail);
1171 			}
1172 		}
1173 	}
1174 
1175 	qdf_lro_flush(int_ctx->lro_ctx);
1176 
1177 budget_done:
1178 	return dp_budget - budget;
1179 }
1180 
1181 /* dp_interrupt_timer() - timer poll for interrupts
1182  *
1183  * @arg: SoC Handle
1184  *
1185  * Return: none
1186  *
1187  */
1188 static void dp_interrupt_timer(void *arg)
1189 {
1190 	struct dp_soc *soc = (struct dp_soc *) arg;
1191 	int i;
1192 
1193 	if (qdf_atomic_read(&soc->cmn_init_done)) {
1194 		for (i = 0;
1195 			i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++)
1196 			dp_service_srngs(&soc->intr_ctx[i], 0xffff);
1197 
1198 		qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
1199 	}
1200 }
1201 
1202 /*
1203  * dp_soc_attach_poll() - Register handlers for DP interrupts (poll mode)
1204  * @txrx_soc: DP SOC handle
1205  *
1206  * Host driver will register for "DP_NUM_INTERRUPT_CONTEXTS" number of NAPI
1207  * contexts. Each NAPI context will have a tx_ring_mask, rx_ring_mask and
1208  * rx_monitor_ring mask to indicate the rings that are processed by the handler.
1209  *
1210  * Return: 0 for success. nonzero for failure.
1211  */
1212 static QDF_STATUS dp_soc_attach_poll(void *txrx_soc)
1213 {
1214 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
1215 	int i;
1216 
1217 	soc->intr_mode = DP_INTR_POLL;
1218 
1219 	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
1220 		soc->intr_ctx[i].dp_intr_id = i;
1221 		soc->intr_ctx[i].tx_ring_mask =
1222 			wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, i);
1223 		soc->intr_ctx[i].rx_ring_mask =
1224 			wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, i);
1225 		soc->intr_ctx[i].rx_mon_ring_mask =
1226 			wlan_cfg_get_rx_mon_ring_mask(soc->wlan_cfg_ctx, i);
1227 		soc->intr_ctx[i].rx_err_ring_mask =
1228 			wlan_cfg_get_rx_err_ring_mask(soc->wlan_cfg_ctx, i);
1229 		soc->intr_ctx[i].rx_wbm_rel_ring_mask =
1230 			wlan_cfg_get_rx_wbm_rel_ring_mask(soc->wlan_cfg_ctx, i);
1231 		soc->intr_ctx[i].reo_status_ring_mask =
1232 			wlan_cfg_get_reo_status_ring_mask(soc->wlan_cfg_ctx, i);
1233 		soc->intr_ctx[i].rxdma2host_ring_mask =
1234 			wlan_cfg_get_rxdma2host_ring_mask(soc->wlan_cfg_ctx, i);
1235 		soc->intr_ctx[i].soc = soc;
1236 		soc->intr_ctx[i].lro_ctx = qdf_lro_init();
1237 	}
1238 
1239 	qdf_timer_init(soc->osdev, &soc->int_timer,
1240 			dp_interrupt_timer, (void *)soc,
1241 			QDF_TIMER_TYPE_WAKE_APPS);
1242 
1243 	return QDF_STATUS_SUCCESS;
1244 }
1245 
1246 static QDF_STATUS dp_soc_interrupt_attach(void *txrx_soc);
1247 #if defined(CONFIG_MCL)
1248 extern int con_mode_monitor;
1249 /*
1250  * dp_soc_interrupt_attach_wrapper() - Register handlers for DP interrupts
1251  * @txrx_soc: DP SOC handle
1252  *
1253  * Call the appropriate attach function based on the mode of operation.
1254  * This is a WAR for enabling monitor mode.
1255  *
1256  * Return: 0 for success. nonzero for failure.
1257  */
1258 static QDF_STATUS dp_soc_interrupt_attach_wrapper(void *txrx_soc)
1259 {
1260 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
1261 
1262 	if (!(soc->wlan_cfg_ctx->napi_enabled) ||
1263 	     con_mode_monitor == QDF_GLOBAL_MONITOR_MODE) {
1264 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
1265 				  "%s: Poll mode", __func__);
1266 		return dp_soc_attach_poll(txrx_soc);
1267 	} else {
1268 
1269 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
1270 				  "%s: Interrupt  mode", __func__);
1271 		return dp_soc_interrupt_attach(txrx_soc);
1272 	}
1273 }
1274 #else
1275 #if defined(DP_INTR_POLL_BASED) && DP_INTR_POLL_BASED
1276 static QDF_STATUS dp_soc_interrupt_attach_wrapper(void *txrx_soc)
1277 {
1278 	return dp_soc_attach_poll(txrx_soc);
1279 }
1280 #else
1281 static QDF_STATUS dp_soc_interrupt_attach_wrapper(void *txrx_soc)
1282 {
1283 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
1284 
1285 	if (hif_is_polled_mode_enabled(soc->hif_handle))
1286 		return dp_soc_attach_poll(txrx_soc);
1287 	else
1288 		return dp_soc_interrupt_attach(txrx_soc);
1289 }
1290 #endif
1291 #endif
1292 
1293 static void dp_soc_interrupt_map_calculate_integrated(struct dp_soc *soc,
1294 		int intr_ctx_num, int *irq_id_map, int *num_irq_r)
1295 {
1296 	int j;
1297 	int num_irq = 0;
1298 
1299 	int tx_mask =
1300 		wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
1301 	int rx_mask =
1302 		wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
1303 	int rx_mon_mask =
1304 		wlan_cfg_get_rx_mon_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
1305 	int rx_err_ring_mask = wlan_cfg_get_rx_err_ring_mask(
1306 					soc->wlan_cfg_ctx, intr_ctx_num);
1307 	int rx_wbm_rel_ring_mask = wlan_cfg_get_rx_wbm_rel_ring_mask(
1308 					soc->wlan_cfg_ctx, intr_ctx_num);
1309 	int reo_status_ring_mask = wlan_cfg_get_reo_status_ring_mask(
1310 					soc->wlan_cfg_ctx, intr_ctx_num);
1311 	int rxdma2host_ring_mask = wlan_cfg_get_rxdma2host_ring_mask(
1312 					soc->wlan_cfg_ctx, intr_ctx_num);
1313 	int host2rxdma_ring_mask = wlan_cfg_get_host2rxdma_ring_mask(
1314 					soc->wlan_cfg_ctx, intr_ctx_num);
1315 
1316 	for (j = 0; j < HIF_MAX_GRP_IRQ; j++) {
1317 
1318 		if (tx_mask & (1 << j)) {
1319 			irq_id_map[num_irq++] =
1320 				(wbm2host_tx_completions_ring1 - j);
1321 		}
1322 
1323 		if (rx_mask & (1 << j)) {
1324 			irq_id_map[num_irq++] =
1325 				(reo2host_destination_ring1 - j);
1326 		}
1327 
1328 		if (rxdma2host_ring_mask & (1 << j)) {
1329 			irq_id_map[num_irq++] =
1330 				rxdma2host_destination_ring_mac1 -
1331 				wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, j);
1332 		}
1333 
1334 		if (host2rxdma_ring_mask & (1 << j)) {
1335 			irq_id_map[num_irq++] =
1336 				host2rxdma_host_buf_ring_mac1 -
1337 				wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, j);
1338 		}
1339 
1340 		if (rx_mon_mask & (1 << j)) {
1341 			irq_id_map[num_irq++] =
1342 				ppdu_end_interrupts_mac1 -
1343 				wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, j);
1344 			irq_id_map[num_irq++] =
1345 				rxdma2host_monitor_status_ring_mac1 -
1346 				wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, j);
1347 		}
1348 
1349 		if (rx_wbm_rel_ring_mask & (1 << j))
1350 			irq_id_map[num_irq++] = wbm2host_rx_release;
1351 
1352 		if (rx_err_ring_mask & (1 << j))
1353 			irq_id_map[num_irq++] = reo2host_exception;
1354 
1355 		if (reo_status_ring_mask & (1 << j))
1356 			irq_id_map[num_irq++] = reo2host_status;
1357 
1358 	}
1359 	*num_irq_r = num_irq;
1360 }
1361 
1362 static void dp_soc_interrupt_map_calculate_msi(struct dp_soc *soc,
1363 		int intr_ctx_num, int *irq_id_map, int *num_irq_r,
1364 		int msi_vector_count, int msi_vector_start)
1365 {
1366 	int tx_mask = wlan_cfg_get_tx_ring_mask(
1367 					soc->wlan_cfg_ctx, intr_ctx_num);
1368 	int rx_mask = wlan_cfg_get_rx_ring_mask(
1369 					soc->wlan_cfg_ctx, intr_ctx_num);
1370 	int rx_mon_mask = wlan_cfg_get_rx_mon_ring_mask(
1371 					soc->wlan_cfg_ctx, intr_ctx_num);
1372 	int rx_err_ring_mask = wlan_cfg_get_rx_err_ring_mask(
1373 					soc->wlan_cfg_ctx, intr_ctx_num);
1374 	int rx_wbm_rel_ring_mask = wlan_cfg_get_rx_wbm_rel_ring_mask(
1375 					soc->wlan_cfg_ctx, intr_ctx_num);
1376 	int reo_status_ring_mask = wlan_cfg_get_reo_status_ring_mask(
1377 					soc->wlan_cfg_ctx, intr_ctx_num);
1378 	int rxdma2host_ring_mask = wlan_cfg_get_rxdma2host_ring_mask(
1379 					soc->wlan_cfg_ctx, intr_ctx_num);
1380 
1381 	unsigned int vector =
1382 		(intr_ctx_num % msi_vector_count) + msi_vector_start;
1383 	int num_irq = 0;
1384 
1385 	soc->intr_mode = DP_INTR_MSI;
1386 
1387 	if (tx_mask | rx_mask | rx_mon_mask | rx_err_ring_mask |
1388 	    rx_wbm_rel_ring_mask | reo_status_ring_mask | rxdma2host_ring_mask)
1389 		irq_id_map[num_irq++] =
1390 			pld_get_msi_irq(soc->osdev->dev, vector);
1391 
1392 	*num_irq_r = num_irq;
1393 }
1394 
1395 static void dp_soc_interrupt_map_calculate(struct dp_soc *soc, int intr_ctx_num,
1396 				    int *irq_id_map, int *num_irq)
1397 {
1398 	int msi_vector_count, ret;
1399 	uint32_t msi_base_data, msi_vector_start;
1400 
1401 	ret = pld_get_user_msi_assignment(soc->osdev->dev, "DP",
1402 					    &msi_vector_count,
1403 					    &msi_base_data,
1404 					    &msi_vector_start);
1405 	if (ret)
1406 		return dp_soc_interrupt_map_calculate_integrated(soc,
1407 				intr_ctx_num, irq_id_map, num_irq);
1408 
1409 	else
1410 		dp_soc_interrupt_map_calculate_msi(soc,
1411 				intr_ctx_num, irq_id_map, num_irq,
1412 				msi_vector_count, msi_vector_start);
1413 }
1414 
1415 /*
1416  * dp_soc_interrupt_attach() - Register handlers for DP interrupts
1417  * @txrx_soc: DP SOC handle
1418  *
1419  * Host driver will register for "DP_NUM_INTERRUPT_CONTEXTS" number of NAPI
1420  * contexts. Each NAPI context will have a tx_ring_mask, rx_ring_mask and
1421  * rx_monitor_ring mask to indicate the rings that are processed by the handler.
1422  *
1423  * Return: 0 for success. nonzero for failure.
1424  */
1425 static QDF_STATUS dp_soc_interrupt_attach(void *txrx_soc)
1426 {
1427 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
1428 
1429 	int i = 0;
1430 	int num_irq = 0;
1431 
1432 	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
1433 		int ret = 0;
1434 
1435 		/* Map of IRQ ids registered with one interrupt context */
1436 		int irq_id_map[HIF_MAX_GRP_IRQ];
1437 
1438 		int tx_mask =
1439 			wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, i);
1440 		int rx_mask =
1441 			wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, i);
1442 		int rx_mon_mask =
1443 			dp_soc_get_mon_mask_for_interrupt_mode(soc, i);
1444 		int rx_err_ring_mask =
1445 			wlan_cfg_get_rx_err_ring_mask(soc->wlan_cfg_ctx, i);
1446 		int rx_wbm_rel_ring_mask =
1447 			wlan_cfg_get_rx_wbm_rel_ring_mask(soc->wlan_cfg_ctx, i);
1448 		int reo_status_ring_mask =
1449 			wlan_cfg_get_reo_status_ring_mask(soc->wlan_cfg_ctx, i);
1450 		int rxdma2host_ring_mask =
1451 			wlan_cfg_get_rxdma2host_ring_mask(soc->wlan_cfg_ctx, i);
1452 		int host2rxdma_ring_mask =
1453 			wlan_cfg_get_host2rxdma_ring_mask(soc->wlan_cfg_ctx, i);
1454 
1455 
1456 		soc->intr_ctx[i].dp_intr_id = i;
1457 		soc->intr_ctx[i].tx_ring_mask = tx_mask;
1458 		soc->intr_ctx[i].rx_ring_mask = rx_mask;
1459 		soc->intr_ctx[i].rx_mon_ring_mask = rx_mon_mask;
1460 		soc->intr_ctx[i].rx_err_ring_mask = rx_err_ring_mask;
1461 		soc->intr_ctx[i].rxdma2host_ring_mask = rxdma2host_ring_mask;
1462 		soc->intr_ctx[i].host2rxdma_ring_mask = host2rxdma_ring_mask;
1463 		soc->intr_ctx[i].rx_wbm_rel_ring_mask = rx_wbm_rel_ring_mask;
1464 		soc->intr_ctx[i].reo_status_ring_mask = reo_status_ring_mask;
1465 
1466 		soc->intr_ctx[i].soc = soc;
1467 
1468 		num_irq = 0;
1469 
1470 		dp_soc_interrupt_map_calculate(soc, i, &irq_id_map[0],
1471 					       &num_irq);
1472 
1473 		ret = hif_register_ext_group(soc->hif_handle,
1474 				num_irq, irq_id_map, dp_service_srngs,
1475 				&soc->intr_ctx[i], "dp_intr",
1476 				HIF_EXEC_NAPI_TYPE, QCA_NAPI_DEF_SCALE_BIN_SHIFT);
1477 
1478 		if (ret) {
1479 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1480 			FL("failed, ret = %d"), ret);
1481 
1482 			return QDF_STATUS_E_FAILURE;
1483 		}
1484 		soc->intr_ctx[i].lro_ctx = qdf_lro_init();
1485 	}
1486 
1487 	hif_configure_ext_group_interrupts(soc->hif_handle);
1488 
1489 	return QDF_STATUS_SUCCESS;
1490 }
1491 
1492 /*
1493  * dp_soc_interrupt_detach() - Deregister any allocations done for interrupts
1494  * @txrx_soc: DP SOC handle
1495  *
1496  * Return: void
1497  */
1498 static void dp_soc_interrupt_detach(void *txrx_soc)
1499 {
1500 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
1501 	int i;
1502 
1503 	if (soc->intr_mode == DP_INTR_POLL) {
1504 		qdf_timer_stop(&soc->int_timer);
1505 		qdf_timer_free(&soc->int_timer);
1506 	} else {
1507 		hif_deregister_exec_group(soc->hif_handle, "dp_intr");
1508 	}
1509 
1510 	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
1511 		soc->intr_ctx[i].tx_ring_mask = 0;
1512 		soc->intr_ctx[i].rx_ring_mask = 0;
1513 		soc->intr_ctx[i].rx_mon_ring_mask = 0;
1514 		soc->intr_ctx[i].rx_err_ring_mask = 0;
1515 		soc->intr_ctx[i].rx_wbm_rel_ring_mask = 0;
1516 		soc->intr_ctx[i].reo_status_ring_mask = 0;
1517 		soc->intr_ctx[i].rxdma2host_ring_mask = 0;
1518 		soc->intr_ctx[i].host2rxdma_ring_mask = 0;
1519 
1520 		qdf_lro_deinit(soc->intr_ctx[i].lro_ctx);
1521 	}
1522 }
1523 
1524 #define AVG_MAX_MPDUS_PER_TID 128
1525 #define AVG_TIDS_PER_CLIENT 2
1526 #define AVG_FLOWS_PER_TID 2
1527 #define AVG_MSDUS_PER_FLOW 128
1528 #define AVG_MSDUS_PER_MPDU 4
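
/*
 * Sizing sketch with illustrative numbers (assuming max_clients = 64 and
 * num_mpdus_per_link_desc = 6): dp_hw_link_desc_pool_setup() below computes
 *
 *	num_mpdu_link_descs = (64 * 2 * 128) / 6 = 2730
 *
 * plus similar terms for MPDU queue and Tx/Rx MSDU link descriptors, then
 * rounds the total up to a power of two before carving it into DMA banks
 * bounded by wlan_cfg_max_alloc_size().
 */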
1529 
1530 /*
1531  * Allocate and setup link descriptor pool that will be used by HW for
1532  * various link and queue descriptors and managed by WBM
1533  */
1534 static int dp_hw_link_desc_pool_setup(struct dp_soc *soc)
1535 {
1536 	int link_desc_size = hal_get_link_desc_size(soc->hal_soc);
1537 	int link_desc_align = hal_get_link_desc_align(soc->hal_soc);
1538 	uint32_t max_clients = wlan_cfg_get_max_clients(soc->wlan_cfg_ctx);
1539 	uint32_t num_mpdus_per_link_desc =
1540 		hal_num_mpdus_per_link_desc(soc->hal_soc);
1541 	uint32_t num_msdus_per_link_desc =
1542 		hal_num_msdus_per_link_desc(soc->hal_soc);
1543 	uint32_t num_mpdu_links_per_queue_desc =
1544 		hal_num_mpdu_links_per_queue_desc(soc->hal_soc);
1545 	uint32_t max_alloc_size = wlan_cfg_max_alloc_size(soc->wlan_cfg_ctx);
1546 	uint32_t total_link_descs, total_mem_size;
1547 	uint32_t num_mpdu_link_descs, num_mpdu_queue_descs;
1548 	uint32_t num_tx_msdu_link_descs, num_rx_msdu_link_descs;
1549 	uint32_t num_link_desc_banks;
1550 	uint32_t last_bank_size = 0;
1551 	uint32_t entry_size, num_entries;
1552 	int i;
1553 	uint32_t desc_id = 0;
1554 
1555 	/* Only Tx queue descriptors are allocated from the common link descriptor
1556 	 * pool. Rx queue descriptors (REO queue extension descriptors) are not
1557 	 * included here because they are expected to be allocated contiguously
1558 	 * with the REO queue descriptors.
1559 	 */
1560 	num_mpdu_link_descs = (max_clients * AVG_TIDS_PER_CLIENT *
1561 		AVG_MAX_MPDUS_PER_TID) / num_mpdus_per_link_desc;
1562 
1563 	num_mpdu_queue_descs = num_mpdu_link_descs /
1564 		num_mpdu_links_per_queue_desc;
1565 
1566 	num_tx_msdu_link_descs = (max_clients * AVG_TIDS_PER_CLIENT *
1567 		AVG_FLOWS_PER_TID * AVG_MSDUS_PER_FLOW) /
1568 		num_msdus_per_link_desc;
1569 
1570 	num_rx_msdu_link_descs = (max_clients * AVG_TIDS_PER_CLIENT *
1571 		AVG_MAX_MPDUS_PER_TID * AVG_MSDUS_PER_MPDU) / 6;
1572 
1573 	num_entries = num_mpdu_link_descs + num_mpdu_queue_descs +
1574 		num_tx_msdu_link_descs + num_rx_msdu_link_descs;
1575 
1576 	/* Round up to power of 2 */
1577 	total_link_descs = 1;
1578 	while (total_link_descs < num_entries)
1579 		total_link_descs <<= 1;
1580 
1581 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
1582 		FL("total_link_descs: %u, link_desc_size: %d"),
1583 		total_link_descs, link_desc_size);
1584 	total_mem_size =  total_link_descs * link_desc_size;
1585 
1586 	total_mem_size += link_desc_align;
1587 
1588 	if (total_mem_size <= max_alloc_size) {
1589 		num_link_desc_banks = 0;
1590 		last_bank_size = total_mem_size;
1591 	} else {
1592 		num_link_desc_banks = (total_mem_size) /
1593 			(max_alloc_size - link_desc_align);
1594 		last_bank_size = total_mem_size %
1595 			(max_alloc_size - link_desc_align);
1596 	}
1597 
1598 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
1599 		FL("total_mem_size: %d, num_link_desc_banks: %u"),
1600 		total_mem_size, num_link_desc_banks);
1601 
1602 	for (i = 0; i < num_link_desc_banks; i++) {
1603 		soc->link_desc_banks[i].base_vaddr_unaligned =
1604 			qdf_mem_alloc_consistent(soc->osdev, soc->osdev->dev,
1605 			max_alloc_size,
1606 			&(soc->link_desc_banks[i].base_paddr_unaligned));
1607 		soc->link_desc_banks[i].size = max_alloc_size;
1608 
1609 		soc->link_desc_banks[i].base_vaddr = (void *)((unsigned long)(
1610 			soc->link_desc_banks[i].base_vaddr_unaligned) +
1611 			((unsigned long)(
1612 			soc->link_desc_banks[i].base_vaddr_unaligned) %
1613 			link_desc_align));
1614 
1615 		soc->link_desc_banks[i].base_paddr = (unsigned long)(
1616 			soc->link_desc_banks[i].base_paddr_unaligned) +
1617 			((unsigned long)(soc->link_desc_banks[i].base_vaddr) -
1618 			(unsigned long)(
1619 			soc->link_desc_banks[i].base_vaddr_unaligned));
1620 
1621 		if (!soc->link_desc_banks[i].base_vaddr_unaligned) {
1622 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1623 				FL("Link descriptor memory alloc failed"));
1624 			goto fail;
1625 		}
1626 	}
1627 
1628 	if (last_bank_size) {
1629 		/* Allocate the last bank in case the total memory required is not an
1630 		 * exact multiple of max_alloc_size
1631 		 */
1632 		soc->link_desc_banks[i].base_vaddr_unaligned =
1633 			qdf_mem_alloc_consistent(soc->osdev, soc->osdev->dev,
1634 			last_bank_size,
1635 			&(soc->link_desc_banks[i].base_paddr_unaligned));
1636 		soc->link_desc_banks[i].size = last_bank_size;
1637 
1638 		soc->link_desc_banks[i].base_vaddr = (void *)((unsigned long)
1639 			(soc->link_desc_banks[i].base_vaddr_unaligned) +
1640 			((unsigned long)(
1641 			soc->link_desc_banks[i].base_vaddr_unaligned) %
1642 			link_desc_align));
1643 
1644 		soc->link_desc_banks[i].base_paddr =
1645 			(unsigned long)(
1646 			soc->link_desc_banks[i].base_paddr_unaligned) +
1647 			((unsigned long)(soc->link_desc_banks[i].base_vaddr) -
1648 			(unsigned long)(
1649 			soc->link_desc_banks[i].base_vaddr_unaligned));
1650 	}
1651 
1652 
1653 	/* Allocate and setup link descriptor idle list for HW internal use */
1654 	entry_size = hal_srng_get_entrysize(soc->hal_soc, WBM_IDLE_LINK);
1655 	total_mem_size = entry_size * total_link_descs;
1656 
1657 	if (total_mem_size <= max_alloc_size) {
1658 		void *desc;
1659 
1660 		if (dp_srng_setup(soc, &soc->wbm_idle_link_ring,
1661 			WBM_IDLE_LINK, 0, 0, total_link_descs)) {
1662 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1663 				FL("Link desc idle ring setup failed"));
1664 			goto fail;
1665 		}
1666 
1667 		hal_srng_access_start_unlocked(soc->hal_soc,
1668 			soc->wbm_idle_link_ring.hal_srng);
1669 
1670 		for (i = 0; i < MAX_LINK_DESC_BANKS &&
1671 			soc->link_desc_banks[i].base_paddr; i++) {
1672 			uint32_t num_entries = (soc->link_desc_banks[i].size -
1673 				((unsigned long)(
1674 				soc->link_desc_banks[i].base_vaddr) -
1675 				(unsigned long)(
1676 				soc->link_desc_banks[i].base_vaddr_unaligned)))
1677 				/ link_desc_size;
1678 			unsigned long paddr = (unsigned long)(
1679 				soc->link_desc_banks[i].base_paddr);
1680 
1681 			while (num_entries && (desc = hal_srng_src_get_next(
1682 				soc->hal_soc,
1683 				soc->wbm_idle_link_ring.hal_srng))) {
1684 				hal_set_link_desc_addr(desc,
1685 					LINK_DESC_COOKIE(desc_id, i), paddr);
1686 				num_entries--;
1687 				desc_id++;
1688 				paddr += link_desc_size;
1689 			}
1690 		}
1691 		hal_srng_access_end_unlocked(soc->hal_soc,
1692 			soc->wbm_idle_link_ring.hal_srng);
1693 	} else {
1694 		uint32_t num_scatter_bufs;
1695 		uint32_t num_entries_per_buf;
1696 		uint32_t rem_entries;
1697 		uint8_t *scatter_buf_ptr;
1698 		uint16_t scatter_buf_num;
1699 
1700 		soc->wbm_idle_scatter_buf_size =
1701 			hal_idle_list_scatter_buf_size(soc->hal_soc);
1702 		num_entries_per_buf = hal_idle_scatter_buf_num_entries(
1703 			soc->hal_soc, soc->wbm_idle_scatter_buf_size);
1704 		num_scatter_bufs = hal_idle_list_num_scatter_bufs(
1705 					soc->hal_soc, total_mem_size,
1706 					soc->wbm_idle_scatter_buf_size);
1707 
1708 		if (num_scatter_bufs > MAX_IDLE_SCATTER_BUFS) {
1709 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1710 					FL("scatter bufs size out of bounds"));
1711 			goto fail;
1712 		}
1713 
1714 		for (i = 0; i < num_scatter_bufs; i++) {
1715 			soc->wbm_idle_scatter_buf_base_vaddr[i] =
1716 				qdf_mem_alloc_consistent(soc->osdev,
1717 							soc->osdev->dev,
1718 				soc->wbm_idle_scatter_buf_size,
1719 				&(soc->wbm_idle_scatter_buf_base_paddr[i]));
1720 			if (soc->wbm_idle_scatter_buf_base_vaddr[i] == NULL) {
1721 				QDF_TRACE(QDF_MODULE_ID_DP,
1722 						QDF_TRACE_LEVEL_ERROR,
1723 					FL("Scatter list memory alloc failed"));
1724 				goto fail;
1725 			}
1726 		}
1727 
1728 		/* Populate idle list scatter buffers with link descriptor
1729 		 * pointers
1730 		 */
1731 		scatter_buf_num = 0;
1732 		scatter_buf_ptr = (uint8_t *)(
1733 			soc->wbm_idle_scatter_buf_base_vaddr[scatter_buf_num]);
1734 		rem_entries = num_entries_per_buf;
1735 
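		/*
		 * Walk all link descriptor banks and pack one entry per link
		 * descriptor into the scatter buffers, moving to the next
		 * scatter buffer whenever the current one is full.
		 */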
1736 		for (i = 0; i < MAX_LINK_DESC_BANKS &&
1737 			soc->link_desc_banks[i].base_paddr; i++) {
1738 			uint32_t num_link_descs =
1739 				(soc->link_desc_banks[i].size -
1740 				((unsigned long)(
1741 				soc->link_desc_banks[i].base_vaddr) -
1742 				(unsigned long)(
1743 				soc->link_desc_banks[i].base_vaddr_unaligned)))
1744 				/ link_desc_size;
1745 			unsigned long paddr = (unsigned long)(
1746 				soc->link_desc_banks[i].base_paddr);
1747 
1748 			while (num_link_descs) {
1749 				hal_set_link_desc_addr((void *)scatter_buf_ptr,
1750 					LINK_DESC_COOKIE(desc_id, i), paddr);
1751 				num_link_descs--;
1752 				desc_id++;
1753 				paddr += link_desc_size;
1754 				rem_entries--;
1755 				if (rem_entries) {
1756 					scatter_buf_ptr += entry_size;
1757 				} else {
1758 					rem_entries = num_entries_per_buf;
1759 					scatter_buf_num++;
1760 
1761 					if (scatter_buf_num >= num_scatter_bufs)
1762 						break;
1763 
1764 					scatter_buf_ptr = (uint8_t *)(
1765 						soc->wbm_idle_scatter_buf_base_vaddr[
1766 						scatter_buf_num]);
1767 				}
1768 			}
1769 		}
1770 		/* Setup link descriptor idle list in HW */
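		/*
		 * The (scatter_buf_ptr - base) expression below conveys the
		 * byte offset reached within the last scatter buffer.
		 */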
1771 		hal_setup_link_idle_list(soc->hal_soc,
1772 			soc->wbm_idle_scatter_buf_base_paddr,
1773 			soc->wbm_idle_scatter_buf_base_vaddr,
1774 			num_scatter_bufs, soc->wbm_idle_scatter_buf_size,
1775 			(uint32_t)(scatter_buf_ptr -
1776 			(uint8_t *)(soc->wbm_idle_scatter_buf_base_vaddr[
1777 			scatter_buf_num-1])), total_link_descs);
1778 	}
1779 	return 0;
1780 
1781 fail:
1782 	if (soc->wbm_idle_link_ring.hal_srng) {
1783 		dp_srng_cleanup(soc, &soc->wbm_idle_link_ring,
1784 				WBM_IDLE_LINK, 0);
1785 	}
1786 
1787 	for (i = 0; i < MAX_IDLE_SCATTER_BUFS; i++) {
1788 		if (soc->wbm_idle_scatter_buf_base_vaddr[i]) {
1789 			qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
1790 				soc->wbm_idle_scatter_buf_size,
1791 				soc->wbm_idle_scatter_buf_base_vaddr[i],
1792 				soc->wbm_idle_scatter_buf_base_paddr[i], 0);
1793 			soc->wbm_idle_scatter_buf_base_vaddr[i] = NULL;
1794 		}
1795 	}
1796 
1797 	for (i = 0; i < MAX_LINK_DESC_BANKS; i++) {
1798 		if (soc->link_desc_banks[i].base_vaddr_unaligned) {
1799 			qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
1800 				soc->link_desc_banks[i].size,
1801 				soc->link_desc_banks[i].base_vaddr_unaligned,
1802 				soc->link_desc_banks[i].base_paddr_unaligned,
1803 				0);
1804 			soc->link_desc_banks[i].base_vaddr_unaligned = NULL;
1805 		}
1806 	}
1807 	return QDF_STATUS_E_FAILURE;
1808 }
1809 
1810 /*
1811  * dp_hw_link_desc_pool_cleanup() - Free the link descriptor pool set up for HW
1812  */
1813 static void dp_hw_link_desc_pool_cleanup(struct dp_soc *soc)
1814 {
1815 	int i;
1816 
1817 	if (soc->wbm_idle_link_ring.hal_srng) {
1818 		dp_srng_cleanup(soc, &soc->wbm_idle_link_ring,
1819 			WBM_IDLE_LINK, 0);
1820 	}
1821 
1822 	for (i = 0; i < MAX_IDLE_SCATTER_BUFS; i++) {
1823 		if (soc->wbm_idle_scatter_buf_base_vaddr[i]) {
1824 			qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
1825 				soc->wbm_idle_scatter_buf_size,
1826 				soc->wbm_idle_scatter_buf_base_vaddr[i],
1827 				soc->wbm_idle_scatter_buf_base_paddr[i], 0);
1828 			soc->wbm_idle_scatter_buf_base_vaddr[i] = NULL;
1829 		}
1830 	}
1831 
1832 	for (i = 0; i < MAX_LINK_DESC_BANKS; i++) {
1833 		if (soc->link_desc_banks[i].base_vaddr_unaligned) {
1834 			qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
1835 				soc->link_desc_banks[i].size,
1836 				soc->link_desc_banks[i].base_vaddr_unaligned,
1837 				soc->link_desc_banks[i].base_paddr_unaligned,
1838 				0);
1839 			soc->link_desc_banks[i].base_vaddr_unaligned = NULL;
1840 		}
1841 	}
1842 }
1843 
1844 #define REO_DST_RING_SIZE_QCA6290 1024
1845 #define REO_DST_RING_SIZE_QCA8074 2048
1846 
1847 /*
1848  * dp_wds_aging_timer_fn() - Timer callback function for WDS aging
1849  * @soc_hdl: Datapath SOC handle
1850  *
1851  * This is a timer function used to age out stale AST nodes from
1852  * the AST table
1853  */
1854 #ifdef FEATURE_WDS
1855 static void dp_wds_aging_timer_fn(void *soc_hdl)
1856 {
1857 	struct dp_soc *soc = (struct dp_soc *) soc_hdl;
1858 	struct dp_pdev *pdev;
1859 	struct dp_vdev *vdev;
1860 	struct dp_peer *peer;
1861 	struct dp_ast_entry *ase, *temp_ase;
1862 	int i;
1863 
1864 	qdf_spin_lock_bh(&soc->ast_lock);
1865 
1866 	for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) {
1867 		pdev = soc->pdev_list[i];
1868 		qdf_spin_lock_bh(&pdev->vdev_list_lock);
1869 		DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
1870 			DP_VDEV_ITERATE_PEER_LIST(vdev, peer) {
1871 				DP_PEER_ITERATE_ASE_LIST(peer, ase, temp_ase) {
1872 					/*
1873 					 * Do not expire static ast entries
1874 					 * and HM WDS entries
1875 					 */
1876 					if (ase->type != CDP_TXRX_AST_TYPE_WDS)
1877 						continue;
1878 
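					/*
					 * An entry seen active since the last
					 * timer run gets one more interval;
					 * an entry still inactive is aged out.
					 */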
1879 					if (ase->is_active) {
1880 						ase->is_active = FALSE;
1881 						continue;
1882 					}
1883 
1884 					DP_STATS_INC(soc, ast.aged_out, 1);
1885 					dp_peer_del_ast(soc, ase);
1886 				}
1887 			}
1888 		}
1889 		qdf_spin_unlock_bh(&pdev->vdev_list_lock);
1890 	}
1891 
1892 	qdf_spin_unlock_bh(&soc->ast_lock);
1893 
1894 	if (qdf_atomic_read(&soc->cmn_init_done))
1895 		qdf_timer_mod(&soc->wds_aging_timer, DP_WDS_AGING_TIMER_DEFAULT_MS);
1896 }
1897 
1898 
1899 /*
1900  * dp_soc_wds_attach() - Setup WDS timer and AST table
1901  * @soc:		Datapath SOC handle
1902  *
1903  * Return: None
1904  */
1905 static void dp_soc_wds_attach(struct dp_soc *soc)
1906 {
1907 	qdf_timer_init(soc->osdev, &soc->wds_aging_timer,
1908 			dp_wds_aging_timer_fn, (void *)soc,
1909 			QDF_TIMER_TYPE_WAKE_APPS);
1910 
1911 	qdf_timer_mod(&soc->wds_aging_timer, DP_WDS_AGING_TIMER_DEFAULT_MS);
1912 }
1913 
1914 /*
1915  * dp_soc_wds_detach() - Detach WDS data structures and timers
1916  * @soc: Datapath SOC handle
1917  *
1918  * Return: None
1919  */
1920 static void dp_soc_wds_detach(struct dp_soc *soc)
1921 {
1922 	qdf_timer_stop(&soc->wds_aging_timer);
1923 	qdf_timer_free(&soc->wds_aging_timer);
1924 }
1925 #else
1926 static void dp_soc_wds_attach(struct dp_soc *soc)
1927 {
1928 }
1929 
1930 static void dp_soc_wds_detach(struct dp_soc *soc)
1931 {
1932 }
1933 #endif
1934 
1935 /*
1936  * dp_soc_reset_cpu_ring_map() - Reset cpu ring map
1937  * @soc: Datapath SOC handle
1938  *
1939  * This API resets the default cpu ring map
1940  */
1941 
1942 static void dp_soc_reset_cpu_ring_map(struct dp_soc *soc)
1943 {
1944 	uint8_t i;
1945 	int nss_config = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
1946 
1947 	for (i = 0; i < WLAN_CFG_INT_NUM_CONTEXTS; i++) {
1948 		if (nss_config == 1) {
1949 			/*
1950 			 * Setting Tx ring map for one nss offloaded radio
1951 			 */
1952 			soc->tx_ring_map[i] = dp_cpu_ring_map[DP_NSS_FIRST_RADIO_OFFLOADED_MAP][i];
1953 		} else if (nss_config == 2) {
1954 			/*
1955 			 * Setting Tx ring for two nss offloaded radios
1956 			 */
1957 			soc->tx_ring_map[i] = dp_cpu_ring_map[DP_NSS_SECOND_RADIO_OFFLOADED_MAP][i];
1958 		} else {
1959 			/*
1960 			 * Setting Tx ring map for all nss offloaded radios
1961 			 */
1962 			soc->tx_ring_map[i] = dp_cpu_ring_map[DP_NSS_ALL_RADIO_OFFLOADED_MAP][i];
1963 		}
1964 	}
1965 }
1966 
1967 /*
1968  * dp_soc_ring_if_nss_offloaded() - find if ring is offloaded to NSS
1969  * @soc - DP soc handle
1970  * @ring_type - ring type
1971  * @ring_num - ring number
1972  *
1973  * Return: non-zero if the ring is offloaded to NSS, 0 otherwise
1974  */
1975 static uint8_t dp_soc_ring_if_nss_offloaded(struct dp_soc *soc, enum hal_ring_type ring_type, int ring_num)
1976 {
1977 	uint8_t nss_config = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
1978 	uint8_t status = 0;
1979 
1980 	switch (ring_type) {
1981 	case WBM2SW_RELEASE:
1982 	case REO_DST:
1983 	case RXDMA_BUF:
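		/*
		 * nss_config is a per-radio bitmask; if the bit for ring_num
		 * is set, the ring is treated as serviced by the NSS offload.
		 */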
1984 		status = ((nss_config) & (1 << ring_num));
1985 		break;
1986 	default:
1987 		break;
1988 	}
1989 
1990 	return status;
1991 }
1992 
1993 /*
1994  * dp_soc_reset_intr_mask() - reset interrupt mask
1995  * @soc - DP SOC handle
1996  *
1997  * Return: void
1998  */
1999 static void dp_soc_reset_intr_mask(struct dp_soc *soc)
2000 {
2001 	uint8_t j;
2002 	int *grp_mask = NULL;
2003 	int group_number, mask, num_ring;
2004 
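	/*
	 * For every ring serviced by the NSS offload, clear the corresponding
	 * bit in the host interrupt group masks so the host interrupt
	 * contexts no longer service that ring.
	 */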
2005 	/* number of tx ring */
2006 	num_ring = wlan_cfg_num_tcl_data_rings(soc->wlan_cfg_ctx);
2007 
2008 	/*
2009 	 * group mask for tx completion  ring.
2010 	 */
2011 	grp_mask =  &soc->wlan_cfg_ctx->int_tx_ring_mask[0];
2012 
2013 	/* loop and reset the mask for only offloaded ring */
2014 	for (j = 0; j < num_ring; j++) {
2015 		if (!dp_soc_ring_if_nss_offloaded(soc, WBM2SW_RELEASE, j)) {
2016 			continue;
2017 		}
2018 
2019 		/*
2020 		 * Group number corresponding to tx offloaded ring.
2021 		 */
2022 		group_number = dp_srng_find_ring_in_mask(j, grp_mask);
2023 		if (group_number < 0) {
2024 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2025 					FL("ring not part of any group; ring_type: %d,ring_num %d"),
2026 					WBM2SW_RELEASE, j);
2027 			return;
2028 		}
2029 
2030 		/* reset the tx mask for offloaded ring */
2031 		mask = wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, group_number);
2032 		mask &= (~(1 << j));
2033 
2034 		/*
2035 		 * reset the interrupt mask for offloaded ring.
2036 		 */
2037 		wlan_cfg_set_tx_ring_mask(soc->wlan_cfg_ctx, group_number, mask);
2038 	}
2039 
2040 	/* number of rx rings */
2041 	num_ring = wlan_cfg_num_reo_dest_rings(soc->wlan_cfg_ctx);
2042 
2043 	/*
2044 	 * group mask for reo destination ring.
2045 	 */
2046 	grp_mask = &soc->wlan_cfg_ctx->int_rx_ring_mask[0];
2047 
2048 	/* loop and reset the mask for only offloaded ring */
2049 	for (j = 0; j < num_ring; j++) {
2050 		if (!dp_soc_ring_if_nss_offloaded(soc, REO_DST, j)) {
2051 			continue;
2052 		}
2053 
2054 		/*
2055 		 * Group number corresponding to rx offloaded ring.
2056 		 */
2057 		group_number = dp_srng_find_ring_in_mask(j, grp_mask);
2058 		if (group_number < 0) {
2059 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2060 					FL("ring not part of any group; ring_type: %d,ring_num %d"),
2061 					REO_DST, j);
2062 			return;
2063 		}
2064 
2065 		/* clear the rx ring mask bit for the offloaded ring */
2066 		mask =  wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, group_number);
2067 		mask &= (~(1 << j));
2068 
2069 		/*
2070 		 * write back the updated mask for the rx offloaded ring.
2071 		 */
2072 		wlan_cfg_set_rx_ring_mask(soc->wlan_cfg_ctx, group_number, mask);
2073 	}
2074 
2075 	/*
2076 	 * group mask for Rx buffer refill ring
2077 	 */
2078 	grp_mask = &soc->wlan_cfg_ctx->int_host2rxdma_ring_mask[0];
2079 
2080 	/* loop and reset the mask for only offloaded ring */
2081 	for (j = 0; j < MAX_PDEV_CNT; j++) {
2082 		if (!dp_soc_ring_if_nss_offloaded(soc, RXDMA_BUF, j)) {
2083 			continue;
2084 		}
2085 
2086 		/*
2087 		 * Group number corresponding to rx offloaded ring.
2088 		 */
2089 		group_number = dp_srng_find_ring_in_mask(j, grp_mask);
2090 		if (group_number < 0) {
2091 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2092 					FL("ring not part of any group; ring_type: %d,ring_num %d"),
2093 					RXDMA_BUF, j);
2094 			return;
2095 		}
2096 
2097 		/* clear the host2rxdma ring mask bit for the offloaded ring */
2098 		mask =  wlan_cfg_get_host2rxdma_ring_mask(soc->wlan_cfg_ctx,
2099 				group_number);
2100 		mask &= (~(1 << j));
2101 
2102 		/*
2103 		 * write back the updated mask for the rx offloaded ring.
2104 		 */
2105 		wlan_cfg_set_host2rxdma_ring_mask(soc->wlan_cfg_ctx,
2106 			group_number, mask);
2107 	}
2108 }
2109 
2110 #ifdef IPA_OFFLOAD
2111 /**
2112  * dp_reo_remap_config() - configure reo remap register value based
2113  *                         on nss configuration.
2114  *		Based on the offload_radio value, one of the remap
2115  *		configurations below gets applied:
2116  *		0 - both Radios handled by host (remap rings 1, 2, 3 & 4)
2117  *		1 - 1st Radio handled by NSS (remap rings 2, 3 & 4)
2118  *		2 - 2nd Radio handled by NSS (remap rings 1, 2 & 4)
2119  *		3 - both Radios handled by NSS (remap not required)
2120  *		4 - IPA OFFLOAD enabled (remap rings 1,2 & 3)
2121  *
2122  * @remap1: output parameter indicates reo remap 1 register value
2123  * @remap2: output parameter indicates reo remap 2 register value
2124  * Return: bool type, true if remap is configured else false.
2125  */
2126 static bool dp_reo_remap_config(struct dp_soc *soc,
2127 				uint32_t *remap1,
2128 				uint32_t *remap2)
2129 {
2130 
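	/*
	 * Each 3-bit field selects a REO2SW destination ring. With IPA
	 * offload enabled only rings 1-3 appear in the remap (ring 4 is
	 * left out; see the mapping note above).
	 */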
2131 	*remap1 = ((0x1 << 0) | (0x2 << 3) | (0x3 << 6) | (0x1 << 9) |
2132 		(0x2 << 12) | (0x3 << 15) | (0x1 << 18) | (0x2 << 21)) << 8;
2133 
2134 	*remap2 = ((0x3 << 0) | (0x1 << 3) | (0x2 << 6) | (0x3 << 9) |
2135 		(0x1 << 12) | (0x2 << 15) | (0x3 << 18) | (0x1 << 21)) << 8;
2136 
2137 	return true;
2138 }
2139 #else
2140 static bool dp_reo_remap_config(struct dp_soc *soc,
2141 				uint32_t *remap1,
2142 				uint32_t *remap2)
2143 {
2144 	uint8_t offload_radio = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
2145 
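	/*
	 * Each remap register packs eight 3-bit REO destination ring fields
	 * (the whole value shifted left by 8). For example, case 0 below
	 * programs the sequence 1,2,3,4,1,2,3,4 so RX is spread across all
	 * four host REO2SW rings.
	 */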
2146 	switch (offload_radio) {
2147 	case 0:
2148 		*remap1 = ((0x1 << 0) | (0x2 << 3) | (0x3 << 6) |
2149 			(0x4 << 9) | (0x1 << 12) | (0x2 << 15) |
2150 			(0x3 << 18) | (0x4 << 21)) << 8;
2151 
2152 		*remap2 = ((0x1 << 0) | (0x2 << 3) | (0x3 << 6) |
2153 			(0x4 << 9) | (0x1 << 12) | (0x2 << 15) |
2154 			(0x3 << 18) | (0x4 << 21)) << 8;
2155 		break;
2156 
2157 	case 1:
2158 		*remap1 = ((0x2 << 0) | (0x3 << 3) | (0x4 << 6) |
2159 			(0x2 << 9) | (0x3 << 12) | (0x4 << 15) |
2160 			(0x2 << 18) | (0x3 << 21)) << 8;
2161 
2162 		*remap2 = ((0x4 << 0) | (0x2 << 3) | (0x3 << 6) |
2163 			(0x4 << 9) | (0x2 << 12) | (0x3 << 15) |
2164 			(0x4 << 18) | (0x2 << 21)) << 8;
2165 		break;
2166 
2167 	case 2:
2168 		*remap1 = ((0x1 << 0) | (0x3 << 3) | (0x4 << 6) |
2169 			(0x1 << 9) | (0x3 << 12) | (0x4 << 15) |
2170 			(0x1 << 18) | (0x3 << 21)) << 8;
2171 
2172 		*remap2 = ((0x4 << 0) | (0x1 << 3) | (0x3 << 6) |
2173 			(0x4 << 9) | (0x1 << 12) | (0x3 << 15) |
2174 			(0x4 << 18) | (0x1 << 21)) << 8;
2175 		break;
2176 
2177 	case 3:
2178 		/* return false if both radios are offloaded to NSS */
2179 		return false;
2180 	}
2181 	return true;
2182 }
2183 #endif
2184 
2185 /*
2186  * dp_reo_frag_dst_set() - configure reo register to set the
2187  *                        fragment destination ring
2188  * @soc : Datapath soc
2189  * @frag_dst_ring : output parameter to set fragment destination ring
2190  *
2191  * Based on the offload_radio value, the fragment destination ring is selected:
2192  * 0 - TCL
2193  * 1 - SW1
2194  * 2 - SW2
2195  * 3 - SW3
2196  * 4 - SW4
2197  * 5 - Release
2198  * 6 - FW
2199  * 7 - alternate select
2200  *
2201  * return: void
2202  */
2203 static void dp_reo_frag_dst_set(struct dp_soc *soc, uint8_t *frag_dst_ring)
2204 {
2205 	uint8_t offload_radio = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
2206 
2207 	switch (offload_radio) {
2208 	case 0:
2209 		*frag_dst_ring = HAL_SRNG_REO_EXCEPTION;
2210 		break;
2211 	case 3:
2212 		*frag_dst_ring = HAL_SRNG_REO_ALTERNATE_SELECT;
2213 		break;
2214 	default:
2215 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2216 				FL("dp_reo_frag_dst_set invalid offload radio config"));
2217 		break;
2218 	}
2219 }
2220 
2221 /*
2222  * dp_soc_cmn_setup() - Common SoC level initialization
2223  * @soc:		Datapath SOC handle
2224  *
2225  * This is an internal function used to setup common SOC data structures,
2226  * to be called from PDEV attach after receiving HW mode capabilities from FW
2227  */
2228 static int dp_soc_cmn_setup(struct dp_soc *soc)
2229 {
2230 	int i;
2231 	struct hal_reo_params reo_params;
2232 	int tx_ring_size;
2233 	int tx_comp_ring_size;
2234 	int reo_dst_ring_size;
2235 	uint32_t entries;
2236 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
2237 
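	/*
	 * This is invoked from every pdev attach; the cmn_init_done flag
	 * ensures the SOC-level rings are set up only once.
	 */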
2238 	if (qdf_atomic_read(&soc->cmn_init_done))
2239 		return 0;
2240 
2241 	if (dp_hw_link_desc_pool_setup(soc))
2242 		goto fail1;
2243 
2244 	soc_cfg_ctx = soc->wlan_cfg_ctx;
2245 	/* Setup SRNG rings */
2246 	/* Common rings */
2247 	if (dp_srng_setup(soc, &soc->wbm_desc_rel_ring, SW2WBM_RELEASE, 0, 0,
2248 		wlan_cfg_get_dp_soc_wbm_release_ring_size(soc_cfg_ctx))) {
2249 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2250 			FL("dp_srng_setup failed for wbm_desc_rel_ring"));
2251 		goto fail1;
2252 	}
2253 
2254 
2255 	soc->num_tcl_data_rings = 0;
2256 	/* Tx data rings */
2257 	if (!wlan_cfg_per_pdev_tx_ring(soc_cfg_ctx)) {
2258 		soc->num_tcl_data_rings =
2259 			wlan_cfg_num_tcl_data_rings(soc_cfg_ctx);
2260 		tx_comp_ring_size =
2261 			wlan_cfg_tx_comp_ring_size(soc_cfg_ctx);
2262 		tx_ring_size =
2263 			wlan_cfg_tx_ring_size(soc_cfg_ctx);
2264 		for (i = 0; i < soc->num_tcl_data_rings; i++) {
2265 			if (dp_srng_setup(soc, &soc->tcl_data_ring[i],
2266 				TCL_DATA, i, 0, tx_ring_size)) {
2267 				QDF_TRACE(QDF_MODULE_ID_DP,
2268 					QDF_TRACE_LEVEL_ERROR,
2269 					FL("dp_srng_setup failed for tcl_data_ring[%d]"), i);
2270 				goto fail1;
2271 			}
2272 			/*
2273 			 * TBD: Set IPA WBM ring size with ini IPA UC tx buffer
2274 			 * count
2275 			 */
2276 			if (dp_srng_setup(soc, &soc->tx_comp_ring[i],
2277 				WBM2SW_RELEASE, i, 0, tx_comp_ring_size)) {
2278 				QDF_TRACE(QDF_MODULE_ID_DP,
2279 					QDF_TRACE_LEVEL_ERROR,
2280 					FL("dp_srng_setup failed for tx_comp_ring[%d]"), i);
2281 				goto fail1;
2282 			}
2283 		}
2284 	} else {
2285 		/* This will be incremented during per pdev ring setup */
2286 		soc->num_tcl_data_rings = 0;
2287 	}
2288 
2289 	if (dp_tx_soc_attach(soc)) {
2290 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2291 				FL("dp_tx_soc_attach failed"));
2292 		goto fail1;
2293 	}
2294 
2295 	entries = wlan_cfg_get_dp_soc_tcl_cmd_ring_size(soc_cfg_ctx);
2296 	/* TCL command and status rings */
2297 	if (dp_srng_setup(soc, &soc->tcl_cmd_ring, TCL_CMD, 0, 0,
2298 			  entries)) {
2299 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2300 			FL("dp_srng_setup failed for tcl_cmd_ring"));
2301 		goto fail1;
2302 	}
2303 
2304 	entries = wlan_cfg_get_dp_soc_tcl_status_ring_size(soc_cfg_ctx);
2305 	if (dp_srng_setup(soc, &soc->tcl_status_ring, TCL_STATUS, 0, 0,
2306 			  entries)) {
2307 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2308 			FL("dp_srng_setup failed for tcl_status_ring"));
2309 		goto fail1;
2310 	}
2311 
2312 	reo_dst_ring_size = wlan_cfg_get_reo_dst_ring_size(soc->wlan_cfg_ctx);
2313 
2314 	/* TBD: call dp_tx_init to setup Tx SW descriptors and MSDU extension
2315 	 * descriptors
2316 	 */
2317 
2318 	/* Rx data rings */
2319 	if (!wlan_cfg_per_pdev_rx_ring(soc_cfg_ctx)) {
2320 		soc->num_reo_dest_rings =
2321 			wlan_cfg_num_reo_dest_rings(soc_cfg_ctx);
2322 		QDF_TRACE(QDF_MODULE_ID_DP,
2323 			QDF_TRACE_LEVEL_INFO,
2324 			FL("num_reo_dest_rings %d"), soc->num_reo_dest_rings);
2325 		for (i = 0; i < soc->num_reo_dest_rings; i++) {
2326 			if (dp_srng_setup(soc, &soc->reo_dest_ring[i], REO_DST,
2327 				i, 0, reo_dst_ring_size)) {
2328 				QDF_TRACE(QDF_MODULE_ID_DP,
2329 					  QDF_TRACE_LEVEL_ERROR,
2330 					  FL(RNG_ERR "reo_dest_ring [%d]"), i);
2331 				goto fail1;
2332 			}
2333 		}
2334 	} else {
2335 		/* This will be incremented during per pdev ring setup */
2336 		soc->num_reo_dest_rings = 0;
2337 	}
2338 
2339 	entries = wlan_cfg_get_dp_soc_rxdma_err_dst_ring_size(soc_cfg_ctx);
2340 	/* LMAC RxDMA to SW Rings configuration */
2341 	if (!wlan_cfg_per_pdev_lmac_ring(soc_cfg_ctx)) {
2342 		/* Only valid for MCL */
2343 		struct dp_pdev *pdev = soc->pdev_list[0];
2344 
2345 		for (i = 0; i < MAX_RX_MAC_RINGS; i++) {
2346 			if (dp_srng_setup(soc, &pdev->rxdma_err_dst_ring[i],
2347 					  RXDMA_DST, 0, i,
2348 					  entries)) {
2349 				QDF_TRACE(QDF_MODULE_ID_DP,
2350 					  QDF_TRACE_LEVEL_ERROR,
2351 					  FL(RNG_ERR "rxdma_err_dst_ring"));
2352 				goto fail1;
2353 			}
2354 		}
2355 	}
2356 	/* TBD: call dp_rx_init to setup Rx SW descriptors */
2357 
2358 	/* REO reinjection ring */
2359 	entries = wlan_cfg_get_dp_soc_reo_reinject_ring_size(soc_cfg_ctx);
2360 	if (dp_srng_setup(soc, &soc->reo_reinject_ring, REO_REINJECT, 0, 0,
2361 			  entries)) {
2362 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2363 			  FL("dp_srng_setup failed for reo_reinject_ring"));
2364 		goto fail1;
2365 	}
2366 
2367 
2368 	/* Rx release ring */
2369 	if (dp_srng_setup(soc, &soc->rx_rel_ring, WBM2SW_RELEASE, 3, 0,
2370 		wlan_cfg_get_dp_soc_rx_release_ring_size(soc_cfg_ctx))) {
2371 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2372 			  FL("dp_srng_setup failed for rx_rel_ring"));
2373 		goto fail1;
2374 	}
2375 
2376 
2377 	/* Rx exception ring */
2378 	entries = wlan_cfg_get_dp_soc_reo_exception_ring_size(soc_cfg_ctx);
2379 	if (dp_srng_setup(soc, &soc->reo_exception_ring,
2380 			  REO_EXCEPTION, 0, MAX_REO_DEST_RINGS, entries)) {
2381 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2382 			  FL("dp_srng_setup failed for reo_exception_ring"));
2383 		goto fail1;
2384 	}
2385 
2386 
2387 	/* REO command and status rings */
2388 	if (dp_srng_setup(soc, &soc->reo_cmd_ring, REO_CMD, 0, 0,
2389 		wlan_cfg_get_dp_soc_reo_cmd_ring_size(soc_cfg_ctx))) {
2390 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2391 			FL("dp_srng_setup failed for reo_cmd_ring"));
2392 		goto fail1;
2393 	}
2394 
2395 	hal_reo_init_cmd_ring(soc->hal_soc, soc->reo_cmd_ring.hal_srng);
2396 	TAILQ_INIT(&soc->rx.reo_cmd_list);
2397 	qdf_spinlock_create(&soc->rx.reo_cmd_lock);
2398 
2399 	if (dp_srng_setup(soc, &soc->reo_status_ring, REO_STATUS, 0, 0,
2400 		wlan_cfg_get_dp_soc_reo_status_ring_size(soc_cfg_ctx))) {
2401 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2402 			FL("dp_srng_setup failed for reo_status_ring"));
2403 		goto fail1;
2404 	}
2405 
2406 	qdf_spinlock_create(&soc->ast_lock);
2407 	dp_soc_wds_attach(soc);
2408 
2409 	/* Reset the cpu ring map if radio is NSS offloaded */
2410 	if (wlan_cfg_get_dp_soc_nss_cfg(soc_cfg_ctx)) {
2411 		dp_soc_reset_cpu_ring_map(soc);
2412 		dp_soc_reset_intr_mask(soc);
2413 	}
2414 
2415 	/* Setup HW REO */
2416 	qdf_mem_zero(&reo_params, sizeof(reo_params));
2417 
2418 	if (wlan_cfg_is_rx_hash_enabled(soc_cfg_ctx)) {
2419 
2420 		/*
2421 		 * Reo ring remap is not required if both radios
2422 		 * are offloaded to NSS
2423 		 */
2424 		if (!dp_reo_remap_config(soc,
2425 					&reo_params.remap1,
2426 					&reo_params.remap2))
2427 			goto out;
2428 
2429 		reo_params.rx_hash_enabled = true;
2430 	}
2431 
2432 	/* setup the global rx defrag waitlist */
2433 	TAILQ_INIT(&soc->rx.defrag.waitlist);
2434 	soc->rx.defrag.timeout_ms =
2435 		wlan_cfg_get_rx_defrag_min_timeout(soc_cfg_ctx);
2436 	soc->rx.flags.defrag_timeout_check =
2437 		wlan_cfg_get_defrag_timeout_check(soc_cfg_ctx);
2438 	qdf_spinlock_create(&soc->rx.defrag.defrag_lock);
2439 
2440 out:
2441 	/*
2442 	 * set the fragment destination ring
2443 	 */
2444 	dp_reo_frag_dst_set(soc, &reo_params.frag_dst_ring);
2445 
2446 	hal_reo_setup(soc->hal_soc, &reo_params);
2447 
2448 	qdf_atomic_set(&soc->cmn_init_done, 1);
2449 	qdf_nbuf_queue_init(&soc->htt_stats.msg);
2450 	return 0;
2451 fail1:
2452 	/*
2453 	 * Cleanup will be done as part of soc_detach, which will
2454 	 * be called on pdev attach failure
2455 	 */
2456 	return QDF_STATUS_E_FAILURE;
2457 }
2458 
2459 static void dp_pdev_detach_wifi3(struct cdp_pdev *txrx_pdev, int force);
2460 
2461 static void dp_lro_hash_setup(struct dp_soc *soc)
2462 {
2463 	struct cdp_lro_hash_config lro_hash;
2464 
2465 	if (!wlan_cfg_is_lro_enabled(soc->wlan_cfg_ctx) &&
2466 		!wlan_cfg_is_rx_hash_enabled(soc->wlan_cfg_ctx)) {
2467 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2468 			 FL("LRO disabled RX hash disabled"));
2469 		return;
2470 	}
2471 
2472 	qdf_mem_zero(&lro_hash, sizeof(lro_hash));
2473 
2474 	if (wlan_cfg_is_lro_enabled(soc->wlan_cfg_ctx)) {
2475 		lro_hash.lro_enable = 1;
2476 		lro_hash.tcp_flag = QDF_TCPHDR_ACK;
2477 		lro_hash.tcp_flag_mask = QDF_TCPHDR_FIN | QDF_TCPHDR_SYN |
2478 			 QDF_TCPHDR_RST | QDF_TCPHDR_ACK | QDF_TCPHDR_URG |
2479 			 QDF_TCPHDR_ECE | QDF_TCPHDR_CWR;
2480 	}
2481 
2482 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW, FL("enabled"));
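	/* Seed the Toeplitz hash keys with random bytes; they are passed to
	 * the target below through the lro_hash_config callback.
	 */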
2483 	qdf_get_random_bytes(lro_hash.toeplitz_hash_ipv4,
2484 		 (sizeof(lro_hash.toeplitz_hash_ipv4[0]) *
2485 		 LRO_IPV4_SEED_ARR_SZ));
2486 	qdf_get_random_bytes(lro_hash.toeplitz_hash_ipv6,
2487 		 (sizeof(lro_hash.toeplitz_hash_ipv6[0]) *
2488 		 LRO_IPV6_SEED_ARR_SZ));
2489 
2490 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW,
2491 		 "lro_hash: lro_enable: 0x%x tcp_flag 0x%x tcp_flag_mask 0x%x",
2492 		 lro_hash.lro_enable, lro_hash.tcp_flag,
2493 		 lro_hash.tcp_flag_mask);
2494 
2495 	qdf_trace_hex_dump(QDF_MODULE_ID_DP,
2496 		 QDF_TRACE_LEVEL_ERROR,
2497 		 (void *)lro_hash.toeplitz_hash_ipv4,
2498 		 (sizeof(lro_hash.toeplitz_hash_ipv4[0]) *
2499 		 LRO_IPV4_SEED_ARR_SZ));
2500 
2501 	qdf_trace_hex_dump(QDF_MODULE_ID_DP,
2502 		 QDF_TRACE_LEVEL_ERROR,
2503 		 (void *)lro_hash.toeplitz_hash_ipv6,
2504 		 (sizeof(lro_hash.toeplitz_hash_ipv6[0]) *
2505 		 LRO_IPV6_SEED_ARR_SZ));
2506 
2507 	qdf_assert(soc->cdp_soc.ol_ops->lro_hash_config);
2508 
2509 	if (soc->cdp_soc.ol_ops->lro_hash_config)
2510 		(void)soc->cdp_soc.ol_ops->lro_hash_config
2511 			(soc->ctrl_psoc, &lro_hash);
2512 }
2513 
2514 /*
2515 * dp_rxdma_ring_setup() - configure the RX DMA rings
2516 * @soc: data path SoC handle
2517 * @pdev: Physical device handle
2518 *
2519 * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_FAILURE on failure
2520 */
2521 #ifdef QCA_HOST2FW_RXBUF_RING
2522 static int dp_rxdma_ring_setup(struct dp_soc *soc,
2523 	 struct dp_pdev *pdev)
2524 {
2525 	struct wlan_cfg_dp_pdev_ctxt *pdev_cfg_ctx;
2526 	int max_mac_rings;
2527 	int i;
2528 
2529 	pdev_cfg_ctx = pdev->wlan_cfg_ctx;
2530 	max_mac_rings = wlan_cfg_get_num_mac_rings(pdev_cfg_ctx);
2531 
2532 	for (i = 0; i < max_mac_rings; i++) {
2533 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
2534 			 "%s: pdev_id %d mac_id %d",
2535 			 __func__, pdev->pdev_id, i);
2536 		if (dp_srng_setup(soc, &pdev->rx_mac_buf_ring[i],
2537 			RXDMA_BUF, 1, i,
2538 			wlan_cfg_get_rx_dma_buf_ring_size(pdev_cfg_ctx))) {
2539 			QDF_TRACE(QDF_MODULE_ID_DP,
2540 				 QDF_TRACE_LEVEL_ERROR,
2541 				 FL("failed rx mac ring setup"));
2542 			return QDF_STATUS_E_FAILURE;
2543 		}
2544 	}
2545 	return QDF_STATUS_SUCCESS;
2546 }
2547 #else
2548 static int dp_rxdma_ring_setup(struct dp_soc *soc,
2549 	 struct dp_pdev *pdev)
2550 {
2551 	return QDF_STATUS_SUCCESS;
2552 }
2553 #endif
2554 
2555 /**
2556  * dp_dscp_tid_map_setup(): Initialize the dscp-tid maps
2557  * @pdev - DP_PDEV handle
2558  *
2559  * Return: void
2560  */
2561 static inline void
2562 dp_dscp_tid_map_setup(struct dp_pdev *pdev)
2563 {
2564 	uint8_t map_id;
2565 	for (map_id = 0; map_id < DP_MAX_TID_MAPS; map_id++) {
2566 		qdf_mem_copy(pdev->dscp_tid_map[map_id], default_dscp_tid_map,
2567 				sizeof(default_dscp_tid_map));
2568 	}
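	/*
	 * Only the first HAL_MAX_HW_DSCP_TID_MAPS maps are programmed into
	 * the hardware DSCP-to-TID registers; the remaining map_ids stay as
	 * software copies.
	 */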
2569 	for (map_id = 0; map_id < HAL_MAX_HW_DSCP_TID_MAPS; map_id++) {
2570 		hal_tx_set_dscp_tid_map(pdev->soc->hal_soc,
2571 				pdev->dscp_tid_map[map_id],
2572 				map_id);
2573 	}
2574 }
2575 
2576 #ifdef QCA_SUPPORT_SON
2577 /**
2578  * dp_mark_peer_inact(): Update peer inactivity status
2579  * @peer_handle - datapath peer handle
2580  * @inactive - true to mark the peer inactive, false to mark it active
2581  * Return: void
2582  */
2583 void dp_mark_peer_inact(void *peer_handle, bool inactive)
2584 {
2585 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
2586 	struct dp_pdev *pdev;
2587 	struct dp_soc *soc;
2588 	bool inactive_old;
2589 
2590 	if (!peer)
2591 		return;
2592 
2593 	pdev = peer->vdev->pdev;
2594 	soc = pdev->soc;
2595 
2596 	inactive_old = peer->peer_bs_inact_flag == 1;
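	/*
	 * Marking a peer active reloads its countdown; the periodic
	 * inactivity timer decrements it and flags the peer inactive
	 * once it reaches zero.
	 */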
2597 	if (!inactive)
2598 		peer->peer_bs_inact = soc->pdev_bs_inact_reload;
2599 	peer->peer_bs_inact_flag = inactive ? 1 : 0;
2600 
2601 	if (inactive_old != inactive) {
2602 		/**
2603 		 * Note: a node lookup can happen in RX datapath context
2604 		 * when a node changes from inactive to active (at most once
2605 		 * per inactivity timeout threshold)
2606 		 */
2607 		if (soc->cdp_soc.ol_ops->record_act_change) {
2608 			soc->cdp_soc.ol_ops->record_act_change(
2609 					(void *)pdev->ctrl_pdev,
2610 					peer->mac_addr.raw, !inactive);
2611 		}
2612 	}
2613 }
2614 
2615 /**
2616  * dp_txrx_peer_find_inact_timeout_handler(): Inactivity timeout function
2617  *
2618  * Periodically checks the inactivity status
2619  */
2620 static os_timer_func(dp_txrx_peer_find_inact_timeout_handler)
2621 {
2622 	struct dp_pdev *pdev;
2623 	struct dp_vdev *vdev;
2624 	struct dp_peer *peer;
2625 	struct dp_soc *soc;
2626 	int i;
2627 
2628 	OS_GET_TIMER_ARG(soc, struct dp_soc *);
2629 
2630 	qdf_spin_lock(&soc->peer_ref_mutex);
2631 
2632 	for (i = 0; i < soc->pdev_count; i++) {
2633 		pdev = soc->pdev_list[i];
2634 		qdf_spin_lock_bh(&pdev->vdev_list_lock);
2635 		TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
2636 			if (vdev->opmode != wlan_op_mode_ap)
2637 				continue;
2638 
2639 			TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
2640 				if (!peer->authorize) {
2641 					/*
2642 					 * Inactivity check is only interested
2643 					 * in connected nodes
2644 					 */
2645 					continue;
2646 				}
2647 				if (peer->peer_bs_inact > soc->pdev_bs_inact_reload) {
2648 					/*
2649 					 * This check ensures we do not wait extra long
2650 					 * due to the potential race condition
2651 					 */
2652 					peer->peer_bs_inact = soc->pdev_bs_inact_reload;
2653 				}
2654 				if (peer->peer_bs_inact > 0) {
2655 					/* Do not let it wrap around */
2656 					peer->peer_bs_inact--;
2657 				}
2658 				if (peer->peer_bs_inact == 0)
2659 					dp_mark_peer_inact(peer, true);
2660 			}
2661 		}
2662 		qdf_spin_unlock_bh(&pdev->vdev_list_lock);
2663 	}
2664 
2665 	qdf_spin_unlock(&soc->peer_ref_mutex);
2666 	qdf_timer_mod(&soc->pdev_bs_inact_timer,
2667 		      soc->pdev_bs_inact_interval * 1000);
2668 }
2669 
2670 
2671 /**
2672  * dp_free_inact_timer(): free inact timer
2673  * @soc - Datapath SOC handle
2674  *
2675  * Return: void
2676  */
2677 void dp_free_inact_timer(struct dp_soc *soc)
2678 {
2679 	qdf_timer_free(&soc->pdev_bs_inact_timer);
2680 }
2681 #else
2682 
2683 void dp_mark_peer_inact(void *peer, bool inactive)
2684 {
2685 	return;
2686 }
2687 
2688 void dp_free_inact_timer(struct dp_soc *soc)
2689 {
2690 	return;
2691 }
2692 
2693 #endif
2694 
2695 #ifdef IPA_OFFLOAD
2696 /**
2697  * dp_setup_ipa_rx_refill_buf_ring - Setup second Rx refill buffer ring
2698  * @soc: data path instance
2699  * @pdev: core txrx pdev context
2700  *
2701  * Return: QDF_STATUS_SUCCESS: success
2702  *         QDF_STATUS_E_FAILURE: Error return
2703  */
2704 static int dp_setup_ipa_rx_refill_buf_ring(struct dp_soc *soc,
2705 					   struct dp_pdev *pdev)
2706 {
2707 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
2708 	int entries;
2709 
2710 	soc_cfg_ctx = soc->wlan_cfg_ctx;
2711 	entries = wlan_cfg_get_dp_soc_rxdma_refill_ring_size(soc_cfg_ctx);
2712 
2713 	/* Setup second Rx refill buffer ring */
2714 	if (dp_srng_setup(soc, &pdev->rx_refill_buf_ring2, RXDMA_BUF,
2715 			  IPA_RX_REFILL_BUF_RING_IDX,
2716 			  pdev->pdev_id,
2717 			  entries)) {
2718 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2719 			FL("dp_srng_setup failed second rx refill ring"));
2720 		return QDF_STATUS_E_FAILURE;
2721 	}
2722 	return QDF_STATUS_SUCCESS;
2723 }
2724 
2725 /**
2726  * dp_cleanup_ipa_rx_refill_buf_ring - Cleanup second Rx refill buffer ring
2727  * @soc: data path instance
2728  * @pdev: core txrx pdev context
2729  *
2730  * Return: void
2731  */
2732 static void dp_cleanup_ipa_rx_refill_buf_ring(struct dp_soc *soc,
2733 					      struct dp_pdev *pdev)
2734 {
2735 	dp_srng_cleanup(soc, &pdev->rx_refill_buf_ring2, RXDMA_BUF,
2736 			IPA_RX_REFILL_BUF_RING_IDX);
2737 }
2738 
2739 #else
2740 static int dp_setup_ipa_rx_refill_buf_ring(struct dp_soc *soc,
2741 					   struct dp_pdev *pdev)
2742 {
2743 	return QDF_STATUS_SUCCESS;
2744 }
2745 
2746 static void dp_cleanup_ipa_rx_refill_buf_ring(struct dp_soc *soc,
2747 					      struct dp_pdev *pdev)
2748 {
2749 }
2750 #endif
2751 
2752 #ifndef QCA_WIFI_QCA6390
2753 static
2754 QDF_STATUS dp_mon_rings_setup(struct dp_soc *soc, struct dp_pdev *pdev)
2755 {
2756 	int mac_id = 0;
2757 	int pdev_id = pdev->pdev_id;
2758 	int entries;
2759 	struct wlan_cfg_dp_pdev_ctxt *pdev_cfg_ctx;
2760 
2761 	pdev_cfg_ctx = pdev->wlan_cfg_ctx;
2762 
2763 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
2764 		int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, pdev_id);
2765 
2766 		entries = wlan_cfg_get_dma_mon_buf_ring_size(pdev_cfg_ctx);
2767 		if (dp_srng_setup(soc, &pdev->rxdma_mon_buf_ring[mac_id],
2768 				  RXDMA_MONITOR_BUF, 0, mac_for_pdev,
2769 				  entries)) {
2770 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2771 				  FL(RNG_ERR "rxdma_mon_buf_ring "));
2772 			return QDF_STATUS_E_NOMEM;
2773 		}
2774 
2775 		entries = wlan_cfg_get_dma_mon_dest_ring_size(pdev_cfg_ctx);
2776 		if (dp_srng_setup(soc, &pdev->rxdma_mon_dst_ring[mac_id],
2777 				  RXDMA_MONITOR_DST, 0, mac_for_pdev,
2778 				  entries)) {
2779 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2780 				  FL(RNG_ERR "rxdma_mon_dst_ring"));
2781 			return QDF_STATUS_E_NOMEM;
2782 		}
2783 
2784 		entries = wlan_cfg_get_dma_mon_stat_ring_size(pdev_cfg_ctx);
2785 		if (dp_srng_setup(soc, &pdev->rxdma_mon_status_ring[mac_id],
2786 				  RXDMA_MONITOR_STATUS, 0, mac_for_pdev,
2787 				  entries)) {
2788 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2789 				  FL(RNG_ERR "rxdma_mon_status_ring"));
2790 			return QDF_STATUS_E_NOMEM;
2791 		}
2792 
2793 		entries = wlan_cfg_get_dma_mon_desc_ring_size(pdev_cfg_ctx);
2794 		if (dp_srng_setup(soc, &pdev->rxdma_mon_desc_ring[mac_id],
2795 				  RXDMA_MONITOR_DESC, 0, mac_for_pdev,
2796 				  entries)) {
2797 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2798 				  FL(RNG_ERR "rxdma_mon_desc_ring"));
2799 			return QDF_STATUS_E_NOMEM;
2800 		}
2801 	}
2802 	return QDF_STATUS_SUCCESS;
2803 }
2804 #else
2805 static QDF_STATUS dp_mon_rings_setup(struct dp_soc *soc, struct dp_pdev *pdev)
2806 {
2807 	return QDF_STATUS_SUCCESS;
2808 }
2809 #endif
2810 
2811 /* dp_iterate_update_peer_list - update peer stats on cal client timer
2812  * @pdev_hdl: pdev handle
2813  */
2814 #ifdef ATH_SUPPORT_EXT_STAT
2815 void  dp_iterate_update_peer_list(void *pdev_hdl)
2816 {
2817 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_hdl;
2818 	struct dp_vdev *vdev = NULL;
2819 	struct dp_peer *peer = NULL;
2820 
2821 	DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
2822 		DP_VDEV_ITERATE_PEER_LIST(vdev, peer) {
2823 			dp_cal_client_update_peer_stats(&peer->stats);
2824 		}
2825 	}
2826 }
2827 #else
2828 void  dp_iterate_update_peer_list(void *pdev_hdl)
2829 {
2830 }
2831 #endif
2832 
2833 /*
2834 * dp_pdev_attach_wifi3() - attach txrx pdev
2835 * @txrx_soc: Datapath SOC handle
2836 * @ctrl_pdev: Opaque PDEV object
2837 * @htc_handle: HTC handle for host-target interface
2838 * @qdf_osdev: QDF OS device
2839 * @pdev_id: PDEV ID
2840 *
2841 * Return: DP PDEV handle on success, NULL on failure
2842 */
2843 static struct cdp_pdev *dp_pdev_attach_wifi3(struct cdp_soc_t *txrx_soc,
2844 	struct cdp_ctrl_objmgr_pdev *ctrl_pdev,
2845 	HTC_HANDLE htc_handle, qdf_device_t qdf_osdev, uint8_t pdev_id)
2846 {
2847 	int tx_ring_size;
2848 	int tx_comp_ring_size;
2849 	int reo_dst_ring_size;
2850 	int entries;
2851 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
2852 	int nss_cfg;
2853 
2854 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
2855 	struct dp_pdev *pdev = qdf_mem_malloc(sizeof(*pdev));
2856 
2857 	if (!pdev) {
2858 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2859 			FL("DP PDEV memory allocation failed"));
2860 		goto fail0;
2861 	}
2862 
2863 	soc_cfg_ctx = soc->wlan_cfg_ctx;
2864 	pdev->wlan_cfg_ctx = wlan_cfg_pdev_attach(soc->ctrl_psoc);
2865 
2866 	if (!pdev->wlan_cfg_ctx) {
2867 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2868 			FL("pdev cfg_attach failed"));
2869 
2870 		qdf_mem_free(pdev);
2871 		goto fail0;
2872 	}
2873 
2874 	/*
2875 	 * set nss pdev config based on soc config
2876 	 */
2877 	nss_cfg = wlan_cfg_get_dp_soc_nss_cfg(soc_cfg_ctx);
2878 	wlan_cfg_set_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx,
2879 			(nss_cfg & (1 << pdev_id)));
2880 
2881 	pdev->soc = soc;
2882 	pdev->ctrl_pdev = ctrl_pdev;
2883 	pdev->pdev_id = pdev_id;
2884 	soc->pdev_list[pdev_id] = pdev;
2885 	soc->pdev_count++;
2886 
2887 	TAILQ_INIT(&pdev->vdev_list);
2888 	qdf_spinlock_create(&pdev->vdev_list_lock);
2889 	pdev->vdev_count = 0;
2890 
2891 	qdf_spinlock_create(&pdev->tx_mutex);
2892 	qdf_spinlock_create(&pdev->neighbour_peer_mutex);
2893 	TAILQ_INIT(&pdev->neighbour_peers_list);
2894 	pdev->neighbour_peers_added = false;
2895 
2896 	if (dp_soc_cmn_setup(soc)) {
2897 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2898 			FL("dp_soc_cmn_setup failed"));
2899 		goto fail1;
2900 	}
2901 
2902 	/* Setup per PDEV TCL rings if configured */
2903 	if (wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
2904 		tx_ring_size =
2905 			wlan_cfg_tx_ring_size(soc_cfg_ctx);
2906 		tx_comp_ring_size =
2907 			wlan_cfg_tx_comp_ring_size(soc_cfg_ctx);
2908 
2909 		if (dp_srng_setup(soc, &soc->tcl_data_ring[pdev_id], TCL_DATA,
2910 			pdev_id, pdev_id, tx_ring_size)) {
2911 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2912 				FL("dp_srng_setup failed for tcl_data_ring"));
2913 			goto fail1;
2914 		}
2915 		if (dp_srng_setup(soc, &soc->tx_comp_ring[pdev_id],
2916 			WBM2SW_RELEASE, pdev_id, pdev_id, tx_comp_ring_size)) {
2917 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2918 				FL("dp_srng_setup failed for tx_comp_ring"));
2919 			goto fail1;
2920 		}
2921 		soc->num_tcl_data_rings++;
2922 	}
2923 
2924 	/* Tx specific init */
2925 	if (dp_tx_pdev_attach(pdev)) {
2926 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2927 			FL("dp_tx_pdev_attach failed"));
2928 		goto fail1;
2929 	}
2930 
2931 	reo_dst_ring_size = wlan_cfg_get_reo_dst_ring_size(soc->wlan_cfg_ctx);
2932 	/* Setup per PDEV REO rings if configured */
2933 	if (wlan_cfg_per_pdev_rx_ring(soc_cfg_ctx)) {
2934 		if (dp_srng_setup(soc, &soc->reo_dest_ring[pdev_id], REO_DST,
2935 			pdev_id, pdev_id, reo_dst_ring_size)) {
2936 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2937 				FL("dp_srng_setup failed for reo_dest_ring"));
2938 			goto fail1;
2939 		}
2940 		soc->num_reo_dest_rings++;
2941 
2942 	}
2943 	if (dp_srng_setup(soc, &pdev->rx_refill_buf_ring, RXDMA_BUF, 0, pdev_id,
2944 		wlan_cfg_get_dp_soc_rxdma_refill_ring_size(soc_cfg_ctx))) {
2945 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2946 			 FL("dp_srng_setup failed rx refill ring"));
2947 		goto fail1;
2948 	}
2949 
2950 	if (dp_rxdma_ring_setup(soc, pdev)) {
2951 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2952 			 FL("RXDMA ring config failed"));
2953 		goto fail1;
2954 	}
2955 
2956 	if (dp_mon_rings_setup(soc, pdev)) {
2957 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2958 			  FL("MONITOR rings setup failed"));
2959 		goto fail1;
2960 	}
2961 
2962 	entries = wlan_cfg_get_dp_soc_rxdma_err_dst_ring_size(soc_cfg_ctx);
2963 	if (wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx)) {
2964 		if (dp_srng_setup(soc, &pdev->rxdma_err_dst_ring[0], RXDMA_DST,
2965 				  0, pdev_id,
2966 				  entries)) {
2967 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2968 				  FL(RNG_ERR "rxdma_err_dst_ring"));
2969 			goto fail1;
2970 		}
2971 	}
2972 
2973 	if (dp_setup_ipa_rx_refill_buf_ring(soc, pdev))
2974 		goto fail1;
2975 
2976 	if (dp_ipa_ring_resource_setup(soc, pdev))
2977 		goto fail1;
2978 
2979 	if (dp_ipa_uc_attach(soc, pdev) != QDF_STATUS_SUCCESS) {
2980 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2981 			FL("dp_ipa_uc_attach failed"));
2982 		goto fail1;
2983 	}
2984 
2985 	/* Rx specific init */
2986 	if (dp_rx_pdev_attach(pdev)) {
2987 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2988 			FL("dp_rx_pdev_attach failed"));
2989 		goto fail0;
2990 	}
2991 	DP_STATS_INIT(pdev);
2992 
2993 	/* Monitor filter init */
2994 	pdev->mon_filter_mode = MON_FILTER_ALL;
2995 	pdev->fp_mgmt_filter = FILTER_MGMT_ALL;
2996 	pdev->fp_ctrl_filter = FILTER_CTRL_ALL;
2997 	pdev->fp_data_filter = FILTER_DATA_ALL;
2998 	pdev->mo_mgmt_filter = FILTER_MGMT_ALL;
2999 	pdev->mo_ctrl_filter = FILTER_CTRL_ALL;
3000 	pdev->mo_data_filter = FILTER_DATA_ALL;
3001 
3002 	dp_local_peer_id_pool_init(pdev);
3003 
3004 	dp_dscp_tid_map_setup(pdev);
3005 
3006 	/* Rx monitor mode specific init */
3007 	if (dp_rx_pdev_mon_attach(pdev)) {
3008 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
3009 				"dp_rx_pdev_mon_attach failed");
3010 		goto fail1;
3011 	}
3012 
3013 	if (dp_wdi_event_attach(pdev)) {
3014 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
3015 				"dp_wdi_event_attach failed");
3016 		goto fail1;
3017 	}
3018 
3019 	/* set the reo destination during initialization */
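	/* REO destination ring indices are 1-based, so pdev 0 maps to REO2SW1 */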
3020 	pdev->reo_dest = pdev->pdev_id + 1;
3021 
3022 	/*
3023 	 * initialize ppdu tlv list
3024 	 */
3025 	TAILQ_INIT(&pdev->ppdu_info_list);
3026 	pdev->tlv_count = 0;
3027 	pdev->list_depth = 0;
3028 
3029 	qdf_mem_zero(&pdev->sojourn_stats, sizeof(struct cdp_tx_sojourn_stats));
3030 
3031 	pdev->sojourn_buf = qdf_nbuf_alloc(pdev->soc->osdev,
3032 			      sizeof(struct cdp_tx_sojourn_stats), 0, 4,
3033 			      TRUE);
3034 
3035 	/* initialize cal client timer */
3036 	dp_cal_client_attach(&pdev->cal_client_ctx, pdev, pdev->soc->osdev,
3037 			     &dp_iterate_update_peer_list);
3038 
3039 	return (struct cdp_pdev *)pdev;
3040 
3041 fail1:
3042 	dp_pdev_detach_wifi3((struct cdp_pdev *)pdev, 0);
3043 
3044 fail0:
3045 	return NULL;
3046 }
3047 
3048 /*
3049 * dp_rxdma_ring_cleanup() - configure the RX DMA rings
3050 * dp_rxdma_ring_cleanup() - clean up the RX DMA rings
3051 * @pdev: Physical device handle
3052 *
3053 * Return: void
3054 */
3055 #ifdef QCA_HOST2FW_RXBUF_RING
3056 static void dp_rxdma_ring_cleanup(struct dp_soc *soc,
3057 	 struct dp_pdev *pdev)
3058 {
3059 	int max_mac_rings =
3060 		 wlan_cfg_get_num_mac_rings(pdev->wlan_cfg_ctx);
3061 	int i;
3062 
3063 	max_mac_rings = max_mac_rings < MAX_RX_MAC_RINGS ?
3064 				max_mac_rings : MAX_RX_MAC_RINGS;
3065 	for (i = 0; i < MAX_RX_MAC_RINGS; i++)
3066 		dp_srng_cleanup(soc, &pdev->rx_mac_buf_ring[i],
3067 			 RXDMA_BUF, 1);
3068 
3069 	qdf_timer_free(&soc->mon_reap_timer);
3070 }
3071 #else
3072 static void dp_rxdma_ring_cleanup(struct dp_soc *soc,
3073 	 struct dp_pdev *pdev)
3074 {
3075 }
3076 #endif
3077 
3078 /*
3079  * dp_neighbour_peers_detach() - Detach neighbour peers(nac clients)
3080  * @pdev: device object
3081  *
3082  * Return: void
3083  */
3084 static void dp_neighbour_peers_detach(struct dp_pdev *pdev)
3085 {
3086 	struct dp_neighbour_peer *peer = NULL;
3087 	struct dp_neighbour_peer *temp_peer = NULL;
3088 
3089 	TAILQ_FOREACH_SAFE(peer, &pdev->neighbour_peers_list,
3090 			neighbour_peer_list_elem, temp_peer) {
3091 		/* delete this peer from the list */
3092 		TAILQ_REMOVE(&pdev->neighbour_peers_list,
3093 				peer, neighbour_peer_list_elem);
3094 		qdf_mem_free(peer);
3095 	}
3096 
3097 	qdf_spinlock_destroy(&pdev->neighbour_peer_mutex);
3098 }
3099 
3100 /**
3101 * dp_htt_ppdu_stats_detach() - detach stats resources
3102 * @pdev: Datapath PDEV handle
3103 *
3104 * Return: void
3105 */
3106 static void dp_htt_ppdu_stats_detach(struct dp_pdev *pdev)
3107 {
3108 	struct ppdu_info *ppdu_info, *ppdu_info_next;
3109 
3110 	TAILQ_FOREACH_SAFE(ppdu_info, &pdev->ppdu_info_list,
3111 			ppdu_info_list_elem, ppdu_info_next) {
3112 		if (!ppdu_info)
3113 			break;
3114 		qdf_assert_always(ppdu_info->nbuf);
3115 		qdf_nbuf_free(ppdu_info->nbuf);
3116 		qdf_mem_free(ppdu_info);
3117 	}
3118 }
3119 
3120 #ifndef QCA_WIFI_QCA6390
3121 static
3122 void dp_mon_ring_deinit(struct dp_soc *soc, struct dp_pdev *pdev,
3123 			int mac_id)
3124 {
3125 	dp_srng_cleanup(soc, &pdev->rxdma_mon_buf_ring[mac_id],
3126 			RXDMA_MONITOR_BUF, 0);
3127 	dp_srng_cleanup(soc, &pdev->rxdma_mon_dst_ring[mac_id],
3128 			RXDMA_MONITOR_DST, 0);
3129 
3130 	dp_srng_cleanup(soc, &pdev->rxdma_mon_status_ring[mac_id],
3131 			RXDMA_MONITOR_STATUS, 0);
3132 
3133 	dp_srng_cleanup(soc, &pdev->rxdma_mon_desc_ring[mac_id],
3134 			RXDMA_MONITOR_DESC, 0);
3135 	dp_srng_cleanup(soc, &pdev->rxdma_err_dst_ring[mac_id],
3136 			RXDMA_DST, 0);
3137 }
3138 #else
3139 static void dp_mon_ring_deinit(struct dp_soc *soc, struct dp_pdev *pdev,
3140 			       int mac_id)
3141 {
3142 }
3143 #endif
3144 
3145 /*
3146 * dp_pdev_detach_wifi3() - detach txrx pdev
3147 * @txrx_pdev: Datapath PDEV handle
3148 * @force: Force detach
3149 * Return: void
3150 */
3151 static void dp_pdev_detach_wifi3(struct cdp_pdev *txrx_pdev, int force)
3152 {
3153 	struct dp_pdev *pdev = (struct dp_pdev *)txrx_pdev;
3154 	struct dp_soc *soc = pdev->soc;
3155 	qdf_nbuf_t curr_nbuf, next_nbuf;
3156 	int mac_id;
3157 
3158 	dp_wdi_event_detach(pdev);
3159 
3160 	dp_tx_pdev_detach(pdev);
3161 
3162 	if (wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
3163 		dp_srng_cleanup(soc, &soc->tcl_data_ring[pdev->pdev_id],
3164 			TCL_DATA, pdev->pdev_id);
3165 		dp_srng_cleanup(soc, &soc->tx_comp_ring[pdev->pdev_id],
3166 			WBM2SW_RELEASE, pdev->pdev_id);
3167 	}
3168 
3169 	dp_pktlogmod_exit(pdev);
3170 
3171 	dp_rx_pdev_detach(pdev);
3172 	dp_rx_pdev_mon_detach(pdev);
3173 	dp_neighbour_peers_detach(pdev);
3174 	qdf_spinlock_destroy(&pdev->tx_mutex);
3175 	qdf_spinlock_destroy(&pdev->vdev_list_lock);
3176 
3177 	dp_ipa_uc_detach(soc, pdev);
3178 
3179 	dp_cleanup_ipa_rx_refill_buf_ring(soc, pdev);
3180 
3181 	/* Cleanup per PDEV REO rings if configured */
3182 	if (wlan_cfg_per_pdev_rx_ring(soc->wlan_cfg_ctx)) {
3183 		dp_srng_cleanup(soc, &soc->reo_dest_ring[pdev->pdev_id],
3184 			REO_DST, pdev->pdev_id);
3185 	}
3186 
3187 	dp_srng_cleanup(soc, &pdev->rx_refill_buf_ring, RXDMA_BUF, 0);
3188 
3189 	dp_rxdma_ring_cleanup(soc, pdev);
3190 
3191 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
3192 		dp_mon_ring_deinit(soc, pdev, mac_id);
3193 		dp_srng_cleanup(soc, &pdev->rxdma_err_dst_ring[mac_id],
3194 			RXDMA_DST, 0);
3195 	}
3196 
3197 	curr_nbuf = pdev->invalid_peer_head_msdu;
3198 	while (curr_nbuf) {
3199 		next_nbuf = qdf_nbuf_next(curr_nbuf);
3200 		qdf_nbuf_free(curr_nbuf);
3201 		curr_nbuf = next_nbuf;
3202 	}
3203 
3204 	dp_htt_ppdu_stats_detach(pdev);
3205 
3206 	qdf_nbuf_free(pdev->sojourn_buf);
3207 
3208 	dp_cal_client_detach(&pdev->cal_client_ctx);
3209 	soc->pdev_list[pdev->pdev_id] = NULL;
3210 	soc->pdev_count--;
3211 	wlan_cfg_pdev_detach(pdev->wlan_cfg_ctx);
3212 	qdf_mem_free(pdev->dp_txrx_handle);
3213 	qdf_mem_free(pdev);
3214 }
3215 
3216 /*
3217  * dp_reo_desc_freelist_destroy() - Flush REO descriptors from deferred freelist
3218  * @soc: DP SOC handle
3219  */
3220 static inline void dp_reo_desc_freelist_destroy(struct dp_soc *soc)
3221 {
3222 	struct reo_desc_list_node *desc;
3223 	struct dp_rx_tid *rx_tid;
3224 
3225 	qdf_spin_lock_bh(&soc->reo_desc_freelist_lock);
3226 	while (qdf_list_remove_front(&soc->reo_desc_freelist,
3227 		(qdf_list_node_t **)&desc) == QDF_STATUS_SUCCESS) {
3228 		rx_tid = &desc->rx_tid;
3229 		qdf_mem_unmap_nbytes_single(soc->osdev,
3230 			rx_tid->hw_qdesc_paddr,
3231 			QDF_DMA_BIDIRECTIONAL,
3232 			rx_tid->hw_qdesc_alloc_size);
3233 		qdf_mem_free(rx_tid->hw_qdesc_vaddr_unaligned);
3234 		qdf_mem_free(desc);
3235 	}
3236 	qdf_spin_unlock_bh(&soc->reo_desc_freelist_lock);
3237 	qdf_list_destroy(&soc->reo_desc_freelist);
3238 	qdf_spinlock_destroy(&soc->reo_desc_freelist_lock);
3239 }
3240 
3241 /*
3242  * dp_soc_detach_wifi3() - Detach txrx SOC
3243  * @txrx_soc: DP SOC handle, struct cdp_soc_t is first element of struct dp_soc.
3244  */
3245 static void dp_soc_detach_wifi3(void *txrx_soc)
3246 {
3247 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
3248 	int i;
3249 
3250 	qdf_atomic_set(&soc->cmn_init_done, 0);
3251 
3252 	qdf_flush_work(&soc->htt_stats.work);
3253 	qdf_disable_work(&soc->htt_stats.work);
3254 
3255 	/* Free pending htt stats messages */
3256 	qdf_nbuf_queue_free(&soc->htt_stats.msg);
3257 
3258 	dp_free_inact_timer(soc);
3259 
3260 	for (i = 0; i < MAX_PDEV_CNT; i++) {
3261 		if (soc->pdev_list[i])
3262 			dp_pdev_detach_wifi3(
3263 				(struct cdp_pdev *)soc->pdev_list[i], 1);
3264 	}
3265 
3266 	dp_peer_find_detach(soc);
3267 
3268 	/* TBD: Call Tx and Rx cleanup functions to free buffers and
3269 	 * SW descriptors
3270 	 */
3271 
3272 	/* Free the ring memories */
3273 	/* Common rings */
3274 	dp_srng_cleanup(soc, &soc->wbm_desc_rel_ring, SW2WBM_RELEASE, 0);
3275 
3276 	dp_tx_soc_detach(soc);
3277 	/* Tx data rings */
3278 	if (!wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
3279 		for (i = 0; i < soc->num_tcl_data_rings; i++) {
3280 			dp_srng_cleanup(soc, &soc->tcl_data_ring[i],
3281 				TCL_DATA, i);
3282 			dp_srng_cleanup(soc, &soc->tx_comp_ring[i],
3283 				WBM2SW_RELEASE, i);
3284 		}
3285 	}
3286 
3287 	/* TCL command and status rings */
3288 	dp_srng_cleanup(soc, &soc->tcl_cmd_ring, TCL_CMD, 0);
3289 	dp_srng_cleanup(soc, &soc->tcl_status_ring, TCL_STATUS, 0);
3290 
3291 	/* Rx data rings */
3292 	if (!wlan_cfg_per_pdev_rx_ring(soc->wlan_cfg_ctx)) {
3293 		soc->num_reo_dest_rings =
3294 			wlan_cfg_num_reo_dest_rings(soc->wlan_cfg_ctx);
3295 		for (i = 0; i < soc->num_reo_dest_rings; i++) {
3296 			/* TODO: Get number of rings and ring sizes
3297 			 * from wlan_cfg
3298 			 */
3299 			dp_srng_cleanup(soc, &soc->reo_dest_ring[i],
3300 				REO_DST, i);
3301 		}
3302 	}
3303 	/* REO reinjection ring */
3304 	dp_srng_cleanup(soc, &soc->reo_reinject_ring, REO_REINJECT, 0);
3305 
3306 	/* Rx release ring */
3307 	dp_srng_cleanup(soc, &soc->rx_rel_ring, WBM2SW_RELEASE, 0);
3308 
3309 	/* Rx exception ring */
3310 	/* TODO: Better to store ring_type and ring_num in
3311 	 * dp_srng during setup
3312 	 */
3313 	dp_srng_cleanup(soc, &soc->reo_exception_ring, REO_EXCEPTION, 0);
3314 
3315 	/* REO command and status rings */
3316 	dp_srng_cleanup(soc, &soc->reo_cmd_ring, REO_CMD, 0);
3317 	dp_srng_cleanup(soc, &soc->reo_status_ring, REO_STATUS, 0);
3318 	dp_hw_link_desc_pool_cleanup(soc);
3319 
3320 	qdf_spinlock_destroy(&soc->peer_ref_mutex);
3321 	qdf_spinlock_destroy(&soc->htt_stats.lock);
3322 
3323 	htt_soc_detach(soc->htt_handle);
3324 
3325 	qdf_spinlock_destroy(&soc->rx.defrag.defrag_lock);
3326 
3327 	dp_reo_cmdlist_destroy(soc);
3328 	qdf_spinlock_destroy(&soc->rx.reo_cmd_lock);
3329 	dp_reo_desc_freelist_destroy(soc);
3330 
3331 	wlan_cfg_soc_detach(soc->wlan_cfg_ctx);
3332 
3333 	dp_soc_wds_detach(soc);
3334 	qdf_spinlock_destroy(&soc->ast_lock);
3335 
3336 	qdf_mem_free(soc);
3337 }
3338 
3339 #ifndef QCA_WIFI_QCA6390
3340 static void dp_mon_htt_srng_setup(struct dp_soc *soc,
3341 				  struct dp_pdev *pdev,
3342 				  int mac_id,
3343 				  int mac_for_pdev)
3344 {
3345 	htt_srng_setup(soc->htt_handle, mac_for_pdev,
3346 		       pdev->rxdma_mon_buf_ring[mac_id].hal_srng,
3347 		       RXDMA_MONITOR_BUF);
3348 
3349 	htt_srng_setup(soc->htt_handle, mac_for_pdev,
3350 		       pdev->rxdma_mon_dst_ring[mac_id].hal_srng,
3351 		       RXDMA_MONITOR_DST);
3352 
3353 	htt_srng_setup(soc->htt_handle, mac_for_pdev,
3354 		       pdev->rxdma_mon_status_ring[mac_id].hal_srng,
3355 		       RXDMA_MONITOR_STATUS);
3356 
3357 	htt_srng_setup(soc->htt_handle, mac_for_pdev,
3358 		       pdev->rxdma_mon_desc_ring[mac_id].hal_srng,
3359 		       RXDMA_MONITOR_DESC);
3360 }
3361 #else
3362 static void dp_mon_htt_srng_setup(struct dp_soc *soc,
3363 				  struct dp_pdev *pdev,
3364 				  int mac_id,
3365 				  int mac_for_pdev)
3366 {
3367 }
3368 #endif
3369 /*
3370  * dp_rxdma_ring_config() - configure the RX DMA rings
3371  *
3372  * This function is used to configure the MAC rings.
3373  * On MCL, the host provides buffers in the Host2FW ring;
3374  * FW refills (copies) buffers to the ring and updates the
3375  * ring_idx in a register
3376  *
3377  * @soc: data path SoC handle
3378  *
3379  * Return: void
3380  */
3381 #ifdef QCA_HOST2FW_RXBUF_RING
3382 static void dp_rxdma_ring_config(struct dp_soc *soc)
3383 {
3384 	int i;
3385 
3386 	for (i = 0; i < MAX_PDEV_CNT; i++) {
3387 		struct dp_pdev *pdev = soc->pdev_list[i];
3388 
3389 		if (pdev) {
3390 			int mac_id;
3391 			bool dbs_enable = 0;
3392 			int max_mac_rings =
3393 				 wlan_cfg_get_num_mac_rings
3394 				(pdev->wlan_cfg_ctx);
3395 
3396 			htt_srng_setup(soc->htt_handle, 0,
3397 				 pdev->rx_refill_buf_ring.hal_srng,
3398 				 RXDMA_BUF);
3399 
3400 			if (pdev->rx_refill_buf_ring2.hal_srng)
3401 				htt_srng_setup(soc->htt_handle, 0,
3402 					pdev->rx_refill_buf_ring2.hal_srng,
3403 					RXDMA_BUF);
3404 
3405 			if (soc->cdp_soc.ol_ops->
3406 				is_hw_dbs_2x2_capable) {
3407 				dbs_enable = soc->cdp_soc.ol_ops->
3408 					is_hw_dbs_2x2_capable(soc->ctrl_psoc);
3409 			}
3410 
3411 			if (dbs_enable) {
3412 				QDF_TRACE(QDF_MODULE_ID_TXRX,
3413 				QDF_TRACE_LEVEL_ERROR,
3414 				FL("DBS enabled max_mac_rings %d"),
3415 					 max_mac_rings);
3416 			} else {
3417 				max_mac_rings = 1;
3418 				QDF_TRACE(QDF_MODULE_ID_TXRX,
3419 					 QDF_TRACE_LEVEL_ERROR,
3420 					 FL("DBS disabled, max_mac_rings %d"),
3421 					 max_mac_rings);
3422 			}
3423 
3424 			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
3425 					 FL("pdev_id %d max_mac_rings %d"),
3426 					 pdev->pdev_id, max_mac_rings);
3427 
3428 			for (mac_id = 0; mac_id < max_mac_rings; mac_id++) {
3429 				int mac_for_pdev = dp_get_mac_id_for_pdev(
3430 							mac_id, pdev->pdev_id);
3431 
3432 				QDF_TRACE(QDF_MODULE_ID_TXRX,
3433 					 QDF_TRACE_LEVEL_ERROR,
3434 					 FL("mac_id %d"), mac_for_pdev);
3435 				htt_srng_setup(soc->htt_handle, mac_for_pdev,
3436 					 pdev->rx_mac_buf_ring[mac_id]
3437 						.hal_srng,
3438 					 RXDMA_BUF);
3439 				htt_srng_setup(soc->htt_handle, mac_for_pdev,
3440 					pdev->rxdma_err_dst_ring[mac_id]
3441 						.hal_srng,
3442 					RXDMA_DST);
3443 
3444 				/* Configure monitor mode rings */
3445 				dp_mon_htt_srng_setup(soc, pdev, mac_id,
3446 						      mac_for_pdev);
3447 
3448 			}
3449 		}
3450 	}
3451 
3452 	/*
3453 	 * Timer to reap rxdma status rings.
3454 	 * Needed until we enable ppdu end interrupts
3455 	 */
3456 	qdf_timer_init(soc->osdev, &soc->mon_reap_timer,
3457 			dp_service_mon_rings, (void *)soc,
3458 			QDF_TIMER_TYPE_WAKE_APPS);
3459 	soc->reap_timer_init = 1;
3460 }
3461 #else
3462 /* This is only for WIN */
3463 static void dp_rxdma_ring_config(struct dp_soc *soc)
3464 {
3465 	int i;
3466 	int mac_id;
3467 
3468 	for (i = 0; i < MAX_PDEV_CNT; i++) {
3469 		struct dp_pdev *pdev = soc->pdev_list[i];
3470 
3471 		if (pdev == NULL)
3472 			continue;
3473 
3474 		for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
3475 			int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, i);
3476 
3477 			htt_srng_setup(soc->htt_handle, mac_for_pdev,
3478 				pdev->rx_refill_buf_ring.hal_srng, RXDMA_BUF);
3479 
3480 			htt_srng_setup(soc->htt_handle, mac_for_pdev,
3481 				pdev->rxdma_mon_buf_ring[mac_id].hal_srng,
3482 				RXDMA_MONITOR_BUF);
3483 			htt_srng_setup(soc->htt_handle, mac_for_pdev,
3484 				pdev->rxdma_mon_dst_ring[mac_id].hal_srng,
3485 				RXDMA_MONITOR_DST);
3486 			htt_srng_setup(soc->htt_handle, mac_for_pdev,
3487 				pdev->rxdma_mon_status_ring[mac_id].hal_srng,
3488 				RXDMA_MONITOR_STATUS);
3489 			htt_srng_setup(soc->htt_handle, mac_for_pdev,
3490 				pdev->rxdma_mon_desc_ring[mac_id].hal_srng,
3491 				RXDMA_MONITOR_DESC);
3492 			htt_srng_setup(soc->htt_handle, mac_for_pdev,
3493 				pdev->rxdma_err_dst_ring[mac_id].hal_srng,
3494 				RXDMA_DST);
3495 		}
3496 	}
3497 }
3498 #endif
3499 
3500 /*
3501  * dp_soc_attach_target_wifi3() - SOC initialization in the target
3502  * @cdp_soc: Datapath SOC handle
3503  */
3504 static int dp_soc_attach_target_wifi3(struct cdp_soc_t *cdp_soc)
3505 {
3506 	struct dp_soc *soc = (struct dp_soc *)cdp_soc;
3507 
3508 	htt_soc_attach_target(soc->htt_handle);
3509 
3510 	dp_rxdma_ring_config(soc);
3511 
3512 	DP_STATS_INIT(soc);
3513 
3514 	/* initialize work queue for stats processing */
3515 	qdf_create_work(0, &soc->htt_stats.work, htt_t2h_stats_handler, soc);
3516 
3517 	return 0;
3518 }
3519 
3520 /*
3521  * dp_soc_get_nss_cfg_wifi3() - SOC get nss config
3522  * @txrx_soc: Datapath SOC handle
3523  */
3524 static int dp_soc_get_nss_cfg_wifi3(struct cdp_soc_t *cdp_soc)
3525 {
3526 	struct dp_soc *dsoc = (struct dp_soc *)cdp_soc;
3527 	return wlan_cfg_get_dp_soc_nss_cfg(dsoc->wlan_cfg_ctx);
3528 }
3529 /*
3530  * dp_soc_set_nss_cfg_wifi3() - SOC set nss config
3531  * @txrx_soc: Datapath SOC handle
3532  * @nss_cfg: nss config
3533  */
3534 static void dp_soc_set_nss_cfg_wifi3(struct cdp_soc_t *cdp_soc, int config)
3535 {
3536 	struct dp_soc *dsoc = (struct dp_soc *)cdp_soc;
3537 	struct wlan_cfg_dp_soc_ctxt *wlan_cfg_ctx = dsoc->wlan_cfg_ctx;
3538 
3539 	wlan_cfg_set_dp_soc_nss_cfg(wlan_cfg_ctx, config);
3540 
3541 	/*
3542 	 * TODO: masked out based on the per offloaded radio
3543 	 */
3544 	if (config == dp_nss_cfg_dbdc) {
3545 		wlan_cfg_set_num_tx_desc_pool(wlan_cfg_ctx, 0);
3546 		wlan_cfg_set_num_tx_ext_desc_pool(wlan_cfg_ctx, 0);
3547 		wlan_cfg_set_num_tx_desc(wlan_cfg_ctx, 0);
3548 		wlan_cfg_set_num_tx_ext_desc(wlan_cfg_ctx, 0);
3549 	}
3550 
3551 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
3552 		  FL("nss-wifi<0> nss config is enabled"));
3553 }
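/*
 * Usage sketch (illustrative; the cdp_soc variable and the call site are
 * assumptions, in practice these handlers are reached through the
 * registered cdp ops). A DBDC NSS-offload configuration would look like:
 *
 *	dp_soc_set_nss_cfg_wifi3(cdp_soc, dp_nss_cfg_dbdc);
 *
 * after which dp_soc_get_nss_cfg_wifi3(cdp_soc) returns dp_nss_cfg_dbdc and
 * the host Tx descriptor pools are sized to zero, since transmit descriptors
 * for the offloaded radios are owned by NSS rather than the host.
 */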
3554 /*
3555  * dp_vdev_attach_wifi3() - attach txrx vdev
3556  * @txrx_pdev: Datapath PDEV handle
3557  * @vdev_mac_addr: MAC address of the virtual interface
3558  * @vdev_id: VDEV Id
3559  * @wlan_op_mode: VDEV operating mode
3560  *
3561  * Return: DP VDEV handle on success, NULL on failure
3562  */
3563 static struct cdp_vdev *dp_vdev_attach_wifi3(struct cdp_pdev *txrx_pdev,
3564 	uint8_t *vdev_mac_addr, uint8_t vdev_id, enum wlan_op_mode op_mode)
3565 {
3566 	struct dp_pdev *pdev = (struct dp_pdev *)txrx_pdev;
3567 	struct dp_soc *soc = pdev->soc;
3568 	struct dp_vdev *vdev = qdf_mem_malloc(sizeof(*vdev));
3569 
3570 	if (!vdev) {
3571 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3572 			FL("DP VDEV memory allocation failed"));
3573 		goto fail0;
3574 	}
3575 
3576 	vdev->pdev = pdev;
3577 	vdev->vdev_id = vdev_id;
3578 	vdev->opmode = op_mode;
3579 	vdev->osdev = soc->osdev;
3580 
3581 	vdev->osif_rx = NULL;
3582 	vdev->osif_rsim_rx_decap = NULL;
3583 	vdev->osif_get_key = NULL;
3584 	vdev->osif_rx_mon = NULL;
3585 	vdev->osif_tx_free_ext = NULL;
3586 	vdev->osif_vdev = NULL;
3587 
3588 	vdev->delete.pending = 0;
3589 	vdev->safemode = 0;
3590 	vdev->drop_unenc = 1;
3591 	vdev->sec_type = cdp_sec_type_none;
3592 #ifdef notyet
3593 	vdev->filters_num = 0;
3594 #endif
3595 
3596 	qdf_mem_copy(
3597 		&vdev->mac_addr.raw[0], vdev_mac_addr, OL_TXRX_MAC_ADDR_LEN);
3598 
3599 	vdev->tx_encap_type = wlan_cfg_pkt_type(soc->wlan_cfg_ctx);
3600 	vdev->rx_decap_type = wlan_cfg_pkt_type(soc->wlan_cfg_ctx);
3601 	vdev->dscp_tid_map_id = 0;
3602 	vdev->mcast_enhancement_en = 0;
3603 	vdev->raw_mode_war = wlan_cfg_get_raw_mode_war(soc->wlan_cfg_ctx);
3604 
3605 	/* TODO: Initialize default HTT meta data that will be used in
3606 	 * TCL descriptors for packets transmitted from this VDEV
3607 	 */
3608 
3609 	TAILQ_INIT(&vdev->peer_list);
3610 
3611 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
3612 	/* add this vdev into the pdev's list */
3613 	TAILQ_INSERT_TAIL(&pdev->vdev_list, vdev, vdev_list_elem);
3614 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
3615 	pdev->vdev_count++;
3616 
3617 	dp_tx_vdev_attach(vdev);
3618 
3619 
3620 	if ((soc->intr_mode == DP_INTR_POLL) &&
3621 			wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx) != 0) {
3622 		if (pdev->vdev_count == 1)
3623 			qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
3624 	}
3625 
3626 	dp_lro_hash_setup(soc);
3627 
3628 	/* LRO */
3629 	if (wlan_cfg_is_lro_enabled(soc->wlan_cfg_ctx) &&
3630 		wlan_op_mode_sta == vdev->opmode)
3631 		vdev->lro_enable = true;
3632 
3633 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
3634 		 "LRO: vdev_id %d lro_enable %d", vdev_id, vdev->lro_enable);
3635 
3636 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3637 		"Created vdev %pK (%pM)", vdev, vdev->mac_addr.raw);
3638 	DP_STATS_INIT(vdev);
3639 
3640 	if (wlan_op_mode_sta == vdev->opmode)
3641 		dp_peer_create_wifi3((struct cdp_vdev *)vdev,
3642 							vdev->mac_addr.raw,
3643 							NULL);
3644 
3645 	return (struct cdp_vdev *)vdev;
3646 
3647 fail0:
3648 	return NULL;
3649 }
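/*
 * Usage sketch (illustrative; the osif-layer variable names are
 * assumptions, and in practice these handlers are reached through the
 * registered cdp ops rather than called directly):
 *
 *	struct ol_txrx_ops ops = {0};
 *	struct cdp_vdev *vdev;
 *
 *	vdev = dp_vdev_attach_wifi3(txrx_pdev, mac_addr, vdev_id,
 *				    wlan_op_mode_sta);
 *	if (vdev)
 *		dp_vdev_register_wifi3(vdev, osif_vdev, ctrl_vdev, &ops);
 *
 * dp_vdev_register_wifi3() (below) then fills ops.tx.tx with the datapath
 * transmit entry point and copies the Rx callbacks into the DP vdev.
 */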
3650 
3651 /**
3652  * dp_vdev_register_wifi3() - Register VDEV operations from osif layer
3653  * @vdev: Datapath VDEV handle
3654  * @osif_vdev: OSIF vdev handle
3655  * @ctrl_vdev: UMAC vdev handle
3656  * @txrx_ops: Tx and Rx operations
3657  *
3658  * Return: void
3659  */
3660 static void dp_vdev_register_wifi3(struct cdp_vdev *vdev_handle,
3661 	void *osif_vdev, struct cdp_ctrl_objmgr_vdev *ctrl_vdev,
3662 	struct ol_txrx_ops *txrx_ops)
3663 {
3664 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
3665 	vdev->osif_vdev = osif_vdev;
3666 	vdev->ctrl_vdev = ctrl_vdev;
3667 	vdev->osif_rx = txrx_ops->rx.rx;
3668 	vdev->osif_rx_stack = txrx_ops->rx.rx_stack;
3669 	vdev->osif_rsim_rx_decap = txrx_ops->rx.rsim_rx_decap;
3670 	vdev->osif_get_key = txrx_ops->get_key;
3671 	vdev->osif_rx_mon = txrx_ops->rx.mon;
3672 	vdev->osif_tx_free_ext = txrx_ops->tx.tx_free_ext;
3673 #ifdef notyet
3674 #if ATH_SUPPORT_WAPI
3675 	vdev->osif_check_wai = txrx_ops->rx.wai_check;
3676 #endif
3677 #endif
3678 #ifdef UMAC_SUPPORT_PROXY_ARP
3679 	vdev->osif_proxy_arp = txrx_ops->proxy_arp;
3680 #endif
3681 	vdev->me_convert = txrx_ops->me_convert;
3682 
3683 	/* TODO: Enable the following once Tx code is integrated */
3684 	if (vdev->mesh_vdev)
3685 		txrx_ops->tx.tx = dp_tx_send_mesh;
3686 	else
3687 		txrx_ops->tx.tx = dp_tx_send;
3688 
3689 	txrx_ops->tx.tx_exception = dp_tx_send_exception;
3690 
3691 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW,
3692 		"DP Vdev Register success");
3693 }
3694 
3695 /**
3696  * dp_vdev_flush_peers() - Forcibly flush peers of vdev
3697  * @vdev: Datapath VDEV handle
3698  *
3699  * Return: void
3700  */
3701 static void dp_vdev_flush_peers(struct dp_vdev *vdev)
3702 {
3703 	struct dp_pdev *pdev = vdev->pdev;
3704 	struct dp_soc *soc = pdev->soc;
3705 	struct dp_peer *peer;
3706 	uint16_t *peer_ids;
3707 	uint8_t i = 0, j = 0;
3708 
3709 	peer_ids = qdf_mem_malloc(soc->max_peers * sizeof(peer_ids[0]));
3710 	if (!peer_ids) {
3711 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3712 			"DP alloc failure - unable to flush peers");
3713 		return;
3714 	}
3715 
3716 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
3717 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
3718 		for (i = 0; i < MAX_NUM_PEER_ID_PER_PEER; i++)
3719 			if (peer->peer_ids[i] != HTT_INVALID_PEER)
3720 				if (j < soc->max_peers)
3721 					peer_ids[j++] = peer->peer_ids[i];
3722 	}
3723 	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
3724 
3725 	for (i = 0; i < j ; i++)
3726 		dp_rx_peer_unmap_handler(soc, peer_ids[i]);
3727 
3728 	qdf_mem_free(peer_ids);
3729 
3730 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
3731 		FL("Flushed peers for vdev object %pK "), vdev);
3732 }
3733 
3734 /*
3735  * dp_vdev_detach_wifi3() - Detach txrx vdev
3736  * @txrx_vdev:		Datapath VDEV handle
3737  * @callback:		Callback OL_IF on completion of detach
3738  * @cb_context:	Callback context
3739  *
3740  */
3741 static void dp_vdev_detach_wifi3(struct cdp_vdev *vdev_handle,
3742 	ol_txrx_vdev_delete_cb callback, void *cb_context)
3743 {
3744 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
3745 	struct dp_pdev *pdev = vdev->pdev;
3746 	struct dp_soc *soc = pdev->soc;
3747 	struct dp_neighbour_peer *peer = NULL;
3748 
3749 	/* preconditions */
3750 	qdf_assert(vdev);
3751 
3752 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
3753 	/* remove the vdev from its parent pdev's list */
3754 	TAILQ_REMOVE(&pdev->vdev_list, vdev, vdev_list_elem);
3755 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
3756 
3757 	if (wlan_op_mode_sta == vdev->opmode)
3758 		dp_peer_delete_wifi3(vdev->vap_bss_peer, 0);
3759 
3760 	/*
3761 	 * If Target is hung, flush all peers before detaching vdev
3762 	 * this will free all references held due to missing
3763 	 * unmap commands from Target
3764 	 */
3765 	if (hif_get_target_status(soc->hif_handle) == TARGET_STATUS_RESET)
3766 		dp_vdev_flush_peers(vdev);
3767 
3768 	/*
3769 	 * Use peer_ref_mutex while accessing peer_list, in case
3770 	 * a peer is in the process of being removed from the list.
3771 	 */
3772 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
3773 	/* check that the vdev has no peers allocated */
3774 	if (!TAILQ_EMPTY(&vdev->peer_list)) {
3775 		/* debug print - will be removed later */
3776 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_WARN,
3777 			FL("not deleting vdev object %pK (%pM)"
3778 			" until deletion finishes for all its peers"),
3779 			vdev, vdev->mac_addr.raw);
3780 		/* indicate that the vdev needs to be deleted */
3781 		vdev->delete.pending = 1;
3782 		vdev->delete.callback = callback;
3783 		vdev->delete.context = cb_context;
3784 		qdf_spin_unlock_bh(&soc->peer_ref_mutex);
3785 		return;
3786 	}
3787 	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
3788 
3789 	qdf_spin_lock_bh(&pdev->neighbour_peer_mutex);
3790 	TAILQ_FOREACH(peer, &pdev->neighbour_peers_list,
3791 		      neighbour_peer_list_elem) {
3792 		QDF_ASSERT(peer->vdev != vdev);
3793 	}
3794 	qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex);
3795 
3796 	dp_tx_vdev_detach(vdev);
3797 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
3798 		FL("deleting vdev object %pK (%pM)"), vdev, vdev->mac_addr.raw);
3799 
3800 	qdf_mem_free(vdev);
3801 
3802 	if (callback)
3803 		callback(cb_context);
3804 }
3805 
3806 /*
3807  * dp_peer_delete_ast_entries(): Delete all AST entries for a peer
3808  * @soc - datapath soc handle
3809  * @peer - datapath peer handle
3810  *
3811  * Delete the AST entries belonging to a peer
3812  */
3813 #ifdef FEATURE_AST
3814 static inline void dp_peer_delete_ast_entries(struct dp_soc *soc,
3815 					      struct dp_peer *peer)
3816 {
3817 	struct dp_ast_entry *ast_entry, *temp_ast_entry;
3818 
3819 	qdf_spin_lock_bh(&soc->ast_lock);
3820 	DP_PEER_ITERATE_ASE_LIST(peer, ast_entry, temp_ast_entry)
3821 		dp_peer_del_ast(soc, ast_entry);
3822 
3823 	peer->self_ast_entry = NULL;
3824 	TAILQ_INIT(&peer->ast_entry_list);
3825 	qdf_spin_unlock_bh(&soc->ast_lock);
3826 }
3827 #else
3828 static inline void dp_peer_delete_ast_entries(struct dp_soc *soc,
3829 					      struct dp_peer *peer)
3830 {
3831 }
3832 #endif
3833 
3834 #if ATH_SUPPORT_WRAP
3835 static inline struct dp_peer *dp_peer_can_reuse(struct dp_vdev *vdev,
3836 						uint8_t *peer_mac_addr)
3837 {
3838 	struct dp_peer *peer;
3839 
3840 	peer = dp_peer_find_hash_find(vdev->pdev->soc, peer_mac_addr,
3841 				      0, vdev->vdev_id);
3842 	if (!peer)
3843 		return NULL;
3844 
3845 	if (peer->bss_peer)
3846 		return peer;
3847 
3848 	qdf_atomic_dec(&peer->ref_cnt);
3849 	return NULL;
3850 }
3851 #else
3852 static inline struct dp_peer *dp_peer_can_reuse(struct dp_vdev *vdev,
3853 						uint8_t *peer_mac_addr)
3854 {
3855 	struct dp_peer *peer;
3856 
3857 	peer = dp_peer_find_hash_find(vdev->pdev->soc, peer_mac_addr,
3858 				      0, vdev->vdev_id);
3859 	if (!peer)
3860 		return NULL;
3861 
3862 	if (peer->bss_peer && (peer->vdev->vdev_id == vdev->vdev_id))
3863 		return peer;
3864 
3865 	qdf_atomic_dec(&peer->ref_cnt);
3866 	return NULL;
3867 }
3868 #endif
3869 
3870 /*
3871  * dp_peer_create_wifi3() - attach txrx peer
3872  * @vdev_handle: Datapath VDEV handle
3873  * @peer_mac_addr: Peer MAC address
3874  * @ctrl_peer: Control path peer handle
3875  * Return: DP peer handle on success, NULL on failure
3876  */
3877 static void *dp_peer_create_wifi3(struct cdp_vdev *vdev_handle,
3878 		uint8_t *peer_mac_addr, struct cdp_ctrl_objmgr_peer *ctrl_peer)
3879 {
3880 	struct dp_peer *peer;
3881 	int i;
3882 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
3883 	struct dp_pdev *pdev;
3884 	struct dp_soc *soc;
3885 	struct dp_ast_entry *ast_entry;
3886 	enum cdp_txrx_ast_entry_type ast_type = CDP_TXRX_AST_TYPE_STATIC;
3887 
3888 	/* preconditions */
3889 	qdf_assert(vdev);
3890 	qdf_assert(peer_mac_addr);
3891 
3892 	pdev = vdev->pdev;
3893 	soc = pdev->soc;
3894 
3895 	/*
3896 	 * If a peer entry with given MAC address already exists,
3897 	 * reuse the peer and reset the state of peer.
3898 	 */
3899 	peer = dp_peer_can_reuse(vdev, peer_mac_addr);
3900 
3901 	if (peer) {
3902 		peer->delete_in_progress = false;
3903 
3904 		dp_peer_delete_ast_entries(soc, peer);
3905 
3906 		if ((vdev->opmode == wlan_op_mode_sta) &&
3907 		    !qdf_mem_cmp(peer_mac_addr, &vdev->mac_addr.raw[0],
3908 		     DP_MAC_ADDR_LEN)) {
3909 			ast_type = CDP_TXRX_AST_TYPE_SELF;
3910 		}
3911 
3912 		dp_peer_add_ast(soc, peer, peer_mac_addr, ast_type, 0);
3913 
3914 		/*
3915 		 * Control path maintains a node count which is incremented
3916 		 * for every new peer create command. Since a new peer is not
3917 		 * being created and the earlier reference is reused here,
3918 		 * a peer_unref_delete event is sent to the control path to
3919 		 * decrement the count back.
3920 		 */
3921 		if (soc->cdp_soc.ol_ops->peer_unref_delete) {
3922 			soc->cdp_soc.ol_ops->peer_unref_delete(pdev->ctrl_pdev,
3923 				vdev->vdev_id, peer->mac_addr.raw);
3924 		}
3925 		peer->ctrl_peer = ctrl_peer;
3926 
3927 		dp_local_peer_id_alloc(pdev, peer);
3928 		DP_STATS_INIT(peer);
3929 
3930 		return (void *)peer;
3931 	} else {
3932 		/*
3933 		 * When a STA roams from RPTR AP to ROOT AP and vice versa, we
3934 		 * need to remove the AST entry which was earlier added as a WDS
3935 		 * entry.
3936 		 * If an AST entry exists, but no peer entry exists with the
3937 		 * given MAC address, we can deduce it to be a WDS entry
3938 		 */
3939 		qdf_spin_lock_bh(&soc->ast_lock);
3940 		ast_entry = dp_peer_ast_hash_find(soc, peer_mac_addr);
3941 		if (ast_entry)
3942 			dp_peer_del_ast(soc, ast_entry);
3943 		qdf_spin_unlock_bh(&soc->ast_lock);
3944 	}
3945 
3946 #ifdef notyet
3947 	peer = (struct dp_peer *)qdf_mempool_alloc(soc->osdev,
3948 		soc->mempool_ol_ath_peer);
3949 #else
3950 	peer = (struct dp_peer *)qdf_mem_malloc(sizeof(*peer));
3951 #endif
3952 
3953 	if (!peer)
3954 		return NULL; /* failure */
3955 
3956 	qdf_mem_zero(peer, sizeof(struct dp_peer));
3957 
3958 	TAILQ_INIT(&peer->ast_entry_list);
3959 
3960 	/* store provided params */
3961 	peer->vdev = vdev;
3962 	peer->ctrl_peer = ctrl_peer;
3963 
3964 	if ((vdev->opmode == wlan_op_mode_sta) &&
3965 	    !qdf_mem_cmp(peer_mac_addr, &vdev->mac_addr.raw[0],
3966 			 DP_MAC_ADDR_LEN)) {
3967 		ast_type = CDP_TXRX_AST_TYPE_SELF;
3968 	}
3969 
3970 	dp_peer_add_ast(soc, peer, peer_mac_addr, ast_type, 0);
3971 
3972 	qdf_spinlock_create(&peer->peer_info_lock);
3973 
3974 	qdf_mem_copy(
3975 		&peer->mac_addr.raw[0], peer_mac_addr, OL_TXRX_MAC_ADDR_LEN);
3976 
3977 	/* TODO: See if rx_opt_proc is really required */
3978 	peer->rx_opt_proc = soc->rx_opt_proc;
3979 
3980 	/* initialize the peer_id */
3981 	for (i = 0; i < MAX_NUM_PEER_ID_PER_PEER; i++)
3982 		peer->peer_ids[i] = HTT_INVALID_PEER;
3983 
3984 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
3985 
3986 	qdf_atomic_init(&peer->ref_cnt);
3987 
3988 	/* keep one reference for attach */
3989 	qdf_atomic_inc(&peer->ref_cnt);
3990 
3991 	/* add this peer into the vdev's list */
3992 	if (wlan_op_mode_sta == vdev->opmode)
3993 		TAILQ_INSERT_HEAD(&vdev->peer_list, peer, peer_list_elem);
3994 	else
3995 		TAILQ_INSERT_TAIL(&vdev->peer_list, peer, peer_list_elem);
3996 
3997 	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
3998 
3999 	/* TODO: See if hash based search is required */
4000 	dp_peer_find_hash_add(soc, peer);
4001 
4002 	/* Initialize the peer state */
4003 	peer->state = OL_TXRX_PEER_STATE_DISC;
4004 
4005 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
4006 		"vdev %pK created peer %pK (%pM) ref_cnt: %d",
4007 		vdev, peer, peer->mac_addr.raw,
4008 		qdf_atomic_read(&peer->ref_cnt));
4009 	/*
4010 	 * For every peer MAP message, search and set bss_peer if the MAC
4011 	 * matches the vdev MAC
4011 	 */
4012 	if (memcmp(peer->mac_addr.raw, vdev->mac_addr.raw, 6) == 0) {
4013 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
4014 			"vdev bss_peer!!!!");
4015 		peer->bss_peer = 1;
4016 		vdev->vap_bss_peer = peer;
4017 	}
4018 	for (i = 0; i < DP_MAX_TIDS; i++)
4019 		qdf_spinlock_create(&peer->rx_tid[i].tid_lock);
4020 
4021 	dp_local_peer_id_alloc(pdev, peer);
4022 	DP_STATS_INIT(peer);
4023 	return (void *)peer;
4024 }
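/*
 * Reference counting note for the peer created above: the reference taken
 * here for attach is dropped by dp_peer_delete_wifi3(), while the second
 * reference added when the target's PEER_MAP message arrives is dropped on
 * PEER_UNMAP; the peer memory itself is freed only when the count reaches
 * zero in dp_peer_unref_delete().
 */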
4025 
4026 /*
4027  * dp_peer_setup_wifi3() - initialize the peer
4028  * @vdev_hdl: virtual device object
4029  * @peer: Peer object
4030  *
4031  * Return: void
4032  */
4033 static void dp_peer_setup_wifi3(struct cdp_vdev *vdev_hdl, void *peer_hdl)
4034 {
4035 	struct dp_peer *peer = (struct dp_peer *)peer_hdl;
4036 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_hdl;
4037 	struct dp_pdev *pdev;
4038 	struct dp_soc *soc;
4039 	bool hash_based = 0;
4040 	enum cdp_host_reo_dest_ring reo_dest;
4041 
4042 	/* preconditions */
4043 	qdf_assert(vdev);
4044 	qdf_assert(peer);
4045 
4046 	pdev = vdev->pdev;
4047 	soc = pdev->soc;
4048 
4049 	peer->last_assoc_rcvd = 0;
4050 	peer->last_disassoc_rcvd = 0;
4051 	peer->last_deauth_rcvd = 0;
4052 
4053 	/*
4054 	 * hash based steering is disabled for Radios which are offloaded
4055 	 * to NSS
4056 	 */
4057 	if (!wlan_cfg_get_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx))
4058 		hash_based = wlan_cfg_is_rx_hash_enabled(soc->wlan_cfg_ctx);
4059 
4060 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
4061 		FL("hash based steering for pdev: %d is %d"),
4062 		pdev->pdev_id, hash_based);
4063 
4064 	/*
4065 	 * The line below ensures the proper reo_dest ring is chosen for
4066 	 * cases where the Toeplitz hash cannot be generated (e.g. non TCP/UDP)
4067 	 */
4068 	reo_dest = pdev->reo_dest;
4069 
4070 	if (soc->cdp_soc.ol_ops->peer_set_default_routing) {
4071 		/* TODO: Check the destination ring number to be passed to FW */
4072 		soc->cdp_soc.ol_ops->peer_set_default_routing(
4073 				pdev->ctrl_pdev, peer->mac_addr.raw,
4074 				peer->vdev->vdev_id, hash_based, reo_dest);
4075 	}
4076 
4077 	dp_peer_rx_init(pdev, peer);
4078 	return;
4079 }
4080 
4081 /*
4082  * dp_set_vdev_tx_encap_type() - set the encap type of the vdev
4083  * @vdev_handle: virtual device object
4084  * @htt_pkt_type: type of pkt
4085  *
4086  * Return: void
4087  */
4088 static void dp_set_vdev_tx_encap_type(struct cdp_vdev *vdev_handle,
4089 	 enum htt_cmn_pkt_type val)
4090 {
4091 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
4092 	vdev->tx_encap_type = val;
4093 }
4094 
4095 /*
4096  * dp_set_vdev_rx_decap_type() - set the decap type of the vdev
4097  * @vdev_handle: virtual device object
4098  * @htt_pkt_type: type of pkt
4099  *
4100  * Return: void
4101  */
4102 static void dp_set_vdev_rx_decap_type(struct cdp_vdev *vdev_handle,
4103 	 enum htt_cmn_pkt_type val)
4104 {
4105 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
4106 	vdev->rx_decap_type = val;
4107 }
4108 
4109 /*
4110  * dp_set_ba_aging_timeout() - set ba aging timeout per AC
4111  * @txrx_soc: cdp soc handle
4112  * @ac: Access category
4113  * @value: timeout value in millisec
4114  *
4115  * Return: void
4116  */
4117 static void dp_set_ba_aging_timeout(struct cdp_soc_t *txrx_soc,
4118 				    uint8_t ac, uint32_t value)
4119 {
4120 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
4121 
4122 	hal_set_ba_aging_timeout(soc->hal_soc, ac, value);
4123 }
4124 
4125 /*
4126  * dp_get_ba_aging_timeout() - get ba aging timeout per AC
4127  * @txrx_soc: cdp soc handle
4128  * @ac: access category
4129  * @value: timeout value in millisec
4130  *
4131  * Return: void
4132  */
4133 static void dp_get_ba_aging_timeout(struct cdp_soc_t *txrx_soc,
4134 				    uint8_t ac, uint32_t *value)
4135 {
4136 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
4137 
4138 	hal_get_ba_aging_timeout(soc->hal_soc, ac, value);
4139 }
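/*
 * Usage sketch (illustrative; WME_AC_VI is assumed to be an access
 * category index available to the caller, and in practice these handlers
 * are reached through the registered cdp ops):
 *
 *	uint32_t timeout_ms;
 *
 *	dp_set_ba_aging_timeout(txrx_soc, WME_AC_VI, 2000);
 *	dp_get_ba_aging_timeout(txrx_soc, WME_AC_VI, &timeout_ms);
 *
 * Both calls simply forward to the HAL (hal_set_ba_aging_timeout /
 * hal_get_ba_aging_timeout), which programs the per-AC BA aging timeout.
 */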
4140 
4141 /*
4142  * dp_set_pdev_reo_dest() - set the reo destination ring for this pdev
4143  * @pdev_handle: physical device object
4144  * @val: reo destination ring index (1 - 4)
4145  *
4146  * Return: void
4147  */
4148 static void dp_set_pdev_reo_dest(struct cdp_pdev *pdev_handle,
4149 	 enum cdp_host_reo_dest_ring val)
4150 {
4151 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
4152 
4153 	if (pdev)
4154 		pdev->reo_dest = val;
4155 }
4156 
4157 /*
4158  * dp_get_pdev_reo_dest() - get the reo destination for this pdev
4159  * @pdev_handle: physical device object
4160  *
4161  * Return: reo destination ring index
4162  */
4163 static enum cdp_host_reo_dest_ring
4164 dp_get_pdev_reo_dest(struct cdp_pdev *pdev_handle)
4165 {
4166 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
4167 
4168 	if (pdev)
4169 		return pdev->reo_dest;
4170 	else
4171 		return cdp_host_reo_dest_ring_unknown;
4172 }
4173 
4174 #ifdef QCA_SUPPORT_SON
4175 static void dp_son_peer_authorize(struct dp_peer *peer)
4176 {
4177 	struct dp_soc *soc;
4178 	soc = peer->vdev->pdev->soc;
4179 	peer->peer_bs_inact_flag = 0;
4180 	peer->peer_bs_inact = soc->pdev_bs_inact_reload;
4181 	return;
4182 }
4183 #else
4184 static void dp_son_peer_authorize(struct dp_peer *peer)
4185 {
4186 	return;
4187 }
4188 #endif
4189 /*
4190  * dp_set_filter_neighbour_peers() - set filter neighbour peers for smart mesh
4191  * @pdev_handle: device object
4192  * @val: value to be set
4193  *
4194  * Return: 0 on success
4195  */
4196 static int dp_set_filter_neighbour_peers(struct cdp_pdev *pdev_handle,
4197 	 uint32_t val)
4198 {
4199 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
4200 
4201 	/* Enable/Disable smart mesh filtering. This flag will be checked
4202 	 * during rx processing to check if packets are from NAC clients.
4203 	 */
4204 	pdev->filter_neighbour_peers = val;
4205 	return 0;
4206 }
4207 
4208 /*
4209  * dp_update_filter_neighbour_peers() - set neighbour peer (NAC client)
4210  * address for smart mesh filtering
4211  * @vdev_handle: virtual device object
4212  * @cmd: Add/Del command
4213  * @macaddr: NAC client MAC address
4214  *
4215  * Return: 1 on success, 0 on failure
4216  */
4217 static int dp_update_filter_neighbour_peers(struct cdp_vdev *vdev_handle,
4218 					    uint32_t cmd, uint8_t *macaddr)
4219 {
4220 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
4221 	struct dp_pdev *pdev = vdev->pdev;
4222 	struct dp_neighbour_peer *peer = NULL;
4223 
4224 	if (!macaddr)
4225 		goto fail0;
4226 
4227 	/* Store address of NAC (neighbour peer) which will be checked
4228 	 * against TA of received packets.
4229 	 */
4230 	if (cmd == DP_NAC_PARAM_ADD) {
4231 		peer = (struct dp_neighbour_peer *) qdf_mem_malloc(
4232 				sizeof(*peer));
4233 
4234 		if (!peer) {
4235 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
4236 				FL("DP neighbour peer node memory allocation failed"));
4237 			goto fail0;
4238 		}
4239 
4240 		qdf_mem_copy(&peer->neighbour_peers_macaddr.raw[0],
4241 			macaddr, DP_MAC_ADDR_LEN);
4242 		peer->vdev = vdev;
4243 
4244 		qdf_spin_lock_bh(&pdev->neighbour_peer_mutex);
4245 
4246 		/* add this neighbour peer into the list */
4247 		TAILQ_INSERT_TAIL(&pdev->neighbour_peers_list, peer,
4248 				neighbour_peer_list_elem);
4249 		qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex);
4250 
4251 		/* first neighbour */
4252 		if (!pdev->neighbour_peers_added) {
4253 			if (!pdev->mcopy_mode && !pdev->enhanced_stats_en)
4254 				dp_ppdu_ring_cfg(pdev);
4255 			pdev->neighbour_peers_added = true;
4256 		}
4257 		return 1;
4258 
4259 	} else if (cmd == DP_NAC_PARAM_DEL) {
4260 		qdf_spin_lock_bh(&pdev->neighbour_peer_mutex);
4261 		TAILQ_FOREACH(peer, &pdev->neighbour_peers_list,
4262 				neighbour_peer_list_elem) {
4263 			if (!qdf_mem_cmp(&peer->neighbour_peers_macaddr.raw[0],
4264 				macaddr, DP_MAC_ADDR_LEN)) {
4265 				/* delete this peer from the list */
4266 				TAILQ_REMOVE(&pdev->neighbour_peers_list,
4267 					peer, neighbour_peer_list_elem);
4268 				qdf_mem_free(peer);
4269 				break;
4270 			}
4271 		}
4272 		/* last neighbour deleted */
4273 		if (TAILQ_EMPTY(&pdev->neighbour_peers_list))
4274 			pdev->neighbour_peers_added = false;
4275 
4276 		qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex);
4277 
4278 		if (!pdev->mcopy_mode && !pdev->neighbour_peers_added &&
4279 		    !pdev->enhanced_stats_en)
4280 			dp_ppdu_ring_reset(pdev);
4281 		return 1;
4282 
4283 	}
4284 
4285 fail0:
4286 	return 0;
4287 }
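/*
 * Flow note for the NAC (neighbour peer) handling above: the first
 * DP_NAC_PARAM_ADD also configures the PPDU status ring via
 * dp_ppdu_ring_cfg() (unless m_copy mode or enhanced stats already did),
 * so that the TA of received packets can be matched against the stored
 * neighbour MAC addresses; deleting the last entry with DP_NAC_PARAM_DEL
 * reverts the ring configuration through dp_ppdu_ring_reset() under the
 * same conditions.
 */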
4288 
4289 /*
4290  * dp_get_sec_type() - Get the security type
4291  * @peer:		Datapath peer handle
4292  * @sec_idx:    Security id (mcast, ucast)
4293  *
4294  * Return: Security type
4295  */
4296 static int dp_get_sec_type(struct cdp_peer *peer, uint8_t sec_idx)
4297 {
4298 	struct dp_peer *dpeer = (struct dp_peer *)peer;
4299 
4300 	return dpeer->security[sec_idx].sec_type;
4301 }
4302 
4303 /*
4304  * dp_peer_authorize() - authorize txrx peer
4305  * @peer_handle:		Datapath peer handle
4306  * @authorize: authorize flag (1 to authorize the peer, 0 otherwise)
4307  * Return: void
4308  */
4309 static void dp_peer_authorize(struct cdp_peer *peer_handle, uint32_t authorize)
4310 {
4311 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
4312 	struct dp_soc *soc;
4313 
4314 	if (peer != NULL) {
4315 		soc = peer->vdev->pdev->soc;
4316 		qdf_spin_lock_bh(&soc->peer_ref_mutex);
4317 		dp_son_peer_authorize(peer);
4318 		peer->authorize = authorize ? 1 : 0;
4319 		qdf_spin_unlock_bh(&soc->peer_ref_mutex);
4320 	}
4321 }
4322 
4323 #ifdef QCA_SUPPORT_SON
4324 /*
4325  * dp_txrx_update_inact_threshold() - Update inact timer threshold
4326  * @pdev_handle: Device handle
4327  * @new_threshold : updated threshold value
4328  *
4329  */
4330 static void
4331 dp_txrx_update_inact_threshold(struct cdp_pdev *pdev_handle,
4332 			       u_int16_t new_threshold)
4333 {
4334 	struct dp_vdev *vdev;
4335 	struct dp_peer *peer;
4336 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
4337 	struct dp_soc *soc = pdev->soc;
4338 	u_int16_t old_threshold = soc->pdev_bs_inact_reload;
4339 
4340 	if (old_threshold == new_threshold)
4341 		return;
4342 
4343 	soc->pdev_bs_inact_reload = new_threshold;
4344 
4345 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
4346 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
4347 	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
4348 		if (vdev->opmode != wlan_op_mode_ap)
4349 			continue;
4350 
4351 		TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
4352 			if (!peer->authorize)
4353 				continue;
4354 
4355 			if (old_threshold - peer->peer_bs_inact >=
4356 					new_threshold) {
4357 				dp_mark_peer_inact((void *)peer, true);
4358 				peer->peer_bs_inact = 0;
4359 			} else {
4360 				peer->peer_bs_inact = new_threshold -
4361 					(old_threshold - peer->peer_bs_inact);
4362 			}
4363 		}
4364 	}
4365 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
4366 	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
4367 }
4368 
4369 /**
4370  * dp_txrx_reset_inact_count(): Reset inact count
4371  * @pdev_handle - device handle
4372  *
4373  * Return: void
4374  */
4375 static void
4376 dp_txrx_reset_inact_count(struct cdp_pdev *pdev_handle)
4377 {
4378 	struct dp_vdev *vdev = NULL;
4379 	struct dp_peer *peer = NULL;
4380 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
4381 	struct dp_soc *soc = pdev->soc;
4382 
4383 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
4384 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
4385 	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
4386 		if (vdev->opmode != wlan_op_mode_ap)
4387 			continue;
4388 
4389 		TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
4390 			if (!peer->authorize)
4391 				continue;
4392 
4393 			peer->peer_bs_inact = soc->pdev_bs_inact_reload;
4394 		}
4395 	}
4396 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
4397 	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
4398 }
4399 
4400 /**
4401  * dp_set_inact_params(): set inactivity params
4402  * @pdev_handle - device handle
4403  * @inact_check_interval - inactivity interval
4404  * @inact_normal - Inactivity normal
4405  * @inact_overload - Inactivity overload
4406  *
4407  * Return: bool
4408  */
4409 bool dp_set_inact_params(struct cdp_pdev *pdev_handle,
4410 			 u_int16_t inact_check_interval,
4411 			 u_int16_t inact_normal, u_int16_t inact_overload)
4412 {
4413 	struct dp_soc *soc;
4414 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
4415 
4416 	if (!pdev)
4417 		return false;
4418 
4419 	soc = pdev->soc;
4420 	if (!soc)
4421 		return false;
4422 
4423 	soc->pdev_bs_inact_interval = inact_check_interval;
4424 	soc->pdev_bs_inact_normal = inact_normal;
4425 	soc->pdev_bs_inact_overload = inact_overload;
4426 
4427 	dp_txrx_update_inact_threshold((struct cdp_pdev *)pdev,
4428 					soc->pdev_bs_inact_normal);
4429 
4430 	return true;
4431 }
4432 
4433 /**
4434  * dp_start_inact_timer(): Inactivity timer start
4435  * @pdev_handle - device handle
4436  * @enable - Inactivity timer start/stop
4437  *
4438  * Return: bool
4439  */
4440 bool dp_start_inact_timer(struct cdp_pdev *pdev_handle, bool enable)
4441 {
4442 	struct dp_soc *soc;
4443 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
4444 
4445 	if (!pdev)
4446 		return false;
4447 
4448 	soc = pdev->soc;
4449 	if (!soc)
4450 		return false;
4451 
4452 	if (enable) {
4453 		dp_txrx_reset_inact_count((struct cdp_pdev *)pdev);
4454 		qdf_timer_mod(&soc->pdev_bs_inact_timer,
4455 			      soc->pdev_bs_inact_interval * 1000);
4456 	} else {
4457 		qdf_timer_stop(&soc->pdev_bs_inact_timer);
4458 	}
4459 
4460 	return true;
4461 }
4462 
4463 /**
4464  * dp_set_overload(): Set inactivity overload
4465  * @pdev_handle - device handle
4466  * @overload - overload status
4467  *
4468  * Return: void
4469  */
4470 void dp_set_overload(struct cdp_pdev *pdev_handle, bool overload)
4471 {
4472 	struct dp_soc *soc;
4473 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
4474 
4475 	if (!pdev)
4476 		return;
4477 
4478 	soc = pdev->soc;
4479 	if (!soc)
4480 		return;
4481 
4482 	dp_txrx_update_inact_threshold((struct cdp_pdev *)pdev,
4483 			overload ? soc->pdev_bs_inact_overload :
4484 			soc->pdev_bs_inact_normal);
4485 }
4486 
4487 /**
4488  * dp_peer_is_inact(): check whether peer is inactive
4489  * @peer_handle - datapath peer handle
4490  *
4491  * Return: bool
4492  */
4493 bool dp_peer_is_inact(void *peer_handle)
4494 {
4495 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
4496 
4497 	if (!peer)
4498 		return false;
4499 
4500 	return peer->peer_bs_inact_flag == 1;
4501 }
4502 
4503 /**
4504  * dp_init_inact_timer: initialize the inact timer
4505  * @soc - SOC handle
4506  *
4507  * Return: void
4508  */
4509 void dp_init_inact_timer(struct dp_soc *soc)
4510 {
4511 	qdf_timer_init(soc->osdev, &soc->pdev_bs_inact_timer,
4512 		dp_txrx_peer_find_inact_timeout_handler,
4513 		(void *)soc, QDF_TIMER_TYPE_WAKE_APPS);
4514 }
4515 
4516 #else
4517 
4518 bool dp_set_inact_params(struct cdp_pdev *pdev, u_int16_t inact_check_interval,
4519 			 u_int16_t inact_normal, u_int16_t inact_overload)
4520 {
4521 	return false;
4522 }
4523 
4524 bool dp_start_inact_timer(struct cdp_pdev *pdev, bool enable)
4525 {
4526 	return false;
4527 }
4528 
4529 void dp_set_overload(struct cdp_pdev *pdev, bool overload)
4530 {
4531 	return;
4532 }
4533 
4534 void dp_init_inact_timer(struct dp_soc *soc)
4535 {
4536 	return;
4537 }
4538 
4539 bool dp_peer_is_inact(void *peer)
4540 {
4541 	return false;
4542 }
4543 #endif
4544 
4545 /*
4546  * dp_peer_unref_delete() - unref and delete peer
4547  * @peer_handle:		Datapath peer handle
4548  *
4549  */
4550 void dp_peer_unref_delete(void *peer_handle)
4551 {
4552 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
4553 	struct dp_peer *bss_peer = NULL;
4554 	struct dp_vdev *vdev = peer->vdev;
4555 	struct dp_pdev *pdev = vdev->pdev;
4556 	struct dp_soc *soc = pdev->soc;
4557 	struct dp_peer *tmppeer;
4558 	int found = 0;
4559 	uint16_t peer_id;
4560 	uint16_t vdev_id;
4561 
4562 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
4563 		  "%s: peer %pK ref_cnt(before decrement): %d\n", __func__,
4564 		  peer, qdf_atomic_read(&peer->ref_cnt));
4565 	/*
4566 	 * Hold the lock all the way from checking if the peer ref count
4567 	 * is zero until the peer references are removed from the hash
4568 	 * table and vdev list (if the peer ref count is zero).
4569 	 * This protects against a new HL tx operation starting to use the
4570 	 * peer object just after this function concludes it's done being used.
4571 	 * Furthermore, the lock needs to be held while checking whether the
4572 	 * vdev's list of peers is empty, to make sure that list is not modified
4573 	 * concurrently with the empty check.
4574 	 */
4575 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
4576 	if (qdf_atomic_dec_and_test(&peer->ref_cnt)) {
4577 		peer_id = peer->peer_ids[0];
4578 		vdev_id = vdev->vdev_id;
4579 
4580 		/*
4581 		 * Make sure that the reference to the peer in
4582 		 * peer object map is removed
4583 		 */
4584 		if (peer_id != HTT_INVALID_PEER)
4585 			soc->peer_id_to_obj_map[peer_id] = NULL;
4586 
4587 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
4588 			"Deleting peer %pK (%pM)", peer, peer->mac_addr.raw);
4589 
4590 		/* remove the reference to the peer from the hash table */
4591 		dp_peer_find_hash_remove(soc, peer);
4592 
4593 		qdf_spin_lock_bh(&soc->ast_lock);
4594 		if (peer->self_ast_entry) {
4595 			dp_peer_del_ast(soc, peer->self_ast_entry);
4596 			peer->self_ast_entry = NULL;
4597 		}
4598 		qdf_spin_unlock_bh(&soc->ast_lock);
4599 
4600 		TAILQ_FOREACH(tmppeer, &peer->vdev->peer_list, peer_list_elem) {
4601 			if (tmppeer == peer) {
4602 				found = 1;
4603 				break;
4604 			}
4605 		}
4606 		if (found) {
4607 			TAILQ_REMOVE(&peer->vdev->peer_list, peer,
4608 				peer_list_elem);
4609 		} else {
4610 			/*Ignoring the remove operation as peer not found*/
4611 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
4612 				  "peer:%pK not found in vdev:%pK peerlist:%pK",
4613 				  peer, vdev, &peer->vdev->peer_list);
4614 		}
4615 
4616 		/* cleanup the peer data */
4617 		dp_peer_cleanup(vdev, peer);
4618 
4619 		/* check whether the parent vdev has no peers left */
4620 		if (TAILQ_EMPTY(&vdev->peer_list)) {
4621 			/*
4622 			 * Now that there are no references to the peer, we can
4623 			 * release the peer reference lock.
4624 			 */
4625 			qdf_spin_unlock_bh(&soc->peer_ref_mutex);
4626 			/*
4627 			 * Check if the parent vdev was waiting for its peers
4628 			 * to be deleted, in order for it to be deleted too.
4629 			 */
4630 			if (vdev->delete.pending) {
4631 				ol_txrx_vdev_delete_cb vdev_delete_cb =
4632 					vdev->delete.callback;
4633 				void *vdev_delete_context =
4634 					vdev->delete.context;
4635 
4636 				QDF_TRACE(QDF_MODULE_ID_DP,
4637 					QDF_TRACE_LEVEL_INFO_HIGH,
4638 					FL("deleting vdev object %pK (%pM)"
4639 					" - its last peer is done"),
4640 					vdev, vdev->mac_addr.raw);
4641 				/* all peers are gone, go ahead and delete it */
4642 				dp_tx_flow_pool_unmap_handler(pdev, vdev_id,
4643 								FLOW_TYPE_VDEV,
4644 								vdev_id);
4645 				dp_tx_vdev_detach(vdev);
4646 				QDF_TRACE(QDF_MODULE_ID_DP,
4647 					QDF_TRACE_LEVEL_INFO_HIGH,
4648 					FL("deleting vdev object %pK (%pM)"),
4649 					vdev, vdev->mac_addr.raw);
4650 
4651 				qdf_mem_free(vdev);
4652 				vdev = NULL;
4653 				if (vdev_delete_cb)
4654 					vdev_delete_cb(vdev_delete_context);
4655 			}
4656 		} else {
4657 			qdf_spin_unlock_bh(&soc->peer_ref_mutex);
4658 		}
4659 
4660 		if (vdev) {
4661 			if (vdev->vap_bss_peer == peer) {
4662 				vdev->vap_bss_peer = NULL;
4663 			}
4664 		}
4665 
4666 		if (soc->cdp_soc.ol_ops->peer_unref_delete) {
4667 			soc->cdp_soc.ol_ops->peer_unref_delete(pdev->ctrl_pdev,
4668 					vdev_id, peer->mac_addr.raw);
4669 		}
4670 
4671 		if (!vdev || !vdev->vap_bss_peer) {
4672 			goto free_peer;
4673 		}
4674 
4675 #ifdef notyet
4676 		qdf_mempool_free(soc->osdev, soc->mempool_ol_ath_peer, peer);
4677 #else
4678 		bss_peer = vdev->vap_bss_peer;
4679 		DP_UPDATE_STATS(vdev, peer);
4680 
4681 free_peer:
4682 		qdf_mem_free(peer);
4683 
4684 #endif
4685 	} else {
4686 		qdf_spin_unlock_bh(&soc->peer_ref_mutex);
4687 	}
4688 }
4689 
4690 /*
4691  * dp_peer_delete_wifi3() - Detach txrx peer
4692  * @peer_handle: Datapath peer handle
4693  * @bitmap: bitmap indicating special handling of request.
4694  *
4695  */
4696 static void dp_peer_delete_wifi3(void *peer_handle, uint32_t bitmap)
4697 {
4698 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
4699 
4700 	/* redirect the peer's rx delivery function to point to a
4701 	 * discard func
4702 	 */
4703 
4704 	peer->rx_opt_proc = dp_rx_discard;
4705 	peer->ctrl_peer = NULL;
4706 
4707 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
4708 		FL("peer %pK (%pM)"),  peer, peer->mac_addr.raw);
4709 
4710 	dp_local_peer_id_free(peer->vdev->pdev, peer);
4711 	qdf_spinlock_destroy(&peer->peer_info_lock);
4712 
4713 	/*
4714 	 * Remove the reference added during peer_attach.
4715 	 * The peer will still be left allocated until the
4716 	 * PEER_UNMAP message arrives to remove the other
4717 	 * reference, added by the PEER_MAP message.
4718 	 */
4719 	dp_peer_unref_delete(peer_handle);
4720 }
4721 
4722 /*
4723  * dp_get_vdev_mac_addr_wifi3() - get vdev MAC address
4724  * @pvdev: Datapath VDEV handle
4725  * Return: pointer to vdev MAC address
4726  */
4727 static uint8 *dp_get_vdev_mac_addr_wifi3(struct cdp_vdev *pvdev)
4728 {
4729 	struct dp_vdev *vdev = (struct dp_vdev *)pvdev;
4730 	return vdev->mac_addr.raw;
4731 }
4732 
4733 /*
4734  * dp_vdev_set_wds() - Enable/disable WDS for the vdev
4735  * @vdev_handle: DP VDEV handle
4736  * @val: WDS enable value (1 to enable, 0 to disable)
4737  *
4738  * Return: 0 on success
4739  */
4740 static int dp_vdev_set_wds(void *vdev_handle, uint32_t val)
4741 {
4742 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
4743 
4744 	vdev->wds_enabled = val;
4745 	return 0;
4746 }
4747 
4748 /*
4749  * dp_get_vdev_from_vdev_id_wifi3() - get vdev from vdev_id
4750  * @dev: Datapath PDEV handle
4751  * @vdev_id: vdev id
4752  */
4753 static struct cdp_vdev *dp_get_vdev_from_vdev_id_wifi3(struct cdp_pdev *dev,
4754 						uint8_t vdev_id)
4755 {
4756 	struct dp_pdev *pdev = (struct dp_pdev *)dev;
4757 	struct dp_vdev *vdev = NULL;
4758 
4759 	if (qdf_unlikely(!pdev))
4760 		return NULL;
4761 
4762 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
4763 	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
4764 		if (vdev->vdev_id == vdev_id)
4765 			break;
4766 	}
4767 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
4768 
4769 	return (struct cdp_vdev *)vdev;
4770 }
4771 
4772 static int dp_get_opmode(struct cdp_vdev *vdev_handle)
4773 {
4774 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
4775 
4776 	return vdev->opmode;
4777 }
4778 
4779 static
4780 void dp_get_os_rx_handles_from_vdev_wifi3(struct cdp_vdev *pvdev,
4781 					  ol_txrx_rx_fp *stack_fn_p,
4782 					  ol_osif_vdev_handle *osif_vdev_p)
4783 {
4784 	struct dp_vdev *vdev = dp_get_dp_vdev_from_cdp_vdev(pvdev);
4785 
4786 	qdf_assert(vdev);
4787 	*stack_fn_p = vdev->osif_rx_stack;
4788 	*osif_vdev_p = vdev->osif_vdev;
4789 }
4790 
4791 static struct cdp_cfg *dp_get_ctrl_pdev_from_vdev_wifi3(struct cdp_vdev *pvdev)
4792 {
4793 	struct dp_vdev *vdev = (struct dp_vdev *)pvdev;
4794 	struct dp_pdev *pdev = vdev->pdev;
4795 
4796 	return (struct cdp_cfg *)pdev->wlan_cfg_ctx;
4797 }
4798 
4799 /**
4800  * dp_reset_monitor_mode() - Disable monitor mode
4801  * @pdev_handle: Datapath PDEV handle
4802  *
4803  * Return: 0 on success, not 0 on failure
4804  */
4805 static int dp_reset_monitor_mode(struct cdp_pdev *pdev_handle)
4806 {
4807 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
4808 	struct htt_rx_ring_tlv_filter htt_tlv_filter;
4809 	struct dp_soc *soc = pdev->soc;
4810 	uint8_t pdev_id;
4811 	int mac_id;
4812 
4813 	pdev_id = pdev->pdev_id;
4814 	soc = pdev->soc;
4815 
4816 	qdf_spin_lock_bh(&pdev->mon_lock);
4817 
4818 	qdf_mem_set(&(htt_tlv_filter), sizeof(htt_tlv_filter), 0x0);
4819 
4820 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
4821 		int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, pdev_id);
4822 
4823 		htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
4824 			pdev->rxdma_mon_buf_ring[mac_id].hal_srng,
4825 			RXDMA_MONITOR_BUF, RX_BUFFER_SIZE, &htt_tlv_filter);
4826 
4827 		htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
4828 			pdev->rxdma_mon_status_ring[mac_id].hal_srng,
4829 			RXDMA_MONITOR_STATUS, RX_BUFFER_SIZE, &htt_tlv_filter);
4830 	}
4831 
4832 	pdev->monitor_vdev = NULL;
4833 
4834 	qdf_spin_unlock_bh(&pdev->mon_lock);
4835 
4836 	return 0;
4837 }
4838 
4839 /**
4840  * dp_set_nac() - set peer_nac
4841  * @peer_handle: Datapath PEER handle
4842  *
4843  * Return: void
4844  */
4845 static void dp_set_nac(struct cdp_peer *peer_handle)
4846 {
4847 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
4848 
4849 	peer->nac = 1;
4850 }
4851 
4852 /**
4853  * dp_get_tx_pending() - read pending tx
4854  * @pdev_handle: Datapath PDEV handle
4855  *
4856  * Return: outstanding tx
4857  */
4858 static int dp_get_tx_pending(struct cdp_pdev *pdev_handle)
4859 {
4860 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
4861 
4862 	return qdf_atomic_read(&pdev->num_tx_outstanding);
4863 }
4864 
4865 /**
4866  * dp_get_peer_mac_from_peer_id() - get peer mac
4867  * @pdev_handle: Datapath PDEV handle
4868  * @peer_id: Peer ID
4869  * @peer_mac: MAC addr of PEER
4870  *
4871  * Return: void
4872  */
4873 static void dp_get_peer_mac_from_peer_id(struct cdp_pdev *pdev_handle,
4874 	uint32_t peer_id, uint8_t *peer_mac)
4875 {
4876 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
4877 	struct dp_peer *peer;
4878 
4879 	if (pdev && peer_mac) {
4880 		peer = dp_peer_find_by_id(pdev->soc, (uint16_t)peer_id);
4881 		if (peer && peer->mac_addr.raw) {
4882 			qdf_mem_copy(peer_mac, peer->mac_addr.raw,
4883 					DP_MAC_ADDR_LEN);
4884 		}
4885 	}
4886 }
4887 
4888 /**
4889  * dp_vdev_set_monitor_mode() - Set DP VDEV to monitor mode
4890  * @vdev_handle: Datapath VDEV handle
4891  * @smart_monitor: Flag to denote if it is smart monitor mode
4892  *
4893  * Return: 0 on success, not 0 on failure
4894  */
4895 static int dp_vdev_set_monitor_mode(struct cdp_vdev *vdev_handle,
4896 		uint8_t smart_monitor)
4897 {
4898 	/* Many monitor VAPs can exist in a system, but only one can be up
4899 	 * at any time
4900 	 */
4901 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
4902 	struct dp_pdev *pdev;
4903 	struct htt_rx_ring_tlv_filter htt_tlv_filter;
4904 	struct dp_soc *soc;
4905 	uint8_t pdev_id;
4906 	int mac_id;
4907 
4908 	qdf_assert(vdev);
4909 
4910 	pdev = vdev->pdev;
4911 	pdev_id = pdev->pdev_id;
4912 	soc = pdev->soc;
4913 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_WARN,
4914 		"pdev=%pK, pdev_id=%d, soc=%pK vdev=%pK",
4915 		pdev, pdev_id, soc, vdev);
4916 
4917 	/*Check if current pdev's monitor_vdev exists */
4918 	if (pdev->monitor_vdev) {
4919 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
4920 			"vdev=%pK", vdev);
4921 		qdf_assert(vdev);
4922 	}
4923 
4924 	pdev->monitor_vdev = vdev;
4925 
4926 	/* If smart monitor mode, do not configure monitor ring */
4927 	if (smart_monitor)
4928 		return QDF_STATUS_SUCCESS;
4929 
4930 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
4931 		"MODE[%x] FP[%02x|%02x|%02x] MO[%02x|%02x|%02x]",
4932 		pdev->mon_filter_mode, pdev->fp_mgmt_filter,
4933 		pdev->fp_ctrl_filter, pdev->fp_data_filter,
4934 		pdev->mo_mgmt_filter, pdev->mo_ctrl_filter,
4935 		pdev->mo_data_filter);
4936 
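	/*
	 * First filter pass: the monitor destination path
	 * (RXDMA_MONITOR_BUF) is programmed to deliver full per-MSDU TLVs
	 * and the packet payload, gated by the FP/MO filters taken from
	 * pdev->mon_filter_mode below.
	 */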
4937 	qdf_mem_set(&(htt_tlv_filter), sizeof(htt_tlv_filter), 0x0);
4938 
4939 	htt_tlv_filter.mpdu_start = 1;
4940 	htt_tlv_filter.msdu_start = 1;
4941 	htt_tlv_filter.packet = 1;
4942 	htt_tlv_filter.msdu_end = 1;
4943 	htt_tlv_filter.mpdu_end = 1;
4944 	htt_tlv_filter.packet_header = 1;
4945 	htt_tlv_filter.attention = 1;
4946 	htt_tlv_filter.ppdu_start = 0;
4947 	htt_tlv_filter.ppdu_end = 0;
4948 	htt_tlv_filter.ppdu_end_user_stats = 0;
4949 	htt_tlv_filter.ppdu_end_user_stats_ext = 0;
4950 	htt_tlv_filter.ppdu_end_status_done = 0;
4951 	htt_tlv_filter.header_per_msdu = 1;
4952 	htt_tlv_filter.enable_fp =
4953 		(pdev->mon_filter_mode & MON_FILTER_PASS) ? 1 : 0;
4954 	htt_tlv_filter.enable_md = 0;
4955 	htt_tlv_filter.enable_mo =
4956 		(pdev->mon_filter_mode & MON_FILTER_OTHER) ? 1 : 0;
4957 	htt_tlv_filter.fp_mgmt_filter = pdev->fp_mgmt_filter;
4958 	htt_tlv_filter.fp_ctrl_filter = pdev->fp_ctrl_filter;
4959 	htt_tlv_filter.fp_data_filter = pdev->fp_data_filter;
4960 	htt_tlv_filter.mo_mgmt_filter = pdev->mo_mgmt_filter;
4961 	htt_tlv_filter.mo_ctrl_filter = pdev->mo_ctrl_filter;
4962 	htt_tlv_filter.mo_data_filter = pdev->mo_data_filter;
4963 
4964 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
4965 		int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, pdev_id);
4966 
4967 		htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
4968 			pdev->rxdma_mon_buf_ring[mac_id].hal_srng,
4969 			RXDMA_MONITOR_BUF, RX_BUFFER_SIZE, &htt_tlv_filter);
4970 	}
4971 
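	/*
	 * Second filter pass: the monitor status ring
	 * (RXDMA_MONITOR_STATUS) only needs PPDU-level TLVs; the packet
	 * header is additionally requested when M-copy mode is enabled.
	 */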
4972 	qdf_mem_set(&(htt_tlv_filter), sizeof(htt_tlv_filter), 0x0);
4973 
4974 	htt_tlv_filter.mpdu_start = 1;
4975 	htt_tlv_filter.msdu_start = 0;
4976 	htt_tlv_filter.packet = 0;
4977 	htt_tlv_filter.msdu_end = 0;
4978 	htt_tlv_filter.mpdu_end = 0;
4979 	htt_tlv_filter.attention = 0;
4980 	htt_tlv_filter.ppdu_start = 1;
4981 	htt_tlv_filter.ppdu_end = 1;
4982 	htt_tlv_filter.ppdu_end_user_stats = 1;
4983 	htt_tlv_filter.ppdu_end_user_stats_ext = 1;
4984 	htt_tlv_filter.ppdu_end_status_done = 1;
4985 	htt_tlv_filter.enable_fp = 1;
4986 	htt_tlv_filter.enable_md = 0;
4987 	htt_tlv_filter.enable_mo = 1;
4988 	if (pdev->mcopy_mode) {
4989 		htt_tlv_filter.packet_header = 1;
4990 	}
4991 	htt_tlv_filter.fp_mgmt_filter = FILTER_MGMT_ALL;
4992 	htt_tlv_filter.fp_ctrl_filter = FILTER_CTRL_ALL;
4993 	htt_tlv_filter.fp_data_filter = FILTER_DATA_ALL;
4994 	htt_tlv_filter.mo_mgmt_filter = FILTER_MGMT_ALL;
4995 	htt_tlv_filter.mo_ctrl_filter = FILTER_CTRL_ALL;
4996 	htt_tlv_filter.mo_data_filter = FILTER_DATA_ALL;
4997 
4998 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
4999 		int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id,
5000 						pdev->pdev_id);
5001 
5002 		htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
5003 			pdev->rxdma_mon_status_ring[mac_id].hal_srng,
5004 			RXDMA_MONITOR_STATUS, RX_BUFFER_SIZE, &htt_tlv_filter);
5005 	}
5006 
5007 	return QDF_STATUS_SUCCESS;
5008 }
5009 
5010 /**
5011  * dp_pdev_set_advance_monitor_filter() - Set DP PDEV monitor filter
5012  * @pdev_handle: Datapath PDEV handle
5013  * @filter_val: Flag to select Filter for monitor mode
5014  * Return: 0 on success, not 0 on failure
5015  */
5016 static int dp_pdev_set_advance_monitor_filter(struct cdp_pdev *pdev_handle,
5017 	struct cdp_monitor_filter *filter_val)
5018 {
5019 	/* Many monitor VAPs can exist in a system, but only one can be up
5020 	 * at any time
5021 	 */
5022 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
5023 	struct dp_vdev *vdev = pdev->monitor_vdev;
5024 	struct htt_rx_ring_tlv_filter htt_tlv_filter;
5025 	struct dp_soc *soc;
5026 	uint8_t pdev_id;
5027 	int mac_id;
5028 
5029 	pdev_id = pdev->pdev_id;
5030 	soc = pdev->soc;
5031 
5032 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_WARN,
5033 		"pdev=%pK, pdev_id=%d, soc=%pK vdev=%pK",
5034 		pdev, pdev_id, soc, vdev);
5035 
5036 	/*Check if current pdev's monitor_vdev exists */
5037 	if (!pdev->monitor_vdev) {
5038 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
5039 			"vdev=%pK", vdev);
5040 		qdf_assert(vdev);
5041 	}
5042 
5043 	/* update filter mode, type in pdev structure */
5044 	pdev->mon_filter_mode = filter_val->mode;
5045 	pdev->fp_mgmt_filter = filter_val->fp_mgmt;
5046 	pdev->fp_ctrl_filter = filter_val->fp_ctrl;
5047 	pdev->fp_data_filter = filter_val->fp_data;
5048 	pdev->mo_mgmt_filter = filter_val->mo_mgmt;
5049 	pdev->mo_ctrl_filter = filter_val->mo_ctrl;
5050 	pdev->mo_data_filter = filter_val->mo_data;
5051 
5052 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
5053 		"MODE[%x] FP[%02x|%02x|%02x] MO[%02x|%02x|%02x]",
5054 		pdev->mon_filter_mode, pdev->fp_mgmt_filter,
5055 		pdev->fp_ctrl_filter, pdev->fp_data_filter,
5056 		pdev->mo_mgmt_filter, pdev->mo_ctrl_filter,
5057 		pdev->mo_data_filter);
5058 
5059 	qdf_mem_set(&(htt_tlv_filter), sizeof(htt_tlv_filter), 0x0);
5060 
5061 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
5062 		int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, pdev_id);
5063 
5064 		htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
5065 			pdev->rxdma_mon_buf_ring[mac_id].hal_srng,
5066 			RXDMA_MONITOR_BUF, RX_BUFFER_SIZE, &htt_tlv_filter);
5067 
5068 		htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
5069 			pdev->rxdma_mon_status_ring[mac_id].hal_srng,
5070 			RXDMA_MONITOR_STATUS, RX_BUFFER_SIZE, &htt_tlv_filter);
5071 	}
5072 
5073 	htt_tlv_filter.mpdu_start = 1;
5074 	htt_tlv_filter.msdu_start = 1;
5075 	htt_tlv_filter.packet = 1;
5076 	htt_tlv_filter.msdu_end = 1;
5077 	htt_tlv_filter.mpdu_end = 1;
5078 	htt_tlv_filter.packet_header = 1;
5079 	htt_tlv_filter.attention = 1;
5080 	htt_tlv_filter.ppdu_start = 0;
5081 	htt_tlv_filter.ppdu_end = 0;
5082 	htt_tlv_filter.ppdu_end_user_stats = 0;
5083 	htt_tlv_filter.ppdu_end_user_stats_ext = 0;
5084 	htt_tlv_filter.ppdu_end_status_done = 0;
5085 	htt_tlv_filter.header_per_msdu = 1;
5086 	htt_tlv_filter.enable_fp =
5087 		(pdev->mon_filter_mode & MON_FILTER_PASS) ? 1 : 0;
5088 	htt_tlv_filter.enable_md = 0;
5089 	htt_tlv_filter.enable_mo =
5090 		(pdev->mon_filter_mode & MON_FILTER_OTHER) ? 1 : 0;
5091 	htt_tlv_filter.fp_mgmt_filter = pdev->fp_mgmt_filter;
5092 	htt_tlv_filter.fp_ctrl_filter = pdev->fp_ctrl_filter;
5093 	htt_tlv_filter.fp_data_filter = pdev->fp_data_filter;
5094 	htt_tlv_filter.mo_mgmt_filter = pdev->mo_mgmt_filter;
5095 	htt_tlv_filter.mo_ctrl_filter = pdev->mo_ctrl_filter;
5096 	htt_tlv_filter.mo_data_filter = pdev->mo_data_filter;
5097 
5098 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
5099 		int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, pdev_id);
5100 
5101 		htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
5102 			pdev->rxdma_mon_buf_ring[mac_id].hal_srng,
5103 			RXDMA_MONITOR_BUF, RX_BUFFER_SIZE, &htt_tlv_filter);
5104 	}
5105 
5106 	qdf_mem_set(&(htt_tlv_filter), sizeof(htt_tlv_filter), 0x0);
5107 
5108 	htt_tlv_filter.mpdu_start = 1;
5109 	htt_tlv_filter.msdu_start = 0;
5110 	htt_tlv_filter.packet = 0;
5111 	htt_tlv_filter.msdu_end = 0;
5112 	htt_tlv_filter.mpdu_end = 0;
5113 	htt_tlv_filter.attention = 0;
5114 	htt_tlv_filter.ppdu_start = 1;
5115 	htt_tlv_filter.ppdu_end = 1;
5116 	htt_tlv_filter.ppdu_end_user_stats = 1;
5117 	htt_tlv_filter.ppdu_end_user_stats_ext = 1;
5118 	htt_tlv_filter.ppdu_end_status_done = 1;
5119 	htt_tlv_filter.enable_fp = 1;
5120 	htt_tlv_filter.enable_md = 0;
5121 	htt_tlv_filter.enable_mo = 1;
5122 	if (pdev->mcopy_mode) {
5123 		htt_tlv_filter.packet_header = 1;
5124 	}
5125 	htt_tlv_filter.fp_mgmt_filter = FILTER_MGMT_ALL;
5126 	htt_tlv_filter.fp_ctrl_filter = FILTER_CTRL_ALL;
5127 	htt_tlv_filter.fp_data_filter = FILTER_DATA_ALL;
5128 	htt_tlv_filter.mo_mgmt_filter = FILTER_MGMT_ALL;
5129 	htt_tlv_filter.mo_ctrl_filter = FILTER_CTRL_ALL;
5130 	htt_tlv_filter.mo_data_filter = FILTER_DATA_ALL;
5131 
5132 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
5133 		int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id,
5134 						pdev->pdev_id);
5135 
5136 		htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
5137 			pdev->rxdma_mon_status_ring[mac_id].hal_srng,
5138 			RXDMA_MONITOR_STATUS, RX_BUFFER_SIZE, &htt_tlv_filter);
5139 	}
5140 
5141 	return QDF_STATUS_SUCCESS;
5142 }
5143 
5144 /**
5145  * dp_get_pdev_id_frm_pdev() - get pdev_id
5146  * @pdev_handle: Datapath PDEV handle
5147  *
5148  * Return: pdev_id
5149  */
5150 static
5151 uint8_t dp_get_pdev_id_frm_pdev(struct cdp_pdev *pdev_handle)
5152 {
5153 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
5154 
5155 	return pdev->pdev_id;
5156 }
5157 
5158 /**
5159  * dp_pdev_set_chan_noise_floor() - set channel noise floor
5160  * @pdev_handle: Datapath PDEV handle
5161  * @chan_noise_floor: Channel Noise Floor
5162  *
5163  * Return: void
5164  */
5165 static
5166 void dp_pdev_set_chan_noise_floor(struct cdp_pdev *pdev_handle,
5167 				  int16_t chan_noise_floor)
5168 {
5169 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
5170 
5171 	pdev->chan_noise_floor = chan_noise_floor;
5172 }
5173 
5174 /**
5175  * dp_vdev_get_filter_ucast_data() - get DP VDEV monitor ucast filter
5176  * @vdev_handle: Datapath VDEV handle
5177  * Return: true if the ucast filter flag is set
5178  */
5179 static bool dp_vdev_get_filter_ucast_data(struct cdp_vdev *vdev_handle)
5180 {
5181 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
5182 	struct dp_pdev *pdev;
5183 
5184 	pdev = vdev->pdev;
5185 
5186 	if ((pdev->fp_data_filter & FILTER_DATA_UCAST) ||
5187 	    (pdev->mo_data_filter & FILTER_DATA_UCAST))
5188 		return true;
5189 
5190 	return false;
5191 }
5192 
5193 /**
5194  * dp_vdev_get_filter_mcast_data() - get DP VDEV monitor mcast filter
5195  * @vdev_handle: Datapath VDEV handle
5196  * Return: true if the mcast filter flag is set
5197  */
5198 static bool dp_vdev_get_filter_mcast_data(struct cdp_vdev *vdev_handle)
5199 {
5200 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
5201 	struct dp_pdev *pdev;
5202 
5203 	pdev = vdev->pdev;
5204 
5205 	if ((pdev->fp_data_filter & FILTER_DATA_MCAST) ||
5206 	    (pdev->mo_data_filter & FILTER_DATA_MCAST))
5207 		return true;
5208 
5209 	return false;
5210 }
5211 
5212 /**
5213  * dp_vdev_get_filter_non_data() - get DP VDEV monitor non_data filter
5214  * @vdev_handle: Datapath VDEV handle
5215  * Return: true if the non-data filter flag is set
5216  */
5217 static bool dp_vdev_get_filter_non_data(struct cdp_vdev *vdev_handle)
5218 {
5219 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
5220 	struct dp_pdev *pdev;
5221 
5222 	pdev = vdev->pdev;
5223 
5224 	if ((pdev->fp_mgmt_filter & FILTER_MGMT_ALL) ||
5225 	    (pdev->mo_mgmt_filter & FILTER_MGMT_ALL)) {
5226 		if ((pdev->fp_ctrl_filter & FILTER_CTRL_ALL) ||
5227 		    (pdev->mo_ctrl_filter & FILTER_CTRL_ALL)) {
5228 			return true;
5229 		}
5230 	}
5231 
5232 	return false;
5233 }
5234 
5235 #ifdef MESH_MODE_SUPPORT
5236 void dp_peer_set_mesh_mode(struct cdp_vdev *vdev_hdl, uint32_t val)
5237 {
5238 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_hdl;
5239 
5240 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
5241 		FL("val %d"), val);
5242 	vdev->mesh_vdev = val;
5243 }
5244 
5245 /*
5246  * dp_peer_set_mesh_rx_filter() - to set the mesh rx filter
5247  * @vdev_hdl: virtual device object
5248  * @val: value to be set
5249  *
5250  * Return: void
5251  */
5252 void dp_peer_set_mesh_rx_filter(struct cdp_vdev *vdev_hdl, uint32_t val)
5253 {
5254 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_hdl;
5255 
5256 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
5257 		FL("val %d"), val);
5258 	vdev->mesh_rx_filter = val;
5259 }
5260 #endif
5261 
5262 /*
5263  * dp_aggregate_pdev_ctrl_frames_stats() - function to aggregate peer stats
5264  * Current scope is bar received count
5265  *
5266  * @pdev_handle: DP_PDEV handle
5267  *
5268  * Return: void
5269  */
5270 #define STATS_PROC_TIMEOUT        (HZ/1000)
5271 
5272 static void
5273 dp_aggregate_pdev_ctrl_frames_stats(struct dp_pdev *pdev)
5274 {
5275 	struct dp_vdev *vdev;
5276 	struct dp_peer *peer;
5277 	uint32_t waitcnt;
5278 
5279 	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
5280 		TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
5281 			if (!peer) {
5282 				QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
5283 					FL("DP Invalid Peer reference"));
5284 				return;
5285 			}
5286 
5287 			if (peer->delete_in_progress) {
5288 				QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
5289 					FL("DP Peer deletion in progress"));
5290 				continue;
5291 			}
5292 
5293 			qdf_atomic_inc(&peer->ref_cnt);
5294 			waitcnt = 0;
5295 			dp_peer_rxtid_stats(peer, dp_rx_bar_stats_cb, pdev);
5296 			while (!(qdf_atomic_read(&(pdev->stats_cmd_complete)))
5297 				&& waitcnt < 10) {
5298 				schedule_timeout_interruptible(
5299 						STATS_PROC_TIMEOUT);
5300 				waitcnt++;
5301 			}
5302 			qdf_atomic_set(&(pdev->stats_cmd_complete), 0);
5303 			dp_peer_unref_delete(peer);
5304 		}
5305 	}
5306 }
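/*
 * Note on the wait loop above: dp_peer_rxtid_stats() issues a REO
 * queue-stats command per peer with dp_rx_bar_stats_cb() as the callback,
 * and the loop then sleeps in STATS_PROC_TIMEOUT chunks (at most 10 times
 * per peer) until the callback sets pdev->stats_cmd_complete.
 */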
5307 
5308 /**
5309  * dp_rx_bar_stats_cb(): BAR received stats callback
5310  * @soc: SOC handle
5311  * @cb_ctxt: Call back context
5312  * @reo_status: Reo status
5313  *
5314  * return: void
5315  */
5316 void dp_rx_bar_stats_cb(struct dp_soc *soc, void *cb_ctxt,
5317 	union hal_reo_status *reo_status)
5318 {
5319 	struct dp_pdev *pdev = (struct dp_pdev *)cb_ctxt;
5320 	struct hal_reo_queue_status *queue_status = &(reo_status->queue_status);
5321 
5322 	if (queue_status->header.status != HAL_REO_CMD_SUCCESS) {
5323 		DP_TRACE_STATS(FATAL, "REO stats failure %d \n",
5324 			queue_status->header.status);
5325 		qdf_atomic_set(&(pdev->stats_cmd_complete), 1);
5326 		return;
5327 	}
5328 
5329 	pdev->stats.rx.bar_recv_cnt += queue_status->bar_rcvd_cnt;
5330 	qdf_atomic_set(&(pdev->stats_cmd_complete), 1);
5331 
5332 }
5333 
5334 /**
5335  * dp_aggregate_vdev_stats(): Consolidate stats at VDEV level
5336  * @vdev: DP VDEV handle
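 * @vdev_stats: buffer to hold the consolidated vdev stats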
5337  *
5338  * return: void
5339  */
5340 void dp_aggregate_vdev_stats(struct dp_vdev *vdev,
5341 			     struct cdp_vdev_stats *vdev_stats)
5342 {
5343 	struct dp_peer *peer = NULL;
5344 	struct dp_soc *soc = vdev->pdev->soc;
5345 
5346 	qdf_mem_copy(vdev_stats, &vdev->stats, sizeof(vdev->stats));
5347 
5348 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem)
5349 		dp_update_vdev_stats(vdev_stats, peer);
5350 
5351 	if (soc->cdp_soc.ol_ops->update_dp_stats)
5352 		soc->cdp_soc.ol_ops->update_dp_stats(vdev->pdev->ctrl_pdev,
5353 			&vdev->stats, (uint16_t) vdev->vdev_id,
5354 			UPDATE_VDEV_STATS);
5355 
5356 }
5357 
5358 /**
5359  * dp_aggregate_pdev_stats(): Consolidate stats at PDEV level
5360  * @pdev: DP PDEV handle
5361  *
5362  * return: void
5363  */
5364 static inline void dp_aggregate_pdev_stats(struct dp_pdev *pdev)
5365 {
5366 	struct dp_vdev *vdev = NULL;
5367 	struct dp_soc *soc = pdev->soc;
5368 	struct cdp_vdev_stats *vdev_stats =
5369 			qdf_mem_malloc(sizeof(struct cdp_vdev_stats));
5370 
5371 	if (!vdev_stats) {
5372 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
5373 			  "DP alloc failure - unable to allocate vdev stats");
5374 		return;
5375 	}
5376 
5377 	qdf_mem_set(&(pdev->stats.tx), sizeof(pdev->stats.tx), 0x0);
5378 	qdf_mem_set(&(pdev->stats.rx), sizeof(pdev->stats.rx), 0x0);
5379 	qdf_mem_set(&(pdev->stats.tx_i), sizeof(pdev->stats.tx_i), 0x0);
5380 
5381 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
5382 	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
5383 
5384 		dp_aggregate_vdev_stats(vdev, vdev_stats);
5385 		dp_update_pdev_stats(pdev, vdev_stats);
5386 
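		/*
		 * Fold each vdev's TX ingress (tx_i) counters into the pdev
		 * totals; DP_STATS_AGGR_PKT aggregates packet/byte pairs and
		 * DP_STATS_AGGR aggregates single counters.
		 */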
5387 		DP_STATS_AGGR_PKT(pdev, vdev, tx_i.nawds_mcast);
5388 
5389 		DP_STATS_AGGR_PKT(pdev, vdev, tx_i.rcvd);
5390 		DP_STATS_AGGR_PKT(pdev, vdev, tx_i.processed);
5391 		DP_STATS_AGGR_PKT(pdev, vdev, tx_i.reinject_pkts);
5392 		DP_STATS_AGGR_PKT(pdev, vdev, tx_i.inspect_pkts);
5393 		DP_STATS_AGGR_PKT(pdev, vdev, tx_i.raw.raw_pkt);
5394 		DP_STATS_AGGR(pdev, vdev, tx_i.raw.dma_map_error);
5395 		DP_STATS_AGGR_PKT(pdev, vdev, tx_i.tso.tso_pkt);
5396 		DP_STATS_AGGR(pdev, vdev, tx_i.tso.dropped_host.num);
5397 		DP_STATS_AGGR(pdev, vdev, tx_i.tso.dropped_target);
5398 		DP_STATS_AGGR(pdev, vdev, tx_i.sg.dropped_host.num);
5399 		DP_STATS_AGGR(pdev, vdev, tx_i.sg.dropped_target);
5400 		DP_STATS_AGGR_PKT(pdev, vdev, tx_i.sg.sg_pkt);
5401 		DP_STATS_AGGR_PKT(pdev, vdev, tx_i.mcast_en.mcast_pkt);
5402 		DP_STATS_AGGR(pdev, vdev,
5403 				tx_i.mcast_en.dropped_map_error);
5404 		DP_STATS_AGGR(pdev, vdev,
5405 				tx_i.mcast_en.dropped_self_mac);
5406 		DP_STATS_AGGR(pdev, vdev,
5407 				tx_i.mcast_en.dropped_send_fail);
5408 		DP_STATS_AGGR(pdev, vdev, tx_i.mcast_en.ucast);
5409 		DP_STATS_AGGR(pdev, vdev, tx_i.dropped.dma_error);
5410 		DP_STATS_AGGR(pdev, vdev, tx_i.dropped.ring_full);
5411 		DP_STATS_AGGR(pdev, vdev, tx_i.dropped.enqueue_fail);
5412 		DP_STATS_AGGR(pdev, vdev, tx_i.dropped.desc_na.num);
5413 		DP_STATS_AGGR(pdev, vdev, tx_i.dropped.res_full);
5414 		DP_STATS_AGGR(pdev, vdev, tx_i.cce_classified);
5415 		DP_STATS_AGGR(pdev, vdev, tx_i.cce_classified_raw);
5416 		DP_STATS_AGGR(pdev, vdev, tx_i.mesh.exception_fw);
5417 		DP_STATS_AGGR(pdev, vdev, tx_i.mesh.completion_fw);
5418 
5419 		pdev->stats.tx_i.dropped.dropped_pkt.num =
5420 			pdev->stats.tx_i.dropped.dma_error +
5421 			pdev->stats.tx_i.dropped.ring_full +
5422 			pdev->stats.tx_i.dropped.enqueue_fail +
5423 			pdev->stats.tx_i.dropped.desc_na.num +
5424 			pdev->stats.tx_i.dropped.res_full;
5425 
5426 		pdev->stats.tx.last_ack_rssi =
5427 			vdev->stats.tx.last_ack_rssi;
5428 		pdev->stats.tx_i.tso.num_seg =
5429 			vdev->stats.tx_i.tso.num_seg;
5430 	}
5431 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
5432 	qdf_mem_free(vdev_stats);
5433 
5434 	if (soc->cdp_soc.ol_ops->update_dp_stats)
5435 		soc->cdp_soc.ol_ops->update_dp_stats(pdev->ctrl_pdev,
5436 				&pdev->stats, pdev->pdev_id, UPDATE_PDEV_STATS);
5437 
5438 }
5439 
5440 /**
5441  * dp_vdev_getstats() - get vdev packet level stats
5442  * @vdev_handle: Datapath VDEV handle
5443  * @stats: cdp network device stats structure
5444  *
5445  * Return: void
5446  */
5447 static void dp_vdev_getstats(void *vdev_handle,
5448 		struct cdp_dev_stats *stats)
5449 {
5450 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
5451 	struct cdp_vdev_stats *vdev_stats =
5452 			qdf_mem_malloc(sizeof(struct cdp_vdev_stats));
5453 
5454 	if (!vdev_stats) {
5455 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
5456 			  "DP alloc failure - unable to allocate vdev stats");
5457 		return;
5458 	}
5459 
5460 	dp_aggregate_vdev_stats(vdev, vdev_stats);
5461 
5462 	stats->tx_packets = vdev_stats->tx_i.rcvd.num;
5463 	stats->tx_bytes = vdev_stats->tx_i.rcvd.bytes;
5464 
5465 	stats->tx_errors = vdev_stats->tx.tx_failed +
5466 		vdev_stats->tx_i.dropped.dropped_pkt.num;
5467 	stats->tx_dropped = stats->tx_errors;
5468 
5469 	stats->rx_packets = vdev_stats->rx.unicast.num +
5470 		vdev_stats->rx.multicast.num +
5471 		vdev_stats->rx.bcast.num;
5472 	stats->rx_bytes = vdev_stats->rx.unicast.bytes +
5473 		vdev_stats->rx.multicast.bytes +
5474 		vdev_stats->rx.bcast.bytes;
5475 
5476 }
5477 
5478 
5479 /**
5480  * dp_pdev_getstats() - get pdev packet level stats
5481  * @pdev_handle: Datapath PDEV handle
5482  * @stats: cdp network device stats structure
5483  *
5484  * Return: void
5485  */
5486 static void dp_pdev_getstats(void *pdev_handle,
5487 		struct cdp_dev_stats *stats)
5488 {
5489 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
5490 
5491 	dp_aggregate_pdev_stats(pdev);
5492 
5493 	stats->tx_packets = pdev->stats.tx_i.rcvd.num;
5494 	stats->tx_bytes = pdev->stats.tx_i.rcvd.bytes;
5495 
5496 	stats->tx_errors = pdev->stats.tx.tx_failed +
5497 		pdev->stats.tx_i.dropped.dropped_pkt.num;
5498 	stats->tx_dropped = stats->tx_errors;
5499 
5500 	stats->rx_packets = pdev->stats.rx.unicast.num +
5501 		pdev->stats.rx.multicast.num +
5502 		pdev->stats.rx.bcast.num;
5503 	stats->rx_bytes = pdev->stats.rx.unicast.bytes +
5504 		pdev->stats.rx.multicast.bytes +
5505 		pdev->stats.rx.bcast.bytes;
5506 }
5507 
5508 /**
5509  * dp_get_device_stats() - get interface level packet stats
5510  * @handle: device handle
5511  * @stats: cdp network device stats structure
5512  * @type: device type pdev/vdev
5513  *
5514  * Return: void
5515  */
5516 static void dp_get_device_stats(void *handle,
5517 		struct cdp_dev_stats *stats, uint8_t type)
5518 {
5519 	switch (type) {
5520 	case UPDATE_VDEV_STATS:
5521 		dp_vdev_getstats(handle, stats);
5522 		break;
5523 	case UPDATE_PDEV_STATS:
5524 		dp_pdev_getstats(handle, stats);
5525 		break;
5526 	default:
5527 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
5528 			"apstats cannot be updated for this input "
5529 			"type %d", type);
5530 		break;
5531 	}
5532 
5533 }
5534 
5535 
5536 /**
5537  * dp_print_pdev_tx_stats(): Print Pdev level TX stats
5538  * @pdev: DP_PDEV Handle
5539  *
5540  * Return:void
5541  */
5542 static inline void
5543 dp_print_pdev_tx_stats(struct dp_pdev *pdev)
5544 {
5545 	uint8_t index = 0;
5546 	DP_PRINT_STATS("PDEV Tx Stats:\n");
5547 	DP_PRINT_STATS("Received From Stack:");
5548 	DP_PRINT_STATS("	Packets = %d",
5549 			pdev->stats.tx_i.rcvd.num);
5550 	DP_PRINT_STATS("	Bytes = %llu",
5551 			pdev->stats.tx_i.rcvd.bytes);
5552 	DP_PRINT_STATS("Processed:");
5553 	DP_PRINT_STATS("	Packets = %d",
5554 			pdev->stats.tx_i.processed.num);
5555 	DP_PRINT_STATS("	Bytes = %llu",
5556 			pdev->stats.tx_i.processed.bytes);
5557 	DP_PRINT_STATS("Total Completions:");
5558 	DP_PRINT_STATS("	Packets = %u",
5559 			pdev->stats.tx.comp_pkt.num);
5560 	DP_PRINT_STATS("	Bytes = %llu",
5561 			pdev->stats.tx.comp_pkt.bytes);
5562 	DP_PRINT_STATS("Successful Completions:");
5563 	DP_PRINT_STATS("	Packets = %u",
5564 			pdev->stats.tx.tx_success.num);
5565 	DP_PRINT_STATS("	Bytes = %llu",
5566 			pdev->stats.tx.tx_success.bytes);
5567 	DP_PRINT_STATS("Dropped:");
5568 	DP_PRINT_STATS("	Total = %d",
5569 			pdev->stats.tx_i.dropped.dropped_pkt.num);
5570 	DP_PRINT_STATS("	Dma_map_error = %d",
5571 			pdev->stats.tx_i.dropped.dma_error);
5572 	DP_PRINT_STATS("	Ring Full = %d",
5573 			pdev->stats.tx_i.dropped.ring_full);
5574 	DP_PRINT_STATS("	Descriptor Not available = %d",
5575 			pdev->stats.tx_i.dropped.desc_na.num);
5576 	DP_PRINT_STATS("	HW enqueue failed = %d",
5577 			pdev->stats.tx_i.dropped.enqueue_fail);
5578 	DP_PRINT_STATS("	Resources Full = %d",
5579 			pdev->stats.tx_i.dropped.res_full);
5580 	DP_PRINT_STATS("	FW removed = %d",
5581 			pdev->stats.tx.dropped.fw_rem);
5582 	DP_PRINT_STATS("	FW removed transmitted = %d",
5583 			pdev->stats.tx.dropped.fw_rem_tx);
5584 	DP_PRINT_STATS("	FW removed untransmitted = %d",
5585 			pdev->stats.tx.dropped.fw_rem_notx);
5586 	DP_PRINT_STATS("	FW removed untransmitted fw_reason1 = %d",
5587 			pdev->stats.tx.dropped.fw_reason1);
5588 	DP_PRINT_STATS("	FW removed untransmitted fw_reason2 = %d",
5589 			pdev->stats.tx.dropped.fw_reason2);
5590 	DP_PRINT_STATS("	FW removed untransmitted fw_reason3 = %d",
5591 			pdev->stats.tx.dropped.fw_reason3);
5592 	DP_PRINT_STATS("	Aged Out from msdu/mpdu queues = %d",
5593 			pdev->stats.tx.dropped.age_out);
5594 	DP_PRINT_STATS("	Multicast:");
5595 	DP_PRINT_STATS("	Packets: %u",
5596 		       pdev->stats.tx.mcast.num);
5597 	DP_PRINT_STATS("	Bytes: %llu",
5598 		       pdev->stats.tx.mcast.bytes);
5599 	DP_PRINT_STATS("Scatter Gather:");
5600 	DP_PRINT_STATS("	Packets = %d",
5601 			pdev->stats.tx_i.sg.sg_pkt.num);
5602 	DP_PRINT_STATS("	Bytes = %llu",
5603 			pdev->stats.tx_i.sg.sg_pkt.bytes);
5604 	DP_PRINT_STATS("	Dropped By Host = %d",
5605 			pdev->stats.tx_i.sg.dropped_host.num);
5606 	DP_PRINT_STATS("	Dropped By Target = %d",
5607 			pdev->stats.tx_i.sg.dropped_target);
5608 	DP_PRINT_STATS("TSO:");
5609 	DP_PRINT_STATS("	Number of Segments = %d",
5610 			pdev->stats.tx_i.tso.num_seg);
5611 	DP_PRINT_STATS("	Packets = %d",
5612 			pdev->stats.tx_i.tso.tso_pkt.num);
5613 	DP_PRINT_STATS("	Bytes = %llu",
5614 			pdev->stats.tx_i.tso.tso_pkt.bytes);
5615 	DP_PRINT_STATS("	Dropped By Host = %d",
5616 			pdev->stats.tx_i.tso.dropped_host.num);
5617 	DP_PRINT_STATS("Mcast Enhancement:");
5618 	DP_PRINT_STATS("	Packets = %d",
5619 			pdev->stats.tx_i.mcast_en.mcast_pkt.num);
5620 	DP_PRINT_STATS("	Bytes = %llu",
5621 			pdev->stats.tx_i.mcast_en.mcast_pkt.bytes);
5622 	DP_PRINT_STATS("	Dropped: Map Errors = %d",
5623 			pdev->stats.tx_i.mcast_en.dropped_map_error);
5624 	DP_PRINT_STATS("	Dropped: Self Mac = %d",
5625 			pdev->stats.tx_i.mcast_en.dropped_self_mac);
5626 	DP_PRINT_STATS("	Dropped: Send Fail = %d",
5627 			pdev->stats.tx_i.mcast_en.dropped_send_fail);
5628 	DP_PRINT_STATS("	Unicast sent = %d",
5629 			pdev->stats.tx_i.mcast_en.ucast);
5630 	DP_PRINT_STATS("Raw:");
5631 	DP_PRINT_STATS("	Packets = %d",
5632 			pdev->stats.tx_i.raw.raw_pkt.num);
5633 	DP_PRINT_STATS("	Bytes = %llu",
5634 			pdev->stats.tx_i.raw.raw_pkt.bytes);
5635 	DP_PRINT_STATS("	DMA map error = %d",
5636 			pdev->stats.tx_i.raw.dma_map_error);
5637 	DP_PRINT_STATS("Reinjected:");
5638 	DP_PRINT_STATS("	Packets = %d",
5639 			pdev->stats.tx_i.reinject_pkts.num);
5640 	DP_PRINT_STATS("	Bytes = %llu\n",
5641 			pdev->stats.tx_i.reinject_pkts.bytes);
5642 	DP_PRINT_STATS("Inspected:");
5643 	DP_PRINT_STATS("	Packets = %d",
5644 			pdev->stats.tx_i.inspect_pkts.num);
5645 	DP_PRINT_STATS("	Bytes = %llu",
5646 			pdev->stats.tx_i.inspect_pkts.bytes);
5647 	DP_PRINT_STATS("Nawds Multicast:");
5648 	DP_PRINT_STATS("	Packets = %d",
5649 			pdev->stats.tx_i.nawds_mcast.num);
5650 	DP_PRINT_STATS("	Bytes = %llu",
5651 			pdev->stats.tx_i.nawds_mcast.bytes);
5652 	DP_PRINT_STATS("CCE Classified:");
5653 	DP_PRINT_STATS("	CCE Classified Packets: %u",
5654 			pdev->stats.tx_i.cce_classified);
5655 	DP_PRINT_STATS("	RAW CCE Classified Packets: %u",
5656 			pdev->stats.tx_i.cce_classified_raw);
5657 	DP_PRINT_STATS("Mesh stats:");
5658 	DP_PRINT_STATS("	frames to firmware: %u",
5659 			pdev->stats.tx_i.mesh.exception_fw);
5660 	DP_PRINT_STATS("	completions from fw: %u",
5661 			pdev->stats.tx_i.mesh.completion_fw);
5662 	DP_PRINT_STATS("PPDU stats counter");
5663 	for (index = 0; index < CDP_PPDU_STATS_MAX_TAG; index++) {
5664 		DP_PRINT_STATS("	Tag[%d] = %llu", index,
5665 				pdev->stats.ppdu_stats_counter[index]);
5666 	}
5667 }
5668 
5669 /**
5670  * dp_print_pdev_rx_stats(): Print Pdev level RX stats
5671  * @pdev: DP_PDEV Handle
5672  *
5673  * Return: void
5674  */
5675 static inline void
5676 dp_print_pdev_rx_stats(struct dp_pdev *pdev)
5677 {
5678 	DP_PRINT_STATS("PDEV Rx Stats:\n");
5679 	DP_PRINT_STATS("Received From HW (Per Rx Ring):");
5680 	DP_PRINT_STATS("	Packets = %d %d %d %d",
5681 			pdev->stats.rx.rcvd_reo[0].num,
5682 			pdev->stats.rx.rcvd_reo[1].num,
5683 			pdev->stats.rx.rcvd_reo[2].num,
5684 			pdev->stats.rx.rcvd_reo[3].num);
5685 	DP_PRINT_STATS("	Bytes = %llu %llu %llu %llu",
5686 			pdev->stats.rx.rcvd_reo[0].bytes,
5687 			pdev->stats.rx.rcvd_reo[1].bytes,
5688 			pdev->stats.rx.rcvd_reo[2].bytes,
5689 			pdev->stats.rx.rcvd_reo[3].bytes);
5690 	DP_PRINT_STATS("Replenished:");
5691 	DP_PRINT_STATS("	Packets = %d",
5692 			pdev->stats.replenish.pkts.num);
5693 	DP_PRINT_STATS("	Bytes = %llu",
5694 			pdev->stats.replenish.pkts.bytes);
5695 	DP_PRINT_STATS("	Buffers Added To Freelist = %d",
5696 			pdev->stats.buf_freelist);
5697 	DP_PRINT_STATS("	Low threshold intr = %d",
5698 			pdev->stats.replenish.low_thresh_intrs);
5699 	DP_PRINT_STATS("Dropped:");
5700 	DP_PRINT_STATS("	msdu_not_done = %d",
5701 			pdev->stats.dropped.msdu_not_done);
5702 	DP_PRINT_STATS("	mon_rx_drop = %d",
5703 			pdev->stats.dropped.mon_rx_drop);
5704 	DP_PRINT_STATS("Sent To Stack:");
5705 	DP_PRINT_STATS("	Packets = %d",
5706 			pdev->stats.rx.to_stack.num);
5707 	DP_PRINT_STATS("	Bytes = %llu",
5708 			pdev->stats.rx.to_stack.bytes);
5709 	DP_PRINT_STATS("Multicast/Broadcast:");
5710 	DP_PRINT_STATS("	Packets = %d",
5711 			(pdev->stats.rx.multicast.num +
5712 			pdev->stats.rx.bcast.num));
5713 	DP_PRINT_STATS("	Bytes = %llu",
5714 			(pdev->stats.rx.multicast.bytes +
5715 			pdev->stats.rx.bcast.bytes));
5716 	DP_PRINT_STATS("Errors:");
5717 	DP_PRINT_STATS("	Rxdma Ring Un-initialized = %d",
5718 			pdev->stats.replenish.rxdma_err);
5719 	DP_PRINT_STATS("	Desc Alloc Failed = %d",
5720 			pdev->stats.err.desc_alloc_fail);
5721 	DP_PRINT_STATS("	IP checksum error = %d",
5722 		       pdev->stats.err.ip_csum_err);
5723 	DP_PRINT_STATS("	TCP/UDP checksum error = %d",
5724 		       pdev->stats.err.tcp_udp_csum_err);
5725 
5726 	/* Get bar_recv_cnt */
5727 	dp_aggregate_pdev_ctrl_frames_stats(pdev);
5728 	DP_PRINT_STATS("BAR Received Count: = %d",
5729 			pdev->stats.rx.bar_recv_cnt);
5730 
5731 }
5732 
5733 /**
5734  * dp_print_pdev_rx_mon_stats(): Print Pdev level RX monitor stats
5735  * @pdev: DP_PDEV Handle
5736  *
5737  * Return: void
5738  */
5739 static inline void
5740 dp_print_pdev_rx_mon_stats(struct dp_pdev *pdev)
5741 {
5742 	struct cdp_pdev_mon_stats *rx_mon_stats;
5743 
5744 	rx_mon_stats = &pdev->rx_mon_stats;
5745 
5746 	DP_PRINT_STATS("PDEV Rx Monitor Stats:\n");
5747 
5748 	dp_rx_mon_print_dbg_ppdu_stats(rx_mon_stats);
5749 
5750 	DP_PRINT_STATS("status_ppdu_done_cnt = %d",
5751 		       rx_mon_stats->status_ppdu_done);
5752 	DP_PRINT_STATS("dest_ppdu_done_cnt = %d",
5753 		       rx_mon_stats->dest_ppdu_done);
5754 	DP_PRINT_STATS("dest_mpdu_done_cnt = %d",
5755 		       rx_mon_stats->dest_mpdu_done);
5756 	DP_PRINT_STATS("dest_mpdu_drop_cnt = %d",
5757 		       rx_mon_stats->dest_mpdu_drop);
5758 }
5759 
5760 /**
5761  * dp_print_soc_tx_stats(): Print SOC level Tx stats
5762  * @soc: DP_SOC Handle
5763  *
5764  * Return: void
5765  */
5766 static inline void
5767 dp_print_soc_tx_stats(struct dp_soc *soc)
5768 {
5769 	uint8_t desc_pool_id;
5770 	soc->stats.tx.desc_in_use = 0;
5771 
5772 	DP_PRINT_STATS("SOC Tx Stats:\n");
5773 
5774 	for (desc_pool_id = 0;
5775 	     desc_pool_id < wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
5776 	     desc_pool_id++)
5777 		soc->stats.tx.desc_in_use +=
5778 			soc->tx_desc[desc_pool_id].num_allocated;
5779 
5780 	DP_PRINT_STATS("Tx Descriptors In Use = %d",
5781 			soc->stats.tx.desc_in_use);
5782 	DP_PRINT_STATS("Invalid peer:");
5783 	DP_PRINT_STATS("	Packets = %d",
5784 			soc->stats.tx.tx_invalid_peer.num);
5785 	DP_PRINT_STATS("	Bytes = %llu",
5786 			soc->stats.tx.tx_invalid_peer.bytes);
5787 	DP_PRINT_STATS("Packets dropped due to TCL ring full = %d %d %d",
5788 			soc->stats.tx.tcl_ring_full[0],
5789 			soc->stats.tx.tcl_ring_full[1],
5790 			soc->stats.tx.tcl_ring_full[2]);
5791 
5792 }
5793 /**
5794  * dp_print_soc_rx_stats(): Print SOC level Rx stats
5795  * @soc: DP_SOC Handle
5796  *
5797  * Return:void
5798  */
5799 static inline void
5800 dp_print_soc_rx_stats(struct dp_soc *soc)
5801 {
5802 	uint32_t i;
5803 	char reo_error[DP_REO_ERR_LENGTH];
5804 	char rxdma_error[DP_RXDMA_ERR_LENGTH];
5805 	uint8_t index = 0;
5806 
5807 	DP_PRINT_STATS("SOC Rx Stats:\n");
5808 	DP_PRINT_STATS("Errors:\n");
5809 	DP_PRINT_STATS("Rx Decrypt Errors = %d",
5810 			(soc->stats.rx.err.rxdma_error[HAL_RXDMA_ERR_DECRYPT] +
5811 			soc->stats.rx.err.rxdma_error[HAL_RXDMA_ERR_TKIP_MIC]));
5812 	DP_PRINT_STATS("Invalid RBM = %d",
5813 			soc->stats.rx.err.invalid_rbm);
5814 	DP_PRINT_STATS("Invalid Vdev = %d",
5815 			soc->stats.rx.err.invalid_vdev);
5816 	DP_PRINT_STATS("Invalid Pdev = %d",
5817 			soc->stats.rx.err.invalid_pdev);
5818 	DP_PRINT_STATS("Invalid Peer = %d",
5819 			soc->stats.rx.err.rx_invalid_peer.num);
5820 	DP_PRINT_STATS("HAL Ring Access Fail = %d",
5821 			soc->stats.rx.err.hal_ring_access_fail);
5822 
5823 	for (i = 0; i < HAL_RXDMA_ERR_MAX; i++) {
5824 		index += qdf_snprint(&rxdma_error[index],
5825 				DP_RXDMA_ERR_LENGTH - index,
5826 				" %d", soc->stats.rx.err.rxdma_error[i]);
5827 	}
5828 	DP_PRINT_STATS("RXDMA Error (0-31):%s",
5829 			rxdma_error);
5830 
5831 	index = 0;
5832 	for (i = 0; i < HAL_REO_ERR_MAX; i++) {
5833 		index += qdf_snprint(&reo_error[index],
5834 				DP_REO_ERR_LENGTH - index,
5835 				" %d", soc->stats.rx.err.reo_error[i]);
5836 	}
5837 	DP_PRINT_STATS("REO Error (0-14):%s",
5838 			reo_error);
5839 }
5840 
5841 
5842 /**
5843  * dp_print_ring_stat_from_hal(): Print hal level ring stats
5844  * @soc: DP_SOC handle
5845  * @srng: DP_SRNG handle
5846  * @ring_name: SRNG name
5847  *
5848  * Return: void
5849  */
5850 static inline void
5851 dp_print_ring_stat_from_hal(struct dp_soc *soc,  struct dp_srng *srng,
5852 	char *ring_name)
5853 {
5854 	uint32_t tailp;
5855 	uint32_t headp;
5856 
5857 	if (srng->hal_srng != NULL) {
5858 		hal_api_get_tphp(soc->hal_soc, srng->hal_srng, &tailp, &headp);
5859 		DP_PRINT_STATS("%s : Head pointer = %d  Tail Pointer = %d\n",
5860 				ring_name, headp, tailp);
5861 	}
5862 }
5863 
5864 /**
5865  * dp_print_ring_stats(): Print tail and head pointer
5866  * @pdev: DP_PDEV handle
5867  *
5868  * Return:void
5869  */
5870 static inline void
5871 dp_print_ring_stats(struct dp_pdev *pdev)
5872 {
5873 	uint32_t i;
5874 	char ring_name[STR_MAXLEN + 1];
5875 	int mac_id;
5876 
5877 	dp_print_ring_stat_from_hal(pdev->soc,
5878 			&pdev->soc->reo_exception_ring,
5879 			"Reo Exception Ring");
5880 	dp_print_ring_stat_from_hal(pdev->soc,
5881 			&pdev->soc->reo_reinject_ring,
5882 			"Reo Inject Ring");
5883 	dp_print_ring_stat_from_hal(pdev->soc,
5884 			&pdev->soc->reo_cmd_ring,
5885 			"Reo Command Ring");
5886 	dp_print_ring_stat_from_hal(pdev->soc,
5887 			&pdev->soc->reo_status_ring,
5888 			"Reo Status Ring");
5889 	dp_print_ring_stat_from_hal(pdev->soc,
5890 			&pdev->soc->rx_rel_ring,
5891 			"Rx Release ring");
5892 	dp_print_ring_stat_from_hal(pdev->soc,
5893 			&pdev->soc->tcl_cmd_ring,
5894 			"Tcl command Ring");
5895 	dp_print_ring_stat_from_hal(pdev->soc,
5896 			&pdev->soc->tcl_status_ring,
5897 			"Tcl Status Ring");
5898 	dp_print_ring_stat_from_hal(pdev->soc,
5899 			&pdev->soc->wbm_desc_rel_ring,
5900 			"Wbm Desc Rel Ring");
5901 	for (i = 0; i < MAX_REO_DEST_RINGS; i++) {
5902 		snprintf(ring_name, STR_MAXLEN, "Reo Dest Ring %d", i);
5903 		dp_print_ring_stat_from_hal(pdev->soc,
5904 				&pdev->soc->reo_dest_ring[i],
5905 				ring_name);
5906 	}
5907 	for (i = 0; i < pdev->soc->num_tcl_data_rings; i++) {
5908 		snprintf(ring_name, STR_MAXLEN, "Tcl Data Ring %d", i);
5909 		dp_print_ring_stat_from_hal(pdev->soc,
5910 				&pdev->soc->tcl_data_ring[i],
5911 				ring_name);
5912 	}
5913 	for (i = 0; i < MAX_TCL_DATA_RINGS; i++) {
5914 		snprintf(ring_name, STR_MAXLEN, "Tx Comp Ring %d", i);
5915 		dp_print_ring_stat_from_hal(pdev->soc,
5916 				&pdev->soc->tx_comp_ring[i],
5917 				ring_name);
5918 	}
5919 	dp_print_ring_stat_from_hal(pdev->soc,
5920 			&pdev->rx_refill_buf_ring,
5921 			"Rx Refill Buf Ring");
5922 
5923 	dp_print_ring_stat_from_hal(pdev->soc,
5924 			&pdev->rx_refill_buf_ring2,
5925 			"Second Rx Refill Buf Ring");
5926 
5927 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
5928 		dp_print_ring_stat_from_hal(pdev->soc,
5929 				&pdev->rxdma_mon_buf_ring[mac_id],
5930 				"Rxdma Mon Buf Ring");
5931 		dp_print_ring_stat_from_hal(pdev->soc,
5932 				&pdev->rxdma_mon_dst_ring[mac_id],
5933 				"Rxdma Mon Dst Ring");
5934 		dp_print_ring_stat_from_hal(pdev->soc,
5935 				&pdev->rxdma_mon_status_ring[mac_id],
5936 				"Rxdma Mon Status Ring");
5937 		dp_print_ring_stat_from_hal(pdev->soc,
5938 				&pdev->rxdma_mon_desc_ring[mac_id],
5939 				"Rxdma mon desc Ring");
5940 	}
5941 
5942 	for (i = 0; i < NUM_RXDMA_RINGS_PER_PDEV; i++) {
5943 		snprintf(ring_name, STR_MAXLEN, "Rxdma err dst ring %d", i);
5944 		dp_print_ring_stat_from_hal(pdev->soc,
5945 			&pdev->rxdma_err_dst_ring[i],
5946 			ring_name);
5947 	}
5948 
5949 	for (i = 0; i < MAX_RX_MAC_RINGS; i++) {
5950 		snprintf(ring_name, STR_MAXLEN, "Rx mac buf ring %d", i);
5951 		dp_print_ring_stat_from_hal(pdev->soc,
5952 				&pdev->rx_mac_buf_ring[i],
5953 				ring_name);
5954 	}
5955 }
5956 
5957 /**
5958  * dp_txrx_host_stats_clr(): Reinitialize the txrx stats
5959  * @vdev: DP_VDEV handle
5960  *
5961  * Return:void
5962  */
5963 static inline void
5964 dp_txrx_host_stats_clr(struct dp_vdev *vdev)
5965 {
5966 	struct dp_peer *peer = NULL;
5967 	struct dp_soc *soc = (struct dp_soc *)vdev->pdev->soc;
5968 
5969 	DP_STATS_CLR(vdev->pdev);
5970 	DP_STATS_CLR(vdev->pdev->soc);
5971 	DP_STATS_CLR(vdev);
5972 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
5973 		if (!peer)
5974 			return;
5975 		DP_STATS_CLR(peer);
5976 
5977 		if (soc->cdp_soc.ol_ops->update_dp_stats) {
5978 			soc->cdp_soc.ol_ops->update_dp_stats(
5979 					vdev->pdev->ctrl_pdev,
5980 					&peer->stats,
5981 					peer->peer_ids[0],
5982 					UPDATE_PEER_STATS);
5983 		}
5984 
5985 	}
5986 
5987 	if (soc->cdp_soc.ol_ops->update_dp_stats)
5988 		soc->cdp_soc.ol_ops->update_dp_stats(vdev->pdev->ctrl_pdev,
5989 				&vdev->stats, (uint16_t)vdev->vdev_id,
5990 				UPDATE_VDEV_STATS);
5991 }
5992 
5993 /**
5994  * dp_print_common_rates_info(): Print common rate for tx or rx
5995  * @pkt_type_array: rate type array contains rate info
5996  *
5997  * Return:void
5998  */
5999 static inline void
6000 dp_print_common_rates_info(struct cdp_pkt_type *pkt_type_array)
6001 {
6002 	uint8_t mcs, pkt_type;
6003 
6004 	for (pkt_type = 0; pkt_type < DOT11_MAX; pkt_type++) {
6005 		for (mcs = 0; mcs < MAX_MCS; mcs++) {
6006 			if (!dp_rate_string[pkt_type][mcs].valid)
6007 				continue;
6008 
6009 			DP_PRINT_STATS("	%s = %d",
6010 				       dp_rate_string[pkt_type][mcs].mcs_type,
6011 				       pkt_type_array[pkt_type].mcs_count[mcs]);
6012 		}
6013 
6014 		DP_PRINT_STATS("\n");
6015 	}
6016 }
6017 
6018 /**
6019  * dp_print_rx_rates(): Print Rx rate stats
6020  * @vdev: DP_VDEV handle
6021  *
6022  * Return:void
6023  */
6024 static inline void
6025 dp_print_rx_rates(struct dp_vdev *vdev)
6026 {
6027 	struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;
6028 	uint8_t i;
6029 	uint8_t index = 0;
6030 	char nss[DP_NSS_LENGTH];
6031 
6032 	DP_PRINT_STATS("Rx Rate Info:\n");
6033 	dp_print_common_rates_info(pdev->stats.rx.pkt_type);
6034 
6035 
6036 	index = 0;
6037 	for (i = 0; i < SS_COUNT; i++) {
6038 		index += qdf_snprint(&nss[index], DP_NSS_LENGTH - index,
6039 				" %d", pdev->stats.rx.nss[i]);
6040 	}
6041 	DP_PRINT_STATS("NSS(1-8) = %s",
6042 			nss);
6043 
6044 	DP_PRINT_STATS("SGI ="
6045 			" 0.8us %d,"
6046 			" 0.4us %d,"
6047 			" 1.6us %d,"
6048 			" 3.2us %d,",
6049 			pdev->stats.rx.sgi_count[0],
6050 			pdev->stats.rx.sgi_count[1],
6051 			pdev->stats.rx.sgi_count[2],
6052 			pdev->stats.rx.sgi_count[3]);
6053 	DP_PRINT_STATS("BW Counts = 20MHZ %d, 40MHZ %d, 80MHZ %d, 160MHZ %d",
6054 			pdev->stats.rx.bw[0], pdev->stats.rx.bw[1],
6055 			pdev->stats.rx.bw[2], pdev->stats.rx.bw[3]);
6056 	DP_PRINT_STATS("Reception Type ="
6057 			" SU: %d,"
6058 			" MU_MIMO:%d,"
6059 			" MU_OFDMA:%d,"
6060 			" MU_OFDMA_MIMO:%d\n",
6061 			pdev->stats.rx.reception_type[0],
6062 			pdev->stats.rx.reception_type[1],
6063 			pdev->stats.rx.reception_type[2],
6064 			pdev->stats.rx.reception_type[3]);
6065 	DP_PRINT_STATS("Aggregation:\n");
6066 	DP_PRINT_STATS("Number of Msdu's Part of Ampdus = %d",
6067 			pdev->stats.rx.ampdu_cnt);
6068 	DP_PRINT_STATS("Number of Msdu's With No Mpdu Level Aggregation : %d",
6069 			pdev->stats.rx.non_ampdu_cnt);
6070 	DP_PRINT_STATS("Number of Msdu's Part of Amsdu: %d",
6071 			pdev->stats.rx.amsdu_cnt);
6072 	DP_PRINT_STATS("Number of Msdu's With No Msdu Level Aggregation: %d",
6073 			pdev->stats.rx.non_amsdu_cnt);
6074 }
6075 
6076 /**
6077  * dp_print_tx_rates(): Print tx rates
6078  * @vdev: DP_VDEV handle
6079  *
6080  * Return:void
6081  */
6082 static inline void
6083 dp_print_tx_rates(struct dp_vdev *vdev)
6084 {
6085 	struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;
6086 	uint8_t index;
6087 	char nss[DP_NSS_LENGTH];
6088 	int nss_index;
6089 
6090 	DP_PRINT_STATS("Tx Rate Info:\n");
6091 	dp_print_common_rates_info(pdev->stats.tx.pkt_type);
6092 
6093 	DP_PRINT_STATS("SGI ="
6094 			" 0.8us %d"
6095 			" 0.4us %d"
6096 			" 1.6us %d"
6097 			" 3.2us %d",
6098 			pdev->stats.tx.sgi_count[0],
6099 			pdev->stats.tx.sgi_count[1],
6100 			pdev->stats.tx.sgi_count[2],
6101 			pdev->stats.tx.sgi_count[3]);
6102 
6103 	DP_PRINT_STATS("BW Counts = 20MHZ %d, 40MHZ %d, 80MHZ %d, 160MHZ %d",
6104 			pdev->stats.tx.bw[0], pdev->stats.tx.bw[1],
6105 			pdev->stats.tx.bw[2], pdev->stats.tx.bw[3]);
6106 
6107 	index = 0;
6108 	for (nss_index = 0; nss_index < SS_COUNT; nss_index++) {
6109 		index += qdf_snprint(&nss[index], DP_NSS_LENGTH - index,
6110 				" %d", pdev->stats.tx.nss[nss_index]);
6111 	}
6112 
6113 	DP_PRINT_STATS("NSS(1-8) = %s", nss);
6114 	DP_PRINT_STATS("OFDMA = %d", pdev->stats.tx.ofdma);
6115 	DP_PRINT_STATS("STBC = %d", pdev->stats.tx.stbc);
6116 	DP_PRINT_STATS("LDPC = %d", pdev->stats.tx.ldpc);
6117 	DP_PRINT_STATS("Retries = %d", pdev->stats.tx.retries);
6118 	DP_PRINT_STATS("Last ack rssi = %d\n", pdev->stats.tx.last_ack_rssi);
6119 
6120 	DP_PRINT_STATS("Aggregation:\n");
6121 	DP_PRINT_STATS("Number of Msdu's Part of Amsdu = %d",
6122 			pdev->stats.tx.amsdu_cnt);
6123 	DP_PRINT_STATS("Number of Msdu's With No Msdu Level Aggregation = %d",
6124 			pdev->stats.tx.non_amsdu_cnt);
6125 }
6126 
6127 /**
6128  * dp_print_peer_stats(): print peer stats
6129  * @peer: DP_PEER handle
6130  *
6131  * return: void
6132  */
6133 static inline void dp_print_peer_stats(struct dp_peer *peer)
6134 {
6135 	uint8_t i;
6136 	uint32_t index;
6137 	char nss[DP_NSS_LENGTH];
6138 	DP_PRINT_STATS("Node Tx Stats:\n");
6139 	DP_PRINT_STATS("Total Packet Completions = %d",
6140 			peer->stats.tx.comp_pkt.num);
6141 	DP_PRINT_STATS("Total Bytes Completions = %llu",
6142 			peer->stats.tx.comp_pkt.bytes);
6143 	DP_PRINT_STATS("Success Packets = %d",
6144 			peer->stats.tx.tx_success.num);
6145 	DP_PRINT_STATS("Success Bytes = %llu",
6146 			peer->stats.tx.tx_success.bytes);
6147 	DP_PRINT_STATS("Unicast Success Packets = %d",
6148 			peer->stats.tx.ucast.num);
6149 	DP_PRINT_STATS("Unicast Success Bytes = %llu",
6150 			peer->stats.tx.ucast.bytes);
6151 	DP_PRINT_STATS("Multicast Success Packets = %d",
6152 			peer->stats.tx.mcast.num);
6153 	DP_PRINT_STATS("Multicast Success Bytes = %llu",
6154 			peer->stats.tx.mcast.bytes);
6155 	DP_PRINT_STATS("Broadcast Success Packets = %d",
6156 			peer->stats.tx.bcast.num);
6157 	DP_PRINT_STATS("Broadcast Success Bytes = %llu",
6158 			peer->stats.tx.bcast.bytes);
6159 	DP_PRINT_STATS("Packets Failed = %d",
6160 			peer->stats.tx.tx_failed);
6161 	DP_PRINT_STATS("Packets In OFDMA = %d",
6162 			peer->stats.tx.ofdma);
6163 	DP_PRINT_STATS("Packets In STBC = %d",
6164 			peer->stats.tx.stbc);
6165 	DP_PRINT_STATS("Packets In LDPC = %d",
6166 			peer->stats.tx.ldpc);
6167 	DP_PRINT_STATS("Packet Retries = %d",
6168 			peer->stats.tx.retries);
6169 	DP_PRINT_STATS("MSDU's Part of AMSDU = %d",
6170 			peer->stats.tx.amsdu_cnt);
6171 	DP_PRINT_STATS("Last Packet RSSI = %d",
6172 			peer->stats.tx.last_ack_rssi);
6173 	DP_PRINT_STATS("Dropped At FW: Removed = %d",
6174 			peer->stats.tx.dropped.fw_rem);
6175 	DP_PRINT_STATS("Dropped At FW: Removed transmitted = %d",
6176 			peer->stats.tx.dropped.fw_rem_tx);
6177 	DP_PRINT_STATS("Dropped At FW: Removed Untransmitted = %d",
6178 			peer->stats.tx.dropped.fw_rem_notx);
6179 	DP_PRINT_STATS("Dropped : Age Out = %d",
6180 			peer->stats.tx.dropped.age_out);
6181 	DP_PRINT_STATS("NAWDS : ");
6182 	DP_PRINT_STATS("	Nawds multicast Drop Tx Packet = %d",
6183 			peer->stats.tx.nawds_mcast_drop);
6184 	DP_PRINT_STATS("	Nawds multicast  Tx Packet Count = %d",
6185 			peer->stats.tx.nawds_mcast.num);
6186 	DP_PRINT_STATS("	Nawds multicast  Tx Packet Bytes = %llu",
6187 			peer->stats.tx.nawds_mcast.bytes);
6188 
6189 	DP_PRINT_STATS("Rate Info:");
6190 	dp_print_common_rates_info(peer->stats.tx.pkt_type);
6191 
6192 
6193 	DP_PRINT_STATS("SGI = "
6194 			" 0.8us %d"
6195 			" 0.4us %d"
6196 			" 1.6us %d"
6197 			" 3.2us %d",
6198 			peer->stats.tx.sgi_count[0],
6199 			peer->stats.tx.sgi_count[1],
6200 			peer->stats.tx.sgi_count[2],
6201 			peer->stats.tx.sgi_count[3]);
6202 	DP_PRINT_STATS("Excess Retries per AC ");
6203 	DP_PRINT_STATS("	 Best effort = %d",
6204 			peer->stats.tx.excess_retries_per_ac[0]);
6205 	DP_PRINT_STATS("	 Background= %d",
6206 			peer->stats.tx.excess_retries_per_ac[1]);
6207 	DP_PRINT_STATS("	 Video = %d",
6208 			peer->stats.tx.excess_retries_per_ac[2]);
6209 	DP_PRINT_STATS("	 Voice = %d",
6210 			peer->stats.tx.excess_retries_per_ac[3]);
6211 	DP_PRINT_STATS("BW Counts = 20MHZ %d 40MHZ %d 80MHZ %d 160MHZ %d\n",
6212 			peer->stats.tx.bw[2], peer->stats.tx.bw[3],
6213 			peer->stats.tx.bw[4], peer->stats.tx.bw[5]);
6214 
6215 	index = 0;
6216 	for (i = 0; i < SS_COUNT; i++) {
6217 		index += qdf_snprint(&nss[index], DP_NSS_LENGTH - index,
6218 				" %d", peer->stats.tx.nss[i]);
6219 	}
6220 	DP_PRINT_STATS("NSS(1-8) = %s",
6221 			nss);
6222 
6223 	DP_PRINT_STATS("Aggregation:");
6224 	DP_PRINT_STATS("	Number of Msdu's Part of Amsdu = %d",
6225 			peer->stats.tx.amsdu_cnt);
6226 	DP_PRINT_STATS("	Number of Msdu's With No Msdu Level Aggregation = %d\n",
6227 			peer->stats.tx.non_amsdu_cnt);
6228 
6229 	DP_PRINT_STATS("Bytes and Packets transmitted  in last one sec:");
6230 	DP_PRINT_STATS("	Bytes transmitted in last sec: %d",
6231 		       peer->stats.tx.tx_byte_rate);
6232 	DP_PRINT_STATS("	Data transmitted in last sec: %d",
6233 		       peer->stats.tx.tx_data_rate);
6234 
6235 	DP_PRINT_STATS("Node Rx Stats:");
6236 	DP_PRINT_STATS("Packets Sent To Stack = %d",
6237 			peer->stats.rx.to_stack.num);
6238 	DP_PRINT_STATS("Bytes Sent To Stack = %llu",
6239 			peer->stats.rx.to_stack.bytes);
6240 	for (i = 0; i <  CDP_MAX_RX_RINGS; i++) {
6241 		DP_PRINT_STATS("Ring Id = %d", i);
6242 		DP_PRINT_STATS("	Packets Received = %d",
6243 				peer->stats.rx.rcvd_reo[i].num);
6244 		DP_PRINT_STATS("	Bytes Received = %llu",
6245 				peer->stats.rx.rcvd_reo[i].bytes);
6246 	}
6247 	DP_PRINT_STATS("Multicast Packets Received = %d",
6248 			peer->stats.rx.multicast.num);
6249 	DP_PRINT_STATS("Multicast Bytes Received = %llu",
6250 			peer->stats.rx.multicast.bytes);
6251 	DP_PRINT_STATS("Broadcast Packets Received = %d",
6252 			peer->stats.rx.bcast.num);
6253 	DP_PRINT_STATS("Broadcast Bytes Received = %llu",
6254 			peer->stats.rx.bcast.bytes);
6255 	DP_PRINT_STATS("Intra BSS Packets Received = %d",
6256 			peer->stats.rx.intra_bss.pkts.num);
6257 	DP_PRINT_STATS("Intra BSS Bytes Received = %llu",
6258 			peer->stats.rx.intra_bss.pkts.bytes);
6259 	DP_PRINT_STATS("Raw Packets Received = %d",
6260 			peer->stats.rx.raw.num);
6261 	DP_PRINT_STATS("Raw Bytes Received = %llu",
6262 			peer->stats.rx.raw.bytes);
6263 	DP_PRINT_STATS("Errors: MIC Errors = %d",
6264 			peer->stats.rx.err.mic_err);
6265 	DP_PRINT_STATS("Errors: Decryption Errors = %d",
6266 			peer->stats.rx.err.decrypt_err);
6267 	DP_PRINT_STATS("Msdu's Received With No Mpdu Level Aggregation = %d",
6268 			peer->stats.rx.non_ampdu_cnt);
6269 	DP_PRINT_STATS("Msdu's Received As Part of Ampdu = %d",
6270 			peer->stats.rx.ampdu_cnt);
6271 	DP_PRINT_STATS("Msdu's Received Not Part of Amsdu's = %d",
6272 			peer->stats.rx.non_amsdu_cnt);
6273 	DP_PRINT_STATS("MSDUs Received As Part of Amsdu = %d",
6274 			peer->stats.rx.amsdu_cnt);
6275 	DP_PRINT_STATS("NAWDS : ");
6276 	DP_PRINT_STATS("	Nawds multicast Drop Rx Packet = %d",
6277 			peer->stats.rx.nawds_mcast_drop);
6278 	DP_PRINT_STATS("SGI ="
6279 			" 0.8us %d"
6280 			" 0.4us %d"
6281 			" 1.6us %d"
6282 			" 3.2us %d",
6283 			peer->stats.rx.sgi_count[0],
6284 			peer->stats.rx.sgi_count[1],
6285 			peer->stats.rx.sgi_count[2],
6286 			peer->stats.rx.sgi_count[3]);
6287 	DP_PRINT_STATS("BW Counts = 20MHZ %d 40MHZ %d 80MHZ %d 160MHZ %d",
6288 			peer->stats.rx.bw[0], peer->stats.rx.bw[1],
6289 			peer->stats.rx.bw[2], peer->stats.rx.bw[3]);
6290 	DP_PRINT_STATS("Reception Type ="
6291 			" SU %d,"
6292 			" MU_MIMO %d,"
6293 			" MU_OFDMA %d,"
6294 			" MU_OFDMA_MIMO %d",
6295 			peer->stats.rx.reception_type[0],
6296 			peer->stats.rx.reception_type[1],
6297 			peer->stats.rx.reception_type[2],
6298 			peer->stats.rx.reception_type[3]);
6299 
6300 	dp_print_common_rates_info(peer->stats.rx.pkt_type);
6301 
6302 	index = 0;
6303 	for (i = 0; i < SS_COUNT; i++) {
6304 		index += qdf_snprint(&nss[index], DP_NSS_LENGTH - index,
6305 				" %d", peer->stats.rx.nss[i]);
6306 	}
6307 	DP_PRINT_STATS("NSS(1-8) = %s",
6308 			nss);
6309 
6310 	DP_PRINT_STATS("Aggregation:");
6311 	DP_PRINT_STATS("	Msdu's Part of Ampdu = %d",
6312 			peer->stats.rx.ampdu_cnt);
6313 	DP_PRINT_STATS("	Msdu's With No Mpdu Level Aggregation = %d",
6314 			peer->stats.rx.non_ampdu_cnt);
6315 	DP_PRINT_STATS("	Msdu's Part of Amsdu = %d",
6316 			peer->stats.rx.amsdu_cnt);
6317 	DP_PRINT_STATS("	Msdu's With No Msdu Level Aggregation = %d",
6318 			peer->stats.rx.non_amsdu_cnt);
6319 
6320 	DP_PRINT_STATS("Bytes and Packets received in last one sec:");
6321 	DP_PRINT_STATS("	Bytes received in last sec: %d",
6322 		       peer->stats.rx.rx_byte_rate);
6323 	DP_PRINT_STATS("	Data received in last sec: %d",
6324 		       peer->stats.rx.rx_data_rate);
6325 }
6326 
6327 /*
6328  * dp_get_host_peer_stats()- function to print peer stats
6329  * @pdev_handle: DP_PDEV handle
6330  * @mac_addr: mac address of the peer
6331  *
6332  * Return: void
6333  */
6334 static void
6335 dp_get_host_peer_stats(struct cdp_pdev *pdev_handle, char *mac_addr)
6336 {
6337 	struct dp_peer *peer;
6338 	uint8_t local_id;
6339 
6340 	peer = (struct dp_peer *)dp_find_peer_by_addr(pdev_handle, mac_addr,
6341 			&local_id);
6342 
6343 	if (!peer) {
6344 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
6345 			  "%s: Invalid peer\n", __func__);
6346 		return;
6347 	}
6348 
6349 	dp_print_peer_stats(peer);
6350 	dp_peer_rxtid_stats(peer, dp_rx_tid_stats_cb, NULL);
6351 }
6352 
6353 /**
6354  * dp_print_host_stats()- Function to print the stats aggregated at host
6355  * @vdev_handle: DP_VDEV handle
6356  * @req: stats request holding the host stats type
6357  *
6358  * Available Stat types
6359  * TXRX_CLEAR_STATS  : Clear the stats
6360  * TXRX_RX_RATE_STATS: Print Rx Rate Info
6361  * TXRX_TX_RATE_STATS: Print Tx Rate Info
6362  * TXRX_TX_HOST_STATS: Print Tx Stats
6363  * TXRX_RX_HOST_STATS: Print Rx Stats
6364  * TXRX_AST_STATS: Print AST Stats
6365  * TXRX_SRNG_PTR_STATS: Print SRNG ring pointer stats
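 * TXRX_RX_MON_STATS: Print monitor mode Rx stats
 * TXRX_REO_QUEUE_STATS: Print peer stats and per-TID REO queue stats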
6366  *
6367  * Return: 0 on success, print error message in case of failure
6368  */
6369 static int
6370 dp_print_host_stats(struct cdp_vdev *vdev_handle,
6371 		    struct cdp_txrx_stats_req *req)
6372 {
6373 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
6374 	struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;
6375 	enum cdp_host_txrx_stats type =
6376 			dp_stats_mapping_table[req->stats][STATS_HOST];
6377 
6378 	dp_aggregate_pdev_stats(pdev);
6379 
6380 	switch (type) {
6381 	case TXRX_CLEAR_STATS:
6382 		dp_txrx_host_stats_clr(vdev);
6383 		break;
6384 	case TXRX_RX_RATE_STATS:
6385 		dp_print_rx_rates(vdev);
6386 		break;
6387 	case TXRX_TX_RATE_STATS:
6388 		dp_print_tx_rates(vdev);
6389 		break;
6390 	case TXRX_TX_HOST_STATS:
6391 		dp_print_pdev_tx_stats(pdev);
6392 		dp_print_soc_tx_stats(pdev->soc);
6393 		break;
6394 	case TXRX_RX_HOST_STATS:
6395 		dp_print_pdev_rx_stats(pdev);
6396 		dp_print_soc_rx_stats(pdev->soc);
6397 		break;
6398 	case TXRX_AST_STATS:
6399 		dp_print_ast_stats(pdev->soc);
6400 		dp_print_peer_table(vdev);
6401 		break;
6402 	case TXRX_SRNG_PTR_STATS:
6403 		dp_print_ring_stats(pdev);
6404 		break;
6405 	case TXRX_RX_MON_STATS:
6406 		dp_print_pdev_rx_mon_stats(pdev);
6407 		break;
6408 	case TXRX_REO_QUEUE_STATS:
6409 		dp_get_host_peer_stats((struct cdp_pdev *)pdev, req->peer_addr);
6410 		break;
6411 	default:
6412 		DP_TRACE(FATAL, "Wrong Input For TxRx Host Stats");
6413 		break;
6414 	}
6415 	return 0;
6416 }
6417 
6418 /*
6419  * dp_ppdu_ring_reset()- Reset PPDU Stats ring
6420  * @pdev: DP_PDEV handle
6421  *
6422  * Return: void
6423  */
6424 static void
6425 dp_ppdu_ring_reset(struct dp_pdev *pdev)
6426 {
6427 	struct htt_rx_ring_tlv_filter htt_tlv_filter;
6428 	int mac_id;
6429 
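	/* A zeroed TLV filter disables every monitor status ring TLV */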
6430 	qdf_mem_set(&(htt_tlv_filter), sizeof(htt_tlv_filter), 0x0);
6431 
6432 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
6433 		int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id,
6434 							pdev->pdev_id);
6435 
6436 		htt_h2t_rx_ring_cfg(pdev->soc->htt_handle, mac_for_pdev,
6437 			pdev->rxdma_mon_status_ring[mac_id].hal_srng,
6438 			RXDMA_MONITOR_STATUS, RX_BUFFER_SIZE, &htt_tlv_filter);
6439 	}
6440 }
6441 
6442 /*
6443  * dp_ppdu_ring_cfg()- Configure PPDU Stats ring
6444  * @pdev: DP_PDEV handle
6445  *
6446  * Return: void
6447  */
6448 static void
6449 dp_ppdu_ring_cfg(struct dp_pdev *pdev)
6450 {
6451 	struct htt_rx_ring_tlv_filter htt_tlv_filter = {0};
6452 	int mac_id;
6453 
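	/*
	 * Enable only the PPDU-level TLVs needed for PPDU stats on the
	 * monitor status ring; mcopy mode additionally needs the packet
	 * header TLV and the monitor-other (MO) filter.
	 */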
6454 	htt_tlv_filter.mpdu_start = 1;
6455 	htt_tlv_filter.msdu_start = 0;
6456 	htt_tlv_filter.packet = 0;
6457 	htt_tlv_filter.msdu_end = 0;
6458 	htt_tlv_filter.mpdu_end = 0;
6459 	htt_tlv_filter.attention = 0;
6460 	htt_tlv_filter.ppdu_start = 1;
6461 	htt_tlv_filter.ppdu_end = 1;
6462 	htt_tlv_filter.ppdu_end_user_stats = 1;
6463 	htt_tlv_filter.ppdu_end_user_stats_ext = 1;
6464 	htt_tlv_filter.ppdu_end_status_done = 1;
6465 	htt_tlv_filter.enable_fp = 1;
6466 	htt_tlv_filter.enable_md = 0;
6467 	if (pdev->mcopy_mode) {
6468 		htt_tlv_filter.packet_header = 1;
6469 		htt_tlv_filter.enable_mo = 1;
6470 	}
6471 	htt_tlv_filter.fp_mgmt_filter = FILTER_MGMT_ALL;
6472 	htt_tlv_filter.fp_ctrl_filter = FILTER_CTRL_ALL;
6473 	htt_tlv_filter.fp_data_filter = FILTER_DATA_ALL;
6474 	htt_tlv_filter.mo_mgmt_filter = FILTER_MGMT_ALL;
6475 	htt_tlv_filter.mo_ctrl_filter = FILTER_CTRL_ALL;
6476 	htt_tlv_filter.mo_data_filter = FILTER_DATA_ALL;
6477 
6478 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
6479 		int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id,
6480 						pdev->pdev_id);
6481 
6482 		htt_h2t_rx_ring_cfg(pdev->soc->htt_handle, mac_for_pdev,
6483 			pdev->rxdma_mon_status_ring[mac_id].hal_srng,
6484 			RXDMA_MONITOR_STATUS, RX_BUFFER_SIZE, &htt_tlv_filter);
6485 	}
6486 }
6487 
6488 /*
6489  * is_ppdu_txrx_capture_enabled() - API to check whether pktlog, tx_sniffer
6490  *                              and mcopy modes are all disabled.
6491  * @dp_pdev: dp pdev handle.
6492  *
6493  * Return: bool
6494  */
6495 static inline bool is_ppdu_txrx_capture_enabled(struct dp_pdev *pdev)
6496 {
6497 	if (!pdev->pktlog_ppdu_stats && !pdev->tx_sniffer_enable &&
6498 	    !pdev->mcopy_mode)
6499 		return true;
6500 	else
6501 		return false;
6502 }
6503 
6504 /*
6505  * dp_set_bpr_enable() - API to enable/disable bpr feature
6506  * @pdev_handle: DP_PDEV handle.
6507  * @val: Provided value.
6508  *
6509  * Return: void
6510  */
6511 static void
6512 dp_set_bpr_enable(struct cdp_pdev *pdev_handle, int val)
6513 {
6514 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
6515 
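	/*
	 * The FW PPDU stats subscription is shared by BPR, enhanced stats,
	 * tx sniffer, mcopy and pktlog; send the stats mask that matches
	 * the features which remain active after this change.
	 */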
6516 	switch (val) {
6517 	case CDP_BPR_DISABLE:
6518 		pdev->bpr_enable = CDP_BPR_DISABLE;
6519 		if (!pdev->pktlog_ppdu_stats && !pdev->enhanced_stats_en &&
6520 		    !pdev->tx_sniffer_enable && !pdev->mcopy_mode) {
6521 			dp_h2t_cfg_stats_msg_send(pdev, 0, pdev->pdev_id);
6522 		} else if (pdev->enhanced_stats_en &&
6523 			   !pdev->tx_sniffer_enable && !pdev->mcopy_mode &&
6524 			   !pdev->pktlog_ppdu_stats) {
6525 			dp_h2t_cfg_stats_msg_send(pdev,
6526 						  DP_PPDU_STATS_CFG_ENH_STATS,
6527 						  pdev->pdev_id);
6528 		}
6529 		break;
6530 	case CDP_BPR_ENABLE:
6531 		pdev->bpr_enable = CDP_BPR_ENABLE;
6532 		if (!pdev->enhanced_stats_en && !pdev->tx_sniffer_enable &&
6533 		    !pdev->mcopy_mode && !pdev->pktlog_ppdu_stats) {
6534 			dp_h2t_cfg_stats_msg_send(pdev,
6535 						  DP_PPDU_STATS_CFG_BPR,
6536 						  pdev->pdev_id);
6537 		} else if (pdev->enhanced_stats_en &&
6538 			   !pdev->tx_sniffer_enable && !pdev->mcopy_mode &&
6539 			   !pdev->pktlog_ppdu_stats) {
6540 			dp_h2t_cfg_stats_msg_send(pdev,
6541 						  DP_PPDU_STATS_CFG_BPR_ENH,
6542 						  pdev->pdev_id);
6543 		} else if (pdev->pktlog_ppdu_stats) {
6544 			dp_h2t_cfg_stats_msg_send(pdev,
6545 						  DP_PPDU_STATS_CFG_BPR_PKTLOG,
6546 						  pdev->pdev_id);
6547 		}
6548 		break;
6549 	default:
6550 		break;
6551 	}
6552 }
6553 
6554 /*
6555  * dp_config_debug_sniffer()- API to enable/disable debug sniffer
6556  * @pdev_handle: DP_PDEV handle
6557  * @val: user provided value
6558  *
6559  * Return: void
6560  */
6561 static void
6562 dp_config_debug_sniffer(struct cdp_pdev *pdev_handle, int val)
6563 {
6564 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
6565 
6566 	switch (val) {
6567 	case 0:
6568 		pdev->tx_sniffer_enable = 0;
6569 		pdev->mcopy_mode = 0;
6570 
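		/*
		 * Sniffer/mcopy is now off: fall back to the stats mask
		 * needed by whichever of enhanced stats, BPR or pktlog is
		 * still enabled, or disable PPDU stats altogether.
		 */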
6571 		if (!pdev->pktlog_ppdu_stats && !pdev->enhanced_stats_en &&
6572 		    !pdev->bpr_enable) {
6573 			dp_h2t_cfg_stats_msg_send(pdev, 0, pdev->pdev_id);
6574 			dp_ppdu_ring_reset(pdev);
6575 		} else if (pdev->enhanced_stats_en && !pdev->bpr_enable) {
6576 			dp_h2t_cfg_stats_msg_send(pdev,
6577 				DP_PPDU_STATS_CFG_ENH_STATS, pdev->pdev_id);
6578 		} else if (!pdev->enhanced_stats_en && pdev->bpr_enable) {
6579 			dp_h2t_cfg_stats_msg_send(pdev,
6580 						  DP_PPDU_STATS_CFG_BPR_ENH,
6581 						  pdev->pdev_id);
6582 		} else {
6583 			dp_h2t_cfg_stats_msg_send(pdev,
6584 						  DP_PPDU_STATS_CFG_BPR,
6585 						  pdev->pdev_id);
6586 		}
6587 		break;
6588 
6589 	case 1:
6590 		pdev->tx_sniffer_enable = 1;
6591 		pdev->mcopy_mode = 0;
6592 
6593 		if (!pdev->pktlog_ppdu_stats)
6594 			dp_h2t_cfg_stats_msg_send(pdev,
6595 				DP_PPDU_STATS_CFG_SNIFFER, pdev->pdev_id);
6596 		break;
6597 	case 2:
6598 		pdev->mcopy_mode = 1;
6599 		pdev->tx_sniffer_enable = 0;
6600 		dp_ppdu_ring_cfg(pdev);
6601 
6602 		if (!pdev->pktlog_ppdu_stats)
6603 			dp_h2t_cfg_stats_msg_send(pdev,
6604 				DP_PPDU_STATS_CFG_SNIFFER, pdev->pdev_id);
6605 		break;
6606 	default:
6607 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
6608 			"Invalid value");
6609 		break;
6610 	}
6611 }
6612 
6613 /*
6614  * dp_enable_enhanced_stats()- API to enable enhanced statistics
6615  * @pdev_handle: DP_PDEV handle
6616  *
6617  * Return: void
6618  */
6619 static void
6620 dp_enable_enhanced_stats(struct cdp_pdev *pdev_handle)
6621 {
6622 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
6623 
6624 	if (pdev->enhanced_stats_en == 0)
6625 		dp_cal_client_timer_start(pdev->cal_client_ctx);
6626 
6627 	pdev->enhanced_stats_en = 1;
6628 
6629 	if (!pdev->mcopy_mode && !pdev->neighbour_peers_added)
6630 		dp_ppdu_ring_cfg(pdev);
6631 
6632 	if (is_ppdu_txrx_capture_enabled(pdev) && !pdev->bpr_enable) {
6633 		dp_h2t_cfg_stats_msg_send(pdev, DP_PPDU_STATS_CFG_ENH_STATS, pdev->pdev_id);
6634 	} else if (is_ppdu_txrx_capture_enabled(pdev) && pdev->bpr_enable) {
6635 		dp_h2t_cfg_stats_msg_send(pdev,
6636 					  DP_PPDU_STATS_CFG_BPR_ENH,
6637 					  pdev->pdev_id);
6638 	}
6639 }
6640 
6641 /*
6642  * dp_disable_enhanced_stats()- API to disable enhanced statistics
6643  * @pdev_handle: DP_PDEV handle
6644  *
6645  * Return: void
6646  */
6647 static void
6648 dp_disable_enhanced_stats(struct cdp_pdev *pdev_handle)
6649 {
6650 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
6651 
6652 	if (pdev->enhanced_stats_en == 1)
6653 		dp_cal_client_timer_stop(pdev->cal_client_ctx);
6654 
6655 	pdev->enhanced_stats_en = 0;
6656 
6657 	if (is_ppdu_txrx_capture_enabled(pdev) && !pdev->bpr_enable) {
6658 		dp_h2t_cfg_stats_msg_send(pdev, 0, pdev->pdev_id);
6659 	} else if (is_ppdu_txrx_capture_enabled(pdev) && pdev->bpr_enable) {
6660 		dp_h2t_cfg_stats_msg_send(pdev,
6661 					  DP_PPDU_STATS_CFG_BPR,
6662 					  pdev->pdev_id);
6663 	}
6664 
6665 	if (!pdev->mcopy_mode && !pdev->neighbour_peers_added)
6666 		dp_ppdu_ring_reset(pdev);
6667 }
6668 
6669 /*
6670  * dp_get_fw_peer_stats()- function to print peer stats
6671  * @pdev_handle: DP_PDEV handle
6672  * @mac_addr: mac address of the peer
6673  * @cap: Type of htt stats requested
6674  *
6675  * Currently supporting only MAC ID based requests
6676  *	1: HTT_PEER_STATS_REQ_MODE_NO_QUERY
6677  *	2: HTT_PEER_STATS_REQ_MODE_QUERY_TQM
6678  *	3: HTT_PEER_STATS_REQ_MODE_FLUSH_TQM
6679  *
6680  * Return: void
6681  */
6682 static void
6683 dp_get_fw_peer_stats(struct cdp_pdev *pdev_handle, uint8_t *mac_addr,
6684 		uint32_t cap)
6685 {
6686 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
6687 	int i;
6688 	uint32_t config_param0 = 0;
6689 	uint32_t config_param1 = 0;
6690 	uint32_t config_param2 = 0;
6691 	uint32_t config_param3 = 0;
6692 
6693 	HTT_DBG_EXT_STATS_PEER_INFO_IS_MAC_ADDR_SET(config_param0, 1);
6694 	config_param0 |= (1 << (cap + 1));
6695 
6696 	for (i = 0; i < HTT_PEER_STATS_MAX_TLV; i++) {
6697 		config_param1 |= (1 << i);
6698 	}
6699 
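	/*
	 * Pack the peer MAC address for the FW request: bytes 0-3 go into
	 * config_param2 and bytes 4-5 into config_param3, LSB first.
	 */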
6700 	config_param2 |= (mac_addr[0] & 0x000000ff);
6701 	config_param2 |= ((mac_addr[1] << 8) & 0x0000ff00);
6702 	config_param2 |= ((mac_addr[2] << 16) & 0x00ff0000);
6703 	config_param2 |= ((mac_addr[3] << 24) & 0xff000000);
6704 
6705 	config_param3 |= (mac_addr[4] & 0x000000ff);
6706 	config_param3 |= ((mac_addr[5] << 8) & 0x0000ff00);
6707 
6708 	dp_h2t_ext_stats_msg_send(pdev, HTT_DBG_EXT_STATS_PEER_INFO,
6709 			config_param0, config_param1, config_param2,
6710 			config_param3, 0, 0, 0);
6711 
6712 }
6713 
6714 /* This struct definition will be removed from here
6715  * once it gets added in FW headers */
6716 struct httstats_cmd_req {
6717     uint32_t    config_param0;
6718     uint32_t    config_param1;
6719     uint32_t    config_param2;
6720     uint32_t    config_param3;
6721     int cookie;
6722     u_int8_t    stats_id;
6723 };
6724 
6725 /*
6726  * dp_get_htt_stats: function to process the httstats request
6727  * @pdev_handle: DP pdev handle
6728  * @data: pointer to request data
6729  * @data_len: length for request data
6730  *
6731  * return: void
6732  */
6733 static void
6734 dp_get_htt_stats(struct cdp_pdev *pdev_handle, void *data, uint32_t data_len)
6735 {
6736 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
6737 	struct httstats_cmd_req *req = (struct httstats_cmd_req *)data;
6738 
6739 	QDF_ASSERT(data_len == sizeof(struct httstats_cmd_req));
6740 	dp_h2t_ext_stats_msg_send(pdev, req->stats_id,
6741 				req->config_param0, req->config_param1,
6742 				req->config_param2, req->config_param3,
6743 				req->cookie, 0, 0);
6744 }
6745 
6746 /*
6747  * dp_set_pdev_param: function to set parameters in pdev
6748  * @pdev_handle: DP pdev handle
6749  * @param: parameter type to be set
6750  * @val: value of parameter to be set
6751  *
6752  * return: void
6753  */
6754 static void dp_set_pdev_param(struct cdp_pdev *pdev_handle,
6755 		enum cdp_pdev_param_type param, uint8_t val)
6756 {
6757 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
6758 	switch (param) {
6759 	case CDP_CONFIG_DEBUG_SNIFFER:
6760 		dp_config_debug_sniffer(pdev_handle, val);
6761 		break;
6762 	case CDP_CONFIG_BPR_ENABLE:
6763 		dp_set_bpr_enable(pdev_handle, val);
6764 		break;
6765 	case CDP_CONFIG_PRIMARY_RADIO:
6766 		pdev->is_primary = val;
6767 		break;
6768 	default:
6769 		break;
6770 	}
6771 }
6772 
6773 /*
6774  * dp_set_vdev_param: function to set parameters in vdev
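 * @vdev_handle: DP vdev handle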
6775  * @param: parameter type to be set
6776  * @val: value of parameter to be set
6777  *
6778  * return: void
6779  */
6780 static void dp_set_vdev_param(struct cdp_vdev *vdev_handle,
6781 		enum cdp_vdev_param_type param, uint32_t val)
6782 {
6783 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
6784 	switch (param) {
6785 	case CDP_ENABLE_WDS:
6786 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
6787 			  "wds_enable %d for vdev(%p) id(%d)\n",
6788 			  val, vdev, vdev->vdev_id);
6789 		vdev->wds_enabled = val;
6790 		break;
6791 	case CDP_ENABLE_NAWDS:
6792 		vdev->nawds_enabled = val;
6793 		break;
6794 	case CDP_ENABLE_MCAST_EN:
6795 		vdev->mcast_enhancement_en = val;
6796 		break;
6797 	case CDP_ENABLE_PROXYSTA:
6798 		vdev->proxysta_vdev = val;
6799 		break;
6800 	case CDP_UPDATE_TDLS_FLAGS:
6801 		vdev->tdls_link_connected = val;
6802 		break;
6803 	case CDP_CFG_WDS_AGING_TIMER:
6804 		if (val == 0)
6805 			qdf_timer_stop(&vdev->pdev->soc->wds_aging_timer);
6806 		else if (val != vdev->wds_aging_timer_val)
6807 			qdf_timer_mod(&vdev->pdev->soc->wds_aging_timer, val);
6808 
6809 		vdev->wds_aging_timer_val = val;
6810 		break;
6811 	case CDP_ENABLE_AP_BRIDGE:
6812 		if (wlan_op_mode_sta != vdev->opmode)
6813 			vdev->ap_bridge_enabled = val;
6814 		else
6815 			vdev->ap_bridge_enabled = false;
6816 		break;
6817 	case CDP_ENABLE_CIPHER:
6818 		vdev->sec_type = val;
6819 		break;
6820 	case CDP_ENABLE_QWRAP_ISOLATION:
6821 		vdev->isolation_vdev = val;
6822 		break;
6823 	default:
6824 		break;
6825 	}
6826 
6827 	dp_tx_vdev_update_search_flags(vdev);
6828 }
6829 
6830 /**
6831  * dp_peer_set_nawds: set nawds bit in peer
6832  * @peer_handle: pointer to peer
6833  * @value: enable/disable nawds
6834  *
6835  * return: void
6836  */
6837 static void dp_peer_set_nawds(struct cdp_peer *peer_handle, uint8_t value)
6838 {
6839 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
6840 	peer->nawds_enabled = value;
6841 }
6842 
6843 /*
6844  * dp_set_vdev_dscp_tid_map_wifi3(): Update Map ID selected for particular vdev
6845  * @vdev_handle: DP_VDEV handle
6846  * @map_id: ID of map that needs to be updated
6847  *
6848  * Return: void
6849  */
6850 static void dp_set_vdev_dscp_tid_map_wifi3(struct cdp_vdev *vdev_handle,
6851 		uint8_t map_id)
6852 {
6853 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
6854 	vdev->dscp_tid_map_id = map_id;
6855 	return;
6856 }
6857 
6858 /* dp_txrx_get_peer_stats - will return cdp_peer_stats
6859  * @peer_handle: DP_PEER handle
6860  *
6861  * return : cdp_peer_stats pointer
6862  */
6863 static struct cdp_peer_stats*
6864 		dp_txrx_get_peer_stats(struct cdp_peer *peer_handle)
6865 {
6866 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
6867 
6868 	qdf_assert(peer);
6869 
6870 	return &peer->stats;
6871 }
6872 
6873 /* dp_txrx_reset_peer_stats - reset cdp_peer_stats for particular peer
6874  * @peer_handle: DP_PEER handle
6875  *
6876  * return : void
6877  */
6878 static void dp_txrx_reset_peer_stats(struct cdp_peer *peer_handle)
6879 {
6880 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
6881 
6882 	qdf_assert(peer);
6883 
6884 	qdf_mem_set(&peer->stats, sizeof(peer->stats), 0);
6885 }
6886 
6887 /* dp_txrx_get_vdev_stats - Update buffer with cdp_vdev_stats
6888  * @vdev_handle: DP_VDEV handle
6889  * @buf: buffer for vdev stats
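 * @is_aggregate: if true, aggregate peer stats into the vdev stats; if false,
 *		  copy only the vdev's own stats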
6890  *
6891  * return : int
6892  */
6893 static int  dp_txrx_get_vdev_stats(struct cdp_vdev *vdev_handle, void *buf,
6894 				   bool is_aggregate)
6895 {
6896 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
6897 	struct cdp_vdev_stats *vdev_stats = (struct cdp_vdev_stats *)buf;
6898 
6899 	if (is_aggregate)
6900 		dp_aggregate_vdev_stats(vdev, buf);
6901 	else
6902 		qdf_mem_copy(vdev_stats, &vdev->stats, sizeof(vdev->stats));
6903 
6904 	return 0;
6905 }
6906 
6907 /*
6908  * dp_txrx_stats_publish(): publish pdev stats into a buffer
6909  * @pdev_handle: DP_PDEV handle
6910  * @buf: to hold pdev_stats
6911  *
6912  * Return: int
6913  */
6914 static int
6915 dp_txrx_stats_publish(struct cdp_pdev *pdev_handle, void *buf)
6916 {
6917 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
6918 	struct cdp_pdev_stats *buffer = (struct cdp_pdev_stats *) buf;
6919 	struct cdp_txrx_stats_req req = {0,};
6920 
6921 	dp_aggregate_pdev_stats(pdev);
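	/*
	 * Request the FW PDEV TX and RX HTT stats and give the HTT response
	 * path DP_MAX_SLEEP_TIME ms each to update pdev->stats before the
	 * stats are copied out to the caller's buffer.
	 */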
6922 	req.stats = HTT_DBG_EXT_STATS_PDEV_TX;
6923 	req.cookie_val = 1;
6924 	dp_h2t_ext_stats_msg_send(pdev, req.stats, req.param0,
6925 				req.param1, req.param2, req.param3, 0,
6926 				req.cookie_val, 0);
6927 
6928 	msleep(DP_MAX_SLEEP_TIME);
6929 
6930 	req.stats = HTT_DBG_EXT_STATS_PDEV_RX;
6931 	req.cookie_val = 1;
6932 	dp_h2t_ext_stats_msg_send(pdev, req.stats, req.param0,
6933 				req.param1, req.param2, req.param3, 0,
6934 				req.cookie_val, 0);
6935 
6936 	msleep(DP_MAX_SLEEP_TIME);
6937 	qdf_mem_copy(buffer, &pdev->stats, sizeof(pdev->stats));
6938 
6939 	return TXRX_STATS_LEVEL;
6940 }
6941 
6942 /**
6943  * dp_set_pdev_dscp_tid_map_wifi3(): update dscp tid map in pdev
6944  * @pdev_handle: DP_PDEV handle
6945  * @map_id: ID of map that needs to be updated
6946  * @tos: index value in map
6947  * @tid: tid value passed by the user
6948  *
6949  * Return: void
6950  */
6951 static void dp_set_pdev_dscp_tid_map_wifi3(struct cdp_pdev *pdev_handle,
6952 		uint8_t map_id, uint8_t tos, uint8_t tid)
6953 {
6954 	uint8_t dscp;
6955 	struct dp_pdev *pdev = (struct dp_pdev *) pdev_handle;
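	/* DSCP is carried in the upper six bits of the IP ToS/traffic-class byte */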
6956 	dscp = (tos >> DP_IP_DSCP_SHIFT) & DP_IP_DSCP_MASK;
6957 	pdev->dscp_tid_map[map_id][dscp] = tid;
6958 	if (map_id < HAL_MAX_HW_DSCP_TID_MAPS)
6959 		hal_tx_update_dscp_tid(pdev->soc->hal_soc, tid,
6960 			map_id, dscp);
6961 	return;
6962 }
6963 
6964 /**
6965  * dp_fw_stats_process(): Process TxRX FW stats request
6966  * @vdev_handle: DP VDEV handle
6967  * @req: stats request
6968  *
6969  * return: int
6970  */
6971 static int dp_fw_stats_process(struct cdp_vdev *vdev_handle,
6972 		struct cdp_txrx_stats_req *req)
6973 {
6974 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
6975 	struct dp_pdev *pdev = NULL;
6976 	uint32_t stats = req->stats;
6977 	uint8_t mac_id = req->mac_id;
6978 
6979 	if (!vdev) {
6980 		DP_TRACE(NONE, "VDEV not found");
6981 		return 1;
6982 	}
6983 	pdev = vdev->pdev;
6984 
6985 	/*
6986 	 * For HTT_DBG_EXT_STATS_RESET command, FW need to config
6987 	 * from param0 to param3 according to below rule:
6988 	 *
6989 	 * PARAM:
6990 	 *   - config_param0 : start_offset (stats type)
6991 	 *   - config_param1 : stats bmask from start offset
6992 	 *   - config_param2 : stats bmask from start offset + 32
6993 	 *   - config_param3 : stats bmask from start offset + 64
6994 	 */
6995 	if (req->stats == CDP_TXRX_STATS_0) {
6996 		req->param0 = HTT_DBG_EXT_STATS_PDEV_TX;
6997 		req->param1 = 0xFFFFFFFF;
6998 		req->param2 = 0xFFFFFFFF;
6999 		req->param3 = 0xFFFFFFFF;
7000 	}
7001 
7002 	return dp_h2t_ext_stats_msg_send(pdev, stats, req->param0,
7003 				req->param1, req->param2, req->param3,
7004 				0, 0, mac_id);
7005 }
7006 
7007 /**
7008  * dp_txrx_stats_request - function to map to firmware and host stats
7009  * @vdev: virtual handle
7010  * @req: stats request
7011  *
7012  * Return: QDF_STATUS
7013  */
7014 static
7015 QDF_STATUS dp_txrx_stats_request(struct cdp_vdev *vdev,
7016 				 struct cdp_txrx_stats_req *req)
7017 {
7018 	int host_stats;
7019 	int fw_stats;
7020 	enum cdp_stats stats;
7021 	int num_stats;
7022 
7023 	if (!vdev || !req) {
7024 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
7025 				"Invalid vdev/req instance");
7026 		return QDF_STATUS_E_INVAL;
7027 	}
7028 
7029 	stats = req->stats;
7030 	if (stats >= CDP_TXRX_MAX_STATS)
7031 		return QDF_STATUS_E_INVAL;
7032 
7033 	/*
7034 	 * DP_CURR_FW_STATS_AVAIL: number of FW stats currently available;
7035 	 *			it has to be updated when new FW HTT stats are added
7036 	 */
7037 	if (stats > CDP_TXRX_STATS_HTT_MAX)
7038 		stats = stats + DP_CURR_FW_STATS_AVAIL - DP_HTT_DBG_EXT_STATS_MAX;
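	/* stats now indexes into dp_stats_mapping_table for HTT-range requests */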
7039 
7040 	num_stats  = QDF_ARRAY_SIZE(dp_stats_mapping_table);
7041 
7042 	if (stats >= num_stats) {
7043 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
7044 			  "%s: Invalid stats option: %d", __func__, stats);
7045 		return QDF_STATUS_E_INVAL;
7046 	}
7047 
7048 	req->stats = stats;
7049 	fw_stats = dp_stats_mapping_table[stats][STATS_FW];
7050 	host_stats = dp_stats_mapping_table[stats][STATS_HOST];
7051 
7052 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
7053 		 "stats: %u fw_stats_type: %d host_stats: %d",
7054 		  stats, fw_stats, host_stats);
7055 
7056 	if (fw_stats != TXRX_FW_STATS_INVALID) {
7057 		/* update request with FW stats type */
7058 		req->stats = fw_stats;
7059 		return dp_fw_stats_process(vdev, req);
7060 	}
7061 
7062 	if ((host_stats != TXRX_HOST_STATS_INVALID) &&
7063 			(host_stats <= TXRX_HOST_STATS_MAX))
7064 		return dp_print_host_stats(vdev, req);
7065 	else
7066 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
7067 				"Wrong Input for TxRx Stats");
7068 
7069 	return QDF_STATUS_SUCCESS;
7070 }
7071 
7072 /*
7073  * dp_print_napi_stats(): NAPI stats
7074  * @soc - soc handle
7075  */
7076 static void dp_print_napi_stats(struct dp_soc *soc)
7077 {
7078 	hif_print_napi_stats(soc->hif_handle);
7079 }
7080 
7081 /*
7082  * dp_print_per_ring_stats(): Packet count per ring
7083  * @soc - soc handle
7084  */
7085 static void dp_print_per_ring_stats(struct dp_soc *soc)
7086 {
7087 	uint8_t ring;
7088 	uint16_t core;
7089 	uint64_t total_packets;
7090 
7091 	DP_TRACE_STATS(INFO_HIGH, "Reo packets per ring:");
7092 	for (ring = 0; ring < MAX_REO_DEST_RINGS; ring++) {
7093 		total_packets = 0;
7094 		DP_TRACE_STATS(INFO_HIGH,
7095 			       "Packets on ring %u:", ring);
7096 		for (core = 0; core < NR_CPUS; core++) {
7097 			DP_TRACE_STATS(INFO_HIGH,
7098 				       "Packets arriving on core %u: %llu",
7099 				       core,
7100 				       soc->stats.rx.ring_packets[core][ring]);
7101 			total_packets += soc->stats.rx.ring_packets[core][ring];
7102 		}
7103 		DP_TRACE_STATS(INFO_HIGH,
7104 			       "Total packets on ring %u: %llu",
7105 			       ring, total_packets);
7106 	}
7107 }
7108 
7109 /*
7110  * dp_txrx_path_stats() - Function to display Tx/Rx path statistics
7111  * @soc - soc handle
7112  *
7113  * Return: none
7114  */
7115 static void dp_txrx_path_stats(struct dp_soc *soc)
7116 {
7117 	uint8_t error_code;
7118 	uint8_t loop_pdev;
7119 	struct dp_pdev *pdev;
7120 	uint8_t i;
7121 
7122 	if (!soc) {
7123 		DP_TRACE(ERROR, "%s: Invalid access",
7124 			 __func__);
7125 		return;
7126 	}
7127 
7128 	for (loop_pdev = 0; loop_pdev < soc->pdev_count; loop_pdev++) {
7129 
7130 		pdev = soc->pdev_list[loop_pdev];
7131 		dp_aggregate_pdev_stats(pdev);
7132 		DP_TRACE_STATS(INFO_HIGH, "Tx path Statistics:");
7133 		DP_TRACE_STATS(INFO_HIGH, "from stack: %u msdus (%llu bytes)",
7134 			       pdev->stats.tx_i.rcvd.num,
7135 			       pdev->stats.tx_i.rcvd.bytes);
7136 		DP_TRACE_STATS(INFO_HIGH,
7137 			       "processed from host: %u msdus (%llu bytes)",
7138 			       pdev->stats.tx_i.processed.num,
7139 			       pdev->stats.tx_i.processed.bytes);
7140 		DP_TRACE_STATS(INFO_HIGH,
7141 			       "successfully transmitted: %u msdus (%llu bytes)",
7142 			       pdev->stats.tx.tx_success.num,
7143 			       pdev->stats.tx.tx_success.bytes);
7144 
7145 		DP_TRACE_STATS(INFO_HIGH, "Dropped in host:");
7146 		DP_TRACE_STATS(INFO_HIGH, "Total packets dropped: %u,",
7147 			       pdev->stats.tx_i.dropped.dropped_pkt.num);
7148 		DP_TRACE_STATS(INFO_HIGH, "Descriptor not available: %u",
7149 			       pdev->stats.tx_i.dropped.desc_na.num);
7150 		DP_TRACE_STATS(INFO_HIGH, "Ring full: %u",
7151 			       pdev->stats.tx_i.dropped.ring_full);
7152 		DP_TRACE_STATS(INFO_HIGH, "Enqueue fail: %u",
7153 			       pdev->stats.tx_i.dropped.enqueue_fail);
7154 		DP_TRACE_STATS(INFO_HIGH, "DMA Error: %u",
7155 			       pdev->stats.tx_i.dropped.dma_error);
7156 
7157 		DP_TRACE_STATS(INFO_HIGH, "Dropped in hardware:");
7158 		DP_TRACE_STATS(INFO_HIGH, "total packets dropped: %u",
7159 			       pdev->stats.tx.tx_failed);
7160 		DP_TRACE_STATS(INFO_HIGH, "mpdu age out: %u",
7161 			       pdev->stats.tx.dropped.age_out);
7162 		DP_TRACE_STATS(INFO_HIGH, "firmware removed: %u",
7163 			       pdev->stats.tx.dropped.fw_rem);
7164 		DP_TRACE_STATS(INFO_HIGH, "firmware removed tx: %u",
7165 			       pdev->stats.tx.dropped.fw_rem_tx);
7166 		DP_TRACE_STATS(INFO_HIGH, "firmware removed notx: %u",
7167 			       pdev->stats.tx.dropped.fw_rem_notx);
7168 		DP_TRACE_STATS(INFO_HIGH, "peer_invalid: %u",
7169 			       pdev->soc->stats.tx.tx_invalid_peer.num);
7170 
7171 		DP_TRACE_STATS(INFO_HIGH, "Tx packets sent per interrupt:");
7172 		DP_TRACE_STATS(INFO_HIGH, "Single Packet: %u",
7173 			       pdev->stats.tx_comp_histogram.pkts_1);
7174 		DP_TRACE_STATS(INFO_HIGH, "2-20 Packets:  %u",
7175 			       pdev->stats.tx_comp_histogram.pkts_2_20);
7176 		DP_TRACE_STATS(INFO_HIGH, "21-40 Packets: %u",
7177 			       pdev->stats.tx_comp_histogram.pkts_21_40);
7178 		DP_TRACE_STATS(INFO_HIGH, "41-60 Packets: %u",
7179 			       pdev->stats.tx_comp_histogram.pkts_41_60);
7180 		DP_TRACE_STATS(INFO_HIGH, "61-80 Packets: %u",
7181 			       pdev->stats.tx_comp_histogram.pkts_61_80);
7182 		DP_TRACE_STATS(INFO_HIGH, "81-100 Packets: %u",
7183 			       pdev->stats.tx_comp_histogram.pkts_81_100);
7184 		DP_TRACE_STATS(INFO_HIGH, "101-200 Packets: %u",
7185 			       pdev->stats.tx_comp_histogram.pkts_101_200);
7186 		DP_TRACE_STATS(INFO_HIGH, "   201+ Packets: %u",
7187 			       pdev->stats.tx_comp_histogram.pkts_201_plus);
7188 
7189 		DP_TRACE_STATS(INFO_HIGH, "Rx path statistics");
7190 
7191 		DP_TRACE_STATS(INFO_HIGH,
7192 			       "delivered %u msdus ( %llu bytes),",
7193 			       pdev->stats.rx.to_stack.num,
7194 			       pdev->stats.rx.to_stack.bytes);
7195 		for (i = 0; i <  CDP_MAX_RX_RINGS; i++)
7196 			DP_TRACE_STATS(INFO_HIGH,
7197 				       "received on reo[%d] %u msdus( %llu bytes),",
7198 				       i, pdev->stats.rx.rcvd_reo[i].num,
7199 				       pdev->stats.rx.rcvd_reo[i].bytes);
7200 		DP_TRACE_STATS(INFO_HIGH,
7201 			       "intra-bss packets %u msdus ( %llu bytes),",
7202 			       pdev->stats.rx.intra_bss.pkts.num,
7203 			       pdev->stats.rx.intra_bss.pkts.bytes);
7204 		DP_TRACE_STATS(INFO_HIGH,
7205 			       "intra-bss fails %u msdus ( %llu bytes),",
7206 			       pdev->stats.rx.intra_bss.fail.num,
7207 			       pdev->stats.rx.intra_bss.fail.bytes);
7208 		DP_TRACE_STATS(INFO_HIGH,
7209 			       "raw packets %u msdus ( %llu bytes),",
7210 			       pdev->stats.rx.raw.num,
7211 			       pdev->stats.rx.raw.bytes);
7212 		DP_TRACE_STATS(INFO_HIGH, "dropped: mic errors %u msdus",
7213 			       pdev->stats.rx.err.mic_err);
7214 		DP_TRACE_STATS(INFO_HIGH, "peer invalid %u",
7215 			       pdev->soc->stats.rx.err.rx_invalid_peer.num);
7216 
7217 		DP_TRACE_STATS(INFO_HIGH, "Reo Statistics");
7218 		DP_TRACE_STATS(INFO_HIGH, "rbm error: %u msdus",
7219 			       pdev->soc->stats.rx.err.invalid_rbm);
7220 		DP_TRACE_STATS(INFO_HIGH, "hal ring access fail: %u msdus",
7221 			       pdev->soc->stats.rx.err.hal_ring_access_fail);
7222 
7223 		for (error_code = 0; error_code < HAL_REO_ERR_MAX;
7224 				error_code++) {
7225 			if (!pdev->soc->stats.rx.err.reo_error[error_code])
7226 				continue;
7227 			DP_TRACE_STATS(INFO_HIGH,
7228 				       "Reo error number (%u): %u msdus",
7229 				       error_code,
7230 				       pdev->soc->stats.rx.err
7231 				       .reo_error[error_code]);
7232 		}
7233 
7234 		for (error_code = 0; error_code < HAL_RXDMA_ERR_MAX;
7235 				error_code++) {
7236 			if (!pdev->soc->stats.rx.err.rxdma_error[error_code])
7237 				continue;
7238 			DP_TRACE_STATS(INFO_HIGH,
7239 				       "Rxdma error number (%u): %u msdus",
7240 				       error_code,
7241 				       pdev->soc->stats.rx.err
7242 				       .rxdma_error[error_code]);
7243 		}
7244 
7245 		DP_TRACE_STATS(INFO_HIGH, "Rx packets reaped per interrupt:");
7246 		DP_TRACE_STATS(INFO_HIGH, "Single Packet: %u",
7247 			       pdev->stats.rx_ind_histogram.pkts_1);
7248 		DP_TRACE_STATS(INFO_HIGH, "2-20 Packets:  %u",
7249 			       pdev->stats.rx_ind_histogram.pkts_2_20);
7250 		DP_TRACE_STATS(INFO_HIGH, "21-40 Packets: %u",
7251 			       pdev->stats.rx_ind_histogram.pkts_21_40);
7252 		DP_TRACE_STATS(INFO_HIGH, "41-60 Packets: %u",
7253 			       pdev->stats.rx_ind_histogram.pkts_41_60);
7254 		DP_TRACE_STATS(INFO_HIGH, "61-80 Packets: %u",
7255 			       pdev->stats.rx_ind_histogram.pkts_61_80);
7256 		DP_TRACE_STATS(INFO_HIGH, "81-100 Packets: %u",
7257 			       pdev->stats.rx_ind_histogram.pkts_81_100);
7258 		DP_TRACE_STATS(INFO_HIGH, "101-200 Packets: %u",
7259 			       pdev->stats.rx_ind_histogram.pkts_101_200);
7260 		DP_TRACE_STATS(INFO_HIGH, "   201+ Packets: %u",
7261 			       pdev->stats.rx_ind_histogram.pkts_201_plus);
7262 
7263 		DP_TRACE_STATS(INFO_HIGH, "%s: tso_enable: %u lro_enable: %u rx_hash: %u napi_enable: %u",
7264 			       __func__,
7265 			       pdev->soc->wlan_cfg_ctx
7266 			       ->tso_enabled,
7267 			       pdev->soc->wlan_cfg_ctx
7268 			       ->lro_enabled,
7269 			       pdev->soc->wlan_cfg_ctx
7270 			       ->rx_hash,
7271 			       pdev->soc->wlan_cfg_ctx
7272 			       ->napi_enabled);
7273 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
7274 		DP_TRACE_STATS(INFO_HIGH, "%s: Tx flow stop queue: %u tx flow start queue offset: %u",
7275 			       __func__,
7276 			       pdev->soc->wlan_cfg_ctx
7277 			       ->tx_flow_stop_queue_threshold,
7278 			       pdev->soc->wlan_cfg_ctx
7279 			       ->tx_flow_start_queue_offset);
7280 #endif
7281 	}
7282 }
7283 
7284 /*
7285  * dp_txrx_dump_stats() -  Dump statistics
7286  * @value - Statistics option
7287  */
7288 static QDF_STATUS dp_txrx_dump_stats(void *psoc, uint16_t value,
7289 				     enum qdf_stats_verbosity_level level)
7290 {
7291 	struct dp_soc *soc =
7292 		(struct dp_soc *)psoc;
7293 	QDF_STATUS status = QDF_STATUS_SUCCESS;
7294 
7295 	if (!soc) {
7296 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
7297 			"%s: soc is NULL", __func__);
7298 		return QDF_STATUS_E_INVAL;
7299 	}
7300 
7301 	switch (value) {
7302 	case CDP_TXRX_PATH_STATS:
7303 		dp_txrx_path_stats(soc);
7304 		break;
7305 
7306 	case CDP_RX_RING_STATS:
7307 		dp_print_per_ring_stats(soc);
7308 		break;
7309 
7310 	case CDP_TXRX_TSO_STATS:
7311 		/* TODO: NOT IMPLEMENTED */
7312 		break;
7313 
7314 	case CDP_DUMP_TX_FLOW_POOL_INFO:
7315 		cdp_dump_flow_pool_info((struct cdp_soc_t *)soc);
7316 		break;
7317 
7318 	case CDP_DP_NAPI_STATS:
7319 		dp_print_napi_stats(soc);
7320 		break;
7321 
7322 	case CDP_TXRX_DESC_STATS:
7323 		/* TODO: NOT IMPLEMENTED */
7324 		break;
7325 
7326 	default:
7327 		status = QDF_STATUS_E_INVAL;
7328 		break;
7329 	}
7330 
7331 	return status;
7332 
7333 }
7334 
7335 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
7336 /**
7337  * dp_update_flow_control_parameters() - API to store datapath
7338  *                            flow control parameters
7339  * @soc: soc handle
7340  * @params: ini parameter handle
7341  *
7342  * Return: void
7343  */
7344 static inline
7345 void dp_update_flow_control_parameters(struct dp_soc *soc,
7346 				struct cdp_config_params *params)
7347 {
7348 	soc->wlan_cfg_ctx->tx_flow_stop_queue_threshold =
7349 					params->tx_flow_stop_queue_threshold;
7350 	soc->wlan_cfg_ctx->tx_flow_start_queue_offset =
7351 					params->tx_flow_start_queue_offset;
7352 }
7353 #else
7354 static inline
7355 void dp_update_flow_control_parameters(struct dp_soc *soc,
7356 				struct cdp_config_params *params)
7357 {
7358 }
7359 #endif
7360 
7361 /**
7362  * dp_update_config_parameters() - API to store datapath
7363  *                            config parameters
7364  * @psoc: soc handle
7365  * @params: ini parameter handle
7366  *
7367  * Return: status
7368  */
7369 static
7370 QDF_STATUS dp_update_config_parameters(struct cdp_soc *psoc,
7371 				struct cdp_config_params *params)
7372 {
7373 	struct dp_soc *soc = (struct dp_soc *)psoc;
7374 
7375 	if (!(soc)) {
7376 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
7377 				"%s: Invalid handle", __func__);
7378 		return QDF_STATUS_E_INVAL;
7379 	}
7380 
7381 	soc->wlan_cfg_ctx->tso_enabled = params->tso_enable;
7382 	soc->wlan_cfg_ctx->lro_enabled = params->lro_enable;
7383 	soc->wlan_cfg_ctx->rx_hash = params->flow_steering_enable;
7384 	soc->wlan_cfg_ctx->tcp_udp_checksumoffload =
7385 				params->tcp_udp_checksumoffload;
7386 	soc->wlan_cfg_ctx->napi_enabled = params->napi_enable;
7387 	dp_update_flow_control_parameters(soc, params);
7388 
7389 	return QDF_STATUS_SUCCESS;
7390 }
7391 
7392 /**
7393  * dp_txrx_set_wds_rx_policy() - API to set the WDS rx policy
7394  *                            on a vdev
7395  * @vdev_handle - datapath vdev handle
7396  * @val - WDS rx policy bitmap (WDS_POLICY_RX_*)
7397  *
7398  * Return: void
7399  */
7400 #ifdef WDS_VENDOR_EXTENSION
7401 void
7402 dp_txrx_set_wds_rx_policy(
7403 		struct cdp_vdev *vdev_handle,
7404 		u_int32_t val)
7405 {
7406 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
7407 	struct dp_peer *peer;
7408 	if (vdev->opmode == wlan_op_mode_ap) {
7409 		/* for ap, set it on bss_peer */
7410 		TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
7411 			if (peer->bss_peer) {
7412 				peer->wds_ecm.wds_rx_filter = 1;
7413 				peer->wds_ecm.wds_rx_ucast_4addr = (val & WDS_POLICY_RX_UCAST_4ADDR) ? 1:0;
7414 				peer->wds_ecm.wds_rx_mcast_4addr = (val & WDS_POLICY_RX_MCAST_4ADDR) ? 1:0;
7415 				break;
7416 			}
7417 		}
7418 	} else if (vdev->opmode == wlan_op_mode_sta) {
7419 		peer = TAILQ_FIRST(&vdev->peer_list);
		if (!peer)	/* no peer attached to this STA vdev yet */
			return;
7420 		peer->wds_ecm.wds_rx_filter = 1;
7421 		peer->wds_ecm.wds_rx_ucast_4addr = (val & WDS_POLICY_RX_UCAST_4ADDR) ? 1:0;
7422 		peer->wds_ecm.wds_rx_mcast_4addr = (val & WDS_POLICY_RX_MCAST_4ADDR) ? 1:0;
7423 	}
7424 }
7425 
7426 /**
7427  * dp_txrx_peer_wds_tx_policy_update() - API to set tx wds policy
7428  *
7429  * @peer_handle - datapath peer handle
7430  * @wds_tx_ucast: policy for unicast transmission
7431  * @wds_tx_mcast: policy for multicast transmission
7432  *
7433  * Return: void
7434  */
7435 void
7436 dp_txrx_peer_wds_tx_policy_update(struct cdp_peer *peer_handle,
7437 		int wds_tx_ucast, int wds_tx_mcast)
7438 {
7439 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
7440 	if (wds_tx_ucast || wds_tx_mcast) {
7441 		peer->wds_enabled = 1;
7442 		peer->wds_ecm.wds_tx_ucast_4addr = wds_tx_ucast;
7443 		peer->wds_ecm.wds_tx_mcast_4addr = wds_tx_mcast;
7444 	} else {
7445 		peer->wds_enabled = 0;
7446 		peer->wds_ecm.wds_tx_ucast_4addr = 0;
7447 		peer->wds_ecm.wds_tx_mcast_4addr = 0;
7448 	}
7449 
7450 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
7451 			FL("Policy Update set to: "
7452 			   "peer->wds_enabled %d "
7453 			   "peer->wds_ecm.wds_tx_ucast_4addr %d "
7454 			   "peer->wds_ecm.wds_tx_mcast_4addr %d"),
7455 				peer->wds_enabled, peer->wds_ecm.wds_tx_ucast_4addr,
7456 				peer->wds_ecm.wds_tx_mcast_4addr);
7457 	return;
7458 }
7459 #endif
7460 
7461 static struct cdp_wds_ops dp_ops_wds = {
7462 	.vdev_set_wds = dp_vdev_set_wds,
7463 #ifdef WDS_VENDOR_EXTENSION
7464 	.txrx_set_wds_rx_policy = dp_txrx_set_wds_rx_policy,
7465 	.txrx_wds_peer_tx_policy_update = dp_txrx_peer_wds_tx_policy_update,
7466 #endif
7467 };
7468 
7469 /*
7470  * dp_txrx_data_tx_cb_set(): set the callback for non standard tx
7471  * @vdev_handle - datapath vdev handle
7472  * @callback - callback function
7473  * @ctxt: callback context
7474  *
7475  */
7476 static void
7477 dp_txrx_data_tx_cb_set(struct cdp_vdev *vdev_handle,
7478 		       ol_txrx_data_tx_cb callback, void *ctxt)
7479 {
7480 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
7481 
7482 	vdev->tx_non_std_data_callback.func = callback;
7483 	vdev->tx_non_std_data_callback.ctxt = ctxt;
7484 }
7485 
7486 /**
7487  * dp_pdev_get_dp_txrx_handle() - get dp handle from pdev
7488  * @pdev_hdl: datapath pdev handle
7489  *
7490  * Return: opaque pointer to dp txrx handle
7491  */
7492 static void *dp_pdev_get_dp_txrx_handle(struct cdp_pdev *pdev_hdl)
7493 {
7494 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_hdl;
7495 
7496 	return pdev->dp_txrx_handle;
7497 }
7498 
7499 /**
7500  * dp_pdev_set_dp_txrx_handle() - set dp handle in pdev
7501  * @pdev_hdl: datapath pdev handle
7502  * @dp_txrx_hdl: opaque pointer for dp_txrx_handle
7503  *
7504  * Return: void
7505  */
7506 static void
7507 dp_pdev_set_dp_txrx_handle(struct cdp_pdev *pdev_hdl, void *dp_txrx_hdl)
7508 {
7509 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_hdl;
7510 
7511 	pdev->dp_txrx_handle = dp_txrx_hdl;
7512 }
7513 
7514 /**
7515  * dp_soc_get_dp_txrx_handle() - get context for external-dp from dp soc
7516  * @soc_handle: datapath soc handle
7517  *
7518  * Return: opaque pointer to external dp (non-core DP)
7519  */
7520 static void *dp_soc_get_dp_txrx_handle(struct cdp_soc *soc_handle)
7521 {
7522 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
7523 
7524 	return soc->external_txrx_handle;
7525 }
7526 
7527 /**
7528  * dp_soc_set_dp_txrx_handle() - set external dp handle in soc
7529  * @soc_handle: datapath soc handle
7530  * @txrx_handle: opaque pointer to external dp (non-core DP)
7531  *
7532  * Return: void
7533  */
7534 static void
7535 dp_soc_set_dp_txrx_handle(struct cdp_soc *soc_handle, void *txrx_handle)
7536 {
7537 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
7538 
7539 	soc->external_txrx_handle = txrx_handle;
7540 }
7541 
7542 #ifdef FEATURE_AST
7543 static void dp_peer_teardown_wifi3(struct cdp_vdev *vdev_hdl, void *peer_hdl)
7544 {
7545 	struct dp_vdev *vdev = (struct dp_vdev *) vdev_hdl;
7546 	struct dp_peer *peer = (struct dp_peer *) peer_hdl;
7547 	struct dp_soc *soc = (struct dp_soc *) vdev->pdev->soc;
7548 
7549 	/*
7550 	 * For a BSS peer, a new peer is not created on alloc_node if a
7551 	 * peer with the same address already exists; instead the refcnt of
7552 	 * the existing peer is increased. Correspondingly, in the delete
7553 	 * path only the refcnt is decreased, and the peer is deleted only
7554 	 * when all references are dropped. So delete_in_progress should
7555 	 * not be set for a bss_peer unless only 2 references remain (the
7556 	 * peer map reference and the peer hash table reference).
7557 	 */
7558 	if (peer->bss_peer && (qdf_atomic_read(&peer->ref_cnt) > 2)) {
7559 		return;
7560 	}
7561 
7562 	peer->delete_in_progress = true;
7563 	dp_peer_delete_ast_entries(soc, peer);
7564 }
7565 #endif
7566 
7567 #ifdef ATH_SUPPORT_NAC_RSSI
7568 /**
7569  * dp_vdev_get_neighbour_rssi(): Get stored RSSI for a configured NAC
7570  * @vdev_hdl: DP vdev handle
7571  * @rssi: rssi value
7572  *
7573  * Return: 0 for success. nonzero for failure.
7574  */
7575 QDF_STATUS  dp_vdev_get_neighbour_rssi(struct cdp_vdev *vdev_hdl,
7576 				       char *mac_addr,
7577 				       uint8_t *rssi)
7578 {
7579 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_hdl;
7580 	struct dp_pdev *pdev = vdev->pdev;
7581 	struct dp_neighbour_peer *peer = NULL;
7582 	QDF_STATUS status = QDF_STATUS_E_FAILURE;
7583 
7584 	*rssi = 0;
7585 	qdf_spin_lock_bh(&pdev->neighbour_peer_mutex);
7586 	TAILQ_FOREACH(peer, &pdev->neighbour_peers_list,
7587 		      neighbour_peer_list_elem) {
7588 		if (qdf_mem_cmp(&peer->neighbour_peers_macaddr.raw[0],
7589 				mac_addr, DP_MAC_ADDR_LEN) == 0) {
7590 			*rssi = peer->rssi;
7591 			status = QDF_STATUS_SUCCESS;
7592 			break;
7593 		}
7594 	}
7595 	qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex);
7596 	return status;
7597 }
7598 
7599 static QDF_STATUS dp_config_for_nac_rssi(struct cdp_vdev *vdev_handle,
7600 		enum cdp_nac_param_cmd cmd, char *bssid, char *client_macaddr,
7601 		uint8_t chan_num)
7602 {
7603 
7604 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
7605 	struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;
7606 	struct dp_soc *soc = (struct dp_soc *) vdev->pdev->soc;
7607 
7608 	pdev->nac_rssi_filtering = 1;
7609 	/* Store address of NAC (neighbour peer) which will be checked
7610 	 * against TA of received packets.
7611 	 */
7612 
7613 	if (cmd == CDP_NAC_PARAM_ADD) {
7614 		dp_update_filter_neighbour_peers(vdev_handle, DP_NAC_PARAM_ADD,
7615 						 client_macaddr);
7616 	} else if (cmd == CDP_NAC_PARAM_DEL) {
7617 		dp_update_filter_neighbour_peers(vdev_handle,
7618 						 DP_NAC_PARAM_DEL,
7619 						 client_macaddr);
7620 	}
7621 
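	/* Also push the BSSID used for NAC RSSI to the FW, if the ctrl op exists */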
7622 	if (soc->cdp_soc.ol_ops->config_bssid_in_fw_for_nac_rssi)
7623 		soc->cdp_soc.ol_ops->config_bssid_in_fw_for_nac_rssi
7624 			((void *)vdev->pdev->ctrl_pdev,
7625 			 vdev->vdev_id, cmd, bssid);
7626 
7627 	return QDF_STATUS_SUCCESS;
7628 }
7629 #endif
7630 
7631 static QDF_STATUS dp_peer_map_attach_wifi3(struct cdp_soc_t  *soc_hdl,
7632 		uint32_t max_peers)
7633 {
7634 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
7635 
7636 	soc->max_peers = max_peers;
7637 
7638 	qdf_print("%s max_peers %u\n", __func__, max_peers);
7639 
7640 	if (dp_peer_find_attach(soc))
7641 		return QDF_STATUS_E_FAILURE;
7642 
7643 	return QDF_STATUS_SUCCESS;
7644 }
7645 
7646 /**
7647  * dp_pdev_set_ctrl_pdev() - set ctrl pdev handle in dp pdev
7648  * @dp_pdev: dp pdev handle
7649  * @ctrl_pdev: UMAC ctrl pdev handle
7650  *
7651  * Return: void
7652  */
7653 static void dp_pdev_set_ctrl_pdev(struct cdp_pdev *dp_pdev,
7654 				  struct cdp_ctrl_objmgr_pdev *ctrl_pdev)
7655 {
7656 	struct dp_pdev *pdev = (struct dp_pdev *)dp_pdev;
7657 
7658 	pdev->ctrl_pdev = ctrl_pdev;
7659 }
7660 
7661 static struct cdp_cmn_ops dp_ops_cmn = {
7662 	.txrx_soc_attach_target = dp_soc_attach_target_wifi3,
7663 	.txrx_vdev_attach = dp_vdev_attach_wifi3,
7664 	.txrx_vdev_detach = dp_vdev_detach_wifi3,
7665 	.txrx_pdev_attach = dp_pdev_attach_wifi3,
7666 	.txrx_pdev_detach = dp_pdev_detach_wifi3,
7667 	.txrx_peer_create = dp_peer_create_wifi3,
7668 	.txrx_peer_setup = dp_peer_setup_wifi3,
7669 #ifdef FEATURE_AST
7670 	.txrx_peer_teardown = dp_peer_teardown_wifi3,
7671 #else
7672 	.txrx_peer_teardown = NULL,
7673 #endif
7674 	.txrx_peer_add_ast = dp_peer_add_ast_wifi3,
7675 	.txrx_peer_del_ast = dp_peer_del_ast_wifi3,
7676 	.txrx_peer_update_ast = dp_peer_update_ast_wifi3,
7677 	.txrx_peer_ast_hash_find = dp_peer_ast_hash_find_wifi3,
7678 	.txrx_peer_ast_get_pdev_id = dp_peer_ast_get_pdev_id_wifi3,
7679 	.txrx_peer_ast_get_next_hop = dp_peer_ast_get_next_hop_wifi3,
7680 	.txrx_peer_ast_set_type = dp_peer_ast_set_type_wifi3,
7681 	.txrx_peer_ast_get_type = dp_peer_ast_get_type_wifi3,
7682 	.txrx_peer_delete = dp_peer_delete_wifi3,
7683 	.txrx_vdev_register = dp_vdev_register_wifi3,
7684 	.txrx_soc_detach = dp_soc_detach_wifi3,
7685 	.txrx_get_vdev_mac_addr = dp_get_vdev_mac_addr_wifi3,
7686 	.txrx_get_vdev_from_vdev_id = dp_get_vdev_from_vdev_id_wifi3,
7687 	.txrx_get_ctrl_pdev_from_vdev = dp_get_ctrl_pdev_from_vdev_wifi3,
7688 	.txrx_ath_getstats = dp_get_device_stats,
7689 	.addba_requestprocess = dp_addba_requestprocess_wifi3,
7690 	.addba_responsesetup = dp_addba_responsesetup_wifi3,
7691 	.addba_resp_tx_completion = dp_addba_resp_tx_completion_wifi3,
7692 	.delba_process = dp_delba_process_wifi3,
7693 	.set_addba_response = dp_set_addba_response,
7694 	.get_peer_mac_addr_frm_id = dp_get_peer_mac_addr_frm_id,
7695 	.flush_cache_rx_queue = NULL,
7696 	/* TODO: get API's for dscp-tid need to be added*/
7697 	.set_vdev_dscp_tid_map = dp_set_vdev_dscp_tid_map_wifi3,
7698 	.set_pdev_dscp_tid_map = dp_set_pdev_dscp_tid_map_wifi3,
7699 	.txrx_stats_request = dp_txrx_stats_request,
7700 	.txrx_set_monitor_mode = dp_vdev_set_monitor_mode,
7701 	.txrx_get_pdev_id_frm_pdev = dp_get_pdev_id_frm_pdev,
7702 	.txrx_pdev_set_chan_noise_floor = dp_pdev_set_chan_noise_floor,
7703 	.txrx_set_nac = dp_set_nac,
7704 	.txrx_get_tx_pending = dp_get_tx_pending,
7705 	.txrx_set_pdev_tx_capture = dp_config_debug_sniffer,
7706 	.txrx_get_peer_mac_from_peer_id = dp_get_peer_mac_from_peer_id,
7707 	.display_stats = dp_txrx_dump_stats,
7708 	.txrx_soc_set_nss_cfg = dp_soc_set_nss_cfg_wifi3,
7709 	.txrx_soc_get_nss_cfg = dp_soc_get_nss_cfg_wifi3,
7710 	.txrx_intr_attach = dp_soc_interrupt_attach_wrapper,
7711 	.txrx_intr_detach = dp_soc_interrupt_detach,
7712 	.set_pn_check = dp_set_pn_check_wifi3,
7713 	.update_config_parameters = dp_update_config_parameters,
7714 	/* TODO: Add other functions */
7715 	.txrx_data_tx_cb_set = dp_txrx_data_tx_cb_set,
7716 	.get_dp_txrx_handle = dp_pdev_get_dp_txrx_handle,
7717 	.set_dp_txrx_handle = dp_pdev_set_dp_txrx_handle,
7718 	.get_soc_dp_txrx_handle = dp_soc_get_dp_txrx_handle,
7719 	.set_soc_dp_txrx_handle = dp_soc_set_dp_txrx_handle,
7720 	.txrx_set_ba_aging_timeout = dp_set_ba_aging_timeout,
7721 	.txrx_get_ba_aging_timeout = dp_get_ba_aging_timeout,
7722 	.tx_send = dp_tx_send,
7723 	.txrx_peer_reset_ast = dp_wds_reset_ast_wifi3,
7724 	.txrx_peer_reset_ast_table = dp_wds_reset_ast_table_wifi3,
7725 	.txrx_peer_flush_ast_table = dp_wds_flush_ast_table_wifi3,
7726 	.txrx_peer_map_attach = dp_peer_map_attach_wifi3,
7727 	.txrx_pdev_set_ctrl_pdev = dp_pdev_set_ctrl_pdev,
7728 	.txrx_get_os_rx_handles_from_vdev =
7729 					dp_get_os_rx_handles_from_vdev_wifi3,
7730 	.delba_tx_completion = dp_delba_tx_completion_wifi3,
7731 };
7732 
7733 static struct cdp_ctrl_ops dp_ops_ctrl = {
7734 	.txrx_peer_authorize = dp_peer_authorize,
7735 #ifdef QCA_SUPPORT_SON
7736 	.txrx_set_inact_params = dp_set_inact_params,
7737 	.txrx_start_inact_timer = dp_start_inact_timer,
7738 	.txrx_set_overload = dp_set_overload,
7739 	.txrx_peer_is_inact = dp_peer_is_inact,
7740 	.txrx_mark_peer_inact = dp_mark_peer_inact,
7741 #endif
7742 	.txrx_set_vdev_rx_decap_type = dp_set_vdev_rx_decap_type,
7743 	.txrx_set_tx_encap_type = dp_set_vdev_tx_encap_type,
7744 #ifdef MESH_MODE_SUPPORT
7745 	.txrx_set_mesh_mode  = dp_peer_set_mesh_mode,
7746 	.txrx_set_mesh_rx_filter = dp_peer_set_mesh_rx_filter,
7747 #endif
7748 	.txrx_set_vdev_param = dp_set_vdev_param,
7749 	.txrx_peer_set_nawds = dp_peer_set_nawds,
7750 	.txrx_set_pdev_reo_dest = dp_set_pdev_reo_dest,
7751 	.txrx_get_pdev_reo_dest = dp_get_pdev_reo_dest,
7752 	.txrx_set_filter_neighbour_peers = dp_set_filter_neighbour_peers,
7753 	.txrx_update_filter_neighbour_peers =
7754 		dp_update_filter_neighbour_peers,
7755 	.txrx_get_sec_type = dp_get_sec_type,
7756 	/* TODO: Add other functions */
7757 	.txrx_wdi_event_sub = dp_wdi_event_sub,
7758 	.txrx_wdi_event_unsub = dp_wdi_event_unsub,
7759 #ifdef WDI_EVENT_ENABLE
7760 	.txrx_get_pldev = dp_get_pldev,
7761 #endif
7762 	.txrx_set_pdev_param = dp_set_pdev_param,
7763 #ifdef ATH_SUPPORT_NAC_RSSI
7764 	.txrx_vdev_config_for_nac_rssi = dp_config_for_nac_rssi,
7765 	.txrx_vdev_get_neighbour_rssi = dp_vdev_get_neighbour_rssi,
7766 #endif
7767 	.set_key = dp_set_michael_key,
7768 };
7769 
7770 static struct cdp_me_ops dp_ops_me = {
7771 #ifdef ATH_SUPPORT_IQUE
7772 	.tx_me_alloc_descriptor = dp_tx_me_alloc_descriptor,
7773 	.tx_me_free_descriptor = dp_tx_me_free_descriptor,
7774 	.tx_me_convert_ucast = dp_tx_me_send_convert_ucast,
7775 #endif
7776 };
7777 
7778 static struct cdp_mon_ops dp_ops_mon = {
7779 	.txrx_monitor_set_filter_ucast_data = NULL,
7780 	.txrx_monitor_set_filter_mcast_data = NULL,
7781 	.txrx_monitor_set_filter_non_data = NULL,
7782 	.txrx_monitor_get_filter_ucast_data = dp_vdev_get_filter_ucast_data,
7783 	.txrx_monitor_get_filter_mcast_data = dp_vdev_get_filter_mcast_data,
7784 	.txrx_monitor_get_filter_non_data = dp_vdev_get_filter_non_data,
7785 	.txrx_reset_monitor_mode = dp_reset_monitor_mode,
7786 	/* Added support for HK advance filter */
7787 	.txrx_set_advance_monitor_filter = dp_pdev_set_advance_monitor_filter,
7788 };
7789 
7790 static struct cdp_host_stats_ops dp_ops_host_stats = {
7791 	.txrx_per_peer_stats = dp_get_host_peer_stats,
7792 	.get_fw_peer_stats = dp_get_fw_peer_stats,
7793 	.get_htt_stats = dp_get_htt_stats,
7794 	.txrx_enable_enhanced_stats = dp_enable_enhanced_stats,
7795 	.txrx_disable_enhanced_stats = dp_disable_enhanced_stats,
7796 	.txrx_stats_publish = dp_txrx_stats_publish,
7797 	.txrx_get_vdev_stats  = dp_txrx_get_vdev_stats,
7798 	.txrx_get_peer_stats = dp_txrx_get_peer_stats,
7799 	.txrx_reset_peer_stats = dp_txrx_reset_peer_stats,
7800 	/* TODO */
7801 };
7802 
7803 static struct cdp_raw_ops dp_ops_raw = {
7804 	/* TODO */
7805 };
7806 
7807 #ifdef CONFIG_WIN
7808 static struct cdp_pflow_ops dp_ops_pflow = {
7809 	/* TODO */
7810 };
7811 #endif /* CONFIG_WIN */
7812 
7813 #ifdef FEATURE_RUNTIME_PM
7814 /**
7815  * dp_runtime_suspend() - ensure DP is ready to runtime suspend
7816  * @opaque_pdev: DP pdev context
7817  *
7818  * DP is ready to runtime suspend if there are no pending TX packets.
7819  *
7820  * Return: QDF_STATUS
7821  */
7822 static QDF_STATUS dp_runtime_suspend(struct cdp_pdev *opaque_pdev)
7823 {
7824 	struct dp_pdev *pdev = (struct dp_pdev *)opaque_pdev;
7825 	struct dp_soc *soc = pdev->soc;
7826 
7827 	/* Abort if there are any pending TX packets */
7828 	if (dp_get_tx_pending(opaque_pdev) > 0) {
7829 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
7830 			  FL("Abort suspend due to pending TX packets"));
7831 		return QDF_STATUS_E_AGAIN;
7832 	}
7833 
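	/* In poll mode, stop the interrupt timer while runtime suspended */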
7834 	if (soc->intr_mode == DP_INTR_POLL)
7835 		qdf_timer_stop(&soc->int_timer);
7836 
7837 	return QDF_STATUS_SUCCESS;
7838 }
7839 
7840 /**
7841  * dp_runtime_resume() - ensure DP is ready to runtime resume
7842  * @opaque_pdev: DP pdev context
7843  *
7844  * Resume DP for runtime PM.
7845  *
7846  * Return: QDF_STATUS
7847  */
7848 static QDF_STATUS dp_runtime_resume(struct cdp_pdev *opaque_pdev)
7849 {
7850 	struct dp_pdev *pdev = (struct dp_pdev *)opaque_pdev;
7851 	struct dp_soc *soc = pdev->soc;
7852 	void *hal_srng;
7853 	int i;
7854 
7855 	if (soc->intr_mode == DP_INTR_POLL)
7856 		qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
7857 
7858 	for (i = 0; i < MAX_TCL_DATA_RINGS; i++) {
7859 		hal_srng = soc->tcl_data_ring[i].hal_srng;
7860 		if (hal_srng) {
7861 			/* We actually only need to acquire the lock */
7862 			hal_srng_access_start(soc->hal_soc, hal_srng);
7863 			/* Update SRC ring head pointer for HW to send
7864 			 * all pending packets */
7865 			hal_srng_access_end(soc->hal_soc, hal_srng);
7866 		}
7867 	}
7868 
7869 	return QDF_STATUS_SUCCESS;
7870 }
7871 #endif /* FEATURE_RUNTIME_PM */
7872 
7873 static QDF_STATUS dp_bus_suspend(struct cdp_pdev *opaque_pdev)
7874 {
7875 	struct dp_pdev *pdev = (struct dp_pdev *)opaque_pdev;
7876 	struct dp_soc *soc = pdev->soc;
7877 
7878 	if (soc->intr_mode == DP_INTR_POLL)
7879 		qdf_timer_stop(&soc->int_timer);
7880 
7881 	return QDF_STATUS_SUCCESS;
7882 }
7883 
7884 static QDF_STATUS dp_bus_resume(struct cdp_pdev *opaque_pdev)
7885 {
7886 	struct dp_pdev *pdev = (struct dp_pdev *)opaque_pdev;
7887 	struct dp_soc *soc = pdev->soc;
7888 
7889 	if (soc->intr_mode == DP_INTR_POLL)
7890 		qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
7891 
7892 	return QDF_STATUS_SUCCESS;
7893 }
7894 
7895 #ifndef CONFIG_WIN
7896 static struct cdp_misc_ops dp_ops_misc = {
7897 	.tx_non_std = dp_tx_non_std,
7898 	.get_opmode = dp_get_opmode,
7899 #ifdef FEATURE_RUNTIME_PM
7900 	.runtime_suspend = dp_runtime_suspend,
7901 	.runtime_resume = dp_runtime_resume,
7902 #endif /* FEATURE_RUNTIME_PM */
7903 	.pkt_log_init = dp_pkt_log_init,
7904 	.pkt_log_con_service = dp_pkt_log_con_service,
7905 };
7906 
7907 static struct cdp_flowctl_ops dp_ops_flowctl = {
7908 	/* WIFI 3.0 DP implement as required. */
7909 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
7910 	.flow_pool_map_handler = dp_tx_flow_pool_map,
7911 	.flow_pool_unmap_handler = dp_tx_flow_pool_unmap,
7912 	.register_pause_cb = dp_txrx_register_pause_cb,
7913 	.dump_flow_pool_info = dp_tx_dump_flow_pool_info,
7914 #endif /* QCA_LL_TX_FLOW_CONTROL_V2 */
7915 };
7916 
7917 static struct cdp_lflowctl_ops dp_ops_l_flowctl = {
7918 	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
7919 };
7920 
7921 #ifdef IPA_OFFLOAD
7922 static struct cdp_ipa_ops dp_ops_ipa = {
7923 	.ipa_get_resource = dp_ipa_get_resource,
7924 	.ipa_set_doorbell_paddr = dp_ipa_set_doorbell_paddr,
7925 	.ipa_op_response = dp_ipa_op_response,
7926 	.ipa_register_op_cb = dp_ipa_register_op_cb,
7927 	.ipa_get_stat = dp_ipa_get_stat,
7928 	.ipa_tx_data_frame = dp_tx_send_ipa_data_frame,
7929 	.ipa_enable_autonomy = dp_ipa_enable_autonomy,
7930 	.ipa_disable_autonomy = dp_ipa_disable_autonomy,
7931 	.ipa_setup = dp_ipa_setup,
7932 	.ipa_cleanup = dp_ipa_cleanup,
7933 	.ipa_setup_iface = dp_ipa_setup_iface,
7934 	.ipa_cleanup_iface = dp_ipa_cleanup_iface,
7935 	.ipa_enable_pipes = dp_ipa_enable_pipes,
7936 	.ipa_disable_pipes = dp_ipa_disable_pipes,
7937 	.ipa_set_perf_level = dp_ipa_set_perf_level
7938 };
7939 #endif
7940 
7941 static struct cdp_bus_ops dp_ops_bus = {
7942 	.bus_suspend = dp_bus_suspend,
7943 	.bus_resume = dp_bus_resume
7944 };
7945 
7946 static struct cdp_ocb_ops dp_ops_ocb = {
7947 	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
7948 };
7949 
7950 
7951 static struct cdp_throttle_ops dp_ops_throttle = {
7952 	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
7953 };
7954 
7955 static struct cdp_mob_stats_ops dp_ops_mob_stats = {
7956 	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
7957 };
7958 
7959 static struct cdp_cfg_ops dp_ops_cfg = {
7960 	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
7961 };
7962 
7963 /*
7964  * dp_peer_get_ref_find_by_addr - find peer by address and take a reference
7965  * @dev: physical device instance
7966  * @peer_mac_addr: peer mac address
7967  * @local_id: local id for the peer
7968  * @debug_id: to track enum peer access
7969  *
7970  * Return: peer instance pointer
7971  */
7972 static inline void *
7973 dp_peer_get_ref_find_by_addr(struct cdp_pdev *dev, u8 *peer_mac_addr,
7974 			     u8 *local_id, enum peer_debug_id_type debug_id)
7975 {
7976 	struct dp_pdev *pdev = (struct dp_pdev *)dev;
7977 	struct dp_peer *peer;
7978 
7979 	peer = dp_peer_find_hash_find(pdev->soc, peer_mac_addr, 0, DP_VDEV_ALL);
7980 
7981 	if (!peer)
7982 		return NULL;
7983 
7984 	*local_id = peer->local_id;
7985 	DP_TRACE(INFO, "%s: peer %pK id %d", __func__, peer, *local_id);
7986 
7987 	return peer;
7988 }
7989 
7990 /*
7991  * dp_peer_release_ref - release peer ref count
7992  * @peer: peer handle
7993  * @debug_id: to track enum peer access
7994  *
7995  * Return: None
7996  */
7997 static inline
7998 void dp_peer_release_ref(void *peer, enum peer_debug_id_type debug_id)
7999 {
8000 	dp_peer_unref_delete(peer);
8001 }
8002 
8003 static struct cdp_peer_ops dp_ops_peer = {
8004 	.register_peer = dp_register_peer,
8005 	.clear_peer = dp_clear_peer,
8006 	.find_peer_by_addr = dp_find_peer_by_addr,
8007 	.find_peer_by_addr_and_vdev = dp_find_peer_by_addr_and_vdev,
8008 	.peer_get_ref_by_addr = dp_peer_get_ref_find_by_addr,
8009 	.peer_release_ref = dp_peer_release_ref,
8010 	.local_peer_id = dp_local_peer_id,
8011 	.peer_find_by_local_id = dp_peer_find_by_local_id,
8012 	.peer_state_update = dp_peer_state_update,
8013 	.get_vdevid = dp_get_vdevid,
8014 	.get_vdev_by_sta_id = dp_get_vdev_by_sta_id,
8015 	.peer_get_peer_mac_addr = dp_peer_get_peer_mac_addr,
8016 	.get_vdev_for_peer = dp_get_vdev_for_peer,
8017 	.get_peer_state = dp_get_peer_state,
8018 };
8019 #endif
8020 
8021 static struct cdp_ops dp_txrx_ops = {
8022 	.cmn_drv_ops = &dp_ops_cmn,
8023 	.ctrl_ops = &dp_ops_ctrl,
8024 	.me_ops = &dp_ops_me,
8025 	.mon_ops = &dp_ops_mon,
8026 	.host_stats_ops = &dp_ops_host_stats,
8027 	.wds_ops = &dp_ops_wds,
8028 	.raw_ops = &dp_ops_raw,
8029 #ifdef CONFIG_WIN
8030 	.pflow_ops = &dp_ops_pflow,
8031 #endif /* CONFIG_WIN */
8032 #ifndef CONFIG_WIN
8033 	.misc_ops = &dp_ops_misc,
8034 	.cfg_ops = &dp_ops_cfg,
8035 	.flowctl_ops = &dp_ops_flowctl,
8036 	.l_flowctl_ops = &dp_ops_l_flowctl,
8037 #ifdef IPA_OFFLOAD
8038 	.ipa_ops = &dp_ops_ipa,
8039 #endif
8040 	.bus_ops = &dp_ops_bus,
8041 	.ocb_ops = &dp_ops_ocb,
8042 	.peer_ops = &dp_ops_peer,
8043 	.throttle_ops = &dp_ops_throttle,
8044 	.mob_stats_ops = &dp_ops_mob_stats,
8045 #endif
8046 };
8047 
8048 /*
8049  * dp_soc_set_txrx_ring_map() - set the default tx ring map for the soc
8050  * @soc: DP soc handle
8051  *
8052  * Return: Void
8053  */
8054 static void dp_soc_set_txrx_ring_map(struct dp_soc *soc)
8055 {
8056 	uint32_t i;
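
	/* Point every interrupt context at a TCL ring using the default CPU ring map */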
8057 	for (i = 0; i < WLAN_CFG_INT_NUM_CONTEXTS; i++) {
8058 		soc->tx_ring_map[i] = dp_cpu_ring_map[DP_DEFAULT_MAP][i];
8059 	}
8060 }
8061 
8062 #ifdef QCA_WIFI_QCA8074
8063 /**
8064  * dp_soc_attach_wifi3() - Attach txrx SOC
8065  * @ctrl_psoc:	Opaque SOC handle from control plane
8066  * @hif_handle:	Opaque HIF handle
8067  * @htc_handle:	Opaque HTC handle
8068  * @qdf_osdev:	QDF device
8069  * @ol_ops:	Offload Operations
8070  * @device_id:	Device ID
8071  *
8072  * Return: DP SOC handle on success, NULL on failure
8073  */
8074 void *dp_soc_attach_wifi3(void *ctrl_psoc, void *hif_handle,
8075 			  HTC_HANDLE htc_handle, qdf_device_t qdf_osdev,
8076 			  struct ol_if_ops *ol_ops, uint16_t device_id)
8077 {
8078 	struct dp_soc *soc = qdf_mem_malloc(sizeof(*soc));
8079 	int target_type;
8080 
8081 	if (!soc) {
8082 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
8083 			FL("DP SOC memory allocation failed"));
8084 		goto fail0;
8085 	}
8086 
8087 	soc->device_id = device_id;
8088 	soc->cdp_soc.ops = &dp_txrx_ops;
8089 	soc->cdp_soc.ol_ops = ol_ops;
8090 	soc->ctrl_psoc = ctrl_psoc;
8091 	soc->osdev = qdf_osdev;
8092 	soc->hif_handle = hif_handle;
8093 
8094 	soc->hal_soc = hif_get_hal_handle(hif_handle);
8095 	soc->htt_handle = htt_soc_attach(soc, ctrl_psoc, htc_handle,
8096 		soc->hal_soc, qdf_osdev);
8097 	if (!soc->htt_handle) {
8098 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
8099 			FL("HTT attach failed"));
8100 		goto fail1;
8101 	}
8102 
8103 	soc->wlan_cfg_ctx = wlan_cfg_soc_attach(soc->ctrl_psoc);
8104 	if (!soc->wlan_cfg_ctx) {
8105 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
8106 			FL("wlan_cfg_soc_attach failed"));
8107 		goto fail2;
8108 	}
8109 	target_type = hal_get_target_type(soc->hal_soc);
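
	/* Apply per-target REO destination ring size and raw mode WAR settings */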
8110 	switch (target_type) {
8111 	case TARGET_TYPE_QCA6290:
8112 #ifdef QCA_WIFI_QCA6390
8113 	case TARGET_TYPE_QCA6390:
8114 #endif
8115 		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
8116 					       REO_DST_RING_SIZE_QCA6290);
8117 		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, true);
8118 		break;
8119 	case TARGET_TYPE_QCA8074:
8120 		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
8121 					       REO_DST_RING_SIZE_QCA8074);
8122 		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, true);
8123 		break;
8124 	case TARGET_TYPE_QCA8074V2:
8125 		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
8126 					       REO_DST_RING_SIZE_QCA8074);
8127 		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, false);
8128 		break;
8129 	default:
8130 		qdf_print("%s: Unknown tgt type %d\n", __func__, target_type);
8131 		qdf_assert_always(0);
8132 		break;
8133 	}
8134 
8135 	wlan_cfg_set_rx_hash(soc->wlan_cfg_ctx,
8136 			     cfg_get(ctrl_psoc, CFG_DP_RX_HASH));
8137 	soc->cce_disable = false;
8138 
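	/* Let the control plane override the max peer id and CCE-disable defaults */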
8139 	if (soc->cdp_soc.ol_ops->get_dp_cfg_param) {
8140 		int ret = soc->cdp_soc.ol_ops->get_dp_cfg_param(soc->ctrl_psoc,
8141 				CDP_CFG_MAX_PEER_ID);
8142 
8143 		if (ret != -EINVAL) {
8144 			wlan_cfg_set_max_peer_id(soc->wlan_cfg_ctx, ret);
8145 		}
8146 
8147 		ret = soc->cdp_soc.ol_ops->get_dp_cfg_param(soc->ctrl_psoc,
8148 				CDP_CFG_CCE_DISABLE);
8149 		if (ret == 1)
8150 			soc->cce_disable = true;
8151 	}
8152 
8153 	qdf_spinlock_create(&soc->peer_ref_mutex);
8154 
8155 	qdf_spinlock_create(&soc->reo_desc_freelist_lock);
8156 	qdf_list_create(&soc->reo_desc_freelist, REO_DESC_FREELIST_SIZE);
8157 
8158 	/* fill the tx/rx cpu ring map */
8159 	dp_soc_set_txrx_ring_map(soc);
8160 
8161 	qdf_spinlock_create(&soc->htt_stats.lock);
8162 	/* initialize work queue for stats processing */
8163 	qdf_create_work(0, &soc->htt_stats.work, htt_t2h_stats_handler, soc);
8164 
8165 	/* Initialize inactivity timer for wifison */
8166 	dp_init_inact_timer(soc);
8167 
8168 	return (void *)soc;
8169 
8170 fail2:
8171 	htt_soc_detach(soc->htt_handle);
8172 fail1:
8173 	qdf_mem_free(soc);
8174 fail0:
8175 	return NULL;
8176 }
8177 #endif
8178 
8179 /*
8180  * dp_get_pdev_for_mac_id() -  Return pdev for mac_id
8181  *
8182  * @soc: handle to DP soc
8183  * @mac_id: MAC id
8184  *
8185  * Return: pdev corresponding to the MAC id
8186  */
8187 void *dp_get_pdev_for_mac_id(struct dp_soc *soc, uint32_t mac_id)
8188 {
8189 	if (wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx))
8190 		return soc->pdev_list[mac_id];
8191 
8192 	/* Typically for MCL, as there is only 1 PDEV */
8193 	return soc->pdev_list[0];
8194 }
8195 
8196 /*
8197  * dp_is_hw_dbs_enable() - Procedure to check if DBS is supported
8198  * @soc:		DP SoC context
8199  * @max_mac_rings:	Pointer to no. of MAC rings; set to 1 when DBS is not enabled
8200  *
8201  * Return: None
8202  */
8203 static
8204 void dp_is_hw_dbs_enable(struct dp_soc *soc,
8205 				int *max_mac_rings)
8206 {
8207 	bool dbs_enable = false;
8208 	if (soc->cdp_soc.ol_ops->is_hw_dbs_2x2_capable)
8209 		dbs_enable = soc->cdp_soc.ol_ops->
8210 		is_hw_dbs_2x2_capable(soc->ctrl_psoc);
8211 
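	/* With DBS each MAC keeps its own ring; otherwise collapse to a single ring */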
8212 	*max_mac_rings = dbs_enable ? (*max_mac_rings) : 1;
8213 }
8214 
8215 /*
8216  * dp_set_pktlog_wifi3() - configure pktlog filters for a WDI event
8217  * @pdev: Datapath PDEV handle
8218  * @event: which event's notifications are being subscribed to
8219  * @enable: WDI event subscribe or not. (True or False)
8220  *
8221  * Return: 0 on success
8222 */
8223 #ifdef WDI_EVENT_ENABLE
8224 int dp_set_pktlog_wifi3(struct dp_pdev *pdev, uint32_t event,
8225 	bool enable)
8226 {
8227 	struct dp_soc *soc = pdev->soc;
8228 	struct htt_rx_ring_tlv_filter htt_tlv_filter = {0};
8229 	int max_mac_rings = wlan_cfg_get_num_mac_rings
8230 					(pdev->wlan_cfg_ctx);
8231 	uint8_t mac_id = 0;
8232 
8233 	dp_is_hw_dbs_enable(soc, &max_mac_rings);
8234 
8235 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
8236 			FL("Max_mac_rings %d "),
8237 			max_mac_rings);
8238 
8239 	if (enable) {
8240 		switch (event) {
8241 		case WDI_EVENT_RX_DESC:
8242 			if (pdev->monitor_vdev) {
8243 				/* Nothing needs to be done if monitor mode is
8244 				 * enabled
8245 				 */
8246 				return 0;
8247 			}
8248 			if (pdev->rx_pktlog_mode != DP_RX_PKTLOG_FULL) {
8249 				pdev->rx_pktlog_mode = DP_RX_PKTLOG_FULL;
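				/* Full pktlog: capture MPDU/MSDU, packet header,
				 * attention and PPDU status TLVs on the monitor
				 * status ring
				 */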
8250 				htt_tlv_filter.mpdu_start = 1;
8251 				htt_tlv_filter.msdu_start = 1;
8252 				htt_tlv_filter.msdu_end = 1;
8253 				htt_tlv_filter.mpdu_end = 1;
8254 				htt_tlv_filter.packet_header = 1;
8255 				htt_tlv_filter.attention = 1;
8256 				htt_tlv_filter.ppdu_start = 1;
8257 				htt_tlv_filter.ppdu_end = 1;
8258 				htt_tlv_filter.ppdu_end_user_stats = 1;
8259 				htt_tlv_filter.ppdu_end_user_stats_ext = 1;
8260 				htt_tlv_filter.ppdu_end_status_done = 1;
8261 				htt_tlv_filter.enable_fp = 1;
8262 				htt_tlv_filter.fp_mgmt_filter = FILTER_MGMT_ALL;
8263 				htt_tlv_filter.fp_ctrl_filter = FILTER_CTRL_ALL;
8264 				htt_tlv_filter.fp_data_filter = FILTER_DATA_ALL;
8265 				htt_tlv_filter.mo_mgmt_filter = FILTER_MGMT_ALL;
8266 				htt_tlv_filter.mo_ctrl_filter = FILTER_CTRL_ALL;
8267 				htt_tlv_filter.mo_data_filter = FILTER_DATA_ALL;
8268 
8269 				for (mac_id = 0; mac_id < max_mac_rings;
8270 								mac_id++) {
8271 					int mac_for_pdev =
8272 						dp_get_mac_id_for_pdev(mac_id,
8273 								pdev->pdev_id);
8274 
8275 					htt_h2t_rx_ring_cfg(soc->htt_handle,
8276 					 mac_for_pdev,
8277 					 pdev->rxdma_mon_status_ring[mac_id]
8278 					 .hal_srng,
8279 					 RXDMA_MONITOR_STATUS,
8280 					 RX_BUFFER_SIZE,
8281 					 &htt_tlv_filter);
8282 
8283 				}
8284 
8285 				if (soc->reap_timer_init)
8286 					qdf_timer_mod(&soc->mon_reap_timer,
8287 					DP_INTR_POLL_TIMER_MS);
8288 			}
8289 			break;
8290 
8291 		case WDI_EVENT_LITE_RX:
8292 			if (pdev->monitor_vdev) {
8293 				/* Nothing needs to be done if monitor mode is
8294 				 * enabled
8295 				 */
8296 				return 0;
8297 			}
8298 
8299 			if (pdev->rx_pktlog_mode != DP_RX_PKTLOG_LITE) {
8300 				pdev->rx_pktlog_mode = DP_RX_PKTLOG_LITE;
8301 
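				/* Lite pktlog: capture only PPDU status TLVs and
				 * mpdu_start on the monitor status ring
				 */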
8302 				htt_tlv_filter.ppdu_start = 1;
8303 				htt_tlv_filter.ppdu_end = 1;
8304 				htt_tlv_filter.ppdu_end_user_stats = 1;
8305 				htt_tlv_filter.ppdu_end_user_stats_ext = 1;
8306 				htt_tlv_filter.ppdu_end_status_done = 1;
8307 				htt_tlv_filter.mpdu_start = 1;
8308 				htt_tlv_filter.enable_fp = 1;
8309 				htt_tlv_filter.fp_mgmt_filter = FILTER_MGMT_ALL;
8310 				htt_tlv_filter.fp_ctrl_filter = FILTER_CTRL_ALL;
8311 				htt_tlv_filter.fp_data_filter = FILTER_DATA_ALL;
8312 				htt_tlv_filter.mo_mgmt_filter = FILTER_MGMT_ALL;
8313 				htt_tlv_filter.mo_ctrl_filter = FILTER_CTRL_ALL;
8314 				htt_tlv_filter.mo_data_filter = FILTER_DATA_ALL;
8315 
8316 				for (mac_id = 0; mac_id < max_mac_rings;
8317 								mac_id++) {
8318 					int mac_for_pdev =
8319 						dp_get_mac_id_for_pdev(mac_id,
8320 								pdev->pdev_id);
8321 
8322 					htt_h2t_rx_ring_cfg(soc->htt_handle,
8323 					mac_for_pdev,
8324 					pdev->rxdma_mon_status_ring[mac_id]
8325 					.hal_srng,
8326 					RXDMA_MONITOR_STATUS,
8327 					RX_BUFFER_SIZE_PKTLOG_LITE,
8328 					&htt_tlv_filter);
8329 				}
8330 
8331 				if (soc->reap_timer_init)
8332 					qdf_timer_mod(&soc->mon_reap_timer,
8333 					DP_INTR_POLL_TIMER_MS);
8334 			}
8335 			break;
8336 
8337 		case WDI_EVENT_LITE_T2H:
8338 			if (pdev->monitor_vdev) {
8339 				/* Nothing needs to be done if monitor mode is
8340 				 * enabled
8341 				 */
8342 				return 0;
8343 			}
8344 
8345 			for (mac_id = 0; mac_id < max_mac_rings; mac_id++) {
8346 				int mac_for_pdev = dp_get_mac_id_for_pdev(
8347 							mac_id,	pdev->pdev_id);
8348 
8349 				pdev->pktlog_ppdu_stats = true;
8350 				dp_h2t_cfg_stats_msg_send(pdev,
8351 					DP_PPDU_TXLITE_STATS_BITMASK_CFG,
8352 					mac_for_pdev);
8353 			}
8354 			break;
8355 
8356 		default:
8357 			/* Nothing needs to be done for other pktlog types */
8358 			break;
8359 		}
8360 	} else {
8361 		switch (event) {
8362 		case WDI_EVENT_RX_DESC:
8363 		case WDI_EVENT_LITE_RX:
8364 			if (pdev->monitor_vdev) {
8365 				/* Nothing needs to be done if monitor mode is
8366 				 * enabled
8367 				 */
8368 				return 0;
8369 			}
8370 			if (pdev->rx_pktlog_mode != DP_RX_PKTLOG_DISABLED) {
8371 				pdev->rx_pktlog_mode = DP_RX_PKTLOG_DISABLED;
8372 
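				/* htt_tlv_filter is still zeroed here, so this ring
				 * config clears the monitor status filter
				 */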
8373 				for (mac_id = 0; mac_id < max_mac_rings;
8374 								mac_id++) {
8375 					int mac_for_pdev =
8376 						dp_get_mac_id_for_pdev(mac_id,
8377 								pdev->pdev_id);
8378 
8379 					htt_h2t_rx_ring_cfg(soc->htt_handle,
8380 					  mac_for_pdev,
8381 					  pdev->rxdma_mon_status_ring[mac_id]
8382 					  .hal_srng,
8383 					  RXDMA_MONITOR_STATUS,
8384 					  RX_BUFFER_SIZE,
8385 					  &htt_tlv_filter);
8386 				}
8387 
8388 				if (soc->reap_timer_init)
8389 					qdf_timer_stop(&soc->mon_reap_timer);
8390 			}
8391 			break;
8392 		case WDI_EVENT_LITE_T2H:
8393 			if (pdev->monitor_vdev) {
8394 				/* Nothing needs to be done if monitor mode is
8395 				 * enabled
8396 				 */
8397 				return 0;
8398 			}
8399 			/* Pass value 0 to disable HTT_H2T_MSG_TYPE_PPDU_STATS_CFG
8400 			 * in the FW. Proper macros will be used once they are
8401 			 * defined in the htt header file.
8402 			 */
8403 			for (mac_id = 0; mac_id < max_mac_rings; mac_id++) {
8404 				int mac_for_pdev =
8405 						dp_get_mac_id_for_pdev(mac_id,
8406 								pdev->pdev_id);
8407 
8408 				pdev->pktlog_ppdu_stats = false;
8409 				if (!pdev->enhanced_stats_en && !pdev->tx_sniffer_enable && !pdev->mcopy_mode) {
8410 					dp_h2t_cfg_stats_msg_send(pdev, 0,
8411 								mac_for_pdev);
8412 				} else if (pdev->tx_sniffer_enable || pdev->mcopy_mode) {
8413 					dp_h2t_cfg_stats_msg_send(pdev, DP_PPDU_STATS_CFG_SNIFFER,
8414 								mac_for_pdev);
8415 				} else if (pdev->enhanced_stats_en) {
8416 					dp_h2t_cfg_stats_msg_send(pdev, DP_PPDU_STATS_CFG_ENH_STATS,
8417 								mac_for_pdev);
8418 				}
8419 			}
8420 
8421 			break;
8422 		default:
8423 			/* Nothing needs to be done for other pktlog types */
8424 			break;
8425 		}
8426 	}
8427 	return 0;
8428 }
8429 #endif
8430