xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/dp_main.c (revision 5e652ebbb966ae3180b3b572924c8205078e9f76)
1 /*
2  * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include <qdf_types.h>
20 #include <qdf_lock.h>
21 #include <qdf_net_types.h>
22 #include <qdf_lro.h>
23 #include <qdf_module.h>
24 #include <hal_hw_headers.h>
25 #include <hal_api.h>
26 #include <hif.h>
27 #include <htt.h>
28 #include <wdi_event.h>
29 #include <queue.h>
30 #include "dp_htt.h"
31 #include "dp_types.h"
32 #include "dp_internal.h"
33 #include "dp_tx.h"
34 #include "dp_tx_desc.h"
35 #include "dp_rx.h"
36 #include <cdp_txrx_handle.h>
37 #include <wlan_cfg.h>
38 #include "cdp_txrx_cmn_struct.h"
39 #include "cdp_txrx_stats_struct.h"
40 #include "cdp_txrx_cmn_reg.h"
41 #include <qdf_util.h>
42 #include "dp_peer.h"
43 #include "dp_rx_mon.h"
44 #include "htt_stats.h"
45 #include "qdf_mem.h"   /* qdf_mem_malloc,free */
46 #include "cfg_ucfg_api.h"
47 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
48 #include "cdp_txrx_flow_ctrl_v2.h"
49 #else
50 static inline void
51 cdp_dump_flow_pool_info(struct cdp_soc_t *soc)
52 {
53 	return;
54 }
55 #endif
56 #include "dp_ipa.h"
57 
58 #include "dp_cal_client_api.h"
59 
60 #ifdef CONFIG_MCL
61 #ifndef REMOVE_PKT_LOG
62 #include <pktlog_ac_api.h>
63 #include <pktlog_ac.h>
64 #endif
65 #endif
66 static void dp_pktlogmod_exit(struct dp_pdev *handle);
67 static void *dp_peer_create_wifi3(struct cdp_vdev *vdev_handle,
68 				uint8_t *peer_mac_addr,
69 				struct cdp_ctrl_objmgr_peer *ctrl_peer);
70 static void dp_peer_delete_wifi3(void *peer_handle, uint32_t bitmap);
71 static void dp_ppdu_ring_reset(struct dp_pdev *pdev);
72 static void dp_ppdu_ring_cfg(struct dp_pdev *pdev);
73 
74 #define DP_INTR_POLL_TIMER_MS	10
75 #define DP_WDS_AGING_TIMER_DEFAULT_MS	120000
76 #define DP_MCS_LENGTH (6*MAX_MCS)
77 #define DP_NSS_LENGTH (6*SS_COUNT)
78 #define DP_RXDMA_ERR_LENGTH (6*HAL_RXDMA_ERR_MAX)
79 #define DP_REO_ERR_LENGTH (6*HAL_REO_ERR_MAX)
80 #define DP_MAX_MCS_STRING_LEN 30
81 #define DP_CURR_FW_STATS_AVAIL 19
82 #define DP_HTT_DBG_EXT_STATS_MAX 256
83 #define DP_MAX_SLEEP_TIME 100
84 
85 #ifdef IPA_OFFLOAD
86 /* Exclude IPA rings from the interrupt context */
87 #define TX_RING_MASK_VAL	0xb
88 #define RX_RING_MASK_VAL	0x7
89 #else
90 #define TX_RING_MASK_VAL	0xF
91 #define RX_RING_MASK_VAL	0xF
92 #endif
93 
94 #define STR_MAXLEN	64
95 
96 #define DP_PPDU_STATS_CFG_ALL 0xFFFF
97 
98 /* PPDU stats mask sent to FW to enable enhanced stats */
99 #define DP_PPDU_STATS_CFG_ENH_STATS 0xE67
100 /* PPDU stats mask sent to FW to support debug sniffer feature */
101 #define DP_PPDU_STATS_CFG_SNIFFER 0x2FFF
102 /* PPDU stats mask sent to FW to support BPR feature*/
103 #define DP_PPDU_STATS_CFG_BPR 0x2000
104 /* PPDU stats mask sent to FW to support BPR and enhanced stats feature */
105 #define DP_PPDU_STATS_CFG_BPR_ENH (DP_PPDU_STATS_CFG_BPR | \
106 				   DP_PPDU_STATS_CFG_ENH_STATS)
107 /* PPDU stats mask sent to FW to support BPR and pktlog stats feature */
108 #define DP_PPDU_STATS_CFG_BPR_PKTLOG (DP_PPDU_STATS_CFG_BPR | \
109 				      DP_PPDU_TXLITE_STATS_BITMASK_CFG)
110 
111 #define RNG_ERR		"SRNG setup failed for"
112 /**
113  * default_dscp_tid_map - Default DSCP-TID mapping
114  *
115  * DSCP        TID
116  * 000000      0
117  * 001000      1
118  * 010000      2
119  * 011000      3
120  * 100000      4
121  * 101000      5
122  * 110000      6
123  * 111000      7
124  */
125 static uint8_t default_dscp_tid_map[DSCP_TID_MAP_MAX] = {
126 	0, 0, 0, 0, 0, 0, 0, 0,
127 	1, 1, 1, 1, 1, 1, 1, 1,
128 	2, 2, 2, 2, 2, 2, 2, 2,
129 	3, 3, 3, 3, 3, 3, 3, 3,
130 	4, 4, 4, 4, 4, 4, 4, 4,
131 	5, 5, 5, 5, 5, 5, 5, 5,
132 	6, 6, 6, 6, 6, 6, 6, 6,
133 	7, 7, 7, 7, 7, 7, 7, 7,
134 };
135 
136 /**
137  * struct dp_rate_debug - debug print strings for MCS rates
138  *
139  * @mcs_type: print string for a given MCS
140  * @valid: whether this MCS index is valid
141  */
142 struct dp_rate_debug {
143 	char mcs_type[DP_MAX_MCS_STRING_LEN];
144 	uint8_t valid;
145 };
146 
147 #define MCS_VALID 1
148 #define MCS_INVALID 0
149 
150 static const struct dp_rate_debug dp_rate_string[DOT11_MAX][MAX_MCS] = {
151 
152 	{
153 		{"OFDM 48 Mbps", MCS_VALID},
154 		{"OFDM 24 Mbps", MCS_VALID},
155 		{"OFDM 12 Mbps", MCS_VALID},
156 		{"OFDM 6 Mbps ", MCS_VALID},
157 		{"OFDM 54 Mbps", MCS_VALID},
158 		{"OFDM 36 Mbps", MCS_VALID},
159 		{"OFDM 18 Mbps", MCS_VALID},
160 		{"OFDM 9 Mbps ", MCS_VALID},
161 		{"INVALID ", MCS_INVALID},
162 		{"INVALID ", MCS_INVALID},
163 		{"INVALID ", MCS_INVALID},
164 		{"INVALID ", MCS_INVALID},
165 		{"INVALID ", MCS_VALID},
166 	},
167 	{
168 		{"CCK 11 Mbps Long  ", MCS_VALID},
169 		{"CCK 5.5 Mbps Long ", MCS_VALID},
170 		{"CCK 2 Mbps Long   ", MCS_VALID},
171 		{"CCK 1 Mbps Long   ", MCS_VALID},
172 		{"CCK 11 Mbps Short ", MCS_VALID},
173 		{"CCK 5.5 Mbps Short", MCS_VALID},
174 		{"CCK 2 Mbps Short  ", MCS_VALID},
175 		{"INVALID ", MCS_INVALID},
176 		{"INVALID ", MCS_INVALID},
177 		{"INVALID ", MCS_INVALID},
178 		{"INVALID ", MCS_INVALID},
179 		{"INVALID ", MCS_INVALID},
180 		{"INVALID ", MCS_VALID},
181 	},
182 	{
183 		{"HT MCS 0 (BPSK 1/2)  ", MCS_VALID},
184 		{"HT MCS 1 (QPSK 1/2)  ", MCS_VALID},
185 		{"HT MCS 2 (QPSK 3/4)  ", MCS_VALID},
186 		{"HT MCS 3 (16-QAM 1/2)", MCS_VALID},
187 		{"HT MCS 4 (16-QAM 3/4)", MCS_VALID},
188 		{"HT MCS 5 (64-QAM 2/3)", MCS_VALID},
189 		{"HT MCS 6 (64-QAM 3/4)", MCS_VALID},
190 		{"HT MCS 7 (64-QAM 5/6)", MCS_VALID},
191 		{"INVALID ", MCS_INVALID},
192 		{"INVALID ", MCS_INVALID},
193 		{"INVALID ", MCS_INVALID},
194 		{"INVALID ", MCS_INVALID},
195 		{"INVALID ", MCS_VALID},
196 	},
197 	{
198 		{"VHT MCS 0 (BPSK 1/2)     ", MCS_VALID},
199 		{"VHT MCS 1 (QPSK 1/2)     ", MCS_VALID},
200 		{"VHT MCS 2 (QPSK 3/4)     ", MCS_VALID},
201 		{"VHT MCS 3 (16-QAM 1/2)   ", MCS_VALID},
202 		{"VHT MCS 4 (16-QAM 3/4)   ", MCS_VALID},
203 		{"VHT MCS 5 (64-QAM 2/3)   ", MCS_VALID},
204 		{"VHT MCS 6 (64-QAM 3/4)   ", MCS_VALID},
205 		{"VHT MCS 7 (64-QAM 5/6)   ", MCS_VALID},
206 		{"VHT MCS 8 (256-QAM 3/4)  ", MCS_VALID},
207 		{"VHT MCS 9 (256-QAM 5/6)  ", MCS_VALID},
208 		{"VHT MCS 10 (1024-QAM 3/4)", MCS_VALID},
209 		{"VHT MCS 11 (1024-QAM 5/6)", MCS_VALID},
210 		{"INVALID ", MCS_VALID},
211 	},
212 	{
213 		{"HE MCS 0 (BPSK 1/2)     ", MCS_VALID},
214 		{"HE MCS 1 (QPSK 1/2)     ", MCS_VALID},
215 		{"HE MCS 2 (QPSK 3/4)     ", MCS_VALID},
216 		{"HE MCS 3 (16-QAM 1/2)   ", MCS_VALID},
217 		{"HE MCS 4 (16-QAM 3/4)   ", MCS_VALID},
218 		{"HE MCS 5 (64-QAM 2/3)   ", MCS_VALID},
219 		{"HE MCS 6 (64-QAM 3/4)   ", MCS_VALID},
220 		{"HE MCS 7 (64-QAM 5/6)   ", MCS_VALID},
221 		{"HE MCS 8 (256-QAM 3/4)  ", MCS_VALID},
222 		{"HE MCS 9 (256-QAM 5/6)  ", MCS_VALID},
223 		{"HE MCS 10 (1024-QAM 3/4)", MCS_VALID},
224 		{"HE MCS 11 (1024-QAM 5/6)", MCS_VALID},
225 		{"INVALID ", MCS_VALID},
226 	}
227 };
228 
229 /**
230  * @brief Cpu ring map types
231  */
232 enum dp_cpu_ring_map_types {
233 	DP_DEFAULT_MAP,
234 	DP_NSS_FIRST_RADIO_OFFLOADED_MAP,
235 	DP_NSS_SECOND_RADIO_OFFLOADED_MAP,
236 	DP_NSS_ALL_RADIO_OFFLOADED_MAP,
237 	DP_CPU_RING_MAP_MAX
238 };
239 
240 /**
241  * @brief Cpu to tx ring map
242  */
243 static uint8_t dp_cpu_ring_map[DP_CPU_RING_MAP_MAX][WLAN_CFG_INT_NUM_CONTEXTS] = {
244 	{0x0, 0x1, 0x2, 0x0},
245 	{0x1, 0x2, 0x1, 0x2},
246 	{0x0, 0x2, 0x0, 0x2},
247 	{0x2, 0x2, 0x2, 0x2}
248 };
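/*
 * Reading aid: dp_cpu_ring_map[map_type][intr_ctx_num] gives the Tx ring
 * used by that interrupt context for the corresponding NSS offload
 * configuration listed in enum dp_cpu_ring_map_types above.
 */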
249 
250 /**
251  * @brief Select the type of statistics
252  */
253 enum dp_stats_type {
254 	STATS_FW = 0,
255 	STATS_HOST = 1,
256 	STATS_TYPE_MAX = 2,
257 };
258 
259 /**
260  * @brief General Firmware statistics options
261  *
262  */
263 enum dp_fw_stats {
264 	TXRX_FW_STATS_INVALID	= -1,
265 };
266 
267 /**
268  * dp_stats_mapping_table - Firmware and Host statistics
269  * currently supported
270  */
271 const int dp_stats_mapping_table[][STATS_TYPE_MAX] = {
272 	{HTT_DBG_EXT_STATS_RESET, TXRX_HOST_STATS_INVALID},
273 	{HTT_DBG_EXT_STATS_PDEV_TX, TXRX_HOST_STATS_INVALID},
274 	{HTT_DBG_EXT_STATS_PDEV_RX, TXRX_HOST_STATS_INVALID},
275 	{HTT_DBG_EXT_STATS_PDEV_TX_HWQ, TXRX_HOST_STATS_INVALID},
276 	{HTT_DBG_EXT_STATS_PDEV_TX_SCHED, TXRX_HOST_STATS_INVALID},
277 	{HTT_DBG_EXT_STATS_PDEV_ERROR, TXRX_HOST_STATS_INVALID},
278 	{HTT_DBG_EXT_STATS_PDEV_TQM, TXRX_HOST_STATS_INVALID},
279 	{HTT_DBG_EXT_STATS_TQM_CMDQ, TXRX_HOST_STATS_INVALID},
280 	{HTT_DBG_EXT_STATS_TX_DE_INFO, TXRX_HOST_STATS_INVALID},
281 	{HTT_DBG_EXT_STATS_PDEV_TX_RATE, TXRX_HOST_STATS_INVALID},
282 	{HTT_DBG_EXT_STATS_PDEV_RX_RATE, TXRX_HOST_STATS_INVALID},
283 	{TXRX_FW_STATS_INVALID, TXRX_HOST_STATS_INVALID},
284 	{HTT_DBG_EXT_STATS_TX_SELFGEN_INFO, TXRX_HOST_STATS_INVALID},
285 	{HTT_DBG_EXT_STATS_TX_MU_HWQ, TXRX_HOST_STATS_INVALID},
286 	{HTT_DBG_EXT_STATS_RING_IF_INFO, TXRX_HOST_STATS_INVALID},
287 	{HTT_DBG_EXT_STATS_SRNG_INFO, TXRX_HOST_STATS_INVALID},
288 	{HTT_DBG_EXT_STATS_SFM_INFO, TXRX_HOST_STATS_INVALID},
289 	{HTT_DBG_EXT_STATS_PDEV_TX_MU, TXRX_HOST_STATS_INVALID},
290 	{HTT_DBG_EXT_STATS_ACTIVE_PEERS_LIST, TXRX_HOST_STATS_INVALID},
291 	/* Last ENUM for HTT FW STATS */
292 	{DP_HTT_DBG_EXT_STATS_MAX, TXRX_HOST_STATS_INVALID},
293 	{TXRX_FW_STATS_INVALID, TXRX_CLEAR_STATS},
294 	{TXRX_FW_STATS_INVALID, TXRX_RX_RATE_STATS},
295 	{TXRX_FW_STATS_INVALID, TXRX_TX_RATE_STATS},
296 	{TXRX_FW_STATS_INVALID, TXRX_TX_HOST_STATS},
297 	{TXRX_FW_STATS_INVALID, TXRX_RX_HOST_STATS},
298 	{TXRX_FW_STATS_INVALID, TXRX_AST_STATS},
299 	{TXRX_FW_STATS_INVALID, TXRX_SRNG_PTR_STATS},
300 	{TXRX_FW_STATS_INVALID, TXRX_RX_MON_STATS},
301 	{TXRX_FW_STATS_INVALID, TXRX_REO_QUEUE_STATS},
302 };
303 
304 /* MCL specific functions */
305 #ifdef CONFIG_MCL
306 /**
307  * dp_soc_get_mon_mask_for_interrupt_mode() - get mon mode mask for intr mode
308  * @soc: pointer to dp_soc handle
309  * @intr_ctx_num: interrupt context number for which mon mask is needed
310  *
311  * For MCL, monitor mode rings are being processed in timer contexts (polled).
312  * This function returns 0 because, in interrupt mode (softirq-based RX),
313  * we do not want to process monitor mode rings in a softirq.
314  *
315  * So, when packet log is enabled for SAP/STA/P2P modes, regular interrupt
316  * processing will not service the monitor mode rings; that is done in a
317  * separate timer context.
318  *
319  * Return: 0
320  */
321 static inline
322 uint32_t dp_soc_get_mon_mask_for_interrupt_mode(struct dp_soc *soc, int intr_ctx_num)
323 {
324 	return 0;
325 }
326 
327 /*
328  * dp_service_mon_rings() - timer handler to reap monitor rings,
329  * required because PPDU end interrupts are not available
330  * @arg: SoC handle
331  *
332  * Return: none
333  *
334  */
335 static void dp_service_mon_rings(void *arg)
336 {
337 	struct dp_soc *soc = (struct dp_soc *)arg;
338 	int ring = 0, work_done, mac_id;
339 	struct dp_pdev *pdev = NULL;
340 
341 	for  (ring = 0 ; ring < MAX_PDEV_CNT; ring++) {
342 		pdev = soc->pdev_list[ring];
343 		if (!pdev)
344 			continue;
345 		for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
346 			int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id,
347 								pdev->pdev_id);
348 			work_done = dp_mon_process(soc, mac_for_pdev,
349 						   QCA_NAPI_BUDGET);
350 
351 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
352 				  FL("Reaped %d descs from Monitor rings"),
353 				  work_done);
354 		}
355 	}
356 
357 	qdf_timer_mod(&soc->mon_reap_timer, DP_INTR_POLL_TIMER_MS);
358 }
359 
360 #ifndef REMOVE_PKT_LOG
361 /**
362  * dp_pkt_log_init() - API to initialize packet log
363  * @ppdev: physical device handle
364  * @scn: HIF context
365  *
366  * Return: none
367  */
368 void dp_pkt_log_init(struct cdp_pdev *ppdev, void *scn)
369 {
370 	struct dp_pdev *handle = (struct dp_pdev *)ppdev;
371 
372 	if (handle->pkt_log_init) {
373 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
374 			  "%s: Packet log already initialized", __func__);
375 		return;
376 	}
377 
378 	pktlog_sethandle(&handle->pl_dev, scn);
379 	pktlog_set_callback_regtype(PKTLOG_LITE_CALLBACK_REGISTRATION);
380 
381 	if (pktlogmod_init(scn)) {
382 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
383 			  "%s: pktlogmod_init failed", __func__);
384 		handle->pkt_log_init = false;
385 	} else {
386 		handle->pkt_log_init = true;
387 	}
388 }
389 
390 /**
391  * dp_pkt_log_con_service() - connect packet log service
392  * @ppdev: physical device handle
393  * @scn: device context
394  *
395  * Return: none
396  */
397 static void dp_pkt_log_con_service(struct cdp_pdev *ppdev, void *scn)
398 {
399 	struct dp_pdev *pdev = (struct dp_pdev *)ppdev;
400 
401 	dp_pkt_log_init((struct cdp_pdev *)pdev, scn);
402 	pktlog_htc_attach();
403 }
404 
405 /**
406  * dp_pktlogmod_exit() - API to cleanup pktlog info
407  * @handle: Pdev handle
408  *
409  * Return: none
410  */
411 static void dp_pktlogmod_exit(struct dp_pdev *handle)
412 {
413 	void *scn = (void *)handle->soc->hif_handle;
414 
415 	if (!scn) {
416 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
417 			  "%s: Invalid hif(scn) handle", __func__);
418 		return;
419 	}
420 
421 	pktlogmod_exit(scn);
422 	handle->pkt_log_init = false;
423 }
424 #endif
425 #else
426 static void dp_pktlogmod_exit(struct dp_pdev *handle) { }
427 
428 /**
429  * dp_soc_get_mon_mask_for_interrupt_mode() - get mon mode mask for intr mode
430  * @soc: pointer to dp_soc handle
431  * @intr_ctx_num: interrupt context number for which mon mask is needed
432  *
433  * Return: mon mask value
434  */
435 static inline
436 uint32_t dp_soc_get_mon_mask_for_interrupt_mode(struct dp_soc *soc, int intr_ctx_num)
437 {
438 	return wlan_cfg_get_rx_mon_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
439 }
440 #endif
441 
442 /**
443  * dp_get_dp_vdev_from_cdp_vdev() - get dp_vdev from cdp_vdev by type-casting
444  * @cdp_opaque_vdev: pointer to cdp_vdev
445  *
446  * Return: pointer to dp_vdev
447  */
448 static
449 struct dp_vdev *dp_get_dp_vdev_from_cdp_vdev(struct cdp_vdev *cdp_opaque_vdev)
450 {
451 	return (struct dp_vdev *)cdp_opaque_vdev;
452 }
453 
454 
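/**
 * dp_peer_add_ast_wifi3() - cdp wrapper to add an AST entry for a peer
 * @soc_hdl: Datapath SOC handle
 * @peer_hdl: peer handle
 * @mac_addr: MAC address to be mapped by the AST entry
 * @type: AST entry type
 * @flags: AST entry flags
 *
 * Return: status returned by dp_peer_add_ast()
 */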
455 static int dp_peer_add_ast_wifi3(struct cdp_soc_t *soc_hdl,
456 					struct cdp_peer *peer_hdl,
457 					uint8_t *mac_addr,
458 					enum cdp_txrx_ast_entry_type type,
459 					uint32_t flags)
460 {
461 
462 	return dp_peer_add_ast((struct dp_soc *)soc_hdl,
463 				(struct dp_peer *)peer_hdl,
464 				mac_addr,
465 				type,
466 				flags);
467 }
468 
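/**
 * dp_peer_del_ast_wifi3() - cdp wrapper to delete an AST entry
 * @soc_hdl: Datapath SOC handle
 * @ast_entry_hdl: AST entry to be deleted
 *
 * The entry is deleted with the SOC AST lock held.
 *
 * Return: none
 */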
469 static void dp_peer_del_ast_wifi3(struct cdp_soc_t *soc_hdl,
470 					 void *ast_entry_hdl)
471 {
472 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
473 	qdf_spin_lock_bh(&soc->ast_lock);
474 	dp_peer_del_ast((struct dp_soc *)soc_hdl,
475 			(struct dp_ast_entry *)ast_entry_hdl);
476 	qdf_spin_unlock_bh(&soc->ast_lock);
477 }
478 
479 
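/**
 * dp_peer_update_ast_wifi3() - cdp wrapper to update an AST entry
 * @soc_hdl: Datapath SOC handle
 * @peer_hdl: peer to which the entry should now point
 * @wds_macaddr: MAC address of the AST entry to update
 * @flags: AST entry flags
 *
 * Return: status of dp_peer_update_ast(), or -1 if no matching entry is found
 */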
480 static int dp_peer_update_ast_wifi3(struct cdp_soc_t *soc_hdl,
481 						struct cdp_peer *peer_hdl,
482 						uint8_t *wds_macaddr,
483 						uint32_t flags)
484 {
485 	int status = -1;
486 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
487 	struct dp_ast_entry  *ast_entry = NULL;
488 
489 	qdf_spin_lock_bh(&soc->ast_lock);
490 	ast_entry = dp_peer_ast_hash_find(soc, wds_macaddr);
491 
492 	if (ast_entry) {
493 		status = dp_peer_update_ast(soc,
494 					    (struct dp_peer *)peer_hdl,
495 					   ast_entry, flags);
496 	}
497 
498 	qdf_spin_unlock_bh(&soc->ast_lock);
499 
500 	return status;
501 }
502 
503 /*
504  * dp_wds_reset_ast_wifi3() - Reset the is_active flag for an AST entry
505  * @soc_hdl:		Datapath SOC handle
506  * @wds_macaddr:	WDS entry MAC Address
507  * Return: None
508  */
509 static void dp_wds_reset_ast_wifi3(struct cdp_soc_t *soc_hdl,
510 				   uint8_t *wds_macaddr, void *vdev_handle)
511 {
512 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
513 	struct dp_ast_entry *ast_entry = NULL;
514 
515 	qdf_spin_lock_bh(&soc->ast_lock);
516 	ast_entry = dp_peer_ast_hash_find(soc, wds_macaddr);
517 
518 	if (ast_entry) {
519 		if ((ast_entry->type != CDP_TXRX_AST_TYPE_STATIC) &&
520 		    (ast_entry->type != CDP_TXRX_AST_TYPE_SELF)) {
521 			ast_entry->is_active = TRUE;
522 		}
523 	}
524 
525 	qdf_spin_unlock_bh(&soc->ast_lock);
526 }
527 
528 /*
529  * dp_wds_reset_ast_table_wifi3() - Reset the is_active flag for all AST entries
530  * @soc_hdl:		Datapath SOC handle
531  *
532  * Return: None
533  */
534 static void dp_wds_reset_ast_table_wifi3(struct cdp_soc_t  *soc_hdl,
535 					 void *vdev_hdl)
536 {
537 	struct dp_soc *soc = (struct dp_soc *) soc_hdl;
538 	struct dp_pdev *pdev;
539 	struct dp_vdev *vdev;
540 	struct dp_peer *peer;
541 	struct dp_ast_entry *ase, *temp_ase;
542 	int i;
543 
544 	qdf_spin_lock_bh(&soc->ast_lock);
545 
546 	for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) {
547 		pdev = soc->pdev_list[i];
548 		qdf_spin_lock_bh(&pdev->vdev_list_lock);
549 		DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
550 			DP_VDEV_ITERATE_PEER_LIST(vdev, peer) {
551 				DP_PEER_ITERATE_ASE_LIST(peer, ase, temp_ase) {
552 					if ((ase->type ==
553 					     CDP_TXRX_AST_TYPE_STATIC) ||
554 					    (ase->type ==
555 					     CDP_TXRX_AST_TYPE_SELF))
556 						continue;
557 					ase->is_active = TRUE;
558 				}
559 			}
560 		}
561 		qdf_spin_unlock_bh(&pdev->vdev_list_lock);
562 	}
563 
564 	qdf_spin_unlock_bh(&soc->ast_lock);
565 }
566 
567 /*
568  * dp_wds_flush_ast_table_wifi3() - Delete all WDS and HMWDS AST entries
569  * @soc_hdl:		Datapath SOC handle
570  *
571  * Return: None
572  */
573 static void dp_wds_flush_ast_table_wifi3(struct cdp_soc_t  *soc_hdl)
574 {
575 	struct dp_soc *soc = (struct dp_soc *) soc_hdl;
576 	struct dp_pdev *pdev;
577 	struct dp_vdev *vdev;
578 	struct dp_peer *peer;
579 	struct dp_ast_entry *ase, *temp_ase;
580 	int i;
581 
582 	qdf_spin_lock_bh(&soc->ast_lock);
583 
584 	for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) {
585 		pdev = soc->pdev_list[i];
586 		qdf_spin_lock_bh(&pdev->vdev_list_lock);
587 		DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
588 			DP_VDEV_ITERATE_PEER_LIST(vdev, peer) {
589 				DP_PEER_ITERATE_ASE_LIST(peer, ase, temp_ase) {
590 					if ((ase->type ==
591 					     CDP_TXRX_AST_TYPE_STATIC) ||
592 					    (ase->type ==
593 					     CDP_TXRX_AST_TYPE_SELF))
594 						continue;
595 					dp_peer_del_ast(soc, ase);
596 				}
597 			}
598 		}
599 		qdf_spin_unlock_bh(&pdev->vdev_list_lock);
600 	}
601 
602 	qdf_spin_unlock_bh(&soc->ast_lock);
603 }
604 
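/**
 * dp_peer_ast_hash_find_wifi3() - look up an AST entry by MAC address
 * @soc_hdl: Datapath SOC handle
 * @ast_mac_addr: MAC address to search for
 *
 * Return: opaque pointer to the AST entry, or NULL if not found
 */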
605 static void *dp_peer_ast_hash_find_wifi3(struct cdp_soc_t *soc_hdl,
606 						uint8_t *ast_mac_addr)
607 {
608 	struct dp_ast_entry *ast_entry;
609 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
610 	qdf_spin_lock_bh(&soc->ast_lock);
611 	ast_entry = dp_peer_ast_hash_find(soc, ast_mac_addr);
612 	qdf_spin_unlock_bh(&soc->ast_lock);
613 	return (void *)ast_entry;
614 }
615 
616 static uint8_t dp_peer_ast_get_pdev_id_wifi3(struct cdp_soc_t *soc_hdl,
617 							void *ast_entry_hdl)
618 {
619 	return dp_peer_ast_get_pdev_id((struct dp_soc *)soc_hdl,
620 					(struct dp_ast_entry *)ast_entry_hdl);
621 }
622 
623 static uint8_t dp_peer_ast_get_next_hop_wifi3(struct cdp_soc_t *soc_hdl,
624 							void *ast_entry_hdl)
625 {
626 	return dp_peer_ast_get_next_hop((struct dp_soc *)soc_hdl,
627 					(struct dp_ast_entry *)ast_entry_hdl);
628 }
629 
630 static void dp_peer_ast_set_type_wifi3(
631 					struct cdp_soc_t *soc_hdl,
632 					void *ast_entry_hdl,
633 					enum cdp_txrx_ast_entry_type type)
634 {
635 	dp_peer_ast_set_type((struct dp_soc *)soc_hdl,
636 				(struct dp_ast_entry *)ast_entry_hdl,
637 				type);
638 }
639 
640 static enum cdp_txrx_ast_entry_type dp_peer_ast_get_type_wifi3(
641 					struct cdp_soc_t *soc_hdl,
642 					void *ast_entry_hdl)
643 {
644 	return ((struct dp_ast_entry *)ast_entry_hdl)->type;
645 }
646 
647 /**
648  * dp_srng_find_ring_in_mask() - find which ext_group a ring belongs to
649  * @ring_num: ring num of the ring being queried
650  * @grp_mask: the grp_mask array for the ring type in question.
651  *
652  * The grp_mask array is indexed by group number and the bit fields correspond
653  * to ring numbers.  We are finding which interrupt group a ring belongs to.
654  *
655  * Return: the index in the grp_mask array whose mask contains the ring number,
656  * or -QDF_STATUS_E_NOENT if no entry is found
657  */
658 static int dp_srng_find_ring_in_mask(int ring_num, int *grp_mask)
659 {
660 	int ext_group_num;
661 	int mask = 1 << ring_num;
662 
663 	for (ext_group_num = 0; ext_group_num < WLAN_CFG_INT_NUM_CONTEXTS;
664 	     ext_group_num++) {
665 		if (mask & grp_mask[ext_group_num])
666 			return ext_group_num;
667 	}
668 
669 	return -QDF_STATUS_E_NOENT;
670 }
671 
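/**
 * dp_srng_calculate_msi_group() - map a ring to its interrupt (MSI) group
 * @soc: Datapath SOC handle
 * @ring_type: type of the ring being queried
 * @ring_num: ring number within the ring type
 *
 * Selects the wlan_cfg ring mask corresponding to the ring type and uses
 * dp_srng_find_ring_in_mask() to locate the ext_group servicing the ring.
 *
 * Return: interrupt group index, or -QDF_STATUS_E_NOENT if the ring is not
 * part of any ext_group
 */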
672 static int dp_srng_calculate_msi_group(struct dp_soc *soc,
673 				       enum hal_ring_type ring_type,
674 				       int ring_num)
675 {
676 	int *grp_mask;
677 
678 	switch (ring_type) {
679 	case WBM2SW_RELEASE:
680 		/* dp_tx_comp_handler - soc->tx_comp_ring */
681 		if (ring_num < 3)
682 			grp_mask = &soc->wlan_cfg_ctx->int_tx_ring_mask[0];
683 
684 		/* dp_rx_wbm_err_process - soc->rx_rel_ring */
685 		else if (ring_num == 3) {
686 			/* sw treats this as a separate ring type */
687 			grp_mask = &soc->wlan_cfg_ctx->
688 				int_rx_wbm_rel_ring_mask[0];
689 			ring_num = 0;
690 		} else {
691 			qdf_assert(0);
692 			return -QDF_STATUS_E_NOENT;
693 		}
694 	break;
695 
696 	case REO_EXCEPTION:
697 		/* dp_rx_err_process - &soc->reo_exception_ring */
698 		grp_mask = &soc->wlan_cfg_ctx->int_rx_err_ring_mask[0];
699 	break;
700 
701 	case REO_DST:
702 		/* dp_rx_process - soc->reo_dest_ring */
703 		grp_mask = &soc->wlan_cfg_ctx->int_rx_ring_mask[0];
704 	break;
705 
706 	case REO_STATUS:
707 		/* dp_reo_status_ring_handler - soc->reo_status_ring */
708 		grp_mask = &soc->wlan_cfg_ctx->int_reo_status_ring_mask[0];
709 	break;
710 
711 	/* dp_rx_mon_status_srng_process - pdev->rxdma_mon_status_ring*/
712 	case RXDMA_MONITOR_STATUS:
713 	/* dp_rx_mon_dest_process - pdev->rxdma_mon_dst_ring */
714 	case RXDMA_MONITOR_DST:
715 		/* dp_mon_process */
716 		grp_mask = &soc->wlan_cfg_ctx->int_rx_mon_ring_mask[0];
717 	break;
718 	case RXDMA_DST:
719 		/* dp_rxdma_err_process */
720 		grp_mask = &soc->wlan_cfg_ctx->int_rxdma2host_ring_mask[0];
721 	break;
722 
723 	case RXDMA_BUF:
724 		grp_mask = &soc->wlan_cfg_ctx->int_host2rxdma_ring_mask[0];
725 	break;
726 
727 	case RXDMA_MONITOR_BUF:
728 		/* TODO: support low_thresh interrupt */
729 		return -QDF_STATUS_E_NOENT;
730 	break;
731 
732 	case TCL_DATA:
733 	case TCL_CMD:
734 	case REO_CMD:
735 	case SW2WBM_RELEASE:
736 	case WBM_IDLE_LINK:
737 		/* normally empty SW_TO_HW rings */
738 		return -QDF_STATUS_E_NOENT;
739 	break;
740 
741 	case TCL_STATUS:
742 	case REO_REINJECT:
743 		/* misc unused rings */
744 		return -QDF_STATUS_E_NOENT;
745 	break;
746 
747 	case CE_SRC:
748 	case CE_DST:
749 	case CE_DST_STATUS:
750 		/* CE_rings - currently handled by hif */
751 	default:
752 		return -QDF_STATUS_E_NOENT;
753 	break;
754 	}
755 
756 	return dp_srng_find_ring_in_mask(ring_num, grp_mask);
757 }
758 
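/**
 * dp_srng_msi_setup() - fill MSI address/data for a ring
 * @soc: Datapath SOC handle
 * @ring_params: SRNG parameters to be populated
 * @ring_type: type of the ring
 * @ring_num: ring number within the ring type
 *
 * Populates msi_addr and msi_data from the platform MSI assignment based on
 * the ring's interrupt group. The fields are zeroed if the ring is not part
 * of any ext_group; the function returns without changes if no platform MSI
 * assignment is available.
 *
 * Return: none
 */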
759 static void dp_srng_msi_setup(struct dp_soc *soc, struct hal_srng_params
760 			      *ring_params, int ring_type, int ring_num)
761 {
762 	int msi_group_number;
763 	int msi_data_count;
764 	int ret;
765 	uint32_t msi_data_start, msi_irq_start, addr_low, addr_high;
766 
767 	ret = pld_get_user_msi_assignment(soc->osdev->dev, "DP",
768 					    &msi_data_count, &msi_data_start,
769 					    &msi_irq_start);
770 
771 	if (ret)
772 		return;
773 
774 	msi_group_number = dp_srng_calculate_msi_group(soc, ring_type,
775 						       ring_num);
776 	if (msi_group_number < 0) {
777 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW,
778 			FL("ring not part of an ext_group; ring_type: %d,ring_num %d"),
779 			ring_type, ring_num);
780 		ring_params->msi_addr = 0;
781 		ring_params->msi_data = 0;
782 		return;
783 	}
784 
785 	if (msi_group_number > msi_data_count) {
786 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_WARN,
787 			FL("2 msi_groups will share an msi; msi_group_num %d"),
788 			msi_group_number);
789 
790 		QDF_ASSERT(0);
791 	}
792 
793 	pld_get_msi_address(soc->osdev->dev, &addr_low, &addr_high);
794 
795 	ring_params->msi_addr = addr_low;
796 	ring_params->msi_addr |= (qdf_dma_addr_t)(((uint64_t)addr_high) << 32);
797 	ring_params->msi_data = (msi_group_number % msi_data_count)
798 		+ msi_data_start;
799 	ring_params->flags |= HAL_SRNG_MSI_INTR;
800 }
801 
802 /**
803  * dp_print_ast_stats() - Dump AST table contents
804  * @soc: Datapath soc handle
805  *
806  * Return: void
807  */
808 #ifdef FEATURE_AST
809 static void dp_print_ast_stats(struct dp_soc *soc)
810 {
811 	uint8_t i;
812 	uint8_t num_entries = 0;
813 	struct dp_vdev *vdev;
814 	struct dp_pdev *pdev;
815 	struct dp_peer *peer;
816 	struct dp_ast_entry *ase, *tmp_ase;
817 	char type[CDP_TXRX_AST_TYPE_MAX][10] = {
818 			"NONE", "STATIC", "SELF", "WDS", "MEC", "HMWDS"};
819 
820 	DP_PRINT_STATS("AST Stats:");
821 	DP_PRINT_STATS("	Entries Added   = %d", soc->stats.ast.added);
822 	DP_PRINT_STATS("	Entries Deleted = %d", soc->stats.ast.deleted);
823 	DP_PRINT_STATS("	Entries Agedout = %d", soc->stats.ast.aged_out);
824 	DP_PRINT_STATS("AST Table:");
825 	for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) {
826 		pdev = soc->pdev_list[i];
827 		qdf_spin_lock_bh(&pdev->vdev_list_lock);
828 		DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
829 			DP_VDEV_ITERATE_PEER_LIST(vdev, peer) {
830 				DP_PEER_ITERATE_ASE_LIST(peer, ase, tmp_ase) {
831 					DP_PRINT_STATS("%6d mac_addr = %pM"
832 							" peer_mac_addr = %pM"
833 							" type = %s"
834 							" next_hop = %d"
835 							" is_active = %d"
836 							" is_bss = %d"
837 							" ast_idx = %d"
838 							" pdev_id = %d"
839 							" vdev_id = %d",
840 							++num_entries,
841 							ase->mac_addr.raw,
842 							ase->peer->mac_addr.raw,
843 							type[ase->type],
844 							ase->next_hop,
845 							ase->is_active,
846 							ase->is_bss,
847 							ase->ast_idx,
848 							ase->pdev_id,
849 							ase->vdev_id);
850 				}
851 			}
852 		}
853 		qdf_spin_unlock_bh(&pdev->vdev_list_lock);
854 	}
855 }
856 #else
857 static void dp_print_ast_stats(struct dp_soc *soc)
858 {
859 	DP_PRINT_STATS("AST Stats not available. Enable FEATURE_AST");
860 	return;
861 }
862 #endif
863 
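/**
 * dp_print_peer_table() - Dump the peers of a vdev along with basic peer flags
 * @vdev: Datapath vdev handle
 *
 * Return: none
 */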
864 static void dp_print_peer_table(struct dp_vdev *vdev)
865 {
866 	struct dp_peer *peer = NULL;
867 
868 	DP_PRINT_STATS("Dumping Peer Table  Stats:");
869 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
870 		if (!peer) {
871 			DP_PRINT_STATS("Invalid Peer");
872 			return;
873 		}
874 		DP_PRINT_STATS("    peer_mac_addr = %pM"
875 			" nawds_enabled = %d"
876 			" bss_peer = %d"
877 			" wapi = %d"
878 			" wds_enabled = %d"
879 			" delete in progress = %d",
880 			peer->mac_addr.raw,
881 			peer->nawds_enabled,
882 			peer->bss_peer,
883 			peer->wapi,
884 			peer->wds_enabled,
885 			peer->delete_in_progress);
886 	}
887 }
888 
889 /*
890  * dp_srng_setup - Internal function to setup SRNG rings used by data path
891  */
892 static int dp_srng_setup(struct dp_soc *soc, struct dp_srng *srng,
893 	int ring_type, int ring_num, int mac_id, uint32_t num_entries)
894 {
895 	void *hal_soc = soc->hal_soc;
896 	uint32_t entry_size = hal_srng_get_entrysize(hal_soc, ring_type);
897 	/* TODO: See if we should get align size from hal */
898 	uint32_t ring_base_align = 8;
899 	struct hal_srng_params ring_params;
900 	uint32_t max_entries = hal_srng_max_entries(hal_soc, ring_type);
901 
902 	/* TODO: Currently hal layer takes care of endianness related settings.
903 	 * See if these settings need to passed from DP layer
904 	 */
905 	ring_params.flags = 0;
906 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW,
907 		  FL("Ring type: %d, num:%d"), ring_type, ring_num);
908 
909 	num_entries = (num_entries > max_entries) ? max_entries : num_entries;
910 	srng->hal_srng = NULL;
911 	srng->alloc_size = (num_entries * entry_size) + ring_base_align - 1;
912 	srng->num_entries = num_entries;
913 	srng->base_vaddr_unaligned = qdf_mem_alloc_consistent(
914 		soc->osdev, soc->osdev->dev, srng->alloc_size,
915 		&(srng->base_paddr_unaligned));
916 
917 	if (!srng->base_vaddr_unaligned) {
918 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
919 			FL("alloc failed - ring_type: %d, ring_num %d"),
920 			ring_type, ring_num);
921 		return QDF_STATUS_E_NOMEM;
922 	}
923 
924 	ring_params.ring_base_vaddr = srng->base_vaddr_unaligned +
925 		((unsigned long)srng->base_vaddr_unaligned % ring_base_align);
926 	ring_params.ring_base_paddr = srng->base_paddr_unaligned +
927 		((unsigned long)(ring_params.ring_base_vaddr) -
928 		(unsigned long)srng->base_vaddr_unaligned);
929 	ring_params.num_entries = num_entries;
930 
931 	if (soc->intr_mode == DP_INTR_MSI) {
932 		dp_srng_msi_setup(soc, &ring_params, ring_type, ring_num);
933 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
934 			  FL("Using MSI for ring_type: %d, ring_num %d"),
935 			  ring_type, ring_num);
936 
937 	} else {
938 		ring_params.msi_data = 0;
939 		ring_params.msi_addr = 0;
940 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
941 			  FL("Skipping MSI for ring_type: %d, ring_num %d"),
942 			  ring_type, ring_num);
943 	}
944 
945 	/*
946 	 * Setup interrupt timer and batch counter thresholds for
947 	 * interrupt mitigation based on ring type
948 	 */
949 	if (ring_type == REO_DST) {
950 		ring_params.intr_timer_thres_us =
951 			wlan_cfg_get_int_timer_threshold_rx(soc->wlan_cfg_ctx);
952 		ring_params.intr_batch_cntr_thres_entries =
953 			wlan_cfg_get_int_batch_threshold_rx(soc->wlan_cfg_ctx);
954 	} else if (ring_type == WBM2SW_RELEASE && (ring_num < 3)) {
955 		ring_params.intr_timer_thres_us =
956 			wlan_cfg_get_int_timer_threshold_tx(soc->wlan_cfg_ctx);
957 		ring_params.intr_batch_cntr_thres_entries =
958 			wlan_cfg_get_int_batch_threshold_tx(soc->wlan_cfg_ctx);
959 	} else {
960 		ring_params.intr_timer_thres_us =
961 			wlan_cfg_get_int_timer_threshold_other(soc->wlan_cfg_ctx);
962 		ring_params.intr_batch_cntr_thres_entries =
963 			wlan_cfg_get_int_batch_threshold_other(soc->wlan_cfg_ctx);
964 	}
965 
966 	/* Enable low threshold interrupts for rx buffer rings (regular and
967 	 * monitor buffer rings).
968 	 * TODO: See if this is required for any other ring
969 	 */
970 	if ((ring_type == RXDMA_BUF) || (ring_type == RXDMA_MONITOR_BUF) ||
971 		(ring_type == RXDMA_MONITOR_STATUS)) {
972 		/* TODO: Setting low threshold to 1/8th of ring size
973 		 * see if this needs to be configurable
974 		 */
975 		ring_params.low_threshold = num_entries >> 3;
976 		ring_params.flags |= HAL_SRNG_LOW_THRES_INTR_ENABLE;
977 		ring_params.intr_timer_thres_us =
978 			wlan_cfg_get_int_timer_threshold_rx(soc->wlan_cfg_ctx);
979 		ring_params.intr_batch_cntr_thres_entries = 0;
980 	}
981 
982 	srng->hal_srng = hal_srng_setup(hal_soc, ring_type, ring_num,
983 		mac_id, &ring_params);
984 
985 	if (!srng->hal_srng) {
986 		qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
987 				srng->alloc_size,
988 				srng->base_vaddr_unaligned,
989 				srng->base_paddr_unaligned, 0);
990 	}
991 
992 	return srng->hal_srng ? 0 : QDF_STATUS_E_FAILURE;
993 }
994 
995 /**
996  * dp_srng_cleanup - Internal function to cleanup SRNG rings used by data path
997  * Any buffers allocated and attached to ring entries are expected to be freed
998  * before calling this function.
999  */
1000 static void dp_srng_cleanup(struct dp_soc *soc, struct dp_srng *srng,
1001 	int ring_type, int ring_num)
1002 {
1003 	if (!srng->hal_srng) {
1004 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1005 			FL("Ring type: %d, num:%d not setup"),
1006 			ring_type, ring_num);
1007 		return;
1008 	}
1009 
1010 	hal_srng_cleanup(soc->hal_soc, srng->hal_srng);
1011 
1012 	qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
1013 				srng->alloc_size,
1014 				srng->base_vaddr_unaligned,
1015 				srng->base_paddr_unaligned, 0);
1016 	srng->hal_srng = NULL;
1017 }
1018 
1019 /* TODO: Need this interface from HIF */
1020 void *hif_get_hal_handle(void *hif_handle);
1021 
1022 /*
1023  * dp_service_srngs() - Top level interrupt handler for DP Ring interrupts
1024  * @dp_ctx: DP interrupt context handle (struct dp_intr)
1025  * @dp_budget: Number of frames/descriptors that can be processed in one shot
1026  *
1027  * Return: remaining budget/quota for the soc device
1028  */
1029 static uint32_t dp_service_srngs(void *dp_ctx, uint32_t dp_budget)
1030 {
1031 	struct dp_intr *int_ctx = (struct dp_intr *)dp_ctx;
1032 	struct dp_soc *soc = int_ctx->soc;
1033 	int ring = 0;
1034 	uint32_t work_done  = 0;
1035 	int budget = dp_budget;
1036 	uint8_t tx_mask = int_ctx->tx_ring_mask;
1037 	uint8_t rx_mask = int_ctx->rx_ring_mask;
1038 	uint8_t rx_err_mask = int_ctx->rx_err_ring_mask;
1039 	uint8_t rx_wbm_rel_mask = int_ctx->rx_wbm_rel_ring_mask;
1040 	uint8_t reo_status_mask = int_ctx->reo_status_ring_mask;
1041 	uint32_t remaining_quota = dp_budget;
1042 	struct dp_pdev *pdev = NULL;
1043 	int mac_id;
1044 
1045 	/* Process Tx completion interrupts first to return back buffers */
1046 	while (tx_mask) {
1047 		if (tx_mask & 0x1) {
1048 			work_done = dp_tx_comp_handler(soc,
1049 					soc->tx_comp_ring[ring].hal_srng,
1050 					remaining_quota);
1051 
1052 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
1053 				"tx mask 0x%x ring %d, budget %d, work_done %d",
1054 				tx_mask, ring, budget, work_done);
1055 
1056 			budget -= work_done;
1057 			if (budget <= 0)
1058 				goto budget_done;
1059 
1060 			remaining_quota = budget;
1061 		}
1062 		tx_mask = tx_mask >> 1;
1063 		ring++;
1064 	}
1065 
1066 
1067 	/* Process REO Exception ring interrupt */
1068 	if (rx_err_mask) {
1069 		work_done = dp_rx_err_process(soc,
1070 				soc->reo_exception_ring.hal_srng,
1071 				remaining_quota);
1072 
1073 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
1074 			"REO Exception Ring: work_done %d budget %d",
1075 			work_done, budget);
1076 
1077 		budget -=  work_done;
1078 		if (budget <= 0) {
1079 			goto budget_done;
1080 		}
1081 		remaining_quota = budget;
1082 	}
1083 
1084 	/* Process Rx WBM release ring interrupt */
1085 	if (rx_wbm_rel_mask) {
1086 		work_done = dp_rx_wbm_err_process(soc,
1087 				soc->rx_rel_ring.hal_srng, remaining_quota);
1088 
1089 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
1090 			"WBM Release Ring: work_done %d budget %d",
1091 			work_done, budget);
1092 
1093 		budget -=  work_done;
1094 		if (budget <= 0) {
1095 			goto budget_done;
1096 		}
1097 		remaining_quota = budget;
1098 	}
1099 
1100 	/* Process Rx interrupts */
1101 	if (rx_mask) {
1102 		for (ring = 0; ring < soc->num_reo_dest_rings; ring++) {
1103 			if (rx_mask & (1 << ring)) {
1104 				work_done = dp_rx_process(int_ctx,
1105 					    soc->reo_dest_ring[ring].hal_srng,
1106 					    ring,
1107 					    remaining_quota);
1108 
1109 				QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
1110 					"rx mask 0x%x ring %d, work_done %d budget %d",
1111 					rx_mask, ring, work_done, budget);
1112 
1113 				budget -=  work_done;
1114 				if (budget <= 0)
1115 					goto budget_done;
1116 				remaining_quota = budget;
1117 			}
1118 		}
1119 		for (ring = 0; ring < MAX_RX_MAC_RINGS; ring++) {
1120 			work_done = dp_rxdma_err_process(soc, ring,
1121 						remaining_quota);
1122 			budget -= work_done;
1123 		}
1124 	}
1125 
1126 	if (reo_status_mask)
1127 		dp_reo_status_ring_handler(soc);
1128 
1129 	/* Process LMAC interrupts */
1130 	for  (ring = 0 ; ring < MAX_PDEV_CNT; ring++) {
1131 		pdev = soc->pdev_list[ring];
1132 		if (pdev == NULL)
1133 			continue;
1134 		for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
1135 			int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id,
1136 								pdev->pdev_id);
1137 
1138 			if (int_ctx->rx_mon_ring_mask & (1 << mac_for_pdev)) {
1139 				work_done = dp_mon_process(soc, mac_for_pdev,
1140 						remaining_quota);
1141 				budget -= work_done;
1142 				if (budget <= 0)
1143 					goto budget_done;
1144 				remaining_quota = budget;
1145 			}
1146 
1147 			if (int_ctx->rxdma2host_ring_mask &
1148 					(1 << mac_for_pdev)) {
1149 				work_done = dp_rxdma_err_process(soc,
1150 							mac_for_pdev,
1151 							remaining_quota);
1152 				budget -=  work_done;
1153 				if (budget <= 0)
1154 					goto budget_done;
1155 				remaining_quota = budget;
1156 			}
1157 
1158 			if (int_ctx->host2rxdma_ring_mask &
1159 						(1 << mac_for_pdev)) {
1160 				union dp_rx_desc_list_elem_t *desc_list = NULL;
1161 				union dp_rx_desc_list_elem_t *tail = NULL;
1162 				struct dp_srng *rx_refill_buf_ring =
1163 					&pdev->rx_refill_buf_ring;
1164 
1165 				DP_STATS_INC(pdev, replenish.low_thresh_intrs,
1166 						1);
1167 				dp_rx_buffers_replenish(soc, mac_for_pdev,
1168 					rx_refill_buf_ring,
1169 					&soc->rx_desc_buf[mac_for_pdev], 0,
1170 					&desc_list, &tail);
1171 			}
1172 		}
1173 	}
1174 
1175 	qdf_lro_flush(int_ctx->lro_ctx);
1176 
1177 budget_done:
1178 	return dp_budget - budget;
1179 }
1180 
1181 /* dp_interrupt_timer() - timer poll for interrupts
1182  *
1183  * @arg: SoC Handle
1184  *
1185  * Return: none
1186  *
1187  */
1188 static void dp_interrupt_timer(void *arg)
1189 {
1190 	struct dp_soc *soc = (struct dp_soc *) arg;
1191 	int i;
1192 
1193 	if (qdf_atomic_read(&soc->cmn_init_done)) {
1194 		for (i = 0;
1195 			i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++)
1196 			dp_service_srngs(&soc->intr_ctx[i], 0xffff);
1197 
1198 		qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
1199 	}
1200 }
1201 
1202 /*
1203  * dp_soc_attach_poll() - Register handlers for DP interrupts in poll mode
1204  * @txrx_soc: DP SOC handle
1205  *
1206  * Host driver will register for "DP_NUM_INTERRUPT_CONTEXTS" number of NAPI
1207  * contexts. Each NAPI context will have a tx_ring_mask, rx_ring_mask, and
1208  * rx_monitor_ring mask to indicate the rings that are processed by the handler.
1209  *
1210  * Return: 0 for success. nonzero for failure.
1211  */
1212 static QDF_STATUS dp_soc_attach_poll(void *txrx_soc)
1213 {
1214 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
1215 	int i;
1216 
1217 	soc->intr_mode = DP_INTR_POLL;
1218 
1219 	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
1220 		soc->intr_ctx[i].dp_intr_id = i;
1221 		soc->intr_ctx[i].tx_ring_mask =
1222 			wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, i);
1223 		soc->intr_ctx[i].rx_ring_mask =
1224 			wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, i);
1225 		soc->intr_ctx[i].rx_mon_ring_mask =
1226 			wlan_cfg_get_rx_mon_ring_mask(soc->wlan_cfg_ctx, i);
1227 		soc->intr_ctx[i].rx_err_ring_mask =
1228 			wlan_cfg_get_rx_err_ring_mask(soc->wlan_cfg_ctx, i);
1229 		soc->intr_ctx[i].rx_wbm_rel_ring_mask =
1230 			wlan_cfg_get_rx_wbm_rel_ring_mask(soc->wlan_cfg_ctx, i);
1231 		soc->intr_ctx[i].reo_status_ring_mask =
1232 			wlan_cfg_get_reo_status_ring_mask(soc->wlan_cfg_ctx, i);
1233 		soc->intr_ctx[i].rxdma2host_ring_mask =
1234 			wlan_cfg_get_rxdma2host_ring_mask(soc->wlan_cfg_ctx, i);
1235 		soc->intr_ctx[i].soc = soc;
1236 		soc->intr_ctx[i].lro_ctx = qdf_lro_init();
1237 	}
1238 
1239 	qdf_timer_init(soc->osdev, &soc->int_timer,
1240 			dp_interrupt_timer, (void *)soc,
1241 			QDF_TIMER_TYPE_WAKE_APPS);
1242 
1243 	return QDF_STATUS_SUCCESS;
1244 }
1245 
1246 static QDF_STATUS dp_soc_interrupt_attach(void *txrx_soc);
1247 #if defined(CONFIG_MCL)
1248 extern int con_mode_monitor;
1249 /*
1250  * dp_soc_interrupt_attach_wrapper() - Register handlers for DP interrupts
1251  * @txrx_soc: DP SOC handle
1252  *
1253  * Call the appropriate attach function based on the mode of operation.
1254  * This is a WAR for enabling monitor mode.
1255  *
1256  * Return: 0 for success. nonzero for failure.
1257  */
1258 static QDF_STATUS dp_soc_interrupt_attach_wrapper(void *txrx_soc)
1259 {
1260 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
1261 
1262 	if (!(soc->wlan_cfg_ctx->napi_enabled) ||
1263 	     con_mode_monitor == QDF_GLOBAL_MONITOR_MODE) {
1264 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
1265 				  "%s: Poll mode", __func__);
1266 		return dp_soc_attach_poll(txrx_soc);
1267 	} else {
1268 
1269 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
1270 				  "%s: Interrupt mode", __func__);
1271 		return dp_soc_interrupt_attach(txrx_soc);
1272 	}
1273 }
1274 #else
1275 #if defined(DP_INTR_POLL_BASED) && DP_INTR_POLL_BASED
1276 static QDF_STATUS dp_soc_interrupt_attach_wrapper(void *txrx_soc)
1277 {
1278 	return dp_soc_attach_poll(txrx_soc);
1279 }
1280 #else
1281 static QDF_STATUS dp_soc_interrupt_attach_wrapper(void *txrx_soc)
1282 {
1283 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
1284 
1285 	if (hif_is_polled_mode_enabled(soc->hif_handle))
1286 		return dp_soc_attach_poll(txrx_soc);
1287 	else
1288 		return dp_soc_interrupt_attach(txrx_soc);
1289 }
1290 #endif
1291 #endif
1292 
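/**
 * dp_soc_interrupt_map_calculate_integrated() - build IRQ map for
 * integrated (non-MSI) interrupts
 * @soc: Datapath SOC handle
 * @intr_ctx_num: interrupt context number
 * @irq_id_map: output array of IRQ ids serviced by this context
 * @num_irq_r: output count of entries written to @irq_id_map
 *
 * Walks the configured ring masks for the context and records the hardware
 * IRQ id of every ring assigned to it.
 *
 * Return: none
 */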
1293 static void dp_soc_interrupt_map_calculate_integrated(struct dp_soc *soc,
1294 		int intr_ctx_num, int *irq_id_map, int *num_irq_r)
1295 {
1296 	int j;
1297 	int num_irq = 0;
1298 
1299 	int tx_mask =
1300 		wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
1301 	int rx_mask =
1302 		wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
1303 	int rx_mon_mask =
1304 		wlan_cfg_get_rx_mon_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
1305 	int rx_err_ring_mask = wlan_cfg_get_rx_err_ring_mask(
1306 					soc->wlan_cfg_ctx, intr_ctx_num);
1307 	int rx_wbm_rel_ring_mask = wlan_cfg_get_rx_wbm_rel_ring_mask(
1308 					soc->wlan_cfg_ctx, intr_ctx_num);
1309 	int reo_status_ring_mask = wlan_cfg_get_reo_status_ring_mask(
1310 					soc->wlan_cfg_ctx, intr_ctx_num);
1311 	int rxdma2host_ring_mask = wlan_cfg_get_rxdma2host_ring_mask(
1312 					soc->wlan_cfg_ctx, intr_ctx_num);
1313 	int host2rxdma_ring_mask = wlan_cfg_get_host2rxdma_ring_mask(
1314 					soc->wlan_cfg_ctx, intr_ctx_num);
1315 
1316 	for (j = 0; j < HIF_MAX_GRP_IRQ; j++) {
1317 
1318 		if (tx_mask & (1 << j)) {
1319 			irq_id_map[num_irq++] =
1320 				(wbm2host_tx_completions_ring1 - j);
1321 		}
1322 
1323 		if (rx_mask & (1 << j)) {
1324 			irq_id_map[num_irq++] =
1325 				(reo2host_destination_ring1 - j);
1326 		}
1327 
1328 		if (rxdma2host_ring_mask & (1 << j)) {
1329 			irq_id_map[num_irq++] =
1330 				rxdma2host_destination_ring_mac1 -
1331 				wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, j);
1332 		}
1333 
1334 		if (host2rxdma_ring_mask & (1 << j)) {
1335 			irq_id_map[num_irq++] =
1336 				host2rxdma_host_buf_ring_mac1 -
1337 				wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, j);
1338 		}
1339 
1340 		if (rx_mon_mask & (1 << j)) {
1341 			irq_id_map[num_irq++] =
1342 				ppdu_end_interrupts_mac1 -
1343 				wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, j);
1344 			irq_id_map[num_irq++] =
1345 				rxdma2host_monitor_status_ring_mac1 -
1346 				wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, j);
1347 		}
1348 
1349 		if (rx_wbm_rel_ring_mask & (1 << j))
1350 			irq_id_map[num_irq++] = wbm2host_rx_release;
1351 
1352 		if (rx_err_ring_mask & (1 << j))
1353 			irq_id_map[num_irq++] = reo2host_exception;
1354 
1355 		if (reo_status_ring_mask & (1 << j))
1356 			irq_id_map[num_irq++] = reo2host_status;
1357 
1358 	}
1359 	*num_irq_r = num_irq;
1360 }
1361 
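/**
 * dp_soc_interrupt_map_calculate_msi() - build IRQ map for MSI interrupts
 * @soc: Datapath SOC handle
 * @intr_ctx_num: interrupt context number
 * @irq_id_map: output array of IRQ ids serviced by this context
 * @num_irq_r: output count of entries written to @irq_id_map
 * @msi_vector_count: number of MSI vectors assigned to DP
 * @msi_vector_start: first MSI vector assigned to DP
 *
 * With MSI, each interrupt context is serviced by a single vector; the
 * context number is folded onto the available vectors.
 *
 * Return: none
 */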
1362 static void dp_soc_interrupt_map_calculate_msi(struct dp_soc *soc,
1363 		int intr_ctx_num, int *irq_id_map, int *num_irq_r,
1364 		int msi_vector_count, int msi_vector_start)
1365 {
1366 	int tx_mask = wlan_cfg_get_tx_ring_mask(
1367 					soc->wlan_cfg_ctx, intr_ctx_num);
1368 	int rx_mask = wlan_cfg_get_rx_ring_mask(
1369 					soc->wlan_cfg_ctx, intr_ctx_num);
1370 	int rx_mon_mask = wlan_cfg_get_rx_mon_ring_mask(
1371 					soc->wlan_cfg_ctx, intr_ctx_num);
1372 	int rx_err_ring_mask = wlan_cfg_get_rx_err_ring_mask(
1373 					soc->wlan_cfg_ctx, intr_ctx_num);
1374 	int rx_wbm_rel_ring_mask = wlan_cfg_get_rx_wbm_rel_ring_mask(
1375 					soc->wlan_cfg_ctx, intr_ctx_num);
1376 	int reo_status_ring_mask = wlan_cfg_get_reo_status_ring_mask(
1377 					soc->wlan_cfg_ctx, intr_ctx_num);
1378 	int rxdma2host_ring_mask = wlan_cfg_get_rxdma2host_ring_mask(
1379 					soc->wlan_cfg_ctx, intr_ctx_num);
1380 
1381 	unsigned int vector =
1382 		(intr_ctx_num % msi_vector_count) + msi_vector_start;
1383 	int num_irq = 0;
1384 
1385 	soc->intr_mode = DP_INTR_MSI;
1386 
1387 	if (tx_mask | rx_mask | rx_mon_mask | rx_err_ring_mask |
1388 	    rx_wbm_rel_ring_mask | reo_status_ring_mask | rxdma2host_ring_mask)
1389 		irq_id_map[num_irq++] =
1390 			pld_get_msi_irq(soc->osdev->dev, vector);
1391 
1392 	*num_irq_r = num_irq;
1393 }
1394 
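/**
 * dp_soc_interrupt_map_calculate() - build the IRQ map for a context
 * @soc: Datapath SOC handle
 * @intr_ctx_num: interrupt context number
 * @irq_id_map: output array of IRQ ids serviced by this context
 * @num_irq: output count of entries written to @irq_id_map
 *
 * Uses the MSI map when a platform MSI assignment for "DP" exists,
 * otherwise falls back to the integrated interrupt map.
 *
 * Return: none
 */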
1395 static void dp_soc_interrupt_map_calculate(struct dp_soc *soc, int intr_ctx_num,
1396 				    int *irq_id_map, int *num_irq)
1397 {
1398 	int msi_vector_count, ret;
1399 	uint32_t msi_base_data, msi_vector_start;
1400 
1401 	ret = pld_get_user_msi_assignment(soc->osdev->dev, "DP",
1402 					    &msi_vector_count,
1403 					    &msi_base_data,
1404 					    &msi_vector_start);
1405 	if (ret)
1406 		return dp_soc_interrupt_map_calculate_integrated(soc,
1407 				intr_ctx_num, irq_id_map, num_irq);
1408 
1409 	else
1410 		dp_soc_interrupt_map_calculate_msi(soc,
1411 				intr_ctx_num, irq_id_map, num_irq,
1412 				msi_vector_count, msi_vector_start);
1413 }
1414 
1415 /*
1416  * dp_soc_interrupt_attach() - Register handlers for DP interrupts
1417  * @txrx_soc: DP SOC handle
1418  *
1419  * Host driver will register for "DP_NUM_INTERRUPT_CONTEXTS" number of NAPI
1420  * contexts. Each NAPI context will have a tx_ring_mask, rx_ring_mask, and
1421  * rx_monitor_ring mask to indicate the rings that are processed by the handler.
1422  *
1423  * Return: 0 for success. nonzero for failure.
1424  */
1425 static QDF_STATUS dp_soc_interrupt_attach(void *txrx_soc)
1426 {
1427 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
1428 
1429 	int i = 0;
1430 	int num_irq = 0;
1431 
1432 	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
1433 		int ret = 0;
1434 
1435 		/* Map of IRQ ids registered with one interrupt context */
1436 		int irq_id_map[HIF_MAX_GRP_IRQ];
1437 
1438 		int tx_mask =
1439 			wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, i);
1440 		int rx_mask =
1441 			wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, i);
1442 		int rx_mon_mask =
1443 			dp_soc_get_mon_mask_for_interrupt_mode(soc, i);
1444 		int rx_err_ring_mask =
1445 			wlan_cfg_get_rx_err_ring_mask(soc->wlan_cfg_ctx, i);
1446 		int rx_wbm_rel_ring_mask =
1447 			wlan_cfg_get_rx_wbm_rel_ring_mask(soc->wlan_cfg_ctx, i);
1448 		int reo_status_ring_mask =
1449 			wlan_cfg_get_reo_status_ring_mask(soc->wlan_cfg_ctx, i);
1450 		int rxdma2host_ring_mask =
1451 			wlan_cfg_get_rxdma2host_ring_mask(soc->wlan_cfg_ctx, i);
1452 		int host2rxdma_ring_mask =
1453 			wlan_cfg_get_host2rxdma_ring_mask(soc->wlan_cfg_ctx, i);
1454 
1455 
1456 		soc->intr_ctx[i].dp_intr_id = i;
1457 		soc->intr_ctx[i].tx_ring_mask = tx_mask;
1458 		soc->intr_ctx[i].rx_ring_mask = rx_mask;
1459 		soc->intr_ctx[i].rx_mon_ring_mask = rx_mon_mask;
1460 		soc->intr_ctx[i].rx_err_ring_mask = rx_err_ring_mask;
1461 		soc->intr_ctx[i].rxdma2host_ring_mask = rxdma2host_ring_mask;
1462 		soc->intr_ctx[i].host2rxdma_ring_mask = host2rxdma_ring_mask;
1463 		soc->intr_ctx[i].rx_wbm_rel_ring_mask = rx_wbm_rel_ring_mask;
1464 		soc->intr_ctx[i].reo_status_ring_mask = reo_status_ring_mask;
1465 
1466 		soc->intr_ctx[i].soc = soc;
1467 
1468 		num_irq = 0;
1469 
1470 		dp_soc_interrupt_map_calculate(soc, i, &irq_id_map[0],
1471 					       &num_irq);
1472 
1473 		ret = hif_register_ext_group(soc->hif_handle,
1474 				num_irq, irq_id_map, dp_service_srngs,
1475 				&soc->intr_ctx[i], "dp_intr",
1476 				HIF_EXEC_NAPI_TYPE, QCA_NAPI_DEF_SCALE_BIN_SHIFT);
1477 
1478 		if (ret) {
1479 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1480 			FL("failed, ret = %d"), ret);
1481 
1482 			return QDF_STATUS_E_FAILURE;
1483 		}
1484 		soc->intr_ctx[i].lro_ctx = qdf_lro_init();
1485 	}
1486 
1487 	hif_configure_ext_group_interrupts(soc->hif_handle);
1488 
1489 	return QDF_STATUS_SUCCESS;
1490 }
1491 
1492 /*
1493  * dp_soc_interrupt_detach() - Deregister any allocations done for interrupts
1494  * @txrx_soc: DP SOC handle
1495  *
1496  * Return: void
1497  */
1498 static void dp_soc_interrupt_detach(void *txrx_soc)
1499 {
1500 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
1501 	int i;
1502 
1503 	if (soc->intr_mode == DP_INTR_POLL) {
1504 		qdf_timer_stop(&soc->int_timer);
1505 		qdf_timer_free(&soc->int_timer);
1506 	} else {
1507 		hif_deregister_exec_group(soc->hif_handle, "dp_intr");
1508 	}
1509 
1510 	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
1511 		soc->intr_ctx[i].tx_ring_mask = 0;
1512 		soc->intr_ctx[i].rx_ring_mask = 0;
1513 		soc->intr_ctx[i].rx_mon_ring_mask = 0;
1514 		soc->intr_ctx[i].rx_err_ring_mask = 0;
1515 		soc->intr_ctx[i].rx_wbm_rel_ring_mask = 0;
1516 		soc->intr_ctx[i].reo_status_ring_mask = 0;
1517 		soc->intr_ctx[i].rxdma2host_ring_mask = 0;
1518 		soc->intr_ctx[i].host2rxdma_ring_mask = 0;
1519 
1520 		qdf_lro_deinit(soc->intr_ctx[i].lro_ctx);
1521 	}
1522 }
1523 
1524 #define AVG_MAX_MPDUS_PER_TID 128
1525 #define AVG_TIDS_PER_CLIENT 2
1526 #define AVG_FLOWS_PER_TID 2
1527 #define AVG_MSDUS_PER_FLOW 128
1528 #define AVG_MSDUS_PER_MPDU 4
1529 
1530 /*
1531  * Allocate and setup link descriptor pool that will be used by HW for
1532  * various link and queue descriptors and managed by WBM
1533  */
1534 static int dp_hw_link_desc_pool_setup(struct dp_soc *soc)
1535 {
1536 	int link_desc_size = hal_get_link_desc_size(soc->hal_soc);
1537 	int link_desc_align = hal_get_link_desc_align(soc->hal_soc);
1538 	uint32_t max_clients = wlan_cfg_get_max_clients(soc->wlan_cfg_ctx);
1539 	uint32_t num_mpdus_per_link_desc =
1540 		hal_num_mpdus_per_link_desc(soc->hal_soc);
1541 	uint32_t num_msdus_per_link_desc =
1542 		hal_num_msdus_per_link_desc(soc->hal_soc);
1543 	uint32_t num_mpdu_links_per_queue_desc =
1544 		hal_num_mpdu_links_per_queue_desc(soc->hal_soc);
1545 	uint32_t max_alloc_size = wlan_cfg_max_alloc_size(soc->wlan_cfg_ctx);
1546 	uint32_t total_link_descs, total_mem_size;
1547 	uint32_t num_mpdu_link_descs, num_mpdu_queue_descs;
1548 	uint32_t num_tx_msdu_link_descs, num_rx_msdu_link_descs;
1549 	uint32_t num_link_desc_banks;
1550 	uint32_t last_bank_size = 0;
1551 	uint32_t entry_size, num_entries;
1552 	int i;
1553 	uint32_t desc_id = 0;
1554 
1555 	/* Only Tx queue descriptors are allocated from the common link descriptor
1556 	 * pool. Rx queue descriptors (REO queue extension descriptors) are not
1557 	 * included here because they are expected to be allocated contiguously
1558 	 * with the REO queue descriptors.
1559 	 */
1560 	num_mpdu_link_descs = (max_clients * AVG_TIDS_PER_CLIENT *
1561 		AVG_MAX_MPDUS_PER_TID) / num_mpdus_per_link_desc;
1562 
1563 	num_mpdu_queue_descs = num_mpdu_link_descs /
1564 		num_mpdu_links_per_queue_desc;
1565 
1566 	num_tx_msdu_link_descs = (max_clients * AVG_TIDS_PER_CLIENT *
1567 		AVG_FLOWS_PER_TID * AVG_MSDUS_PER_FLOW) /
1568 		num_msdus_per_link_desc;
1569 
1570 	num_rx_msdu_link_descs = (max_clients * AVG_TIDS_PER_CLIENT *
1571 		AVG_MAX_MPDUS_PER_TID * AVG_MSDUS_PER_MPDU) / 6;
1572 
1573 	num_entries = num_mpdu_link_descs + num_mpdu_queue_descs +
1574 		num_tx_msdu_link_descs + num_rx_msdu_link_descs;
1575 
1576 	/* Round up to power of 2 */
1577 	total_link_descs = 1;
1578 	while (total_link_descs < num_entries)
1579 		total_link_descs <<= 1;
1580 
1581 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
1582 		FL("total_link_descs: %u, link_desc_size: %d"),
1583 		total_link_descs, link_desc_size);
1584 	total_mem_size =  total_link_descs * link_desc_size;
1585 
1586 	total_mem_size += link_desc_align;
1587 
1588 	if (total_mem_size <= max_alloc_size) {
1589 		num_link_desc_banks = 0;
1590 		last_bank_size = total_mem_size;
1591 	} else {
1592 		num_link_desc_banks = (total_mem_size) /
1593 			(max_alloc_size - link_desc_align);
1594 		last_bank_size = total_mem_size %
1595 			(max_alloc_size - link_desc_align);
1596 	}
1597 
1598 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
1599 		FL("total_mem_size: %d, num_link_desc_banks: %u"),
1600 		total_mem_size, num_link_desc_banks);
1601 
1602 	for (i = 0; i < num_link_desc_banks; i++) {
1603 		soc->link_desc_banks[i].base_vaddr_unaligned =
1604 			qdf_mem_alloc_consistent(soc->osdev, soc->osdev->dev,
1605 			max_alloc_size,
1606 			&(soc->link_desc_banks[i].base_paddr_unaligned));
1607 		soc->link_desc_banks[i].size = max_alloc_size;
1608 
1609 		soc->link_desc_banks[i].base_vaddr = (void *)((unsigned long)(
1610 			soc->link_desc_banks[i].base_vaddr_unaligned) +
1611 			((unsigned long)(
1612 			soc->link_desc_banks[i].base_vaddr_unaligned) %
1613 			link_desc_align));
1614 
1615 		soc->link_desc_banks[i].base_paddr = (unsigned long)(
1616 			soc->link_desc_banks[i].base_paddr_unaligned) +
1617 			((unsigned long)(soc->link_desc_banks[i].base_vaddr) -
1618 			(unsigned long)(
1619 			soc->link_desc_banks[i].base_vaddr_unaligned));
1620 
1621 		if (!soc->link_desc_banks[i].base_vaddr_unaligned) {
1622 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1623 				FL("Link descriptor memory alloc failed"));
1624 			goto fail;
1625 		}
1626 	}
1627 
1628 	if (last_bank_size) {
1629 		/* Allocate the last bank in case the total memory required is not an exact
1630 		 * multiple of max_alloc_size
1631 		 */
1632 		soc->link_desc_banks[i].base_vaddr_unaligned =
1633 			qdf_mem_alloc_consistent(soc->osdev, soc->osdev->dev,
1634 			last_bank_size,
1635 			&(soc->link_desc_banks[i].base_paddr_unaligned));
1636 		soc->link_desc_banks[i].size = last_bank_size;
1637 
1638 		soc->link_desc_banks[i].base_vaddr = (void *)((unsigned long)
1639 			(soc->link_desc_banks[i].base_vaddr_unaligned) +
1640 			((unsigned long)(
1641 			soc->link_desc_banks[i].base_vaddr_unaligned) %
1642 			link_desc_align));
1643 
1644 		soc->link_desc_banks[i].base_paddr =
1645 			(unsigned long)(
1646 			soc->link_desc_banks[i].base_paddr_unaligned) +
1647 			((unsigned long)(soc->link_desc_banks[i].base_vaddr) -
1648 			(unsigned long)(
1649 			soc->link_desc_banks[i].base_vaddr_unaligned));
1650 	}
1651 
1652 
1653 	/* Allocate and setup link descriptor idle list for HW internal use */
1654 	entry_size = hal_srng_get_entrysize(soc->hal_soc, WBM_IDLE_LINK);
1655 	total_mem_size = entry_size * total_link_descs;
1656 
1657 	if (total_mem_size <= max_alloc_size) {
1658 		void *desc;
1659 
1660 		if (dp_srng_setup(soc, &soc->wbm_idle_link_ring,
1661 			WBM_IDLE_LINK, 0, 0, total_link_descs)) {
1662 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1663 				FL("Link desc idle ring setup failed"));
1664 			goto fail;
1665 		}
1666 
1667 		hal_srng_access_start_unlocked(soc->hal_soc,
1668 			soc->wbm_idle_link_ring.hal_srng);
1669 
1670 		for (i = 0; i < MAX_LINK_DESC_BANKS &&
1671 			soc->link_desc_banks[i].base_paddr; i++) {
1672 			uint32_t num_entries = (soc->link_desc_banks[i].size -
1673 				((unsigned long)(
1674 				soc->link_desc_banks[i].base_vaddr) -
1675 				(unsigned long)(
1676 				soc->link_desc_banks[i].base_vaddr_unaligned)))
1677 				/ link_desc_size;
1678 			unsigned long paddr = (unsigned long)(
1679 				soc->link_desc_banks[i].base_paddr);
1680 
1681 			while (num_entries && (desc = hal_srng_src_get_next(
1682 				soc->hal_soc,
1683 				soc->wbm_idle_link_ring.hal_srng))) {
1684 				hal_set_link_desc_addr(desc,
1685 					LINK_DESC_COOKIE(desc_id, i), paddr);
1686 				num_entries--;
1687 				desc_id++;
1688 				paddr += link_desc_size;
1689 			}
1690 		}
1691 		hal_srng_access_end_unlocked(soc->hal_soc,
1692 			soc->wbm_idle_link_ring.hal_srng);
1693 	} else {
1694 		uint32_t num_scatter_bufs;
1695 		uint32_t num_entries_per_buf;
1696 		uint32_t rem_entries;
1697 		uint8_t *scatter_buf_ptr;
1698 		uint16_t scatter_buf_num;
1699 
1700 		soc->wbm_idle_scatter_buf_size =
1701 			hal_idle_list_scatter_buf_size(soc->hal_soc);
1702 		num_entries_per_buf = hal_idle_scatter_buf_num_entries(
1703 			soc->hal_soc, soc->wbm_idle_scatter_buf_size);
1704 		num_scatter_bufs = hal_idle_list_num_scatter_bufs(
1705 					soc->hal_soc, total_mem_size,
1706 					soc->wbm_idle_scatter_buf_size);
1707 
1708 		if (num_scatter_bufs > MAX_IDLE_SCATTER_BUFS) {
1709 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1710 					FL("scatter bufs size out of bounds"));
1711 			goto fail;
1712 		}
1713 
1714 		for (i = 0; i < num_scatter_bufs; i++) {
1715 			soc->wbm_idle_scatter_buf_base_vaddr[i] =
1716 				qdf_mem_alloc_consistent(soc->osdev,
1717 							soc->osdev->dev,
1718 				soc->wbm_idle_scatter_buf_size,
1719 				&(soc->wbm_idle_scatter_buf_base_paddr[i]));
1720 			if (soc->wbm_idle_scatter_buf_base_vaddr[i] == NULL) {
1721 				QDF_TRACE(QDF_MODULE_ID_DP,
1722 						QDF_TRACE_LEVEL_ERROR,
1723 					FL("Scatter list memory alloc failed"));
1724 				goto fail;
1725 			}
1726 		}
1727 
1728 		/* Populate idle list scatter buffers with link descriptor
1729 		 * pointers
1730 		 */
1731 		scatter_buf_num = 0;
1732 		scatter_buf_ptr = (uint8_t *)(
1733 			soc->wbm_idle_scatter_buf_base_vaddr[scatter_buf_num]);
1734 		rem_entries = num_entries_per_buf;
1735 
1736 		for (i = 0; i < MAX_LINK_DESC_BANKS &&
1737 			soc->link_desc_banks[i].base_paddr; i++) {
1738 			uint32_t num_link_descs =
1739 				(soc->link_desc_banks[i].size -
1740 				((unsigned long)(
1741 				soc->link_desc_banks[i].base_vaddr) -
1742 				(unsigned long)(
1743 				soc->link_desc_banks[i].base_vaddr_unaligned)))
1744 				/ link_desc_size;
1745 			unsigned long paddr = (unsigned long)(
1746 				soc->link_desc_banks[i].base_paddr);
1747 
1748 			while (num_link_descs) {
1749 				hal_set_link_desc_addr((void *)scatter_buf_ptr,
1750 					LINK_DESC_COOKIE(desc_id, i), paddr);
1751 				num_link_descs--;
1752 				desc_id++;
1753 				paddr += link_desc_size;
1754 				rem_entries--;
1755 				if (rem_entries) {
1756 					scatter_buf_ptr += entry_size;
1757 				} else {
1758 					rem_entries = num_entries_per_buf;
1759 					scatter_buf_num++;
1760 
1761 					if (scatter_buf_num >= num_scatter_bufs)
1762 						break;
1763 
1764 					scatter_buf_ptr = (uint8_t *)(
1765 						soc->wbm_idle_scatter_buf_base_vaddr[
1766 						scatter_buf_num]);
1767 				}
1768 			}
1769 		}
1770 		/* Setup link descriptor idle list in HW */
1771 		hal_setup_link_idle_list(soc->hal_soc,
1772 			soc->wbm_idle_scatter_buf_base_paddr,
1773 			soc->wbm_idle_scatter_buf_base_vaddr,
1774 			num_scatter_bufs, soc->wbm_idle_scatter_buf_size,
1775 			(uint32_t)(scatter_buf_ptr -
1776 			(uint8_t *)(soc->wbm_idle_scatter_buf_base_vaddr[
1777 			scatter_buf_num-1])), total_link_descs);
1778 	}
1779 	return 0;
1780 
1781 fail:
1782 	if (soc->wbm_idle_link_ring.hal_srng) {
1783 		dp_srng_cleanup(soc, &soc->wbm_idle_link_ring,
1784 				WBM_IDLE_LINK, 0);
1785 	}
1786 
1787 	for (i = 0; i < MAX_IDLE_SCATTER_BUFS; i++) {
1788 		if (soc->wbm_idle_scatter_buf_base_vaddr[i]) {
1789 			qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
1790 				soc->wbm_idle_scatter_buf_size,
1791 				soc->wbm_idle_scatter_buf_base_vaddr[i],
1792 				soc->wbm_idle_scatter_buf_base_paddr[i], 0);
1793 			soc->wbm_idle_scatter_buf_base_vaddr[i] = NULL;
1794 		}
1795 	}
1796 
1797 	for (i = 0; i < MAX_LINK_DESC_BANKS; i++) {
1798 		if (soc->link_desc_banks[i].base_vaddr_unaligned) {
1799 			qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
1800 				soc->link_desc_banks[i].size,
1801 				soc->link_desc_banks[i].base_vaddr_unaligned,
1802 				soc->link_desc_banks[i].base_paddr_unaligned,
1803 				0);
1804 			soc->link_desc_banks[i].base_vaddr_unaligned = NULL;
1805 		}
1806 	}
1807 	return QDF_STATUS_E_FAILURE;
1808 }
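
/*
 * Illustrative note (hypothetical numbers, not from the driver config): the
 * idle-list setup above chooses between the two strategies by comparing
 * total_mem_size = entry_size * total_link_descs against max_alloc_size.
 * For example, with entry_size = 8 bytes and total_link_descs = 131072,
 *
 *	total_mem_size = 8 * 131072 = 1 MB
 *
 * so if max_alloc_size were 512 KB the scatter-buffer path would be taken;
 * otherwise the descriptors are chained directly through wbm_idle_link_ring.
 * Actual values come from HAL/wlan_cfg and differ per target.
 */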
1809 
1810 /*
1811  * Free link descriptor pool that was set up in HW
1812  */
1813 static void dp_hw_link_desc_pool_cleanup(struct dp_soc *soc)
1814 {
1815 	int i;
1816 
1817 	if (soc->wbm_idle_link_ring.hal_srng) {
1818 		dp_srng_cleanup(soc, &soc->wbm_idle_link_ring,
1819 			WBM_IDLE_LINK, 0);
1820 	}
1821 
1822 	for (i = 0; i < MAX_IDLE_SCATTER_BUFS; i++) {
1823 		if (soc->wbm_idle_scatter_buf_base_vaddr[i]) {
1824 			qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
1825 				soc->wbm_idle_scatter_buf_size,
1826 				soc->wbm_idle_scatter_buf_base_vaddr[i],
1827 				soc->wbm_idle_scatter_buf_base_paddr[i], 0);
1828 			soc->wbm_idle_scatter_buf_base_vaddr[i] = NULL;
1829 		}
1830 	}
1831 
1832 	for (i = 0; i < MAX_LINK_DESC_BANKS; i++) {
1833 		if (soc->link_desc_banks[i].base_vaddr_unaligned) {
1834 			qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
1835 				soc->link_desc_banks[i].size,
1836 				soc->link_desc_banks[i].base_vaddr_unaligned,
1837 				soc->link_desc_banks[i].base_paddr_unaligned,
1838 				0);
1839 			soc->link_desc_banks[i].base_vaddr_unaligned = NULL;
1840 		}
1841 	}
1842 }
1843 
1844 #define REO_DST_RING_SIZE_QCA6290 1024
1845 #ifndef QCA_WIFI_QCA8074_VP
1846 #define REO_DST_RING_SIZE_QCA8074 2048
1847 #else
1848 #define REO_DST_RING_SIZE_QCA8074 8
1849 #endif
1850 
1851 /*
1852  * dp_wds_aging_timer_fn() - Timer callback function for WDS aging
1853  * @soc_hdl: Datapath SOC handle
1854  *
1855  * This is a timer function used to age out stale AST nodes from
1856  * AST table
1857  */
1858 #ifdef FEATURE_WDS
1859 static void dp_wds_aging_timer_fn(void *soc_hdl)
1860 {
1861 	struct dp_soc *soc = (struct dp_soc *) soc_hdl;
1862 	struct dp_pdev *pdev;
1863 	struct dp_vdev *vdev;
1864 	struct dp_peer *peer;
1865 	struct dp_ast_entry *ase, *temp_ase;
1866 	int i;
1867 
1868 	qdf_spin_lock_bh(&soc->ast_lock);
1869 
1870 	for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) {
1871 		pdev = soc->pdev_list[i];
1872 		qdf_spin_lock_bh(&pdev->vdev_list_lock);
1873 		DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
1874 			DP_VDEV_ITERATE_PEER_LIST(vdev, peer) {
1875 				DP_PEER_ITERATE_ASE_LIST(peer, ase, temp_ase) {
1876 					/*
1877 					 * Do not expire static ast entries
1878 					 * and HM WDS entries
1879 					 */
1880 					if (ase->type != CDP_TXRX_AST_TYPE_WDS)
1881 						continue;
1882 
1883 					if (ase->is_active) {
1884 						ase->is_active = FALSE;
1885 						continue;
1886 					}
1887 
1888 					DP_STATS_INC(soc, ast.aged_out, 1);
1889 					dp_peer_del_ast(soc, ase);
1890 				}
1891 			}
1892 		}
1893 		qdf_spin_unlock_bh(&pdev->vdev_list_lock);
1894 	}
1895 
1896 	qdf_spin_unlock_bh(&soc->ast_lock);
1897 
1898 	if (qdf_atomic_read(&soc->cmn_init_done))
1899 		qdf_timer_mod(&soc->wds_aging_timer, DP_WDS_AGING_TIMER_DEFAULT_MS);
1900 }
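
/*
 * Reading of the aging logic above (timing values are the defaults; exact
 * behaviour is target/config dependent): a WDS AST entry that is no longer
 * refreshed is first demoted (is_active cleared) on one timer pass and
 * deleted on the next pass it is still inactive, so it is aged out after
 * one to two periods, i.e. roughly 120-240 seconds with
 * DP_WDS_AGING_TIMER_DEFAULT_MS = 120000.
 */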
1901 
1902 
1903 /*
1904  * dp_soc_wds_attach() - Setup WDS timer and AST table
1905  * @soc:		Datapath SOC handle
1906  *
1907  * Return: None
1908  */
1909 static void dp_soc_wds_attach(struct dp_soc *soc)
1910 {
1911 	qdf_timer_init(soc->osdev, &soc->wds_aging_timer,
1912 			dp_wds_aging_timer_fn, (void *)soc,
1913 			QDF_TIMER_TYPE_WAKE_APPS);
1914 
1915 	qdf_timer_mod(&soc->wds_aging_timer, DP_WDS_AGING_TIMER_DEFAULT_MS);
1916 }
1917 
1918 /*
1919  * dp_soc_wds_detach() - Detach WDS data structures and timers
1920  * @soc: Datapath SOC handle
1921  *
1922  * Return: None
1923  */
1924 static void dp_soc_wds_detach(struct dp_soc *soc)
1925 {
1926 	qdf_timer_stop(&soc->wds_aging_timer);
1927 	qdf_timer_free(&soc->wds_aging_timer);
1928 }
1929 #else
1930 static void dp_soc_wds_attach(struct dp_soc *soc)
1931 {
1932 }
1933 
1934 static void dp_soc_wds_detach(struct dp_soc *soc)
1935 {
1936 }
1937 #endif
1938 
1939 /*
1940  * dp_soc_reset_cpu_ring_map() - Reset cpu ring map
1941  * @soc: Datapath soc handle
1942  *
1943  * This API resets the default cpu ring map
1944  */
1945 
1946 static void dp_soc_reset_cpu_ring_map(struct dp_soc *soc)
1947 {
1948 	uint8_t i;
1949 	int nss_config = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
1950 
1951 	for (i = 0; i < WLAN_CFG_INT_NUM_CONTEXTS; i++) {
1952 		if (nss_config == 1) {
1953 			/*
1954 			 * Setting Tx ring map for one nss offloaded radio
1955 			 */
1956 			soc->tx_ring_map[i] = dp_cpu_ring_map[DP_NSS_FIRST_RADIO_OFFLOADED_MAP][i];
1957 		} else if (nss_config == 2) {
1958 			/*
1959 			 * Setting Tx ring for two nss offloaded radios
1960 			 */
1961 			soc->tx_ring_map[i] = dp_cpu_ring_map[DP_NSS_SECOND_RADIO_OFFLOADED_MAP][i];
1962 		} else {
1963 			/*
1964 			 * Setting Tx ring map for all nss offloaded radios
1965 			 */
1966 			soc->tx_ring_map[i] = dp_cpu_ring_map[DP_NSS_ALL_RADIO_OFFLOADED_MAP][i];
1967 		}
1968 	}
1969 }
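
/*
 * Note (derived from usage elsewhere in this file, not authoritative):
 * nss_config appears to be a per-radio bitmask (bit 0 = first radio,
 * bit 1 = second radio), as dp_soc_ring_if_nss_offloaded() below tests
 * (nss_config & (1 << ring_num)) and pdev attach tests
 * (nss_cfg & (1 << pdev_id)). Since dp_soc_cmn_setup() only calls this
 * function when nss_config is non-zero, the final else branch effectively
 * covers the "both radios offloaded" case.
 */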
1970 
1971 /*
1972  * dp_soc_ring_if_nss_offloaded() - find if ring is offloaded to NSS
1973  * @soc - DP soc handle
1974  * @ring_type - ring type
1975  * @ring_num - ring number
1976  *
1977  * Return: 0 if the ring is not offloaded to NSS, non-zero otherwise
1978  */
1979 static uint8_t dp_soc_ring_if_nss_offloaded(struct dp_soc *soc, enum hal_ring_type ring_type, int ring_num)
1980 {
1981 	uint8_t nss_config = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
1982 	uint8_t status = 0;
1983 
1984 	switch (ring_type) {
1985 	case WBM2SW_RELEASE:
1986 	case REO_DST:
1987 	case RXDMA_BUF:
1988 		status = ((nss_config) & (1 << ring_num));
1989 		break;
1990 	default:
1991 		break;
1992 	}
1993 
1994 	return status;
1995 }
1996 
1997 /*
1998  * dp_soc_reset_intr_mask() - reset interrupt mask
1999  * @dp_soc - DP Soc handle
2000  *
2001  * Return: Return void
2002  */
2003 static void dp_soc_reset_intr_mask(struct dp_soc *soc)
2004 {
2005 	uint8_t j;
2006 	int *grp_mask = NULL;
2007 	int group_number, mask, num_ring;
2008 
2009 	/* number of tx ring */
2010 	num_ring = wlan_cfg_num_tcl_data_rings(soc->wlan_cfg_ctx);
2011 
2012 	/*
2013 	 * group mask for tx completion  ring.
2014 	 */
2015 	grp_mask =  &soc->wlan_cfg_ctx->int_tx_ring_mask[0];
2016 
2017 	/* loop and reset the mask for only offloaded ring */
2018 	for (j = 0; j < num_ring; j++) {
2019 		if (!dp_soc_ring_if_nss_offloaded(soc, WBM2SW_RELEASE, j)) {
2020 			continue;
2021 		}
2022 
2023 		/*
2024 		 * Group number corresponding to tx offloaded ring.
2025 		 */
2026 		group_number = dp_srng_find_ring_in_mask(j, grp_mask);
2027 		if (group_number < 0) {
2028 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2029 					FL("ring not part of any group; ring_type: %d,ring_num %d"),
2030 					WBM2SW_RELEASE, j);
2031 			return;
2032 		}
2033 
2034 		/* reset the tx mask for offloaded ring */
2035 		mask = wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, group_number);
2036 		mask &= (~(1 << j));
2037 
2038 		/*
2039 		 * reset the interrupt mask for offloaded ring.
2040 		 */
2041 		wlan_cfg_set_tx_ring_mask(soc->wlan_cfg_ctx, group_number, mask);
2042 	}
2043 
2044 	/* number of rx rings */
2045 	num_ring = wlan_cfg_num_reo_dest_rings(soc->wlan_cfg_ctx);
2046 
2047 	/*
2048 	 * group mask for reo destination ring.
2049 	 */
2050 	grp_mask = &soc->wlan_cfg_ctx->int_rx_ring_mask[0];
2051 
2052 	/* loop and reset the mask for only offloaded ring */
2053 	for (j = 0; j < num_ring; j++) {
2054 		if (!dp_soc_ring_if_nss_offloaded(soc, REO_DST, j)) {
2055 			continue;
2056 		}
2057 
2058 		/*
2059 		 * Group number corresponding to rx offloaded ring.
2060 		 */
2061 		group_number = dp_srng_find_ring_in_mask(j, grp_mask);
2062 		if (group_number < 0) {
2063 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2064 					FL("ring not part of any group; ring_type: %d,ring_num %d"),
2065 					REO_DST, j);
2066 			return;
2067 		}
2068 
2069 		/* reset the rx interrupt mask for offloaded ring */
2070 		mask =  wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, group_number);
2071 		mask &= (~(1 << j));
2072 
2073 		/*
2074 		 * clear the interrupt mask bit of the rx offloaded ring.
2075 		 */
2076 		wlan_cfg_set_rx_ring_mask(soc->wlan_cfg_ctx, group_number, mask);
2077 	}
2078 
2079 	/*
2080 	 * group mask for Rx buffer refill ring
2081 	 */
2082 	grp_mask = &soc->wlan_cfg_ctx->int_host2rxdma_ring_mask[0];
2083 
2084 	/* loop and reset the mask for only offloaded ring */
2085 	for (j = 0; j < MAX_PDEV_CNT; j++) {
2086 		if (!dp_soc_ring_if_nss_offloaded(soc, RXDMA_BUF, j)) {
2087 			continue;
2088 		}
2089 
2090 		/*
2091 		 * Group number corresponding to rx offloaded ring.
2092 		 */
2093 		group_number = dp_srng_find_ring_in_mask(j, grp_mask);
2094 		if (group_number < 0) {
2095 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2096 					FL("ring not part of any group; ring_type: %d,ring_num %d"),
2097 					RXDMA_BUF, j);
2098 			return;
2099 		}
2100 
2101 		/* reset the host2rxdma interrupt mask for offloaded ring */
2102 		mask =  wlan_cfg_get_host2rxdma_ring_mask(soc->wlan_cfg_ctx,
2103 				group_number);
2104 		mask &= (~(1 << j));
2105 
2106 		/*
2107 		 * clear the interrupt mask bit of the offloaded rx refill ring.
2108 		 */
2109 		wlan_cfg_set_host2rxdma_ring_mask(soc->wlan_cfg_ctx,
2110 			group_number, mask);
2111 	}
2112 }
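
/*
 * Worked example (hypothetical values): if an interrupt group currently has
 * int_rx_ring_mask = 0x7 (REO destination rings 0-2) and ring 1 is
 * offloaded to NSS, the code above computes
 *
 *	mask = 0x7 & ~(1 << 1) = 0x5
 *
 * so only rings 0 and 2 keep raising host interrupts in that group.
 */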
2113 
2114 #ifdef IPA_OFFLOAD
2115 /**
2116  * dp_reo_remap_config() - configure reo remap register value based
2117  *                         on nss configuration.
2118  *		based on offload_radio value below remap configuration
2119  *		get applied.
2120  *		0 - both Radios handled by host (remap rings 1, 2, 3 & 4)
2121  *		1 - 1st Radio handled by NSS (remap rings 2, 3 & 4)
2122  *		2 - 2nd Radio handled by NSS (remap rings 1, 2 & 4)
2123  *		3 - both Radios handled by NSS (remap not required)
2124  *		4 - IPA OFFLOAD enabled (remap rings 1,2 & 3)
2125  *
2126  * @remap1: output parameter indicates reo remap 1 register value
2127  * @remap2: output parameter indicates reo remap 2 register value
2128  * Return: bool type, true if remap is configured else false.
2129  */
2130 static bool dp_reo_remap_config(struct dp_soc *soc,
2131 				uint32_t *remap1,
2132 				uint32_t *remap2)
2133 {
2134 
2135 	*remap1 = ((0x1 << 0) | (0x2 << 3) | (0x3 << 6) | (0x1 << 9) |
2136 		(0x2 << 12) | (0x3 << 15) | (0x1 << 18) | (0x2 << 21)) << 8;
2137 
2138 	*remap2 = ((0x3 << 0) | (0x1 << 3) | (0x2 << 6) | (0x3 << 9) |
2139 		(0x1 << 12) | (0x2 << 15) | (0x3 << 18) | (0x1 << 21)) << 8;
2140 
2141 	return true;
2142 }
2143 #else
2144 static bool dp_reo_remap_config(struct dp_soc *soc,
2145 				uint32_t *remap1,
2146 				uint32_t *remap2)
2147 {
2148 	uint8_t offload_radio = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
2149 
2150 	switch (offload_radio) {
2151 	case 0:
2152 		*remap1 = ((0x1 << 0) | (0x2 << 3) | (0x3 << 6) |
2153 			(0x4 << 9) | (0x1 << 12) | (0x2 << 15) |
2154 			(0x3 << 18) | (0x4 << 21)) << 8;
2155 
2156 		*remap2 = ((0x1 << 0) | (0x2 << 3) | (0x3 << 6) |
2157 			(0x4 << 9) | (0x1 << 12) | (0x2 << 15) |
2158 			(0x3 << 18) | (0x4 << 21)) << 8;
2159 		break;
2160 
2161 	case 1:
2162 		*remap1 = ((0x2 << 0) | (0x3 << 3) | (0x4 << 6) |
2163 			(0x2 << 9) | (0x3 << 12) | (0x4 << 15) |
2164 			(0x2 << 18) | (0x3 << 21)) << 8;
2165 
2166 		*remap2 = ((0x4 << 0) | (0x2 << 3) | (0x3 << 6) |
2167 			(0x4 << 9) | (0x2 << 12) | (0x3 << 15) |
2168 			(0x4 << 18) | (0x2 << 21)) << 8;
2169 		break;
2170 
2171 	case 2:
2172 		*remap1 = ((0x1 << 0) | (0x3 << 3) | (0x4 << 6) |
2173 			(0x1 << 9) | (0x3 << 12) | (0x4 << 15) |
2174 			(0x1 << 18) | (0x3 << 21)) << 8;
2175 
2176 		*remap2 = ((0x4 << 0) | (0x1 << 3) | (0x3 << 6) |
2177 			(0x4 << 9) | (0x1 << 12) | (0x3 << 15) |
2178 			(0x4 << 18) | (0x1 << 21)) << 8;
2179 		break;
2180 
2181 	case 3:
2182 		/* return false if both radios are offloaded to NSS */
2183 		return false;
2184 	}
2185 	return true;
2186 }
2187 #endif
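
/*
 * Layout note (a reading of the constants above; the authoritative encoding
 * lives in the HAL/REO register definitions): each remap word packs eight
 * 3-bit destination-ring selectors, one per hash bucket, with the whole
 * pattern shifted left by 8 to line up with the register field. For the
 * non-IPA case 0, the repeating pattern 1,2,3,4 in
 *
 *	remap1 = ((1 << 0) | (2 << 3) | (3 << 6) | (4 << 9) |
 *		  (1 << 12) | (2 << 15) | (3 << 18) | (4 << 21)) << 8
 *
 * spreads hashed RX traffic evenly across SW rings 1-4.
 */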
2188 
2189 /*
2190  * dp_reo_frag_dst_set() - configure reo register to set the
2191  *                        fragment destination ring
2192  * @soc : Datapath soc
2193  * @frag_dst_ring : output parameter to set fragment destination ring
2194  *
2195  * Based on offload_radio below, fragment destination ring is selected
2196  * 0 - TCL
2197  * 1 - SW1
2198  * 2 - SW2
2199  * 3 - SW3
2200  * 4 - SW4
2201  * 5 - Release
2202  * 6 - FW
2203  * 7 - alternate select
2204  *
2205  * return: void
2206  */
2207 static void dp_reo_frag_dst_set(struct dp_soc *soc, uint8_t *frag_dst_ring)
2208 {
2209 	uint8_t offload_radio = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
2210 
2211 	switch (offload_radio) {
2212 	case 0:
2213 		*frag_dst_ring = HAL_SRNG_REO_EXCEPTION;
2214 		break;
2215 	case 3:
2216 		*frag_dst_ring = HAL_SRNG_REO_ALTERNATE_SELECT;
2217 		break;
2218 	default:
2219 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2220 				FL("dp_reo_frag_dst_set invalid offload radio config"));
2221 		break;
2222 	}
2223 }
2224 
2225 /*
2226  * dp_soc_cmn_setup() - Common SoC level initialization
2227  * @soc:		Datapath SOC handle
2228  *
2229  * This is an internal function used to set up common SOC data structures,
2230  * to be called from PDEV attach after receiving HW mode capabilities from FW
2231  */
2232 static int dp_soc_cmn_setup(struct dp_soc *soc)
2233 {
2234 	int i;
2235 	struct hal_reo_params reo_params;
2236 	int tx_ring_size;
2237 	int tx_comp_ring_size;
2238 	int reo_dst_ring_size;
2239 	uint32_t entries;
2240 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
2241 
2242 	if (qdf_atomic_read(&soc->cmn_init_done))
2243 		return 0;
2244 
2245 	if (dp_hw_link_desc_pool_setup(soc))
2246 		goto fail1;
2247 
2248 	soc_cfg_ctx = soc->wlan_cfg_ctx;
2249 	/* Setup SRNG rings */
2250 	/* Common rings */
2251 	if (dp_srng_setup(soc, &soc->wbm_desc_rel_ring, SW2WBM_RELEASE, 0, 0,
2252 		wlan_cfg_get_dp_soc_wbm_release_ring_size(soc_cfg_ctx))) {
2253 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2254 			FL("dp_srng_setup failed for wbm_desc_rel_ring"));
2255 		goto fail1;
2256 	}
2257 
2258 
2259 	soc->num_tcl_data_rings = 0;
2260 	/* Tx data rings */
2261 	if (!wlan_cfg_per_pdev_tx_ring(soc_cfg_ctx)) {
2262 		soc->num_tcl_data_rings =
2263 			wlan_cfg_num_tcl_data_rings(soc_cfg_ctx);
2264 		tx_comp_ring_size =
2265 			wlan_cfg_tx_comp_ring_size(soc_cfg_ctx);
2266 		tx_ring_size =
2267 			wlan_cfg_tx_ring_size(soc_cfg_ctx);
2268 		for (i = 0; i < soc->num_tcl_data_rings; i++) {
2269 			if (dp_srng_setup(soc, &soc->tcl_data_ring[i],
2270 				TCL_DATA, i, 0, tx_ring_size)) {
2271 				QDF_TRACE(QDF_MODULE_ID_DP,
2272 					QDF_TRACE_LEVEL_ERROR,
2273 					FL("dp_srng_setup failed for tcl_data_ring[%d]"), i);
2274 				goto fail1;
2275 			}
2276 			/*
2277 			 * TBD: Set IPA WBM ring size with ini IPA UC tx buffer
2278 			 * count
2279 			 */
2280 			if (dp_srng_setup(soc, &soc->tx_comp_ring[i],
2281 				WBM2SW_RELEASE, i, 0, tx_comp_ring_size)) {
2282 				QDF_TRACE(QDF_MODULE_ID_DP,
2283 					QDF_TRACE_LEVEL_ERROR,
2284 					FL("dp_srng_setup failed for tx_comp_ring[%d]"), i);
2285 				goto fail1;
2286 			}
2287 		}
2288 	} else {
2289 		/* This will be incremented during per pdev ring setup */
2290 		soc->num_tcl_data_rings = 0;
2291 	}
2292 
2293 	if (dp_tx_soc_attach(soc)) {
2294 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2295 				FL("dp_tx_soc_attach failed"));
2296 		goto fail1;
2297 	}
2298 
2299 	entries = wlan_cfg_get_dp_soc_tcl_cmd_ring_size(soc_cfg_ctx);
2300 	/* TCL command and status rings */
2301 	if (dp_srng_setup(soc, &soc->tcl_cmd_ring, TCL_CMD, 0, 0,
2302 			  entries)) {
2303 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2304 			FL("dp_srng_setup failed for tcl_cmd_ring"));
2305 		goto fail1;
2306 	}
2307 
2308 	entries = wlan_cfg_get_dp_soc_tcl_status_ring_size(soc_cfg_ctx);
2309 	if (dp_srng_setup(soc, &soc->tcl_status_ring, TCL_STATUS, 0, 0,
2310 			  entries)) {
2311 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2312 			FL("dp_srng_setup failed for tcl_status_ring"));
2313 		goto fail1;
2314 	}
2315 
2316 	reo_dst_ring_size = wlan_cfg_get_reo_dst_ring_size(soc->wlan_cfg_ctx);
2317 
2318 	/* TBD: call dp_tx_init to setup Tx SW descriptors and MSDU extension
2319 	 * descriptors
2320 	 */
2321 
2322 	/* Rx data rings */
2323 	if (!wlan_cfg_per_pdev_rx_ring(soc_cfg_ctx)) {
2324 		soc->num_reo_dest_rings =
2325 			wlan_cfg_num_reo_dest_rings(soc_cfg_ctx);
2326 		QDF_TRACE(QDF_MODULE_ID_DP,
2327 			QDF_TRACE_LEVEL_INFO,
2328 			FL("num_reo_dest_rings %d"), soc->num_reo_dest_rings);
2329 		for (i = 0; i < soc->num_reo_dest_rings; i++) {
2330 			if (dp_srng_setup(soc, &soc->reo_dest_ring[i], REO_DST,
2331 				i, 0, reo_dst_ring_size)) {
2332 				QDF_TRACE(QDF_MODULE_ID_DP,
2333 					  QDF_TRACE_LEVEL_ERROR,
2334 					  FL(RNG_ERR "reo_dest_ring [%d]"), i);
2335 				goto fail1;
2336 			}
2337 		}
2338 	} else {
2339 		/* This will be incremented during per pdev ring setup */
2340 		soc->num_reo_dest_rings = 0;
2341 	}
2342 
2343 	entries = wlan_cfg_get_dp_soc_rxdma_err_dst_ring_size(soc_cfg_ctx);
2344 	/* LMAC RxDMA to SW Rings configuration */
2345 	if (!wlan_cfg_per_pdev_lmac_ring(soc_cfg_ctx)) {
2346 		/* Only valid for MCL */
2347 		struct dp_pdev *pdev = soc->pdev_list[0];
2348 
2349 		for (i = 0; i < MAX_RX_MAC_RINGS; i++) {
2350 			if (dp_srng_setup(soc, &pdev->rxdma_err_dst_ring[i],
2351 					  RXDMA_DST, 0, i,
2352 					  entries)) {
2353 				QDF_TRACE(QDF_MODULE_ID_DP,
2354 					  QDF_TRACE_LEVEL_ERROR,
2355 					  FL(RNG_ERR "rxdma_err_dst_ring"));
2356 				goto fail1;
2357 			}
2358 		}
2359 	}
2360 	/* TBD: call dp_rx_init to setup Rx SW descriptors */
2361 
2362 	/* REO reinjection ring */
2363 	entries = wlan_cfg_get_dp_soc_reo_reinject_ring_size(soc_cfg_ctx);
2364 	if (dp_srng_setup(soc, &soc->reo_reinject_ring, REO_REINJECT, 0, 0,
2365 			  entries)) {
2366 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2367 			  FL("dp_srng_setup failed for reo_reinject_ring"));
2368 		goto fail1;
2369 	}
2370 
2371 
2372 	/* Rx release ring */
2373 	if (dp_srng_setup(soc, &soc->rx_rel_ring, WBM2SW_RELEASE, 3, 0,
2374 		wlan_cfg_get_dp_soc_rx_release_ring_size(soc_cfg_ctx))) {
2375 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2376 			  FL("dp_srng_setup failed for rx_rel_ring"));
2377 		goto fail1;
2378 	}
2379 
2380 
2381 	/* Rx exception ring */
2382 	entries = wlan_cfg_get_dp_soc_reo_exception_ring_size(soc_cfg_ctx);
2383 	if (dp_srng_setup(soc, &soc->reo_exception_ring,
2384 			  REO_EXCEPTION, 0, MAX_REO_DEST_RINGS, entries)) {
2385 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2386 			  FL("dp_srng_setup failed for reo_exception_ring"));
2387 		goto fail1;
2388 	}
2389 
2390 
2391 	/* REO command and status rings */
2392 	if (dp_srng_setup(soc, &soc->reo_cmd_ring, REO_CMD, 0, 0,
2393 		wlan_cfg_get_dp_soc_reo_cmd_ring_size(soc_cfg_ctx))) {
2394 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2395 			FL("dp_srng_setup failed for reo_cmd_ring"));
2396 		goto fail1;
2397 	}
2398 
2399 	hal_reo_init_cmd_ring(soc->hal_soc, soc->reo_cmd_ring.hal_srng);
2400 	TAILQ_INIT(&soc->rx.reo_cmd_list);
2401 	qdf_spinlock_create(&soc->rx.reo_cmd_lock);
2402 
2403 	if (dp_srng_setup(soc, &soc->reo_status_ring, REO_STATUS, 0, 0,
2404 		wlan_cfg_get_dp_soc_reo_status_ring_size(soc_cfg_ctx))) {
2405 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2406 			FL("dp_srng_setup failed for reo_status_ring"));
2407 		goto fail1;
2408 	}
2409 
2410 	qdf_spinlock_create(&soc->ast_lock);
2411 	dp_soc_wds_attach(soc);
2412 
2413 	/* Reset the cpu ring map if radio is NSS offloaded */
2414 	if (wlan_cfg_get_dp_soc_nss_cfg(soc_cfg_ctx)) {
2415 		dp_soc_reset_cpu_ring_map(soc);
2416 		dp_soc_reset_intr_mask(soc);
2417 	}
2418 
2419 	/* Setup HW REO */
2420 	qdf_mem_zero(&reo_params, sizeof(reo_params));
2421 
2422 	if (wlan_cfg_is_rx_hash_enabled(soc_cfg_ctx)) {
2423 
2424 		/*
2425 		 * Reo ring remap is not required if both radios
2426 		 * are offloaded to NSS
2427 		 */
2428 		if (!dp_reo_remap_config(soc,
2429 					&reo_params.remap1,
2430 					&reo_params.remap2))
2431 			goto out;
2432 
2433 		reo_params.rx_hash_enabled = true;
2434 	}
2435 
2436 	/* setup the global rx defrag waitlist */
2437 	TAILQ_INIT(&soc->rx.defrag.waitlist);
2438 	soc->rx.defrag.timeout_ms =
2439 		wlan_cfg_get_rx_defrag_min_timeout(soc_cfg_ctx);
2440 	soc->rx.flags.defrag_timeout_check =
2441 		wlan_cfg_get_defrag_timeout_check(soc_cfg_ctx);
2442 	qdf_spinlock_create(&soc->rx.defrag.defrag_lock);
2443 
2444 out:
2445 	/*
2446 	 * set the fragment destination ring
2447 	 */
2448 	dp_reo_frag_dst_set(soc, &reo_params.frag_dst_ring);
2449 
2450 	hal_reo_setup(soc->hal_soc, &reo_params);
2451 
2452 	qdf_atomic_set(&soc->cmn_init_done, 1);
2453 	qdf_nbuf_queue_init(&soc->htt_stats.msg);
2454 	return 0;
2455 fail1:
2456 	/*
2457 	 * Cleanup will be done as part of soc_detach, which will
2458 	 * be called on pdev attach failure
2459 	 */
2460 	return QDF_STATUS_E_FAILURE;
2461 }
2462 
2463 static void dp_pdev_detach_wifi3(struct cdp_pdev *txrx_pdev, int force);
2464 
2465 static void dp_lro_hash_setup(struct dp_soc *soc)
2466 {
2467 	struct cdp_lro_hash_config lro_hash;
2468 
2469 	if (!wlan_cfg_is_lro_enabled(soc->wlan_cfg_ctx) &&
2470 		!wlan_cfg_is_rx_hash_enabled(soc->wlan_cfg_ctx)) {
2471 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2472 			 FL("LRO disabled RX hash disabled"));
2473 		return;
2474 	}
2475 
2476 	qdf_mem_zero(&lro_hash, sizeof(lro_hash));
2477 
2478 	if (wlan_cfg_is_lro_enabled(soc->wlan_cfg_ctx)) {
2479 		lro_hash.lro_enable = 1;
2480 		lro_hash.tcp_flag = QDF_TCPHDR_ACK;
2481 		lro_hash.tcp_flag_mask = QDF_TCPHDR_FIN | QDF_TCPHDR_SYN |
2482 			 QDF_TCPHDR_RST | QDF_TCPHDR_ACK | QDF_TCPHDR_URG |
2483 			 QDF_TCPHDR_ECE | QDF_TCPHDR_CWR;
2484 	}
2485 
2486 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW, FL("enabled"));
2487 	qdf_get_random_bytes(lro_hash.toeplitz_hash_ipv4,
2488 		 (sizeof(lro_hash.toeplitz_hash_ipv4[0]) *
2489 		 LRO_IPV4_SEED_ARR_SZ));
2490 	qdf_get_random_bytes(lro_hash.toeplitz_hash_ipv6,
2491 		 (sizeof(lro_hash.toeplitz_hash_ipv6[0]) *
2492 		 LRO_IPV6_SEED_ARR_SZ));
2493 
2494 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW,
2495 		 "lro_hash: lro_enable: 0x%x tcp_flag 0x%x tcp_flag_mask 0x%x",
2496 		 lro_hash.lro_enable, lro_hash.tcp_flag,
2497 		 lro_hash.tcp_flag_mask);
2498 
2499 	qdf_trace_hex_dump(QDF_MODULE_ID_DP,
2500 		 QDF_TRACE_LEVEL_ERROR,
2501 		 (void *)lro_hash.toeplitz_hash_ipv4,
2502 		 (sizeof(lro_hash.toeplitz_hash_ipv4[0]) *
2503 		 LRO_IPV4_SEED_ARR_SZ));
2504 
2505 	qdf_trace_hex_dump(QDF_MODULE_ID_DP,
2506 		 QDF_TRACE_LEVEL_ERROR,
2507 		 (void *)lro_hash.toeplitz_hash_ipv6,
2508 		 (sizeof(lro_hash.toeplitz_hash_ipv6[0]) *
2509 		 LRO_IPV6_SEED_ARR_SZ));
2510 
2511 	qdf_assert(soc->cdp_soc.ol_ops->lro_hash_config);
2512 
2513 	if (soc->cdp_soc.ol_ops->lro_hash_config)
2514 		(void)soc->cdp_soc.ol_ops->lro_hash_config
2515 			(soc->ctrl_psoc, &lro_hash);
2516 }
2517 
2518 /*
2519 * dp_rxdma_ring_setup() - configure the RX DMA rings
2520 * @soc: data path SoC handle
2521 * @pdev: Physical device handle
2522 *
2523 * Return: QDF_STATUS_SUCCESS - success, QDF_STATUS_E_FAILURE - failure
2524 */
2525 #ifdef QCA_HOST2FW_RXBUF_RING
2526 static int dp_rxdma_ring_setup(struct dp_soc *soc,
2527 	 struct dp_pdev *pdev)
2528 {
2529 	struct wlan_cfg_dp_pdev_ctxt *pdev_cfg_ctx;
2530 	int max_mac_rings;
2531 	int i;
2532 
2533 	pdev_cfg_ctx = pdev->wlan_cfg_ctx;
2534 	max_mac_rings = wlan_cfg_get_num_mac_rings(pdev_cfg_ctx);
2535 
2536 	for (i = 0; i < max_mac_rings; i++) {
2537 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
2538 			 "%s: pdev_id %d mac_id %d",
2539 			 __func__, pdev->pdev_id, i);
2540 		if (dp_srng_setup(soc, &pdev->rx_mac_buf_ring[i],
2541 			RXDMA_BUF, 1, i,
2542 			wlan_cfg_get_rx_dma_buf_ring_size(pdev_cfg_ctx))) {
2543 			QDF_TRACE(QDF_MODULE_ID_DP,
2544 				 QDF_TRACE_LEVEL_ERROR,
2545 				 FL("failed rx mac ring setup"));
2546 			return QDF_STATUS_E_FAILURE;
2547 		}
2548 	}
2549 	return QDF_STATUS_SUCCESS;
2550 }
2551 #else
2552 static int dp_rxdma_ring_setup(struct dp_soc *soc,
2553 	 struct dp_pdev *pdev)
2554 {
2555 	return QDF_STATUS_SUCCESS;
2556 }
2557 #endif
2558 
2559 /**
2560  * dp_dscp_tid_map_setup(): Initialize the dscp-tid maps
2561  * @pdev - DP_PDEV handle
2562  *
2563  * Return: void
2564  */
2565 static inline void
2566 dp_dscp_tid_map_setup(struct dp_pdev *pdev)
2567 {
2568 	uint8_t map_id;
2569 	for (map_id = 0; map_id < DP_MAX_TID_MAPS; map_id++) {
2570 		qdf_mem_copy(pdev->dscp_tid_map[map_id], default_dscp_tid_map,
2571 				sizeof(default_dscp_tid_map));
2572 	}
2573 	for (map_id = 0; map_id < HAL_MAX_HW_DSCP_TID_MAPS; map_id++) {
2574 		hal_tx_set_dscp_tid_map(pdev->soc->hal_soc,
2575 				pdev->dscp_tid_map[map_id],
2576 				map_id);
2577 	}
2578 }
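
/*
 * Sketch of intended use (assumption, for illustration only): each pdev
 * keeps DP_MAX_TID_MAPS copies of the default DSCP-to-TID table and the
 * first HAL_MAX_HW_DSCP_TID_MAPS of them are mirrored into hardware, so a
 * later lookup along the lines of
 *
 *	tid = pdev->dscp_tid_map[vdev->dscp_tid_map_id][dscp];
 *
 * can be customised per map_id without disturbing the other maps.
 */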
2579 
2580 #ifdef QCA_SUPPORT_SON
2581 /**
2582  * dp_mark_peer_inact(): Update peer inactivity status
2583  * @peer_handle - datapath peer handle
2584  * @inactive - true to mark the peer inactive, false to mark it active
2585  * Return: void
2586  */
2587 void dp_mark_peer_inact(void *peer_handle, bool inactive)
2588 {
2589 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
2590 	struct dp_pdev *pdev;
2591 	struct dp_soc *soc;
2592 	bool inactive_old;
2593 
2594 	if (!peer)
2595 		return;
2596 
2597 	pdev = peer->vdev->pdev;
2598 	soc = pdev->soc;
2599 
2600 	inactive_old = peer->peer_bs_inact_flag == 1;
2601 	if (!inactive)
2602 		peer->peer_bs_inact = soc->pdev_bs_inact_reload;
2603 	peer->peer_bs_inact_flag = inactive ? 1 : 0;
2604 
2605 	if (inactive_old != inactive) {
2606 		/**
2607 		 * Note: a node lookup can happen in RX datapath context
2608 		 * when a node changes from inactive to active (at most once
2609 		 * per inactivity timeout threshold)
2610 		 */
2611 		if (soc->cdp_soc.ol_ops->record_act_change) {
2612 			soc->cdp_soc.ol_ops->record_act_change(
2613 					(void *)pdev->ctrl_pdev,
2614 					peer->mac_addr.raw, !inactive);
2615 		}
2616 	}
2617 }
2618 
2619 /**
2620  * dp_txrx_peer_find_inact_timeout_handler(): Inactivity timeout function
2621  *
2622  * Periodically checks the inactivity status
2623  */
2624 static os_timer_func(dp_txrx_peer_find_inact_timeout_handler)
2625 {
2626 	struct dp_pdev *pdev;
2627 	struct dp_vdev *vdev;
2628 	struct dp_peer *peer;
2629 	struct dp_soc *soc;
2630 	int i;
2631 
2632 	OS_GET_TIMER_ARG(soc, struct dp_soc *);
2633 
2634 	qdf_spin_lock(&soc->peer_ref_mutex);
2635 
2636 	for (i = 0; i < soc->pdev_count; i++) {
2637 	pdev = soc->pdev_list[i];
2638 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
2639 	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
2640 		if (vdev->opmode != wlan_op_mode_ap)
2641 			continue;
2642 
2643 		TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
2644 			if (!peer->authorize) {
2645 				/**
2646 				 * Inactivity check only interested in
2647 				 * connected node
2648 				 */
2649 				continue;
2650 			}
2651 			if (peer->peer_bs_inact > soc->pdev_bs_inact_reload) {
2652 				/**
2653 				 * This check ensures we do not wait extra long
2654 				 * due to the potential race condition
2655 				 */
2656 				peer->peer_bs_inact = soc->pdev_bs_inact_reload;
2657 			}
2658 			if (peer->peer_bs_inact > 0) {
2659 				/* Do not let it wrap around */
2660 				peer->peer_bs_inact--;
2661 			}
2662 			if (peer->peer_bs_inact == 0)
2663 				dp_mark_peer_inact(peer, true);
2664 		}
2665 	}
2666 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
2667 	}
2668 
2669 	qdf_spin_unlock(&soc->peer_ref_mutex);
2670 	qdf_timer_mod(&soc->pdev_bs_inact_timer,
2671 		      soc->pdev_bs_inact_interval * 1000);
2672 }
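
/*
 * Timing note (derived from the handler above; numbers are examples only):
 * the timer re-arms itself every pdev_bs_inact_interval seconds
 * (qdf_timer_mod() takes milliseconds, hence the * 1000) and each pass
 * decrements peer_bs_inact. With, say, an interval of 15 s and a reload
 * value of 16, an authorized peer with no activity would be marked
 * inactive after roughly 15 * 16 = 240 seconds.
 */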
2673 
2674 
2675 /**
2676  * dp_free_inact_timer(): free inact timer
2677  * @soc - Datapath SOC handle
2678  *
2679  * Return: void
2680  */
2681 void dp_free_inact_timer(struct dp_soc *soc)
2682 {
2683 	qdf_timer_free(&soc->pdev_bs_inact_timer);
2684 }
2685 #else
2686 
2687 void dp_mark_peer_inact(void *peer, bool inactive)
2688 {
2689 	return;
2690 }
2691 
2692 void dp_free_inact_timer(struct dp_soc *soc)
2693 {
2694 	return;
2695 }
2696 
2697 #endif
2698 
2699 #ifdef IPA_OFFLOAD
2700 /**
2701  * dp_setup_ipa_rx_refill_buf_ring - Setup second Rx refill buffer ring
2702  * @soc: data path instance
2703  * @pdev: core txrx pdev context
2704  *
2705  * Return: QDF_STATUS_SUCCESS: success
2706  *         QDF_STATUS_E_FAILURE: Error return
2707  */
2708 static int dp_setup_ipa_rx_refill_buf_ring(struct dp_soc *soc,
2709 					   struct dp_pdev *pdev)
2710 {
2711 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
2712 	int entries;
2713 
2714 	soc_cfg_ctx = soc->wlan_cfg_ctx;
2715 	entries = wlan_cfg_get_dp_soc_rxdma_refill_ring_size(soc_cfg_ctx);
2716 
2717 	/* Setup second Rx refill buffer ring */
2718 	if (dp_srng_setup(soc, &pdev->rx_refill_buf_ring2, RXDMA_BUF,
2719 			  IPA_RX_REFILL_BUF_RING_IDX,
2720 			  pdev->pdev_id,
2721 			  entries)) {
2722 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2723 			FL("dp_srng_setup failed second rx refill ring"));
2724 		return QDF_STATUS_E_FAILURE;
2725 	}
2726 	return QDF_STATUS_SUCCESS;
2727 }
2728 
2729 /**
2730  * dp_cleanup_ipa_rx_refill_buf_ring - Cleanup second Rx refill buffer ring
2731  * @soc: data path instance
2732  * @pdev: core txrx pdev context
2733  *
2734  * Return: void
2735  */
2736 static void dp_cleanup_ipa_rx_refill_buf_ring(struct dp_soc *soc,
2737 					      struct dp_pdev *pdev)
2738 {
2739 	dp_srng_cleanup(soc, &pdev->rx_refill_buf_ring2, RXDMA_BUF,
2740 			IPA_RX_REFILL_BUF_RING_IDX);
2741 }
2742 
2743 #else
2744 static int dp_setup_ipa_rx_refill_buf_ring(struct dp_soc *soc,
2745 					   struct dp_pdev *pdev)
2746 {
2747 	return QDF_STATUS_SUCCESS;
2748 }
2749 
2750 static void dp_cleanup_ipa_rx_refill_buf_ring(struct dp_soc *soc,
2751 					      struct dp_pdev *pdev)
2752 {
2753 }
2754 #endif
2755 
2756 #if !defined(QCA_WIFI_QCA6390) && !defined(DISABLE_MON_CONFIG)
2757 static
2758 QDF_STATUS dp_mon_rings_setup(struct dp_soc *soc, struct dp_pdev *pdev)
2759 {
2760 	int mac_id = 0;
2761 	int pdev_id = pdev->pdev_id;
2762 	int entries;
2763 	struct wlan_cfg_dp_pdev_ctxt *pdev_cfg_ctx;
2764 
2765 	pdev_cfg_ctx = pdev->wlan_cfg_ctx;
2766 
2767 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
2768 		int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, pdev_id);
2769 
2770 		entries = wlan_cfg_get_dma_mon_buf_ring_size(pdev_cfg_ctx);
2771 		if (dp_srng_setup(soc, &pdev->rxdma_mon_buf_ring[mac_id],
2772 				  RXDMA_MONITOR_BUF, 0, mac_for_pdev,
2773 				  entries)) {
2774 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2775 				  FL(RNG_ERR "rxdma_mon_buf_ring "));
2776 			return QDF_STATUS_E_NOMEM;
2777 		}
2778 
2779 		entries = wlan_cfg_get_dma_mon_dest_ring_size(pdev_cfg_ctx);
2780 		if (dp_srng_setup(soc, &pdev->rxdma_mon_dst_ring[mac_id],
2781 				  RXDMA_MONITOR_DST, 0, mac_for_pdev,
2782 				  entries)) {
2783 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2784 				  FL(RNG_ERR "rxdma_mon_dst_ring"));
2785 			return QDF_STATUS_E_NOMEM;
2786 		}
2787 
2788 		entries = wlan_cfg_get_dma_mon_stat_ring_size(pdev_cfg_ctx);
2789 		if (dp_srng_setup(soc, &pdev->rxdma_mon_status_ring[mac_id],
2790 				  RXDMA_MONITOR_STATUS, 0, mac_for_pdev,
2791 				  entries)) {
2792 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2793 				  FL(RNG_ERR "rxdma_mon_status_ring"));
2794 			return QDF_STATUS_E_NOMEM;
2795 		}
2796 
2797 		entries = wlan_cfg_get_dma_mon_desc_ring_size(pdev_cfg_ctx);
2798 		if (dp_srng_setup(soc, &pdev->rxdma_mon_desc_ring[mac_id],
2799 				  RXDMA_MONITOR_DESC, 0, mac_for_pdev,
2800 				  entries)) {
2801 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2802 				  FL(RNG_ERR "rxdma_mon_desc_ring"));
2803 			return QDF_STATUS_E_NOMEM;
2804 		}
2805 	}
2806 	return QDF_STATUS_SUCCESS;
2807 }
2808 #else
2809 static QDF_STATUS dp_mon_rings_setup(struct dp_soc *soc, struct dp_pdev *pdev)
2810 {
2811 	return QDF_STATUS_SUCCESS;
2812 }
2813 #endif
2814 
2815 /* dp_iterate_update_peer_list - update peer stats on cal client timer
2816  * @pdev_hdl: pdev handle
2817  */
2818 #ifdef ATH_SUPPORT_EXT_STAT
2819 void  dp_iterate_update_peer_list(void *pdev_hdl)
2820 {
2821 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_hdl;
2822 	struct dp_vdev *vdev = NULL;
2823 	struct dp_peer *peer = NULL;
2824 
2825 	DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
2826 		DP_VDEV_ITERATE_PEER_LIST(vdev, peer) {
2827 			dp_cal_client_update_peer_stats(&peer->stats);
2828 		}
2829 	}
2830 }
2831 #else
2832 void  dp_iterate_update_peer_list(void *pdev_hdl)
2833 {
2834 }
2835 #endif
2836 
2837 /*
2838 * dp_pdev_attach_wifi3() - attach txrx pdev
2839 * @txrx_soc: Datapath SOC handle
2840 * @ctrl_pdev: Opaque PDEV object
2841 * @htc_handle: HTC handle for host-target interface
2842 * @qdf_osdev: QDF OS device
2843 * @pdev_id: PDEV ID
2844 *
2845 * Return: DP PDEV handle on success, NULL on failure
2846 */
2847 static struct cdp_pdev *dp_pdev_attach_wifi3(struct cdp_soc_t *txrx_soc,
2848 	struct cdp_ctrl_objmgr_pdev *ctrl_pdev,
2849 	HTC_HANDLE htc_handle, qdf_device_t qdf_osdev, uint8_t pdev_id)
2850 {
2851 	int tx_ring_size;
2852 	int tx_comp_ring_size;
2853 	int reo_dst_ring_size;
2854 	int entries;
2855 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
2856 	int nss_cfg;
2857 
2858 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
2859 	struct dp_pdev *pdev = qdf_mem_malloc(sizeof(*pdev));
2860 
2861 	if (!pdev) {
2862 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2863 			FL("DP PDEV memory allocation failed"));
2864 		goto fail0;
2865 	}
2866 
2867 	soc_cfg_ctx = soc->wlan_cfg_ctx;
2868 	pdev->wlan_cfg_ctx = wlan_cfg_pdev_attach(soc->ctrl_psoc);
2869 
2870 	if (!pdev->wlan_cfg_ctx) {
2871 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2872 			FL("pdev cfg_attach failed"));
2873 
2874 		qdf_mem_free(pdev);
2875 		goto fail0;
2876 	}
2877 
2878 	/*
2879 	 * set nss pdev config based on soc config
2880 	 */
2881 	nss_cfg = wlan_cfg_get_dp_soc_nss_cfg(soc_cfg_ctx);
2882 	wlan_cfg_set_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx,
2883 			(nss_cfg & (1 << pdev_id)));
2884 
2885 	pdev->soc = soc;
2886 	pdev->ctrl_pdev = ctrl_pdev;
2887 	pdev->pdev_id = pdev_id;
2888 	soc->pdev_list[pdev_id] = pdev;
2889 	soc->pdev_count++;
2890 
2891 	TAILQ_INIT(&pdev->vdev_list);
2892 	qdf_spinlock_create(&pdev->vdev_list_lock);
2893 	pdev->vdev_count = 0;
2894 
2895 	qdf_spinlock_create(&pdev->tx_mutex);
2896 	qdf_spinlock_create(&pdev->neighbour_peer_mutex);
2897 	TAILQ_INIT(&pdev->neighbour_peers_list);
2898 	pdev->neighbour_peers_added = false;
2899 
2900 	if (dp_soc_cmn_setup(soc)) {
2901 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2902 			FL("dp_soc_cmn_setup failed"));
2903 		goto fail1;
2904 	}
2905 
2906 	/* Setup per PDEV TCL rings if configured */
2907 	if (wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
2908 		tx_ring_size =
2909 			wlan_cfg_tx_ring_size(soc_cfg_ctx);
2910 		tx_comp_ring_size =
2911 			wlan_cfg_tx_comp_ring_size(soc_cfg_ctx);
2912 
2913 		if (dp_srng_setup(soc, &soc->tcl_data_ring[pdev_id], TCL_DATA,
2914 			pdev_id, pdev_id, tx_ring_size)) {
2915 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2916 				FL("dp_srng_setup failed for tcl_data_ring"));
2917 			goto fail1;
2918 		}
2919 		if (dp_srng_setup(soc, &soc->tx_comp_ring[pdev_id],
2920 			WBM2SW_RELEASE, pdev_id, pdev_id, tx_comp_ring_size)) {
2921 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2922 				FL("dp_srng_setup failed for tx_comp_ring"));
2923 			goto fail1;
2924 		}
2925 		soc->num_tcl_data_rings++;
2926 	}
2927 
2928 	/* Tx specific init */
2929 	if (dp_tx_pdev_attach(pdev)) {
2930 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2931 			FL("dp_tx_pdev_attach failed"));
2932 		goto fail1;
2933 	}
2934 
2935 	reo_dst_ring_size = wlan_cfg_get_reo_dst_ring_size(soc->wlan_cfg_ctx);
2936 	/* Setup per PDEV REO rings if configured */
2937 	if (wlan_cfg_per_pdev_rx_ring(soc_cfg_ctx)) {
2938 		if (dp_srng_setup(soc, &soc->reo_dest_ring[pdev_id], REO_DST,
2939 			pdev_id, pdev_id, reo_dst_ring_size)) {
2940 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2941 				FL("dp_srng_setup failed for reo_dest_ring"));
2942 			goto fail1;
2943 		}
2944 		soc->num_reo_dest_rings++;
2945 
2946 	}
2947 	if (dp_srng_setup(soc, &pdev->rx_refill_buf_ring, RXDMA_BUF, 0, pdev_id,
2948 		wlan_cfg_get_dp_soc_rxdma_refill_ring_size(soc_cfg_ctx))) {
2949 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2950 			 FL("dp_srng_setup failed rx refill ring"));
2951 		goto fail1;
2952 	}
2953 
2954 	if (dp_rxdma_ring_setup(soc, pdev)) {
2955 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2956 			 FL("RXDMA ring config failed"));
2957 		goto fail1;
2958 	}
2959 
2960 	if (dp_mon_rings_setup(soc, pdev)) {
2961 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2962 			  FL("MONITOR rings setup failed"));
2963 		goto fail1;
2964 	}
2965 
2966 	entries = wlan_cfg_get_dp_soc_rxdma_err_dst_ring_size(soc_cfg_ctx);
2967 	if (wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx)) {
2968 		if (dp_srng_setup(soc, &pdev->rxdma_err_dst_ring[0], RXDMA_DST,
2969 				  0, pdev_id,
2970 				  entries)) {
2971 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2972 				  FL(RNG_ERR "rxdma_err_dst_ring"));
2973 			goto fail1;
2974 		}
2975 	}
2976 
2977 	if (dp_setup_ipa_rx_refill_buf_ring(soc, pdev))
2978 		goto fail1;
2979 
2980 	if (dp_ipa_ring_resource_setup(soc, pdev))
2981 		goto fail1;
2982 
2983 	if (dp_ipa_uc_attach(soc, pdev) != QDF_STATUS_SUCCESS) {
2984 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2985 			FL("dp_ipa_uc_attach failed"));
2986 		goto fail1;
2987 	}
2988 
2989 	/* Rx specific init */
2990 	if (dp_rx_pdev_attach(pdev)) {
2991 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2992 			FL("dp_rx_pdev_attach failed"));
2993 		goto fail1;
2994 	}
2995 	DP_STATS_INIT(pdev);
2996 
2997 	/* Monitor filter init */
2998 	pdev->mon_filter_mode = MON_FILTER_ALL;
2999 	pdev->fp_mgmt_filter = FILTER_MGMT_ALL;
3000 	pdev->fp_ctrl_filter = FILTER_CTRL_ALL;
3001 	pdev->fp_data_filter = FILTER_DATA_ALL;
3002 	pdev->mo_mgmt_filter = FILTER_MGMT_ALL;
3003 	pdev->mo_ctrl_filter = FILTER_CTRL_ALL;
3004 	pdev->mo_data_filter = FILTER_DATA_ALL;
3005 
3006 	dp_local_peer_id_pool_init(pdev);
3007 
3008 	dp_dscp_tid_map_setup(pdev);
3009 
3010 	/* Rx monitor mode specific init */
3011 	if (dp_rx_pdev_mon_attach(pdev)) {
3012 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
3013 				"dp_rx_pdev_mon_attach failed");
3014 		goto fail1;
3015 	}
3016 
3017 	if (dp_wdi_event_attach(pdev)) {
3018 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
3019 				"dp_wdi_event_attach failed");
3020 		goto fail1;
3021 	}
3022 
3023 	/* set the reo destination during initialization */
3024 	pdev->reo_dest = pdev->pdev_id + 1;
3025 
3026 	/*
3027 	 * initialize ppdu tlv list
3028 	 */
3029 	TAILQ_INIT(&pdev->ppdu_info_list);
3030 	pdev->tlv_count = 0;
3031 	pdev->list_depth = 0;
3032 
3033 	qdf_mem_zero(&pdev->sojourn_stats, sizeof(struct cdp_tx_sojourn_stats));
3034 
3035 	pdev->sojourn_buf = qdf_nbuf_alloc(pdev->soc->osdev,
3036 			      sizeof(struct cdp_tx_sojourn_stats), 0, 4,
3037 			      TRUE);
3038 
3039 	/* initialize cal client timer */
3040 	dp_cal_client_attach(&pdev->cal_client_ctx, pdev, pdev->soc->osdev,
3041 			     &dp_iterate_update_peer_list);
3042 
3043 	return (struct cdp_pdev *)pdev;
3044 
3045 fail1:
3046 	dp_pdev_detach_wifi3((struct cdp_pdev *)pdev, 0);
3047 
3048 fail0:
3049 	return NULL;
3050 }
3051 
3052 /*
3053 * dp_rxdma_ring_cleanup() - configure the RX DMA rings
3054 * dp_rxdma_ring_cleanup() - clean up the RX DMA rings
3055 * @pdev: Physical device handle
3056 *
3057 * Return: void
3058 */
3059 #ifdef QCA_HOST2FW_RXBUF_RING
3060 static void dp_rxdma_ring_cleanup(struct dp_soc *soc,
3061 	 struct dp_pdev *pdev)
3062 {
3063 	int max_mac_rings =
3064 		 wlan_cfg_get_num_mac_rings(pdev->wlan_cfg_ctx);
3065 	int i;
3066 
3067 	max_mac_rings = max_mac_rings < MAX_RX_MAC_RINGS ?
3068 				max_mac_rings : MAX_RX_MAC_RINGS;
3069 	for (i = 0; i < max_mac_rings; i++)
3070 		dp_srng_cleanup(soc, &pdev->rx_mac_buf_ring[i],
3071 			 RXDMA_BUF, 1);
3072 
3073 	qdf_timer_free(&soc->mon_reap_timer);
3074 }
3075 #else
3076 static void dp_rxdma_ring_cleanup(struct dp_soc *soc,
3077 	 struct dp_pdev *pdev)
3078 {
3079 }
3080 #endif
3081 
3082 /*
3083  * dp_neighbour_peers_detach() - Detach neighbour peers(nac clients)
3084  * @pdev: device object
3085  *
3086  * Return: void
3087  */
3088 static void dp_neighbour_peers_detach(struct dp_pdev *pdev)
3089 {
3090 	struct dp_neighbour_peer *peer = NULL;
3091 	struct dp_neighbour_peer *temp_peer = NULL;
3092 
3093 	TAILQ_FOREACH_SAFE(peer, &pdev->neighbour_peers_list,
3094 			neighbour_peer_list_elem, temp_peer) {
3095 		/* delete this peer from the list */
3096 		TAILQ_REMOVE(&pdev->neighbour_peers_list,
3097 				peer, neighbour_peer_list_elem);
3098 		qdf_mem_free(peer);
3099 	}
3100 
3101 	qdf_spinlock_destroy(&pdev->neighbour_peer_mutex);
3102 }
3103 
3104 /**
3105 * dp_htt_ppdu_stats_detach() - detach stats resources
3106 * @pdev: Datapath PDEV handle
3107 *
3108 * Return: void
3109 */
3110 static void dp_htt_ppdu_stats_detach(struct dp_pdev *pdev)
3111 {
3112 	struct ppdu_info *ppdu_info, *ppdu_info_next;
3113 
3114 	TAILQ_FOREACH_SAFE(ppdu_info, &pdev->ppdu_info_list,
3115 			ppdu_info_list_elem, ppdu_info_next) {
3116 		if (!ppdu_info)
3117 			break;
3118 		qdf_assert_always(ppdu_info->nbuf);
3119 		qdf_nbuf_free(ppdu_info->nbuf);
3120 		qdf_mem_free(ppdu_info);
3121 	}
3122 }
3123 
3124 #if !defined(QCA_WIFI_QCA6390) && !defined(DISABLE_MON_CONFIG)
3125 static
3126 void dp_mon_ring_deinit(struct dp_soc *soc, struct dp_pdev *pdev,
3127 			int mac_id)
3128 {
3129 		dp_srng_cleanup(soc, &pdev->rxdma_mon_buf_ring[mac_id],
3130 				RXDMA_MONITOR_BUF, 0);
3131 		dp_srng_cleanup(soc, &pdev->rxdma_mon_dst_ring[mac_id],
3132 				RXDMA_MONITOR_DST, 0);
3133 
3134 		dp_srng_cleanup(soc, &pdev->rxdma_mon_status_ring[mac_id],
3135 				RXDMA_MONITOR_STATUS, 0);
3136 
3137 		dp_srng_cleanup(soc, &pdev->rxdma_mon_desc_ring[mac_id],
3138 				RXDMA_MONITOR_DESC, 0);
3139 		dp_srng_cleanup(soc, &pdev->rxdma_err_dst_ring[mac_id],
3140 				RXDMA_DST, 0);
3141 }
3142 #else
3143 static void dp_mon_ring_deinit(struct dp_soc *soc, struct dp_pdev *pdev,
3144 			       int mac_id)
3145 {
3146 }
3147 #endif
3148 
3149 /*
3150 * dp_pdev_detach_wifi3() - detach txrx pdev
3151 * @txrx_pdev: Datapath PDEV handle
3152 * @force: Force detach
3153 * Return: void
3154 */
3155 static void dp_pdev_detach_wifi3(struct cdp_pdev *txrx_pdev, int force)
3156 {
3157 	struct dp_pdev *pdev = (struct dp_pdev *)txrx_pdev;
3158 	struct dp_soc *soc = pdev->soc;
3159 	qdf_nbuf_t curr_nbuf, next_nbuf;
3160 	int mac_id;
3161 
3162 	dp_wdi_event_detach(pdev);
3163 
3164 	dp_tx_pdev_detach(pdev);
3165 
3166 	if (wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
3167 		dp_srng_cleanup(soc, &soc->tcl_data_ring[pdev->pdev_id],
3168 			TCL_DATA, pdev->pdev_id);
3169 		dp_srng_cleanup(soc, &soc->tx_comp_ring[pdev->pdev_id],
3170 			WBM2SW_RELEASE, pdev->pdev_id);
3171 	}
3172 
3173 	dp_pktlogmod_exit(pdev);
3174 
3175 	dp_rx_pdev_detach(pdev);
3176 	dp_rx_pdev_mon_detach(pdev);
3177 	dp_neighbour_peers_detach(pdev);
3178 	qdf_spinlock_destroy(&pdev->tx_mutex);
3179 	qdf_spinlock_destroy(&pdev->vdev_list_lock);
3180 
3181 	dp_ipa_uc_detach(soc, pdev);
3182 
3183 	dp_cleanup_ipa_rx_refill_buf_ring(soc, pdev);
3184 
3185 	/* Cleanup per PDEV REO rings if configured */
3186 	if (wlan_cfg_per_pdev_rx_ring(soc->wlan_cfg_ctx)) {
3187 		dp_srng_cleanup(soc, &soc->reo_dest_ring[pdev->pdev_id],
3188 			REO_DST, pdev->pdev_id);
3189 	}
3190 
3191 	dp_srng_cleanup(soc, &pdev->rx_refill_buf_ring, RXDMA_BUF, 0);
3192 
3193 	dp_rxdma_ring_cleanup(soc, pdev);
3194 
3195 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
3196 		dp_mon_ring_deinit(soc, pdev, mac_id);
3197 		dp_srng_cleanup(soc, &pdev->rxdma_err_dst_ring[mac_id],
3198 			RXDMA_DST, 0);
3199 	}
3200 
3201 	curr_nbuf = pdev->invalid_peer_head_msdu;
3202 	while (curr_nbuf) {
3203 		next_nbuf = qdf_nbuf_next(curr_nbuf);
3204 		qdf_nbuf_free(curr_nbuf);
3205 		curr_nbuf = next_nbuf;
3206 	}
3207 
3208 	dp_htt_ppdu_stats_detach(pdev);
3209 
3210 	qdf_nbuf_free(pdev->sojourn_buf);
3211 
3212 	dp_cal_client_detach(&pdev->cal_client_ctx);
3213 	soc->pdev_list[pdev->pdev_id] = NULL;
3214 	soc->pdev_count--;
3215 	wlan_cfg_pdev_detach(pdev->wlan_cfg_ctx);
3216 	qdf_mem_free(pdev->dp_txrx_handle);
3217 	qdf_mem_free(pdev);
3218 }
3219 
3220 /*
3221  * dp_reo_desc_freelist_destroy() - Flush REO descriptors from deferred freelist
3222  * @soc: DP SOC handle
3223  */
3224 static inline void dp_reo_desc_freelist_destroy(struct dp_soc *soc)
3225 {
3226 	struct reo_desc_list_node *desc;
3227 	struct dp_rx_tid *rx_tid;
3228 
3229 	qdf_spin_lock_bh(&soc->reo_desc_freelist_lock);
3230 	while (qdf_list_remove_front(&soc->reo_desc_freelist,
3231 		(qdf_list_node_t **)&desc) == QDF_STATUS_SUCCESS) {
3232 		rx_tid = &desc->rx_tid;
3233 		qdf_mem_unmap_nbytes_single(soc->osdev,
3234 			rx_tid->hw_qdesc_paddr,
3235 			QDF_DMA_BIDIRECTIONAL,
3236 			rx_tid->hw_qdesc_alloc_size);
3237 		qdf_mem_free(rx_tid->hw_qdesc_vaddr_unaligned);
3238 		qdf_mem_free(desc);
3239 	}
3240 	qdf_spin_unlock_bh(&soc->reo_desc_freelist_lock);
3241 	qdf_list_destroy(&soc->reo_desc_freelist);
3242 	qdf_spinlock_destroy(&soc->reo_desc_freelist_lock);
3243 }
3244 
3245 /*
3246  * dp_soc_detach_wifi3() - Detach txrx SOC
3247  * @txrx_soc: DP SOC handle, struct cdp_soc_t is first element of struct dp_soc.
3248  */
3249 static void dp_soc_detach_wifi3(void *txrx_soc)
3250 {
3251 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
3252 	int i;
3253 
3254 	qdf_atomic_set(&soc->cmn_init_done, 0);
3255 
3256 	qdf_flush_work(&soc->htt_stats.work);
3257 	qdf_disable_work(&soc->htt_stats.work);
3258 
3259 	/* Free pending htt stats messages */
3260 	qdf_nbuf_queue_free(&soc->htt_stats.msg);
3261 
3262 	dp_free_inact_timer(soc);
3263 
3264 	for (i = 0; i < MAX_PDEV_CNT; i++) {
3265 		if (soc->pdev_list[i])
3266 			dp_pdev_detach_wifi3(
3267 				(struct cdp_pdev *)soc->pdev_list[i], 1);
3268 	}
3269 
3270 	dp_peer_find_detach(soc);
3271 
3272 	/* TBD: Call Tx and Rx cleanup functions to free buffers and
3273 	 * SW descriptors
3274 	 */
3275 
3276 	/* Free the ring memories */
3277 	/* Common rings */
3278 	dp_srng_cleanup(soc, &soc->wbm_desc_rel_ring, SW2WBM_RELEASE, 0);
3279 
3280 	dp_tx_soc_detach(soc);
3281 	/* Tx data rings */
3282 	if (!wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
3283 		for (i = 0; i < soc->num_tcl_data_rings; i++) {
3284 			dp_srng_cleanup(soc, &soc->tcl_data_ring[i],
3285 				TCL_DATA, i);
3286 			dp_srng_cleanup(soc, &soc->tx_comp_ring[i],
3287 				WBM2SW_RELEASE, i);
3288 		}
3289 	}
3290 
3291 	/* TCL command and status rings */
3292 	dp_srng_cleanup(soc, &soc->tcl_cmd_ring, TCL_CMD, 0);
3293 	dp_srng_cleanup(soc, &soc->tcl_status_ring, TCL_STATUS, 0);
3294 
3295 	/* Rx data rings */
3296 	if (!wlan_cfg_per_pdev_rx_ring(soc->wlan_cfg_ctx)) {
3297 		soc->num_reo_dest_rings =
3298 			wlan_cfg_num_reo_dest_rings(soc->wlan_cfg_ctx);
3299 		for (i = 0; i < soc->num_reo_dest_rings; i++) {
3300 			/* TODO: Get number of rings and ring sizes
3301 			 * from wlan_cfg
3302 			 */
3303 			dp_srng_cleanup(soc, &soc->reo_dest_ring[i],
3304 				REO_DST, i);
3305 		}
3306 	}
3307 	/* REO reinjection ring */
3308 	dp_srng_cleanup(soc, &soc->reo_reinject_ring, REO_REINJECT, 0);
3309 
3310 	/* Rx release ring */
3311 	dp_srng_cleanup(soc, &soc->rx_rel_ring, WBM2SW_RELEASE, 0);
3312 
3313 	/* Rx exception ring */
3314 	/* TODO: Better to store ring_type and ring_num in
3315 	 * dp_srng during setup
3316 	 */
3317 	dp_srng_cleanup(soc, &soc->reo_exception_ring, REO_EXCEPTION, 0);
3318 
3319 	/* REO command and status rings */
3320 	dp_srng_cleanup(soc, &soc->reo_cmd_ring, REO_CMD, 0);
3321 	dp_srng_cleanup(soc, &soc->reo_status_ring, REO_STATUS, 0);
3322 	dp_hw_link_desc_pool_cleanup(soc);
3323 
3324 	qdf_spinlock_destroy(&soc->peer_ref_mutex);
3325 	qdf_spinlock_destroy(&soc->htt_stats.lock);
3326 
3327 	htt_soc_detach(soc->htt_handle);
3328 
3329 	qdf_spinlock_destroy(&soc->rx.defrag.defrag_lock);
3330 
3331 	dp_reo_cmdlist_destroy(soc);
3332 	qdf_spinlock_destroy(&soc->rx.reo_cmd_lock);
3333 	dp_reo_desc_freelist_destroy(soc);
3334 
3335 	wlan_cfg_soc_detach(soc->wlan_cfg_ctx);
3336 
3337 	dp_soc_wds_detach(soc);
3338 	qdf_spinlock_destroy(&soc->ast_lock);
3339 
3340 	qdf_mem_free(soc);
3341 }
3342 
3343 #if !defined(QCA_WIFI_QCA6390) && !defined(DISABLE_MON_CONFIG)
3344 static void dp_mon_htt_srng_setup(struct dp_soc *soc,
3345 				  struct dp_pdev *pdev,
3346 				  int mac_id,
3347 				  int mac_for_pdev)
3348 {
3349 	htt_srng_setup(soc->htt_handle, mac_for_pdev,
3350 		       pdev->rxdma_mon_buf_ring[mac_id].hal_srng,
3351 		       RXDMA_MONITOR_BUF);
3352 
3353 	htt_srng_setup(soc->htt_handle, mac_for_pdev,
3354 		       pdev->rxdma_mon_dst_ring[mac_id].hal_srng,
3355 		       RXDMA_MONITOR_DST);
3356 
3357 	htt_srng_setup(soc->htt_handle, mac_for_pdev,
3358 		       pdev->rxdma_mon_status_ring[mac_id].hal_srng,
3359 		       RXDMA_MONITOR_STATUS);
3360 
3361 	htt_srng_setup(soc->htt_handle, mac_for_pdev,
3362 		       pdev->rxdma_mon_desc_ring[mac_id].hal_srng,
3363 		       RXDMA_MONITOR_DESC);
3364 }
3365 #else
3366 static void dp_mon_htt_srng_setup(struct dp_soc *soc,
3367 				  struct dp_pdev *pdev,
3368 				  int mac_id,
3369 				  int mac_for_pdev)
3370 {
3371 }
3372 #endif
3373 /*
3374  * dp_rxdma_ring_config() - configure the RX DMA rings
3375  *
3376  * This function is used to configure the MAC rings.
3377  * On MCL, the host provides buffers in the Host2FW ring;
3378  * FW refills (copies) buffers into the ring and updates
3379  * ring_idx in a register.
3380  *
3381  * @soc: data path SoC handle
3382  *
3383  * Return: void
3384  */
3385 #ifdef QCA_HOST2FW_RXBUF_RING
3386 static void dp_rxdma_ring_config(struct dp_soc *soc)
3387 {
3388 	int i;
3389 
3390 	for (i = 0; i < MAX_PDEV_CNT; i++) {
3391 		struct dp_pdev *pdev = soc->pdev_list[i];
3392 
3393 		if (pdev) {
3394 			int mac_id;
3395 			bool dbs_enable = 0;
3396 			int max_mac_rings =
3397 				 wlan_cfg_get_num_mac_rings
3398 				(pdev->wlan_cfg_ctx);
3399 
3400 			htt_srng_setup(soc->htt_handle, 0,
3401 				 pdev->rx_refill_buf_ring.hal_srng,
3402 				 RXDMA_BUF);
3403 
3404 			if (pdev->rx_refill_buf_ring2.hal_srng)
3405 				htt_srng_setup(soc->htt_handle, 0,
3406 					pdev->rx_refill_buf_ring2.hal_srng,
3407 					RXDMA_BUF);
3408 
3409 			if (soc->cdp_soc.ol_ops->
3410 				is_hw_dbs_2x2_capable) {
3411 				dbs_enable = soc->cdp_soc.ol_ops->
3412 					is_hw_dbs_2x2_capable(soc->ctrl_psoc);
3413 			}
3414 
3415 			if (dbs_enable) {
3416 				QDF_TRACE(QDF_MODULE_ID_TXRX,
3417 				QDF_TRACE_LEVEL_ERROR,
3418 				FL("DBS enabled max_mac_rings %d"),
3419 					 max_mac_rings);
3420 			} else {
3421 				max_mac_rings = 1;
3422 				QDF_TRACE(QDF_MODULE_ID_TXRX,
3423 					 QDF_TRACE_LEVEL_ERROR,
3424 					 FL("DBS disabled, max_mac_rings %d"),
3425 					 max_mac_rings);
3426 			}
3427 
3428 			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
3429 					 FL("pdev_id %d max_mac_rings %d"),
3430 					 pdev->pdev_id, max_mac_rings);
3431 
3432 			for (mac_id = 0; mac_id < max_mac_rings; mac_id++) {
3433 				int mac_for_pdev = dp_get_mac_id_for_pdev(
3434 							mac_id, pdev->pdev_id);
3435 
3436 				QDF_TRACE(QDF_MODULE_ID_TXRX,
3437 					 QDF_TRACE_LEVEL_ERROR,
3438 					 FL("mac_id %d"), mac_for_pdev);
3439 				htt_srng_setup(soc->htt_handle, mac_for_pdev,
3440 					 pdev->rx_mac_buf_ring[mac_id]
3441 						.hal_srng,
3442 					 RXDMA_BUF);
3443 				htt_srng_setup(soc->htt_handle, mac_for_pdev,
3444 					pdev->rxdma_err_dst_ring[mac_id]
3445 						.hal_srng,
3446 					RXDMA_DST);
3447 
3448 				/* Configure monitor mode rings */
3449 				dp_mon_htt_srng_setup(soc, pdev, mac_id,
3450 						      mac_for_pdev);
3451 
3452 			}
3453 		}
3454 	}
3455 
3456 	/*
3457 	 * Timer to reap rxdma status rings.
3458 	 * Needed until we enable ppdu end interrupts
3459 	 */
3460 	qdf_timer_init(soc->osdev, &soc->mon_reap_timer,
3461 			dp_service_mon_rings, (void *)soc,
3462 			QDF_TIMER_TYPE_WAKE_APPS);
3463 	soc->reap_timer_init = 1;
3464 }
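
/*
 * Note (assumption about usage elsewhere): the reap timer initialized above
 * is only created here; it is presumably armed later, e.g. when monitor or
 * pktlog mode is enabled, via qdf_timer_mod(&soc->mon_reap_timer, <period>),
 * and it is freed in dp_rxdma_ring_cleanup() via qdf_timer_free().
 */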
3465 #else
3466 /* This is only for WIN */
3467 static void dp_rxdma_ring_config(struct dp_soc *soc)
3468 {
3469 	int i;
3470 	int mac_id;
3471 
3472 	for (i = 0; i < MAX_PDEV_CNT; i++) {
3473 		struct dp_pdev *pdev = soc->pdev_list[i];
3474 
3475 		if (pdev == NULL)
3476 			continue;
3477 
3478 		for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
3479 			int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, i);
3480 
3481 			htt_srng_setup(soc->htt_handle, mac_for_pdev,
3482 				pdev->rx_refill_buf_ring.hal_srng, RXDMA_BUF);
3483 #ifndef DISABLE_MON_CONFIG
3484 			htt_srng_setup(soc->htt_handle, mac_for_pdev,
3485 				pdev->rxdma_mon_buf_ring[mac_id].hal_srng,
3486 				RXDMA_MONITOR_BUF);
3487 			htt_srng_setup(soc->htt_handle, mac_for_pdev,
3488 				pdev->rxdma_mon_dst_ring[mac_id].hal_srng,
3489 				RXDMA_MONITOR_DST);
3490 			htt_srng_setup(soc->htt_handle, mac_for_pdev,
3491 				pdev->rxdma_mon_status_ring[mac_id].hal_srng,
3492 				RXDMA_MONITOR_STATUS);
3493 			htt_srng_setup(soc->htt_handle, mac_for_pdev,
3494 				pdev->rxdma_mon_desc_ring[mac_id].hal_srng,
3495 				RXDMA_MONITOR_DESC);
3496 #endif
3497 			htt_srng_setup(soc->htt_handle, mac_for_pdev,
3498 				pdev->rxdma_err_dst_ring[mac_id].hal_srng,
3499 				RXDMA_DST);
3500 		}
3501 	}
3502 }
3503 #endif
3504 
3505 /*
3506  * dp_soc_attach_target_wifi3() - SOC initialization in the target
3507  * @txrx_soc: Datapath SOC handle
3508  */
3509 static int dp_soc_attach_target_wifi3(struct cdp_soc_t *cdp_soc)
3510 {
3511 	struct dp_soc *soc = (struct dp_soc *)cdp_soc;
3512 
3513 	htt_soc_attach_target(soc->htt_handle);
3514 
3515 	dp_rxdma_ring_config(soc);
3516 
3517 	DP_STATS_INIT(soc);
3518 
3519 	/* initialize work queue for stats processing */
3520 	qdf_create_work(0, &soc->htt_stats.work, htt_t2h_stats_handler, soc);
3521 
3522 	return 0;
3523 }
3524 
3525 /*
3526  * dp_soc_get_nss_cfg_wifi3() - SOC get nss config
3527  * @txrx_soc: Datapath SOC handle
3528  */
3529 static int dp_soc_get_nss_cfg_wifi3(struct cdp_soc_t *cdp_soc)
3530 {
3531 	struct dp_soc *dsoc = (struct dp_soc *)cdp_soc;
3532 	return wlan_cfg_get_dp_soc_nss_cfg(dsoc->wlan_cfg_ctx);
3533 }
3534 /*
3535  * dp_soc_set_nss_cfg_wifi3() - SOC set nss config
3536  * @cdp_soc: Datapath SOC handle
3537  * @config: nss config to be set
3538  */
3539 static void dp_soc_set_nss_cfg_wifi3(struct cdp_soc_t *cdp_soc, int config)
3540 {
3541 	struct dp_soc *dsoc = (struct dp_soc *)cdp_soc;
3542 	struct wlan_cfg_dp_soc_ctxt *wlan_cfg_ctx = dsoc->wlan_cfg_ctx;
3543 
3544 	wlan_cfg_set_dp_soc_nss_cfg(wlan_cfg_ctx, config);
3545 
3546 	/*
3547 	 * TODO: masked out based on the per offloaded radio
3548 	 */
3549 	if (config == dp_nss_cfg_dbdc) {
3550 		wlan_cfg_set_num_tx_desc_pool(wlan_cfg_ctx, 0);
3551 		wlan_cfg_set_num_tx_ext_desc_pool(wlan_cfg_ctx, 0);
3552 		wlan_cfg_set_num_tx_desc(wlan_cfg_ctx, 0);
3553 		wlan_cfg_set_num_tx_ext_desc(wlan_cfg_ctx, 0);
3554 	}
3555 
3556 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
3557 		  FL("nss-wifi<0> nss config is enabled"));
3558 }
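
/*
 * Illustrative usage sketch (not part of the driver; compiled out): a
 * control-path caller could program the DBDC NSS offload configuration
 * through the two handlers above. The example_* name is hypothetical.
 */
#if 0
static void example_program_dbdc_nss_offload(struct cdp_soc_t *cdp_soc)
{
	/*
	 * For a DBDC NSS offload configuration the host Tx descriptor
	 * pools are unused, so the setter above zeroes them out.
	 */
	if (dp_soc_get_nss_cfg_wifi3(cdp_soc) != dp_nss_cfg_dbdc)
		dp_soc_set_nss_cfg_wifi3(cdp_soc, dp_nss_cfg_dbdc);
}
#endif
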
3559 /*
3560  * dp_vdev_attach_wifi3() - attach txrx vdev
3561  * @txrx_pdev: Datapath PDEV handle
3562  * @vdev_mac_addr: MAC address of the virtual interface
3563  * @vdev_id: VDEV Id
3564  * @op_mode: VDEV operating mode
3565  *
3566  * Return: DP VDEV handle on success, NULL on failure
3567  */
3568 static struct cdp_vdev *dp_vdev_attach_wifi3(struct cdp_pdev *txrx_pdev,
3569 	uint8_t *vdev_mac_addr, uint8_t vdev_id, enum wlan_op_mode op_mode)
3570 {
3571 	struct dp_pdev *pdev = (struct dp_pdev *)txrx_pdev;
3572 	struct dp_soc *soc = pdev->soc;
3573 	struct dp_vdev *vdev = qdf_mem_malloc(sizeof(*vdev));
3574 
3575 	if (!vdev) {
3576 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3577 			FL("DP VDEV memory allocation failed"));
3578 		goto fail0;
3579 	}
3580 
3581 	vdev->pdev = pdev;
3582 	vdev->vdev_id = vdev_id;
3583 	vdev->opmode = op_mode;
3584 	vdev->osdev = soc->osdev;
3585 
3586 	vdev->osif_rx = NULL;
3587 	vdev->osif_rsim_rx_decap = NULL;
3588 	vdev->osif_get_key = NULL;
3589 	vdev->osif_rx_mon = NULL;
3590 	vdev->osif_tx_free_ext = NULL;
3591 	vdev->osif_vdev = NULL;
3592 
3593 	vdev->delete.pending = 0;
3594 	vdev->safemode = 0;
3595 	vdev->drop_unenc = 1;
3596 	vdev->sec_type = cdp_sec_type_none;
3597 #ifdef notyet
3598 	vdev->filters_num = 0;
3599 #endif
3600 
3601 	qdf_mem_copy(
3602 		&vdev->mac_addr.raw[0], vdev_mac_addr, OL_TXRX_MAC_ADDR_LEN);
3603 
3604 	vdev->tx_encap_type = wlan_cfg_pkt_type(soc->wlan_cfg_ctx);
3605 	vdev->rx_decap_type = wlan_cfg_pkt_type(soc->wlan_cfg_ctx);
3606 	vdev->dscp_tid_map_id = 0;
3607 	vdev->mcast_enhancement_en = 0;
3608 	vdev->raw_mode_war = wlan_cfg_get_raw_mode_war(soc->wlan_cfg_ctx);
3609 
3610 	/* TODO: Initialize default HTT meta data that will be used in
3611 	 * TCL descriptors for packets transmitted from this VDEV
3612 	 */
3613 
3614 	TAILQ_INIT(&vdev->peer_list);
3615 
3616 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
3617 	/* add this vdev into the pdev's list */
3618 	TAILQ_INSERT_TAIL(&pdev->vdev_list, vdev, vdev_list_elem);
3619 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
3620 	pdev->vdev_count++;
3621 
3622 	dp_tx_vdev_attach(vdev);
3623 
3624 
3625 	if ((soc->intr_mode == DP_INTR_POLL) &&
3626 			wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx) != 0) {
3627 		if (pdev->vdev_count == 1)
3628 			qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
3629 	}
3630 
3631 	dp_lro_hash_setup(soc);
3632 
3633 	/* LRO */
3634 	if (wlan_cfg_is_lro_enabled(soc->wlan_cfg_ctx) &&
3635 		wlan_op_mode_sta == vdev->opmode)
3636 		vdev->lro_enable = true;
3637 
3638 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
3639 		 "LRO: vdev_id %d lro_enable %d", vdev_id, vdev->lro_enable);
3640 
3641 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3642 		"Created vdev %pK (%pM)", vdev, vdev->mac_addr.raw);
3643 	DP_STATS_INIT(vdev);
3644 
3645 	if (wlan_op_mode_sta == vdev->opmode)
3646 		dp_peer_create_wifi3((struct cdp_vdev *)vdev,
3647 							vdev->mac_addr.raw,
3648 							NULL);
3649 
3650 	return (struct cdp_vdev *)vdev;
3651 
3652 fail0:
3653 	return NULL;
3654 }
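
/*
 * Illustrative usage sketch (not part of the driver; compiled out):
 * attaching a STA vdev with vdev_id 0. The example_* name and the MAC
 * address are hypothetical.
 */
#if 0
static struct cdp_vdev *example_attach_sta_vdev(struct cdp_pdev *txrx_pdev)
{
	/* Hypothetical locally administered MAC address */
	uint8_t mac[OL_TXRX_MAC_ADDR_LEN] = {0x02, 0x00, 0x00, 0x00, 0x00, 0x01};

	return dp_vdev_attach_wifi3(txrx_pdev, mac, 0, wlan_op_mode_sta);
}
#endif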
3655 
3656 /**
3657  * dp_vdev_register_wifi3() - Register VDEV operations from osif layer
3658  * @vdev_handle: Datapath VDEV handle
3659  * @osif_vdev: OSIF vdev handle
3660  * @ctrl_vdev: UMAC vdev handle
3661  * @txrx_ops: Tx and Rx operations
3662  *
3663  * Return: void
3664  */
3665 static void dp_vdev_register_wifi3(struct cdp_vdev *vdev_handle,
3666 	void *osif_vdev, struct cdp_ctrl_objmgr_vdev *ctrl_vdev,
3667 	struct ol_txrx_ops *txrx_ops)
3668 {
3669 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
3670 	vdev->osif_vdev = osif_vdev;
3671 	vdev->ctrl_vdev = ctrl_vdev;
3672 	vdev->osif_rx = txrx_ops->rx.rx;
3673 	vdev->osif_rx_stack = txrx_ops->rx.rx_stack;
3674 	vdev->osif_rsim_rx_decap = txrx_ops->rx.rsim_rx_decap;
3675 	vdev->osif_get_key = txrx_ops->get_key;
3676 	vdev->osif_rx_mon = txrx_ops->rx.mon;
3677 	vdev->osif_tx_free_ext = txrx_ops->tx.tx_free_ext;
3678 #ifdef notyet
3679 #if ATH_SUPPORT_WAPI
3680 	vdev->osif_check_wai = txrx_ops->rx.wai_check;
3681 #endif
3682 #endif
3683 #ifdef UMAC_SUPPORT_PROXY_ARP
3684 	vdev->osif_proxy_arp = txrx_ops->proxy_arp;
3685 #endif
3686 	vdev->me_convert = txrx_ops->me_convert;
3687 
3688 	/* TODO: Enable the following once Tx code is integrated */
3689 	if (vdev->mesh_vdev)
3690 		txrx_ops->tx.tx = dp_tx_send_mesh;
3691 	else
3692 		txrx_ops->tx.tx = dp_tx_send;
3693 
3694 	txrx_ops->tx.tx_exception = dp_tx_send_exception;
3695 
3696 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW,
3697 		"DP Vdev Register success");
3698 }
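
/*
 * Illustrative usage sketch (not part of the driver; compiled out): how the
 * OSIF layer might fill ol_txrx_ops before registering a vdev. The
 * example_* names (including the Rx callback) are hypothetical.
 */
#if 0
static void example_register_vdev(struct cdp_vdev *vdev_handle,
				  void *osif_dev,
				  struct cdp_ctrl_objmgr_vdev *ctrl_vdev)
{
	struct ol_txrx_ops ops;

	qdf_mem_zero(&ops, sizeof(ops));

	/* example_osif_rx_deliver stands in for the real OSIF Rx callback */
	ops.rx.rx = example_osif_rx_deliver;

	dp_vdev_register_wifi3(vdev_handle, osif_dev, ctrl_vdev, &ops);

	/*
	 * On return ops.tx.tx points at dp_tx_send (or dp_tx_send_mesh for
	 * mesh vdevs), which the OSIF layer uses as its Tx entry point.
	 */
}
#endif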
3699 
3700 /**
3701  * dp_vdev_flush_peers() - Forcibly flush peers of vdev
3702  * @vdev: Datapath VDEV handle
3703  *
3704  * Return: void
3705  */
3706 static void dp_vdev_flush_peers(struct dp_vdev *vdev)
3707 {
3708 	struct dp_pdev *pdev = vdev->pdev;
3709 	struct dp_soc *soc = pdev->soc;
3710 	struct dp_peer *peer;
3711 	uint16_t *peer_ids;
3712 	uint8_t i = 0, j = 0;
3713 
3714 	peer_ids = qdf_mem_malloc(soc->max_peers * sizeof(peer_ids[0]));
3715 	if (!peer_ids) {
3716 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3717 			"DP alloc failure - unable to flush peers");
3718 		return;
3719 	}
3720 
3721 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
3722 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
3723 		for (i = 0; i < MAX_NUM_PEER_ID_PER_PEER; i++)
3724 			if (peer->peer_ids[i] != HTT_INVALID_PEER)
3725 				if (j < soc->max_peers)
3726 					peer_ids[j++] = peer->peer_ids[i];
3727 	}
3728 	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
3729 
3730 	for (i = 0; i < j ; i++)
3731 		dp_rx_peer_unmap_handler(soc, peer_ids[i]);
3732 
3733 	qdf_mem_free(peer_ids);
3734 
3735 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
3736 		FL("Flushed peers for vdev object %pK "), vdev);
3737 }
3738 
3739 /*
3740  * dp_vdev_detach_wifi3() - Detach txrx vdev
3741  * @vdev_handle:	Datapath VDEV handle
3742  * @callback:		OL_IF callback invoked on completion of detach
3743  * @cb_context:	Callback context
3744  *
3745  */
3746 static void dp_vdev_detach_wifi3(struct cdp_vdev *vdev_handle,
3747 	ol_txrx_vdev_delete_cb callback, void *cb_context)
3748 {
3749 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
3750 	struct dp_pdev *pdev = vdev->pdev;
3751 	struct dp_soc *soc = pdev->soc;
3752 	struct dp_neighbour_peer *peer = NULL;
3753 
3754 	/* preconditions */
3755 	qdf_assert(vdev);
3756 
3757 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
3758 	/* remove the vdev from its parent pdev's list */
3759 	TAILQ_REMOVE(&pdev->vdev_list, vdev, vdev_list_elem);
3760 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
3761 
3762 	if (wlan_op_mode_sta == vdev->opmode)
3763 		dp_peer_delete_wifi3(vdev->vap_bss_peer, 0);
3764 
3765 	/*
3766 	 * If Target is hung, flush all peers before detaching vdev
3767 	 * this will free all references held due to missing
3768 	 * unmap commands from Target
3769 	 */
3770 	if (hif_get_target_status(soc->hif_handle) == TARGET_STATUS_RESET)
3771 		dp_vdev_flush_peers(vdev);
3772 
3773 	/*
3774 	 * Use peer_ref_mutex while accessing peer_list, in case
3775 	 * a peer is in the process of being removed from the list.
3776 	 */
3777 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
3778 	/* check that the vdev has no peers allocated */
3779 	if (!TAILQ_EMPTY(&vdev->peer_list)) {
3780 		/* debug print - will be removed later */
3781 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_WARN,
3782 			FL("not deleting vdev object %pK (%pM)"
3783 			" until deletion finishes for all its peers"),
3784 			vdev, vdev->mac_addr.raw);
3785 		/* indicate that the vdev needs to be deleted */
3786 		vdev->delete.pending = 1;
3787 		vdev->delete.callback = callback;
3788 		vdev->delete.context = cb_context;
3789 		qdf_spin_unlock_bh(&soc->peer_ref_mutex);
3790 		return;
3791 	}
3792 	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
3793 
3794 	qdf_spin_lock_bh(&pdev->neighbour_peer_mutex);
3795 	TAILQ_FOREACH(peer, &pdev->neighbour_peers_list,
3796 		      neighbour_peer_list_elem) {
3797 		QDF_ASSERT(peer->vdev != vdev);
3798 	}
3799 	qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex);
3800 
3801 	dp_tx_vdev_detach(vdev);
3802 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
3803 		FL("deleting vdev object %pK (%pM)"), vdev, vdev->mac_addr.raw);
3804 
3805 	qdf_mem_free(vdev);
3806 
3807 	if (callback)
3808 		callback(cb_context);
3809 }
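
/*
 * Illustrative usage sketch (not part of the driver; compiled out):
 * detaching a vdev with a completion callback. The example_* names are
 * hypothetical.
 */
#if 0
/* Hypothetical completion callback, run once the vdev delete finishes */
static void example_vdev_delete_done(void *cb_context)
{
}

static void example_detach_vdev(struct cdp_vdev *vdev_handle)
{
	/*
	 * If peers are still pending deletion, the callback is deferred and
	 * invoked from dp_peer_unref_delete() once the last peer is gone.
	 */
	dp_vdev_detach_wifi3(vdev_handle, example_vdev_delete_done, NULL);
}
#endif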
3810 
3811 /*
3812  * dp_peer_delete_ast_entries(): Delete all AST entries for a peer
3813  * @soc - datapath soc handle
3814  * @peer - datapath peer handle
3815  *
3816  * Delete the AST entries belonging to a peer
3817  */
3818 #ifdef FEATURE_AST
3819 static inline void dp_peer_delete_ast_entries(struct dp_soc *soc,
3820 					      struct dp_peer *peer)
3821 {
3822 	struct dp_ast_entry *ast_entry, *temp_ast_entry;
3823 
3824 	qdf_spin_lock_bh(&soc->ast_lock);
3825 	DP_PEER_ITERATE_ASE_LIST(peer, ast_entry, temp_ast_entry)
3826 		dp_peer_del_ast(soc, ast_entry);
3827 
3828 	peer->self_ast_entry = NULL;
3829 	TAILQ_INIT(&peer->ast_entry_list);
3830 	qdf_spin_unlock_bh(&soc->ast_lock);
3831 }
3832 #else
3833 static inline void dp_peer_delete_ast_entries(struct dp_soc *soc,
3834 					      struct dp_peer *peer)
3835 {
3836 }
3837 #endif
3838 
3839 #if ATH_SUPPORT_WRAP
3840 static inline struct dp_peer *dp_peer_can_reuse(struct dp_vdev *vdev,
3841 						uint8_t *peer_mac_addr)
3842 {
3843 	struct dp_peer *peer;
3844 
3845 	peer = dp_peer_find_hash_find(vdev->pdev->soc, peer_mac_addr,
3846 				      0, vdev->vdev_id);
3847 	if (!peer)
3848 		return NULL;
3849 
3850 	if (peer->bss_peer)
3851 		return peer;
3852 
3853 	qdf_atomic_dec(&peer->ref_cnt);
3854 	return NULL;
3855 }
3856 #else
3857 static inline struct dp_peer *dp_peer_can_reuse(struct dp_vdev *vdev,
3858 						uint8_t *peer_mac_addr)
3859 {
3860 	struct dp_peer *peer;
3861 
3862 	peer = dp_peer_find_hash_find(vdev->pdev->soc, peer_mac_addr,
3863 				      0, vdev->vdev_id);
3864 	if (!peer)
3865 		return NULL;
3866 
3867 	if (peer->bss_peer && (peer->vdev->vdev_id == vdev->vdev_id))
3868 		return peer;
3869 
3870 	qdf_atomic_dec(&peer->ref_cnt);
3871 	return NULL;
3872 }
3873 #endif
3874 
3875 /*
3876  * dp_peer_create_wifi3() - attach txrx peer
3877  * @txrx_vdev: Datapath VDEV handle
3878  * @peer_mac_addr: Peer MAC address
3878  * @ctrl_peer: Control path (objmgr) peer handle
3879  *
3880  * Return: DP peer handle on success, NULL on failure
3881  */
3882 static void *dp_peer_create_wifi3(struct cdp_vdev *vdev_handle,
3883 		uint8_t *peer_mac_addr, struct cdp_ctrl_objmgr_peer *ctrl_peer)
3884 {
3885 	struct dp_peer *peer;
3886 	int i;
3887 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
3888 	struct dp_pdev *pdev;
3889 	struct dp_soc *soc;
3890 	struct dp_ast_entry *ast_entry;
3891 	enum cdp_txrx_ast_entry_type ast_type = CDP_TXRX_AST_TYPE_STATIC;
3892 
3893 	/* preconditions */
3894 	qdf_assert(vdev);
3895 	qdf_assert(peer_mac_addr);
3896 
3897 	pdev = vdev->pdev;
3898 	soc = pdev->soc;
3899 
3900 	/*
3901 	 * If a peer entry with given MAC address already exists,
3902 	 * reuse the peer and reset the state of peer.
3903 	 */
3904 	peer = dp_peer_can_reuse(vdev, peer_mac_addr);
3905 
3906 	if (peer) {
3907 		peer->delete_in_progress = false;
3908 
3909 		dp_peer_delete_ast_entries(soc, peer);
3910 
3911 		if ((vdev->opmode == wlan_op_mode_sta) &&
3912 		    !qdf_mem_cmp(peer_mac_addr, &vdev->mac_addr.raw[0],
3913 		     DP_MAC_ADDR_LEN)) {
3914 			ast_type = CDP_TXRX_AST_TYPE_SELF;
3915 		}
3916 
3917 		dp_peer_add_ast(soc, peer, peer_mac_addr, ast_type, 0);
3918 
3919 		/*
3920 		 * The control path maintains a node count which is
3921 		 * incremented for every new peer create command. Since a
3922 		 * new peer is not created here and the earlier reference
3923 		 * is reused, a peer_unref_delete event is sent to the
3924 		 * control path to increment the count back.
3925 		 */
3926 		if (soc->cdp_soc.ol_ops->peer_unref_delete) {
3927 			soc->cdp_soc.ol_ops->peer_unref_delete(pdev->ctrl_pdev,
3928 				vdev->vdev_id, peer->mac_addr.raw);
3929 		}
3930 		peer->ctrl_peer = ctrl_peer;
3931 
3932 		dp_local_peer_id_alloc(pdev, peer);
3933 		DP_STATS_INIT(peer);
3934 
3935 		return (void *)peer;
3936 	} else {
3937 		/*
3938 		 * When a STA roams from RPTR AP to ROOT AP and vice versa, we
3939 		 * need to remove the AST entry which was earlier added as a WDS
3940 		 * entry.
3941 		 * If an AST entry exists, but no peer entry exists for the
3942 		 * given MAC address, we can deduce that it is a WDS entry.
3943 		 */
3944 		qdf_spin_lock_bh(&soc->ast_lock);
3945 		ast_entry = dp_peer_ast_hash_find(soc, peer_mac_addr);
3946 		if (ast_entry)
3947 			dp_peer_del_ast(soc, ast_entry);
3948 		qdf_spin_unlock_bh(&soc->ast_lock);
3949 	}
3950 
3951 #ifdef notyet
3952 	peer = (struct dp_peer *)qdf_mempool_alloc(soc->osdev,
3953 		soc->mempool_ol_ath_peer);
3954 #else
3955 	peer = (struct dp_peer *)qdf_mem_malloc(sizeof(*peer));
3956 #endif
3957 
3958 	if (!peer)
3959 		return NULL; /* failure */
3960 
3961 	qdf_mem_zero(peer, sizeof(struct dp_peer));
3962 
3963 	TAILQ_INIT(&peer->ast_entry_list);
3964 
3965 	/* store provided params */
3966 	peer->vdev = vdev;
3967 	peer->ctrl_peer = ctrl_peer;
3968 
3969 	if ((vdev->opmode == wlan_op_mode_sta) &&
3970 	    !qdf_mem_cmp(peer_mac_addr, &vdev->mac_addr.raw[0],
3971 			 DP_MAC_ADDR_LEN)) {
3972 		ast_type = CDP_TXRX_AST_TYPE_SELF;
3973 	}
3974 
3975 	dp_peer_add_ast(soc, peer, peer_mac_addr, ast_type, 0);
3976 
3977 	qdf_spinlock_create(&peer->peer_info_lock);
3978 
3979 	qdf_mem_copy(
3980 		&peer->mac_addr.raw[0], peer_mac_addr, OL_TXRX_MAC_ADDR_LEN);
3981 
3982 	/* TODO: See if rx_opt_proc is really required */
3983 	peer->rx_opt_proc = soc->rx_opt_proc;
3984 
3985 	/* initialize the peer_id */
3986 	for (i = 0; i < MAX_NUM_PEER_ID_PER_PEER; i++)
3987 		peer->peer_ids[i] = HTT_INVALID_PEER;
3988 
3989 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
3990 
3991 	qdf_atomic_init(&peer->ref_cnt);
3992 
3993 	/* keep one reference for attach */
3994 	qdf_atomic_inc(&peer->ref_cnt);
3995 
3996 	/* add this peer into the vdev's list */
3997 	if (wlan_op_mode_sta == vdev->opmode)
3998 		TAILQ_INSERT_HEAD(&vdev->peer_list, peer, peer_list_elem);
3999 	else
4000 		TAILQ_INSERT_TAIL(&vdev->peer_list, peer, peer_list_elem);
4001 
4002 	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
4003 
4004 	/* TODO: See if hash based search is required */
4005 	dp_peer_find_hash_add(soc, peer);
4006 
4007 	/* Initialize the peer state */
4008 	peer->state = OL_TXRX_PEER_STATE_DISC;
4009 
4010 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
4011 		"vdev %pK created peer %pK (%pM) ref_cnt: %d",
4012 		vdev, peer, peer->mac_addr.raw,
4013 		qdf_atomic_read(&peer->ref_cnt));
4014 	/*
4015 	 * For every peer MAP message, search and set if it is a bss_peer
4016 	 */
4017 	if (memcmp(peer->mac_addr.raw, vdev->mac_addr.raw, 6) == 0) {
4018 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
4019 			"vdev bss_peer!!!!");
4020 		peer->bss_peer = 1;
4021 		vdev->vap_bss_peer = peer;
4022 	}
4023 	for (i = 0; i < DP_MAX_TIDS; i++)
4024 		qdf_spinlock_create(&peer->rx_tid[i].tid_lock);
4025 
4026 	dp_local_peer_id_alloc(pdev, peer);
4027 	DP_STATS_INIT(peer);
4028 	return (void *)peer;
4029 }
4030 
4031 /*
4032  * dp_peer_setup_wifi3() - initialize the peer
4033  * @vdev_hdl: virtual device object
4034  * @peer: Peer object
4035  *
4036  * Return: void
4037  */
4038 static void dp_peer_setup_wifi3(struct cdp_vdev *vdev_hdl, void *peer_hdl)
4039 {
4040 	struct dp_peer *peer = (struct dp_peer *)peer_hdl;
4041 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_hdl;
4042 	struct dp_pdev *pdev;
4043 	struct dp_soc *soc;
4044 	bool hash_based = 0;
4045 	enum cdp_host_reo_dest_ring reo_dest;
4046 
4047 	/* preconditions */
4048 	qdf_assert(vdev);
4049 	qdf_assert(peer);
4050 
4051 	pdev = vdev->pdev;
4052 	soc = pdev->soc;
4053 
4054 	peer->last_assoc_rcvd = 0;
4055 	peer->last_disassoc_rcvd = 0;
4056 	peer->last_deauth_rcvd = 0;
4057 
4058 	/*
4059 	 * hash based steering is disabled for Radios which are offloaded
4060 	 * to NSS
4061 	 */
4062 	if (!wlan_cfg_get_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx))
4063 		hash_based = wlan_cfg_is_rx_hash_enabled(soc->wlan_cfg_ctx);
4064 
4065 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
4066 		FL("hash based steering for pdev: %d is %d"),
4067 		pdev->pdev_id, hash_based);
4068 
4069 	/*
4070 	 * The line below ensures that the proper reo_dest ring is chosen for
4071 	 * cases where the Toeplitz hash cannot be generated (e.g. non TCP/UDP)
4072 	 */
4073 	reo_dest = pdev->reo_dest;
4074 
4075 	if (soc->cdp_soc.ol_ops->peer_set_default_routing) {
4076 		/* TODO: Check the destination ring number to be passed to FW */
4077 		soc->cdp_soc.ol_ops->peer_set_default_routing(
4078 				pdev->ctrl_pdev, peer->mac_addr.raw,
4079 				peer->vdev->vdev_id, hash_based, reo_dest);
4080 	}
4081 
4082 	dp_peer_rx_init(pdev, peer);
4083 	return;
4084 }
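
/*
 * Illustrative usage sketch (not part of the driver; compiled out): the
 * usual create-then-setup sequence for a new peer. The example_* name is
 * hypothetical; ctrl_peer is NULL here for brevity, a real caller passes
 * the objmgr peer handle.
 */
#if 0
static void *example_add_and_setup_peer(struct cdp_vdev *vdev_handle,
					uint8_t *peer_mac)
{
	void *peer = dp_peer_create_wifi3(vdev_handle, peer_mac, NULL);

	if (peer)
		dp_peer_setup_wifi3(vdev_handle, peer);

	return peer;
}
#endif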
4085 
4086 /*
4087  * dp_set_vdev_tx_encap_type() - set the encap type of the vdev
4088  * @vdev_handle: virtual device object
4089  * @htt_pkt_type: type of pkt
4090  *
4091  * Return: void
4092  */
4093 static void dp_set_vdev_tx_encap_type(struct cdp_vdev *vdev_handle,
4094 	 enum htt_cmn_pkt_type val)
4095 {
4096 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
4097 	vdev->tx_encap_type = val;
4098 }
4099 
4100 /*
4101  * dp_set_vdev_rx_decap_type() - set the decap type of the vdev
4102  * @vdev_handle: virtual device object
4103  * @htt_pkt_type: type of pkt
4104  *
4105  * Return: void
4106  */
4107 static void dp_set_vdev_rx_decap_type(struct cdp_vdev *vdev_handle,
4108 	 enum htt_cmn_pkt_type val)
4109 {
4110 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
4111 	vdev->rx_decap_type = val;
4112 }
4113 
4114 /*
4115  * dp_set_ba_aging_timeout() - set ba aging timeout per AC
4116  * @txrx_soc: cdp soc handle
4117  * @ac: Access category
4118  * @value: timeout value in millisec
4119  *
4120  * Return: void
4121  */
4122 static void dp_set_ba_aging_timeout(struct cdp_soc_t *txrx_soc,
4123 				    uint8_t ac, uint32_t value)
4124 {
4125 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
4126 
4127 	hal_set_ba_aging_timeout(soc->hal_soc, ac, value);
4128 }
4129 
4130 /*
4131  * dp_get_ba_aging_timeout() - get ba aging timeout per AC
4132  * @txrx_soc: cdp soc handle
4133  * @ac: access category
4134  * @value: timeout value in millisec
4135  *
4136  * Return: void
4137  */
4138 static void dp_get_ba_aging_timeout(struct cdp_soc_t *txrx_soc,
4139 				    uint8_t ac, uint32_t *value)
4140 {
4141 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
4142 
4143 	hal_get_ba_aging_timeout(soc->hal_soc, ac, value);
4144 }
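
/*
 * Illustrative usage sketch (not part of the driver; compiled out):
 * read-modify-write of the BA aging timeout for one access category. The
 * example_* name and the doubling policy are hypothetical.
 */
#if 0
static void example_double_ba_timeout(struct cdp_soc_t *txrx_soc, uint8_t ac)
{
	uint32_t timeout_ms;

	dp_get_ba_aging_timeout(txrx_soc, ac, &timeout_ms);
	dp_set_ba_aging_timeout(txrx_soc, ac, timeout_ms * 2);
}
#endif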
4145 
4146 /*
4147  * dp_set_pdev_reo_dest() - set the reo destination ring for this pdev
4148  * @pdev_handle: physical device object
4149  * @val: reo destination ring index (1 - 4)
4150  *
4151  * Return: void
4152  */
4153 static void dp_set_pdev_reo_dest(struct cdp_pdev *pdev_handle,
4154 	 enum cdp_host_reo_dest_ring val)
4155 {
4156 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
4157 
4158 	if (pdev)
4159 		pdev->reo_dest = val;
4160 }
4161 
4162 /*
4163  * dp_get_pdev_reo_dest() - get the reo destination for this pdev
4164  * @pdev_handle: physical device object
4165  *
4166  * Return: reo destination ring index
4167  */
4168 static enum cdp_host_reo_dest_ring
4169 dp_get_pdev_reo_dest(struct cdp_pdev *pdev_handle)
4170 {
4171 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
4172 
4173 	if (pdev)
4174 		return pdev->reo_dest;
4175 	else
4176 		return cdp_host_reo_dest_ring_unknown;
4177 }
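
/*
 * Illustrative usage sketch (not part of the driver; compiled out): steer a
 * pdev to a different REO destination ring only when it changes. The
 * example_* name is hypothetical.
 */
#if 0
static void example_steer_pdev_to_reo_ring(struct cdp_pdev *pdev_handle,
					   enum cdp_host_reo_dest_ring ring)
{
	if (dp_get_pdev_reo_dest(pdev_handle) != ring)
		dp_set_pdev_reo_dest(pdev_handle, ring);
}
#endif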
4178 
4179 #ifdef QCA_SUPPORT_SON
4180 static void dp_son_peer_authorize(struct dp_peer *peer)
4181 {
4182 	struct dp_soc *soc;
4183 	soc = peer->vdev->pdev->soc;
4184 	peer->peer_bs_inact_flag = 0;
4185 	peer->peer_bs_inact = soc->pdev_bs_inact_reload;
4186 	return;
4187 }
4188 #else
4189 static void dp_son_peer_authorize(struct dp_peer *peer)
4190 {
4191 	return;
4192 }
4193 #endif
4194 /*
4195  * dp_set_filter_neighbour_peers() - set filter neighbour peers for smart mesh
4196  * @pdev_handle: device object
4197  * @val: value to be set
4198  *
4199  * Return: 0 on success
4200  */
4201 static int dp_set_filter_neighbour_peers(struct cdp_pdev *pdev_handle,
4202 	 uint32_t val)
4203 {
4204 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
4205 
4206 	/* Enable/Disable smart mesh filtering. This flag will be checked
4207 	 * during rx processing to check if packets are from NAC clients.
4208 	 */
4209 	pdev->filter_neighbour_peers = val;
4210 	return 0;
4211 }
4212 
4213 /*
4214  * dp_update_filter_neighbour_peers() - set neighbour peers(nac clients)
4215  * address for smart mesh filtering
4216  * @vdev_handle: virtual device object
4217  * @cmd: Add/Del command
4218  * @macaddr: nac client mac address
4219  *
4220  * Return: 1 on success, 0 on failure
4221  */
4222 static int dp_update_filter_neighbour_peers(struct cdp_vdev *vdev_handle,
4223 					    uint32_t cmd, uint8_t *macaddr)
4224 {
4225 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
4226 	struct dp_pdev *pdev = vdev->pdev;
4227 	struct dp_neighbour_peer *peer = NULL;
4228 
4229 	if (!macaddr)
4230 		goto fail0;
4231 
4232 	/* Store address of NAC (neighbour peer) which will be checked
4233 	 * against TA of received packets.
4234 	 */
4235 	if (cmd == DP_NAC_PARAM_ADD) {
4236 		peer = (struct dp_neighbour_peer *) qdf_mem_malloc(
4237 				sizeof(*peer));
4238 
4239 		if (!peer) {
4240 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
4241 				FL("DP neighbour peer node memory allocation failed"));
4242 			goto fail0;
4243 		}
4244 
4245 		qdf_mem_copy(&peer->neighbour_peers_macaddr.raw[0],
4246 			macaddr, DP_MAC_ADDR_LEN);
4247 		peer->vdev = vdev;
4248 
4249 		qdf_spin_lock_bh(&pdev->neighbour_peer_mutex);
4250 
4251 		/* add this neighbour peer into the list */
4252 		TAILQ_INSERT_TAIL(&pdev->neighbour_peers_list, peer,
4253 				neighbour_peer_list_elem);
4254 		qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex);
4255 
4256 		/* first neighbour */
4257 		if (!pdev->neighbour_peers_added) {
4258 			pdev->neighbour_peers_added = true;
4259 			dp_ppdu_ring_cfg(pdev);
4260 		}
4261 		return 1;
4262 
4263 	} else if (cmd == DP_NAC_PARAM_DEL) {
4264 		qdf_spin_lock_bh(&pdev->neighbour_peer_mutex);
4265 		TAILQ_FOREACH(peer, &pdev->neighbour_peers_list,
4266 				neighbour_peer_list_elem) {
4267 			if (!qdf_mem_cmp(&peer->neighbour_peers_macaddr.raw[0],
4268 				macaddr, DP_MAC_ADDR_LEN)) {
4269 				/* delete this peer from the list */
4270 				TAILQ_REMOVE(&pdev->neighbour_peers_list,
4271 					peer, neighbour_peer_list_elem);
4272 				qdf_mem_free(peer);
4273 				break;
4274 			}
4275 		}
4276 		/* last neighbour deleted */
4277 		if (TAILQ_EMPTY(&pdev->neighbour_peers_list))
4278 			pdev->neighbour_peers_added = false;
4279 
4280 		qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex);
4281 
4282 		if (!pdev->mcopy_mode && !pdev->neighbour_peers_added &&
4283 		    !pdev->enhanced_stats_en)
4284 			dp_ppdu_ring_reset(pdev);
4285 		return 1;
4286 
4287 	}
4288 
4289 fail0:
4290 	return 0;
4291 }
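
/*
 * Illustrative usage sketch (not part of the driver; compiled out): adding
 * or removing a NAC client MAC for smart mesh filtering. The example_* name
 * is hypothetical.
 */
#if 0
static int example_track_nac_client(struct cdp_vdev *vdev_handle,
				    uint8_t *nac_mac, bool add)
{
	/* The handler above returns 1 on success, 0 on failure */
	return dp_update_filter_neighbour_peers(vdev_handle,
						add ? DP_NAC_PARAM_ADD :
						      DP_NAC_PARAM_DEL,
						nac_mac);
}
#endif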
4292 
4293 /*
4294  * dp_get_sec_type() - Get the security type
4295  * @peer:		Datapath peer handle
4296  * @sec_idx:    Security id (mcast, ucast)
4297  *
4298  * return sec_type: Security type
4299  */
4300 static int dp_get_sec_type(struct cdp_peer *peer, uint8_t sec_idx)
4301 {
4302 	struct dp_peer *dpeer = (struct dp_peer *)peer;
4303 
4304 	return dpeer->security[sec_idx].sec_type;
4305 }
4306 
4307 /*
4308  * dp_peer_authorize() - authorize txrx peer
4309  * @peer_handle:		Datapath peer handle
4310  * @authorize: authorize flag (1 to authorize the peer, 0 otherwise)
4311  *
4312  */
4313 static void dp_peer_authorize(struct cdp_peer *peer_handle, uint32_t authorize)
4314 {
4315 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
4316 	struct dp_soc *soc;
4317 
4318 	if (peer != NULL) {
4319 		soc = peer->vdev->pdev->soc;
4320 		qdf_spin_lock_bh(&soc->peer_ref_mutex);
4321 		dp_son_peer_authorize(peer);
4322 		peer->authorize = authorize ? 1 : 0;
4323 		qdf_spin_unlock_bh(&soc->peer_ref_mutex);
4324 	}
4325 }
4326 
4327 #ifdef QCA_SUPPORT_SON
4328 /*
4329  * dp_txrx_update_inact_threshold() - Update inact timer threshold
4330  * @pdev_handle: Device handle
4331  * @new_threshold: updated threshold value
4332  *
4333  */
4334 static void
4335 dp_txrx_update_inact_threshold(struct cdp_pdev *pdev_handle,
4336 			       u_int16_t new_threshold)
4337 {
4338 	struct dp_vdev *vdev;
4339 	struct dp_peer *peer;
4340 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
4341 	struct dp_soc *soc = pdev->soc;
4342 	u_int16_t old_threshold = soc->pdev_bs_inact_reload;
4343 
4344 	if (old_threshold == new_threshold)
4345 		return;
4346 
4347 	soc->pdev_bs_inact_reload = new_threshold;
4348 
4349 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
4350 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
4351 	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
4352 		if (vdev->opmode != wlan_op_mode_ap)
4353 			continue;
4354 
4355 		TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
4356 			if (!peer->authorize)
4357 				continue;
4358 
4359 			if (old_threshold - peer->peer_bs_inact >=
4360 					new_threshold) {
4361 				dp_mark_peer_inact((void *)peer, true);
4362 				peer->peer_bs_inact = 0;
4363 			} else {
4364 				peer->peer_bs_inact = new_threshold -
4365 					(old_threshold - peer->peer_bs_inact);
4366 			}
4367 		}
4368 	}
4369 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
4370 	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
4371 }
4372 
4373 /**
4374  * dp_txrx_reset_inact_count(): Reset inact count
4375  * @pdev_handle - device handle
4376  *
4377  * Return: void
4378  */
4379 static void
4380 dp_txrx_reset_inact_count(struct cdp_pdev *pdev_handle)
4381 {
4382 	struct dp_vdev *vdev = NULL;
4383 	struct dp_peer *peer = NULL;
4384 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
4385 	struct dp_soc *soc = pdev->soc;
4386 
4387 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
4388 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
4389 	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
4390 		if (vdev->opmode != wlan_op_mode_ap)
4391 			continue;
4392 
4393 		TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
4394 			if (!peer->authorize)
4395 				continue;
4396 
4397 			peer->peer_bs_inact = soc->pdev_bs_inact_reload;
4398 		}
4399 	}
4400 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
4401 	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
4402 }
4403 
4404 /**
4405  * dp_set_inact_params(): set inactivity params
4406  * @pdev_handle - device handle
4407  * @inact_check_interval - inactivity check interval (in seconds)
4408  * @inact_normal - inactivity threshold under normal load
4409  * @inact_overload - inactivity threshold under overload
4410  *
4411  * Return: bool
4412  */
4413 bool dp_set_inact_params(struct cdp_pdev *pdev_handle,
4414 			 u_int16_t inact_check_interval,
4415 			 u_int16_t inact_normal, u_int16_t inact_overload)
4416 {
4417 	struct dp_soc *soc;
4418 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
4419 
4420 	if (!pdev)
4421 		return false;
4422 
4423 	soc = pdev->soc;
4424 	if (!soc)
4425 		return false;
4426 
4427 	soc->pdev_bs_inact_interval = inact_check_interval;
4428 	soc->pdev_bs_inact_normal = inact_normal;
4429 	soc->pdev_bs_inact_overload = inact_overload;
4430 
4431 	dp_txrx_update_inact_threshold((struct cdp_pdev *)pdev,
4432 					soc->pdev_bs_inact_normal);
4433 
4434 	return true;
4435 }
4436 
4437 /**
4438  * dp_start_inact_timer(): Inactivity timer start
4439  * @pdev_handle - device handle
4440  * @enable - Inactivity timer start/stop
4441  *
4442  * Return: bool
4443  */
4444 bool dp_start_inact_timer(struct cdp_pdev *pdev_handle, bool enable)
4445 {
4446 	struct dp_soc *soc;
4447 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
4448 
4449 	if (!pdev)
4450 		return false;
4451 
4452 	soc = pdev->soc;
4453 	if (!soc)
4454 		return false;
4455 
4456 	if (enable) {
4457 		dp_txrx_reset_inact_count((struct cdp_pdev *)pdev);
4458 		qdf_timer_mod(&soc->pdev_bs_inact_timer,
4459 			      soc->pdev_bs_inact_interval * 1000);
4460 	} else {
4461 		qdf_timer_stop(&soc->pdev_bs_inact_timer);
4462 	}
4463 
4464 	return true;
4465 }
4466 
4467 /**
4468  * dp_set_overload(): Set inactivity overload
4469  * @pdev_handle - device handle
4470  * @overload - overload status
4471  *
4472  * Return: void
4473  */
4474 void dp_set_overload(struct cdp_pdev *pdev_handle, bool overload)
4475 {
4476 	struct dp_soc *soc;
4477 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
4478 
4479 	if (!pdev)
4480 		return;
4481 
4482 	soc = pdev->soc;
4483 	if (!soc)
4484 		return;
4485 
4486 	dp_txrx_update_inact_threshold((struct cdp_pdev *)pdev,
4487 			overload ? soc->pdev_bs_inact_overload :
4488 			soc->pdev_bs_inact_normal);
4489 }
4490 
4491 /**
4492  * dp_peer_is_inact(): check whether peer is inactive
4493  * @peer_handle - datapath peer handle
4494  *
4495  * Return: bool
4496  */
4497 bool dp_peer_is_inact(void *peer_handle)
4498 {
4499 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
4500 
4501 	if (!peer)
4502 		return false;
4503 
4504 	return peer->peer_bs_inact_flag == 1;
4505 }
4506 
4507 /**
4508  * dp_init_inact_timer: initialize the inact timer
4509  * @soc - SOC handle
4510  *
4511  * Return: void
4512  */
4513 void dp_init_inact_timer(struct dp_soc *soc)
4514 {
4515 	qdf_timer_init(soc->osdev, &soc->pdev_bs_inact_timer,
4516 		dp_txrx_peer_find_inact_timeout_handler,
4517 		(void *)soc, QDF_TIMER_TYPE_WAKE_APPS);
4518 }
4519 
4520 #else
4521 
4522 bool dp_set_inact_params(struct cdp_pdev *pdev, u_int16_t inact_check_interval,
4523 			 u_int16_t inact_normal, u_int16_t inact_overload)
4524 {
4525 	return false;
4526 }
4527 
4528 bool dp_start_inact_timer(struct cdp_pdev *pdev, bool enable)
4529 {
4530 	return false;
4531 }
4532 
4533 void dp_set_overload(struct cdp_pdev *pdev, bool overload)
4534 {
4535 	return;
4536 }
4537 
4538 void dp_init_inact_timer(struct dp_soc *soc)
4539 {
4540 	return;
4541 }
4542 
4543 bool dp_peer_is_inact(void *peer)
4544 {
4545 	return false;
4546 }
4547 #endif
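
/*
 * Illustrative usage sketch (not part of the driver; compiled out):
 * configuring and starting inactivity detection. The example_* name and the
 * numeric thresholds are hypothetical.
 */
#if 0
static void example_enable_inactivity_detection(struct cdp_pdev *pdev_handle)
{
	/*
	 * Check every 10 seconds; mark a peer inactive after 30 idle
	 * intervals normally, or 15 intervals when the radio is overloaded.
	 */
	if (dp_set_inact_params(pdev_handle, 10, 30, 15))
		dp_start_inact_timer(pdev_handle, true);
}
#endif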
4548 
4549 /*
4550  * dp_peer_unref_delete() - unref and delete peer
4551  * @peer_handle:		Datapath peer handle
4552  *
4553  */
4554 void dp_peer_unref_delete(void *peer_handle)
4555 {
4556 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
4557 	struct dp_peer *bss_peer = NULL;
4558 	struct dp_vdev *vdev = peer->vdev;
4559 	struct dp_pdev *pdev = vdev->pdev;
4560 	struct dp_soc *soc = pdev->soc;
4561 	struct dp_peer *tmppeer;
4562 	int found = 0;
4563 	uint16_t peer_id;
4564 	uint16_t vdev_id;
4565 
4566 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
4567 		  "%s: peer %pK ref_cnt(before decrement): %d\n", __func__,
4568 		  peer, qdf_atomic_read(&peer->ref_cnt));
4569 	/*
4570 	 * Hold the lock all the way from checking if the peer ref count
4571 	 * is zero until the peer references are removed from the hash
4572 	 * table and vdev list (if the peer ref count is zero).
4573 	 * This protects against a new HL tx operation starting to use the
4574 	 * peer object just after this function concludes it's done being used.
4575 	 * Furthermore, the lock needs to be held while checking whether the
4576 	 * vdev's list of peers is empty, to make sure that list is not modified
4577 	 * concurrently with the empty check.
4578 	 */
4579 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
4580 	if (qdf_atomic_dec_and_test(&peer->ref_cnt)) {
4581 		peer_id = peer->peer_ids[0];
4582 		vdev_id = vdev->vdev_id;
4583 
4584 		/*
4585 		 * Make sure that the reference to the peer in
4586 		 * peer object map is removed
4587 		 */
4588 		if (peer_id != HTT_INVALID_PEER)
4589 			soc->peer_id_to_obj_map[peer_id] = NULL;
4590 
4591 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
4592 			"Deleting peer %pK (%pM)", peer, peer->mac_addr.raw);
4593 
4594 		/* remove the reference to the peer from the hash table */
4595 		dp_peer_find_hash_remove(soc, peer);
4596 
4597 		qdf_spin_lock_bh(&soc->ast_lock);
4598 		if (peer->self_ast_entry) {
4599 			dp_peer_del_ast(soc, peer->self_ast_entry);
4600 			peer->self_ast_entry = NULL;
4601 		}
4602 		qdf_spin_unlock_bh(&soc->ast_lock);
4603 
4604 		TAILQ_FOREACH(tmppeer, &peer->vdev->peer_list, peer_list_elem) {
4605 			if (tmppeer == peer) {
4606 				found = 1;
4607 				break;
4608 			}
4609 		}
4610 		if (found) {
4611 			TAILQ_REMOVE(&peer->vdev->peer_list, peer,
4612 				peer_list_elem);
4613 		} else {
4614 			/* Ignoring the remove operation as peer not found */
4615 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
4616 				  "peer:%pK not found in vdev:%pK peerlist:%pK",
4617 				  peer, vdev, &peer->vdev->peer_list);
4618 		}
4619 
4620 		/* cleanup the peer data */
4621 		dp_peer_cleanup(vdev, peer);
4622 
4623 		/* check whether the parent vdev has no peers left */
4624 		if (TAILQ_EMPTY(&vdev->peer_list)) {
4625 			/*
4626 			 * Now that there are no references to the peer, we can
4627 			 * release the peer reference lock.
4628 			 */
4629 			qdf_spin_unlock_bh(&soc->peer_ref_mutex);
4630 			/*
4631 			 * Check if the parent vdev was waiting for its peers
4632 			 * to be deleted, in order for it to be deleted too.
4633 			 */
4634 			if (vdev->delete.pending) {
4635 				ol_txrx_vdev_delete_cb vdev_delete_cb =
4636 					vdev->delete.callback;
4637 				void *vdev_delete_context =
4638 					vdev->delete.context;
4639 
4640 				QDF_TRACE(QDF_MODULE_ID_DP,
4641 					QDF_TRACE_LEVEL_INFO_HIGH,
4642 					FL("deleting vdev object %pK (%pM)"
4643 					" - its last peer is done"),
4644 					vdev, vdev->mac_addr.raw);
4645 				/* all peers are gone, go ahead and delete it */
4646 				dp_tx_flow_pool_unmap_handler(pdev, vdev_id,
4647 								FLOW_TYPE_VDEV,
4648 								vdev_id);
4649 				dp_tx_vdev_detach(vdev);
4650 				QDF_TRACE(QDF_MODULE_ID_DP,
4651 					QDF_TRACE_LEVEL_INFO_HIGH,
4652 					FL("deleting vdev object %pK (%pM)"),
4653 					vdev, vdev->mac_addr.raw);
4654 
4655 				qdf_mem_free(vdev);
4656 				vdev = NULL;
4657 				if (vdev_delete_cb)
4658 					vdev_delete_cb(vdev_delete_context);
4659 			}
4660 		} else {
4661 			qdf_spin_unlock_bh(&soc->peer_ref_mutex);
4662 		}
4663 
4664 		if (vdev) {
4665 			if (vdev->vap_bss_peer == peer) {
4666 				vdev->vap_bss_peer = NULL;
4667 			}
4668 		}
4669 
4670 		if (soc->cdp_soc.ol_ops->peer_unref_delete) {
4671 			soc->cdp_soc.ol_ops->peer_unref_delete(pdev->ctrl_pdev,
4672 					vdev_id, peer->mac_addr.raw);
4673 		}
4674 
4675 		if (!vdev || !vdev->vap_bss_peer) {
4676 			goto free_peer;
4677 		}
4678 
4679 #ifdef notyet
4680 		qdf_mempool_free(soc->osdev, soc->mempool_ol_ath_peer, peer);
4681 #else
4682 		bss_peer = vdev->vap_bss_peer;
4683 		DP_UPDATE_STATS(vdev, peer);
4684 
4685 free_peer:
4686 		qdf_mem_free(peer);
4687 
4688 #endif
4689 	} else {
4690 		qdf_spin_unlock_bh(&soc->peer_ref_mutex);
4691 	}
4692 }
4693 
4694 /*
4695  * dp_peer_delete_wifi3() - Detach txrx peer
4696  * @peer_handle: Datapath peer handle
4697  * @bitmap: bitmap indicating special handling of request.
4698  *
4699  */
4700 static void dp_peer_delete_wifi3(void *peer_handle, uint32_t bitmap)
4701 {
4702 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
4703 
4704 	/* redirect the peer's rx delivery function to point to a
4705 	 * discard func
4706 	 */
4707 
4708 	peer->rx_opt_proc = dp_rx_discard;
4709 	peer->ctrl_peer = NULL;
4710 
4711 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
4712 		FL("peer %pK (%pM)"),  peer, peer->mac_addr.raw);
4713 
4714 	dp_local_peer_id_free(peer->vdev->pdev, peer);
4715 	qdf_spinlock_destroy(&peer->peer_info_lock);
4716 
4717 	/*
4718 	 * Remove the reference added during peer_attach.
4719 	 * The peer will still be left allocated until the
4720 	 * PEER_UNMAP message arrives to remove the other
4721 	 * reference, added by the PEER_MAP message.
4722 	 */
4723 	dp_peer_unref_delete(peer_handle);
4724 }
4725 
4726 /*
4727  * dp_get_vdev_mac_addr_wifi3() - get vdev MAC address
4728  * @pvdev:		Datapath VDEV handle
4729  *
4730  */
4731 static uint8 *dp_get_vdev_mac_addr_wifi3(struct cdp_vdev *pvdev)
4732 {
4733 	struct dp_vdev *vdev = (struct dp_vdev *)pvdev;
4734 	return vdev->mac_addr.raw;
4735 }
4736 
4737 /*
4738  * dp_vdev_set_wds() - Enable/Disable WDS on the vdev
4739  * @vdev_handle: DP VDEV handle
4740  * @val: WDS enable/disable value
4741  *
4742  * Return: 0 on success
4743  */
4744 static int dp_vdev_set_wds(void *vdev_handle, uint32_t val)
4745 {
4746 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
4747 
4748 	vdev->wds_enabled = val;
4749 	return 0;
4750 }
4751 
4752 /*
4753  * dp_get_vdev_from_vdev_id_wifi3() - get vdev from vdev_id
4754  * @dev: Datapath PDEV handle
4754  * @vdev_id: vdev id of the vdev to be fetched
4755  *
4756  */
4757 static struct cdp_vdev *dp_get_vdev_from_vdev_id_wifi3(struct cdp_pdev *dev,
4758 						uint8_t vdev_id)
4759 {
4760 	struct dp_pdev *pdev = (struct dp_pdev *)dev;
4761 	struct dp_vdev *vdev = NULL;
4762 
4763 	if (qdf_unlikely(!pdev))
4764 		return NULL;
4765 
4766 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
4767 	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
4768 		if (vdev->vdev_id == vdev_id)
4769 			break;
4770 	}
4771 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
4772 
4773 	return (struct cdp_vdev *)vdev;
4774 }
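
/*
 * Illustrative usage sketch (not part of the driver; compiled out): vdev_id
 * lookup through the handler above. The example_* name is hypothetical.
 */
#if 0
static bool example_vdev_id_exists(struct cdp_pdev *dev, uint8_t vdev_id)
{
	return dp_get_vdev_from_vdev_id_wifi3(dev, vdev_id) != NULL;
}
#endif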
4775 
4776 static int dp_get_opmode(struct cdp_vdev *vdev_handle)
4777 {
4778 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
4779 
4780 	return vdev->opmode;
4781 }
4782 
4783 static
4784 void dp_get_os_rx_handles_from_vdev_wifi3(struct cdp_vdev *pvdev,
4785 					  ol_txrx_rx_fp *stack_fn_p,
4786 					  ol_osif_vdev_handle *osif_vdev_p)
4787 {
4788 	struct dp_vdev *vdev = dp_get_dp_vdev_from_cdp_vdev(pvdev);
4789 
4790 	qdf_assert(vdev);
4791 	*stack_fn_p = vdev->osif_rx_stack;
4792 	*osif_vdev_p = vdev->osif_vdev;
4793 }
4794 
4795 static struct cdp_cfg *dp_get_ctrl_pdev_from_vdev_wifi3(struct cdp_vdev *pvdev)
4796 {
4797 	struct dp_vdev *vdev = (struct dp_vdev *)pvdev;
4798 	struct dp_pdev *pdev = vdev->pdev;
4799 
4800 	return (struct cdp_cfg *)pdev->wlan_cfg_ctx;
4801 }
4802 
4803 /**
4804  * dp_reset_monitor_mode() - Disable monitor mode
4805  * @pdev_handle: Datapath PDEV handle
4806  *
4807  * Return: 0 on success, not 0 on failure
4808  */
4809 static int dp_reset_monitor_mode(struct cdp_pdev *pdev_handle)
4810 {
4811 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
4812 	struct htt_rx_ring_tlv_filter htt_tlv_filter;
4813 	struct dp_soc *soc = pdev->soc;
4814 	uint8_t pdev_id;
4815 	int mac_id;
4816 
4817 	pdev_id = pdev->pdev_id;
4818 	soc = pdev->soc;
4819 
4820 	qdf_spin_lock_bh(&pdev->mon_lock);
4821 
4822 	qdf_mem_set(&(htt_tlv_filter), sizeof(htt_tlv_filter), 0x0);
4823 
4824 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
4825 		int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, pdev_id);
4826 
4827 		htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
4828 			pdev->rxdma_mon_buf_ring[mac_id].hal_srng,
4829 			RXDMA_MONITOR_BUF, RX_BUFFER_SIZE, &htt_tlv_filter);
4830 
4831 		htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
4832 			pdev->rxdma_mon_status_ring[mac_id].hal_srng,
4833 			RXDMA_MONITOR_STATUS, RX_BUFFER_SIZE, &htt_tlv_filter);
4834 	}
4835 
4836 	pdev->monitor_vdev = NULL;
4837 
4838 	qdf_spin_unlock_bh(&pdev->mon_lock);
4839 
4840 	return 0;
4841 }
4842 
4843 /**
4844  * dp_set_nac() - set peer_nac
4845  * @peer_handle: Datapath PEER handle
4846  *
4847  * Return: void
4848  */
4849 static void dp_set_nac(struct cdp_peer *peer_handle)
4850 {
4851 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
4852 
4853 	peer->nac = 1;
4854 }
4855 
4856 /**
4857  * dp_get_tx_pending() - read pending tx
4858  * @pdev_handle: Datapath PDEV handle
4859  *
4860  * Return: outstanding tx
4861  */
4862 static int dp_get_tx_pending(struct cdp_pdev *pdev_handle)
4863 {
4864 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
4865 
4866 	return qdf_atomic_read(&pdev->num_tx_outstanding);
4867 }
4868 
4869 /**
4870  * dp_get_peer_mac_from_peer_id() - get peer mac
4871  * @pdev_handle: Datapath PDEV handle
4872  * @peer_id: Peer ID
4873  * @peer_mac: MAC addr of PEER
4874  *
4875  * Return: void
4876  */
4877 static void dp_get_peer_mac_from_peer_id(struct cdp_pdev *pdev_handle,
4878 	uint32_t peer_id, uint8_t *peer_mac)
4879 {
4880 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
4881 	struct dp_peer *peer;
4882 
4883 	if (pdev && peer_mac) {
4884 		peer = dp_peer_find_by_id(pdev->soc, (uint16_t)peer_id);
4885 		if (peer && peer->mac_addr.raw) {
4886 			qdf_mem_copy(peer_mac, peer->mac_addr.raw,
4887 					DP_MAC_ADDR_LEN);
4888 		}
4889 	}
4890 }
4891 
4892 /**
4893  * dp_vdev_set_monitor_mode() - Set DP VDEV to monitor mode
4894  * @vdev_handle: Datapath VDEV handle
4895  * @smart_monitor: Flag to denote if its smart monitor mode
4896  *
4897  * Return: 0 on success, not 0 on failure
4898  */
4899 static int dp_vdev_set_monitor_mode(struct cdp_vdev *vdev_handle,
4900 		uint8_t smart_monitor)
4901 {
4902 	/* Many monitor VAPs can exist in a system but only one can be up at
4903 	 * any time
4904 	 */
4905 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
4906 	struct dp_pdev *pdev;
4907 	struct htt_rx_ring_tlv_filter htt_tlv_filter;
4908 	struct dp_soc *soc;
4909 	uint8_t pdev_id;
4910 	int mac_id;
4911 
4912 	qdf_assert(vdev);
4913 
4914 	pdev = vdev->pdev;
4915 	pdev_id = pdev->pdev_id;
4916 	soc = pdev->soc;
4917 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_WARN,
4918 		"pdev=%pK, pdev_id=%d, soc=%pK vdev=%pK",
4919 		pdev, pdev_id, soc, vdev);
4920 
4921 	/* Check if current pdev's monitor_vdev exists */
4922 	if (pdev->monitor_vdev) {
4923 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
4924 			"vdev=%pK", vdev);
4925 		qdf_assert(vdev);
4926 	}
4927 
4928 	pdev->monitor_vdev = vdev;
4929 
4930 	/* If smart monitor mode, do not configure monitor ring */
4931 	if (smart_monitor)
4932 		return QDF_STATUS_SUCCESS;
4933 
4934 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
4935 		"MODE[%x] FP[%02x|%02x|%02x] MO[%02x|%02x|%02x]",
4936 		pdev->mon_filter_mode, pdev->fp_mgmt_filter,
4937 		pdev->fp_ctrl_filter, pdev->fp_data_filter,
4938 		pdev->mo_mgmt_filter, pdev->mo_ctrl_filter,
4939 		pdev->mo_data_filter);
4940 
4941 	qdf_mem_set(&(htt_tlv_filter), sizeof(htt_tlv_filter), 0x0);
4942 
4943 	htt_tlv_filter.mpdu_start = 1;
4944 	htt_tlv_filter.msdu_start = 1;
4945 	htt_tlv_filter.packet = 1;
4946 	htt_tlv_filter.msdu_end = 1;
4947 	htt_tlv_filter.mpdu_end = 1;
4948 	htt_tlv_filter.packet_header = 1;
4949 	htt_tlv_filter.attention = 1;
4950 	htt_tlv_filter.ppdu_start = 0;
4951 	htt_tlv_filter.ppdu_end = 0;
4952 	htt_tlv_filter.ppdu_end_user_stats = 0;
4953 	htt_tlv_filter.ppdu_end_user_stats_ext = 0;
4954 	htt_tlv_filter.ppdu_end_status_done = 0;
4955 	htt_tlv_filter.header_per_msdu = 1;
4956 	htt_tlv_filter.enable_fp =
4957 		(pdev->mon_filter_mode & MON_FILTER_PASS) ? 1 : 0;
4958 	htt_tlv_filter.enable_md = 0;
4959 	htt_tlv_filter.enable_mo =
4960 		(pdev->mon_filter_mode & MON_FILTER_OTHER) ? 1 : 0;
4961 	htt_tlv_filter.fp_mgmt_filter = pdev->fp_mgmt_filter;
4962 	htt_tlv_filter.fp_ctrl_filter = pdev->fp_ctrl_filter;
4963 	htt_tlv_filter.fp_data_filter = pdev->fp_data_filter;
4964 	htt_tlv_filter.mo_mgmt_filter = pdev->mo_mgmt_filter;
4965 	htt_tlv_filter.mo_ctrl_filter = pdev->mo_ctrl_filter;
4966 	htt_tlv_filter.mo_data_filter = pdev->mo_data_filter;
4967 
4968 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
4969 		int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, pdev_id);
4970 
4971 		htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
4972 			pdev->rxdma_mon_buf_ring[mac_id].hal_srng,
4973 			RXDMA_MONITOR_BUF, RX_BUFFER_SIZE, &htt_tlv_filter);
4974 	}
4975 
4976 	qdf_mem_set(&(htt_tlv_filter), sizeof(htt_tlv_filter), 0x0);
4977 
4978 	htt_tlv_filter.mpdu_start = 1;
4979 	htt_tlv_filter.msdu_start = 0;
4980 	htt_tlv_filter.packet = 0;
4981 	htt_tlv_filter.msdu_end = 0;
4982 	htt_tlv_filter.mpdu_end = 0;
4983 	htt_tlv_filter.attention = 0;
4984 	htt_tlv_filter.ppdu_start = 1;
4985 	htt_tlv_filter.ppdu_end = 1;
4986 	htt_tlv_filter.ppdu_end_user_stats = 1;
4987 	htt_tlv_filter.ppdu_end_user_stats_ext = 1;
4988 	htt_tlv_filter.ppdu_end_status_done = 1;
4989 	htt_tlv_filter.enable_fp = 1;
4990 	htt_tlv_filter.enable_md = 0;
4991 	htt_tlv_filter.enable_mo = 1;
4992 	if (pdev->mcopy_mode) {
4993 		htt_tlv_filter.packet_header = 1;
4994 	}
4995 	htt_tlv_filter.fp_mgmt_filter = FILTER_MGMT_ALL;
4996 	htt_tlv_filter.fp_ctrl_filter = FILTER_CTRL_ALL;
4997 	htt_tlv_filter.fp_data_filter = FILTER_DATA_ALL;
4998 	htt_tlv_filter.mo_mgmt_filter = FILTER_MGMT_ALL;
4999 	htt_tlv_filter.mo_ctrl_filter = FILTER_CTRL_ALL;
5000 	htt_tlv_filter.mo_data_filter = FILTER_DATA_ALL;
5001 
5002 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
5003 		int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id,
5004 						pdev->pdev_id);
5005 
5006 		htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
5007 			pdev->rxdma_mon_status_ring[mac_id].hal_srng,
5008 			RXDMA_MONITOR_STATUS, RX_BUFFER_SIZE, &htt_tlv_filter);
5009 	}
5010 
5011 	return QDF_STATUS_SUCCESS;
5012 }
5013 
5014 /**
5015  * dp_pdev_set_advance_monitor_filter() - Set DP PDEV monitor filter
5016  * @pdev_handle: Datapath PDEV handle
5017  * @filter_val: Filter settings to be applied in monitor mode
5018  *
5018  * Return: 0 on success, not 0 on failure
5019  */
5020 static int dp_pdev_set_advance_monitor_filter(struct cdp_pdev *pdev_handle,
5021 	struct cdp_monitor_filter *filter_val)
5022 {
5023 	/* Many monitor VAPs can exist in a system but only one can be up at
5024 	 * any time
5025 	 */
5026 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
5027 	struct dp_vdev *vdev = pdev->monitor_vdev;
5028 	struct htt_rx_ring_tlv_filter htt_tlv_filter;
5029 	struct dp_soc *soc;
5030 	uint8_t pdev_id;
5031 	int mac_id;
5032 
5033 	pdev_id = pdev->pdev_id;
5034 	soc = pdev->soc;
5035 
5036 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_WARN,
5037 		"pdev=%pK, pdev_id=%d, soc=%pK vdev=%pK",
5038 		pdev, pdev_id, soc, vdev);
5039 
5040 	/*Check if current pdev's monitor_vdev exists */
5041 	/* Check if current pdev's monitor_vdev exists */
5042 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
5043 			"vdev=%pK", vdev);
5044 		qdf_assert(vdev);
5045 	}
5046 
5047 	/* update filter mode, type in pdev structure */
5048 	pdev->mon_filter_mode = filter_val->mode;
5049 	pdev->fp_mgmt_filter = filter_val->fp_mgmt;
5050 	pdev->fp_ctrl_filter = filter_val->fp_ctrl;
5051 	pdev->fp_data_filter = filter_val->fp_data;
5052 	pdev->mo_mgmt_filter = filter_val->mo_mgmt;
5053 	pdev->mo_ctrl_filter = filter_val->mo_ctrl;
5054 	pdev->mo_data_filter = filter_val->mo_data;
5055 
5056 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
5057 		"MODE[%x] FP[%02x|%02x|%02x] MO[%02x|%02x|%02x]",
5058 		pdev->mon_filter_mode, pdev->fp_mgmt_filter,
5059 		pdev->fp_ctrl_filter, pdev->fp_data_filter,
5060 		pdev->mo_mgmt_filter, pdev->mo_ctrl_filter,
5061 		pdev->mo_data_filter);
5062 
5063 	qdf_mem_set(&(htt_tlv_filter), sizeof(htt_tlv_filter), 0x0);
5064 
5065 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
5066 		int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, pdev_id);
5067 
5068 		htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
5069 			pdev->rxdma_mon_buf_ring[mac_id].hal_srng,
5070 			RXDMA_MONITOR_BUF, RX_BUFFER_SIZE, &htt_tlv_filter);
5071 
5072 		htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
5073 			pdev->rxdma_mon_status_ring[mac_id].hal_srng,
5074 			RXDMA_MONITOR_STATUS, RX_BUFFER_SIZE, &htt_tlv_filter);
5075 	}
5076 
5077 	htt_tlv_filter.mpdu_start = 1;
5078 	htt_tlv_filter.msdu_start = 1;
5079 	htt_tlv_filter.packet = 1;
5080 	htt_tlv_filter.msdu_end = 1;
5081 	htt_tlv_filter.mpdu_end = 1;
5082 	htt_tlv_filter.packet_header = 1;
5083 	htt_tlv_filter.attention = 1;
5084 	htt_tlv_filter.ppdu_start = 0;
5085 	htt_tlv_filter.ppdu_end = 0;
5086 	htt_tlv_filter.ppdu_end_user_stats = 0;
5087 	htt_tlv_filter.ppdu_end_user_stats_ext = 0;
5088 	htt_tlv_filter.ppdu_end_status_done = 0;
5089 	htt_tlv_filter.header_per_msdu = 1;
5090 	htt_tlv_filter.enable_fp =
5091 		(pdev->mon_filter_mode & MON_FILTER_PASS) ? 1 : 0;
5092 	htt_tlv_filter.enable_md = 0;
5093 	htt_tlv_filter.enable_mo =
5094 		(pdev->mon_filter_mode & MON_FILTER_OTHER) ? 1 : 0;
5095 	htt_tlv_filter.fp_mgmt_filter = pdev->fp_mgmt_filter;
5096 	htt_tlv_filter.fp_ctrl_filter = pdev->fp_ctrl_filter;
5097 	htt_tlv_filter.fp_data_filter = pdev->fp_data_filter;
5098 	htt_tlv_filter.mo_mgmt_filter = pdev->mo_mgmt_filter;
5099 	htt_tlv_filter.mo_ctrl_filter = pdev->mo_ctrl_filter;
5100 	htt_tlv_filter.mo_data_filter = pdev->mo_data_filter;
5101 
5102 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
5103 		int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, pdev_id);
5104 
5105 		htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
5106 			pdev->rxdma_mon_buf_ring[mac_id].hal_srng,
5107 			RXDMA_MONITOR_BUF, RX_BUFFER_SIZE, &htt_tlv_filter);
5108 	}
5109 
5110 	qdf_mem_set(&(htt_tlv_filter), sizeof(htt_tlv_filter), 0x0);
5111 
5112 	htt_tlv_filter.mpdu_start = 1;
5113 	htt_tlv_filter.msdu_start = 0;
5114 	htt_tlv_filter.packet = 0;
5115 	htt_tlv_filter.msdu_end = 0;
5116 	htt_tlv_filter.mpdu_end = 0;
5117 	htt_tlv_filter.attention = 0;
5118 	htt_tlv_filter.ppdu_start = 1;
5119 	htt_tlv_filter.ppdu_end = 1;
5120 	htt_tlv_filter.ppdu_end_user_stats = 1;
5121 	htt_tlv_filter.ppdu_end_user_stats_ext = 1;
5122 	htt_tlv_filter.ppdu_end_status_done = 1;
5123 	htt_tlv_filter.enable_fp = 1;
5124 	htt_tlv_filter.enable_md = 0;
5125 	htt_tlv_filter.enable_mo = 1;
5126 	if (pdev->mcopy_mode) {
5127 		htt_tlv_filter.packet_header = 1;
5128 	}
5129 	htt_tlv_filter.fp_mgmt_filter = FILTER_MGMT_ALL;
5130 	htt_tlv_filter.fp_ctrl_filter = FILTER_CTRL_ALL;
5131 	htt_tlv_filter.fp_data_filter = FILTER_DATA_ALL;
5132 	htt_tlv_filter.mo_mgmt_filter = FILTER_MGMT_ALL;
5133 	htt_tlv_filter.mo_ctrl_filter = FILTER_CTRL_ALL;
5134 	htt_tlv_filter.mo_data_filter = FILTER_DATA_ALL;
5135 
5136 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
5137 		int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id,
5138 						pdev->pdev_id);
5139 
5140 		htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
5141 			pdev->rxdma_mon_status_ring[mac_id].hal_srng,
5142 			RXDMA_MONITOR_STATUS, RX_BUFFER_SIZE, &htt_tlv_filter);
5143 	}
5144 
5145 	return QDF_STATUS_SUCCESS;
5146 }
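
/*
 * Illustrative usage sketch (not part of the driver; compiled out): an
 * advanced monitor filter that passes only filter-pass management frames.
 * The example_* name and the chosen filter are hypothetical.
 */
#if 0
static int example_monitor_mgmt_only(struct cdp_pdev *pdev_handle)
{
	struct cdp_monitor_filter filter;

	qdf_mem_set(&filter, sizeof(filter), 0x0);

	filter.mode = MON_FILTER_PASS;
	filter.fp_mgmt = FILTER_MGMT_ALL;

	return dp_pdev_set_advance_monitor_filter(pdev_handle, &filter);
}
#endif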
5147 
5148 /**
5149  * dp_get_pdev_id_frm_pdev() - get pdev_id
5150  * @pdev_handle: Datapath PDEV handle
5151  *
5152  * Return: pdev_id
5153  */
5154 static
5155 uint8_t dp_get_pdev_id_frm_pdev(struct cdp_pdev *pdev_handle)
5156 {
5157 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
5158 
5159 	return pdev->pdev_id;
5160 }
5161 
5162 /**
5163  * dp_pdev_set_chan_noise_floor() - set channel noise floor
5164  * @pdev_handle: Datapath PDEV handle
5165  * @chan_noise_floor: Channel Noise Floor
5166  *
5167  * Return: void
5168  */
5169 static
5170 void dp_pdev_set_chan_noise_floor(struct cdp_pdev *pdev_handle,
5171 				  int16_t chan_noise_floor)
5172 {
5173 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
5174 
5175 	pdev->chan_noise_floor = chan_noise_floor;
5176 }
5177 
5178 /**
5179  * dp_vdev_get_filter_ucast_data() - get DP VDEV monitor ucast filter
5180  * @vdev_handle: Datapath VDEV handle
5181  * Return: true if a ucast data filter flag is set
5182  */
5183 static bool dp_vdev_get_filter_ucast_data(struct cdp_vdev *vdev_handle)
5184 {
5185 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
5186 	struct dp_pdev *pdev;
5187 
5188 	pdev = vdev->pdev;
5189 
5190 	if ((pdev->fp_data_filter & FILTER_DATA_UCAST) ||
5191 	    (pdev->mo_data_filter & FILTER_DATA_UCAST))
5192 		return true;
5193 
5194 	return false;
5195 }
5196 
5197 /**
5198  * dp_vdev_get_filter_mcast_data() - get DP VDEV monitor mcast filter
5199  * @vdev_handle: Datapath VDEV handle
5200  * Return: true if a mcast data filter flag is set
5201  */
5202 static bool dp_vdev_get_filter_mcast_data(struct cdp_vdev *vdev_handle)
5203 {
5204 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
5205 	struct dp_pdev *pdev;
5206 
5207 	pdev = vdev->pdev;
5208 
5209 	if ((pdev->fp_data_filter & FILTER_DATA_MCAST) ||
5210 	    (pdev->mo_data_filter & FILTER_DATA_MCAST))
5211 		return true;
5212 
5213 	return false;
5214 }
5215 
5216 /**
5217  * dp_vdev_get_filter_non_data() - get DP VDEV monitor non_data filter
5218  * @vdev_handle: Datapath VDEV handle
5219  * Return: true if a non-data (mgmt/ctrl) filter flag is set
5220  */
5221 static bool dp_vdev_get_filter_non_data(struct cdp_vdev *vdev_handle)
5222 {
5223 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
5224 	struct dp_pdev *pdev;
5225 
5226 	pdev = vdev->pdev;
5227 
5228 	if ((pdev->fp_mgmt_filter & FILTER_MGMT_ALL) ||
5229 	    (pdev->mo_mgmt_filter & FILTER_MGMT_ALL)) {
5230 		if ((pdev->fp_ctrl_filter & FILTER_CTRL_ALL) ||
5231 		    (pdev->mo_ctrl_filter & FILTER_CTRL_ALL)) {
5232 			return true;
5233 		}
5234 	}
5235 
5236 	return false;
5237 }
5238 
5239 #ifdef MESH_MODE_SUPPORT
5240 void dp_peer_set_mesh_mode(struct cdp_vdev *vdev_hdl, uint32_t val)
5241 {
5242 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_hdl;
5243 
5244 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
5245 		FL("val %d"), val);
5246 	vdev->mesh_vdev = val;
5247 }
5248 
5249 /*
5250  * dp_peer_set_mesh_rx_filter() - to set the mesh rx filter
5251  * @vdev_hdl: virtual device object
5252  * @val: value to be set
5253  *
5254  * Return: void
5255  */
5256 void dp_peer_set_mesh_rx_filter(struct cdp_vdev *vdev_hdl, uint32_t val)
5257 {
5258 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_hdl;
5259 
5260 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
5261 		FL("val %d"), val);
5262 	vdev->mesh_rx_filter = val;
5263 }
5264 #endif
5265 
5266 /*
5267  * dp_aggregate_pdev_ctrl_frames_stats() - function to aggregate peer stats
5268  * Current scope is BAR received count
5269  *
5270  * @pdev_handle: DP_PDEV handle
5271  *
5272  * Return: void
5273  */
5274 #define STATS_PROC_TIMEOUT        (HZ/1000)
5275 
5276 static void
5277 dp_aggregate_pdev_ctrl_frames_stats(struct dp_pdev *pdev)
5278 {
5279 	struct dp_vdev *vdev;
5280 	struct dp_peer *peer;
5281 	uint32_t waitcnt;
5282 
5283 	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
5284 		TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
5285 			if (!peer) {
5286 				QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
5287 					FL("DP Invalid Peer reference"));
5288 				return;
5289 			}
5290 
5291 			if (peer->delete_in_progress) {
5292 				QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
5293 					FL("DP Peer deletion in progress"));
5294 				continue;
5295 			}
5296 
5297 			qdf_atomic_inc(&peer->ref_cnt);
5298 			waitcnt = 0;
5299 			dp_peer_rxtid_stats(peer, dp_rx_bar_stats_cb, pdev);
5300 			while (!(qdf_atomic_read(&(pdev->stats_cmd_complete)))
5301 				&& waitcnt < 10) {
5302 				schedule_timeout_interruptible(
5303 						STATS_PROC_TIMEOUT);
5304 				waitcnt++;
5305 			}
5306 			qdf_atomic_set(&(pdev->stats_cmd_complete), 0);
5307 			dp_peer_unref_delete(peer);
5308 		}
5309 	}
5310 }
5311 
5312 /**
5313  * dp_rx_bar_stats_cb(): BAR received stats callback
5314  * @soc: SOC handle
5315  * @cb_ctxt: Call back context
5316  * @reo_status: Reo status
5317  *
5318  * return: void
5319  */
5320 void dp_rx_bar_stats_cb(struct dp_soc *soc, void *cb_ctxt,
5321 	union hal_reo_status *reo_status)
5322 {
5323 	struct dp_pdev *pdev = (struct dp_pdev *)cb_ctxt;
5324 	struct hal_reo_queue_status *queue_status = &(reo_status->queue_status);
5325 
5326 	if (queue_status->header.status != HAL_REO_CMD_SUCCESS) {
5327 		DP_TRACE_STATS(FATAL, "REO stats failure %d\n",
5328 			queue_status->header.status);
5329 		qdf_atomic_set(&(pdev->stats_cmd_complete), 1);
5330 		return;
5331 	}
5332 
5333 	pdev->stats.rx.bar_recv_cnt += queue_status->bar_rcvd_cnt;
5334 	qdf_atomic_set(&(pdev->stats_cmd_complete), 1);
5335 
5336 }
5337 
5338 /**
5339  * dp_aggregate_vdev_stats(): Consolidate stats at VDEV level
5340  * @vdev: DP VDEV handle
 * @vdev_stats: buffer to hold the consolidated vdev stats
5341  *
5342  * return: void
5343  */
5344 void dp_aggregate_vdev_stats(struct dp_vdev *vdev,
5345 			     struct cdp_vdev_stats *vdev_stats)
5346 {
5347 	struct dp_peer *peer = NULL;
5348 	struct dp_soc *soc = vdev->pdev->soc;
5349 
5350 	qdf_mem_copy(vdev_stats, &vdev->stats, sizeof(vdev->stats));
5351 
5352 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem)
5353 		dp_update_vdev_stats(vdev_stats, peer);
5354 
5355 	if (soc->cdp_soc.ol_ops->update_dp_stats)
5356 		soc->cdp_soc.ol_ops->update_dp_stats(vdev->pdev->ctrl_pdev,
5357 			&vdev->stats, (uint16_t) vdev->vdev_id,
5358 			UPDATE_VDEV_STATS);
5359 
5360 }
5361 
5362 /**
5363  * dp_aggregate_pdev_stats(): Consolidate stats at PDEV level
5364  * @pdev: DP PDEV handle
5365  *
5366  * return: void
5367  */
5368 static inline void dp_aggregate_pdev_stats(struct dp_pdev *pdev)
5369 {
5370 	struct dp_vdev *vdev = NULL;
5371 	struct dp_soc *soc = pdev->soc;
5372 	struct cdp_vdev_stats *vdev_stats =
5373 			qdf_mem_malloc(sizeof(struct cdp_vdev_stats));
5374 
5375 	if (!vdev_stats) {
5376 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
5377 			  "DP alloc failure - unable to allocate vdev stats");
5378 		return;
5379 	}
5380 
5381 	qdf_mem_set(&(pdev->stats.tx), sizeof(pdev->stats.tx), 0x0);
5382 	qdf_mem_set(&(pdev->stats.rx), sizeof(pdev->stats.rx), 0x0);
5383 	qdf_mem_set(&(pdev->stats.tx_i), sizeof(pdev->stats.tx_i), 0x0);
5384 
5385 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
5386 	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
5387 
5388 		dp_aggregate_vdev_stats(vdev, vdev_stats);
5389 		dp_update_pdev_stats(pdev, vdev_stats);
5390 
5391 		DP_STATS_AGGR_PKT(pdev, vdev, tx_i.nawds_mcast);
5392 
5393 		DP_STATS_AGGR_PKT(pdev, vdev, tx_i.rcvd);
5394 		DP_STATS_AGGR_PKT(pdev, vdev, tx_i.processed);
5395 		DP_STATS_AGGR_PKT(pdev, vdev, tx_i.reinject_pkts);
5396 		DP_STATS_AGGR_PKT(pdev, vdev, tx_i.inspect_pkts);
5397 		DP_STATS_AGGR_PKT(pdev, vdev, tx_i.raw.raw_pkt);
5398 		DP_STATS_AGGR(pdev, vdev, tx_i.raw.dma_map_error);
5399 		DP_STATS_AGGR_PKT(pdev, vdev, tx_i.tso.tso_pkt);
5400 		DP_STATS_AGGR(pdev, vdev, tx_i.tso.dropped_host.num);
5401 		DP_STATS_AGGR(pdev, vdev, tx_i.tso.dropped_target);
5402 		DP_STATS_AGGR(pdev, vdev, tx_i.sg.dropped_host.num);
5403 		DP_STATS_AGGR(pdev, vdev, tx_i.sg.dropped_target);
5404 		DP_STATS_AGGR_PKT(pdev, vdev, tx_i.sg.sg_pkt);
5405 		DP_STATS_AGGR_PKT(pdev, vdev, tx_i.mcast_en.mcast_pkt);
5406 		DP_STATS_AGGR(pdev, vdev,
5407 				tx_i.mcast_en.dropped_map_error);
5408 		DP_STATS_AGGR(pdev, vdev,
5409 				tx_i.mcast_en.dropped_self_mac);
5410 		DP_STATS_AGGR(pdev, vdev,
5411 				tx_i.mcast_en.dropped_send_fail);
5412 		DP_STATS_AGGR(pdev, vdev, tx_i.mcast_en.ucast);
5413 		DP_STATS_AGGR(pdev, vdev, tx_i.dropped.dma_error);
5414 		DP_STATS_AGGR(pdev, vdev, tx_i.dropped.ring_full);
5415 		DP_STATS_AGGR(pdev, vdev, tx_i.dropped.enqueue_fail);
5416 		DP_STATS_AGGR(pdev, vdev, tx_i.dropped.desc_na.num);
5417 		DP_STATS_AGGR(pdev, vdev, tx_i.dropped.res_full);
5418 		DP_STATS_AGGR(pdev, vdev, tx_i.cce_classified);
5419 		DP_STATS_AGGR(pdev, vdev, tx_i.cce_classified_raw);
5420 		DP_STATS_AGGR(pdev, vdev, tx_i.mesh.exception_fw);
5421 		DP_STATS_AGGR(pdev, vdev, tx_i.mesh.completion_fw);
5422 
5423 		pdev->stats.tx_i.dropped.dropped_pkt.num =
5424 			pdev->stats.tx_i.dropped.dma_error +
5425 			pdev->stats.tx_i.dropped.ring_full +
5426 			pdev->stats.tx_i.dropped.enqueue_fail +
5427 			pdev->stats.tx_i.dropped.desc_na.num +
5428 			pdev->stats.tx_i.dropped.res_full;
5429 
5430 		pdev->stats.tx.last_ack_rssi =
5431 			vdev->stats.tx.last_ack_rssi;
5432 		pdev->stats.tx_i.tso.num_seg =
5433 			vdev->stats.tx_i.tso.num_seg;
5434 	}
5435 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
5436 	qdf_mem_free(vdev_stats);
5437 
5438 	if (soc->cdp_soc.ol_ops->update_dp_stats)
5439 		soc->cdp_soc.ol_ops->update_dp_stats(pdev->ctrl_pdev,
5440 				&pdev->stats, pdev->pdev_id, UPDATE_PDEV_STATS);
5441 
5442 }
5443 
5444 /**
5445  * dp_vdev_getstats() - get vdev packet level stats
5446  * @vdev_handle: Datapath VDEV handle
5447  * @stats: cdp network device stats structure
5448  *
5449  * Return: void
5450  */
5451 static void dp_vdev_getstats(void *vdev_handle,
5452 		struct cdp_dev_stats *stats)
5453 {
5454 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
5455 	struct cdp_vdev_stats *vdev_stats =
5456 			qdf_mem_malloc(sizeof(struct cdp_vdev_stats));
5457 
5458 	if (!vdev_stats) {
5459 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
5460 			  "DP alloc failure - unable to allocate vdev stats");
5461 		return;
5462 	}
5463 
5464 	dp_aggregate_vdev_stats(vdev, vdev_stats);
5465 
5466 	stats->tx_packets = vdev_stats->tx_i.rcvd.num;
5467 	stats->tx_bytes = vdev_stats->tx_i.rcvd.bytes;
5468 
5469 	stats->tx_errors = vdev_stats->tx.tx_failed +
5470 		vdev_stats->tx_i.dropped.dropped_pkt.num;
5471 	stats->tx_dropped = stats->tx_errors;
5472 
5473 	stats->rx_packets = vdev_stats->rx.unicast.num +
5474 		vdev_stats->rx.multicast.num +
5475 		vdev_stats->rx.bcast.num;
5476 	stats->rx_bytes = vdev_stats->rx.unicast.bytes +
5477 		vdev_stats->rx.multicast.bytes +
5478 		vdev_stats->rx.bcast.bytes;
5479 
5480 }
5481 
5482 
5483 /**
5484  * dp_pdev_getstats() - get pdev packet level stats
5485  * @pdev_handle: Datapath PDEV handle
5486  * @stats: cdp network device stats structure
5487  *
5488  * Return: void
5489  */
5490 static void dp_pdev_getstats(void *pdev_handle,
5491 		struct cdp_dev_stats *stats)
5492 {
5493 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
5494 
5495 	dp_aggregate_pdev_stats(pdev);
5496 
5497 	stats->tx_packets = pdev->stats.tx_i.rcvd.num;
5498 	stats->tx_bytes = pdev->stats.tx_i.rcvd.bytes;
5499 
5500 	stats->tx_errors = pdev->stats.tx.tx_failed +
5501 		pdev->stats.tx_i.dropped.dropped_pkt.num;
5502 	stats->tx_dropped = stats->tx_errors;
5503 
5504 	stats->rx_packets = pdev->stats.rx.unicast.num +
5505 		pdev->stats.rx.multicast.num +
5506 		pdev->stats.rx.bcast.num;
5507 	stats->rx_bytes = pdev->stats.rx.unicast.bytes +
5508 		pdev->stats.rx.multicast.bytes +
5509 		pdev->stats.rx.bcast.bytes;
5510 }
5511 
5512 /**
5513  * dp_get_device_stats() - get interface level packet stats
5514  * @handle: device handle
5515  * @stats: cdp network device stats structure
5516  * @type: device type pdev/vdev
5517  *
5518  * Return: void
5519  */
5520 static void dp_get_device_stats(void *handle,
5521 		struct cdp_dev_stats *stats, uint8_t type)
5522 {
5523 	switch (type) {
5524 	case UPDATE_VDEV_STATS:
5525 		dp_vdev_getstats(handle, stats);
5526 		break;
5527 	case UPDATE_PDEV_STATS:
5528 		dp_pdev_getstats(handle, stats);
5529 		break;
5530 	default:
5531 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
5532 			"apstats cannot be updated for this input "
5533 			"type %d", type);
5534 		break;
5535 	}
5536 
5537 }
5538 
5539 
5540 /**
5541  * dp_print_pdev_tx_stats(): Print Pdev level TX stats
5542  * @pdev: DP_PDEV Handle
5543  *
5544  * Return:void
5545  */
5546 static inline void
5547 dp_print_pdev_tx_stats(struct dp_pdev *pdev)
5548 {
5549 	uint8_t index = 0;
5550 	DP_PRINT_STATS("PDEV Tx Stats:\n");
5551 	DP_PRINT_STATS("Received From Stack:");
5552 	DP_PRINT_STATS("	Packets = %d",
5553 			pdev->stats.tx_i.rcvd.num);
5554 	DP_PRINT_STATS("	Bytes = %llu",
5555 			pdev->stats.tx_i.rcvd.bytes);
5556 	DP_PRINT_STATS("Processed:");
5557 	DP_PRINT_STATS("	Packets = %d",
5558 			pdev->stats.tx_i.processed.num);
5559 	DP_PRINT_STATS("	Bytes = %llu",
5560 			pdev->stats.tx_i.processed.bytes);
5561 	DP_PRINT_STATS("Total Completions:");
5562 	DP_PRINT_STATS("	Packets = %u",
5563 			pdev->stats.tx.comp_pkt.num);
5564 	DP_PRINT_STATS("	Bytes = %llu",
5565 			pdev->stats.tx.comp_pkt.bytes);
5566 	DP_PRINT_STATS("Successful Completions:");
5567 	DP_PRINT_STATS("	Packets = %u",
5568 			pdev->stats.tx.tx_success.num);
5569 	DP_PRINT_STATS("	Bytes = %llu",
5570 			pdev->stats.tx.tx_success.bytes);
5571 	DP_PRINT_STATS("Dropped:");
5572 	DP_PRINT_STATS("	Total = %d",
5573 			pdev->stats.tx_i.dropped.dropped_pkt.num);
5574 	DP_PRINT_STATS("	Dma_map_error = %d",
5575 			pdev->stats.tx_i.dropped.dma_error);
5576 	DP_PRINT_STATS("	Ring Full = %d",
5577 			pdev->stats.tx_i.dropped.ring_full);
5578 	DP_PRINT_STATS("	Descriptor Not available = %d",
5579 			pdev->stats.tx_i.dropped.desc_na.num);
5580 	DP_PRINT_STATS("	HW enqueue failed = %d",
5581 			pdev->stats.tx_i.dropped.enqueue_fail);
5582 	DP_PRINT_STATS("	Resources Full = %d",
5583 			pdev->stats.tx_i.dropped.res_full);
5584 	DP_PRINT_STATS("	FW removed = %d",
5585 			pdev->stats.tx.dropped.fw_rem);
5586 	DP_PRINT_STATS("	FW removed transmitted = %d",
5587 			pdev->stats.tx.dropped.fw_rem_tx);
5588 	DP_PRINT_STATS("	FW removed untransmitted = %d",
5589 			pdev->stats.tx.dropped.fw_rem_notx);
5590 	DP_PRINT_STATS("	FW removed untransmitted fw_reason1 = %d",
5591 			pdev->stats.tx.dropped.fw_reason1);
5592 	DP_PRINT_STATS("	FW removed untransmitted fw_reason2 = %d",
5593 			pdev->stats.tx.dropped.fw_reason2);
5594 	DP_PRINT_STATS("	FW removed untransmitted fw_reason3 = %d",
5595 			pdev->stats.tx.dropped.fw_reason3);
5596 	DP_PRINT_STATS("	Aged Out from msdu/mpdu queues = %d",
5597 			pdev->stats.tx.dropped.age_out);
5598 	DP_PRINT_STATS("	Multicast:");
5599 	DP_PRINT_STATS("	Packets: %u",
5600 		       pdev->stats.tx.mcast.num);
5601 	DP_PRINT_STATS("	Bytes: %llu",
5602 		       pdev->stats.tx.mcast.bytes);
5603 	DP_PRINT_STATS("Scatter Gather:");
5604 	DP_PRINT_STATS("	Packets = %d",
5605 			pdev->stats.tx_i.sg.sg_pkt.num);
5606 	DP_PRINT_STATS("	Bytes = %llu",
5607 			pdev->stats.tx_i.sg.sg_pkt.bytes);
5608 	DP_PRINT_STATS("	Dropped By Host = %d",
5609 			pdev->stats.tx_i.sg.dropped_host.num);
5610 	DP_PRINT_STATS("	Dropped By Target = %d",
5611 			pdev->stats.tx_i.sg.dropped_target);
5612 	DP_PRINT_STATS("TSO:");
5613 	DP_PRINT_STATS("	Number of Segments = %d",
5614 			pdev->stats.tx_i.tso.num_seg);
5615 	DP_PRINT_STATS("	Packets = %d",
5616 			pdev->stats.tx_i.tso.tso_pkt.num);
5617 	DP_PRINT_STATS("	Bytes = %llu",
5618 			pdev->stats.tx_i.tso.tso_pkt.bytes);
5619 	DP_PRINT_STATS("	Dropped By Host = %d",
5620 			pdev->stats.tx_i.tso.dropped_host.num);
5621 	DP_PRINT_STATS("Mcast Enhancement:");
5622 	DP_PRINT_STATS("	Packets = %d",
5623 			pdev->stats.tx_i.mcast_en.mcast_pkt.num);
5624 	DP_PRINT_STATS("	Bytes = %llu",
5625 			pdev->stats.tx_i.mcast_en.mcast_pkt.bytes);
5626 	DP_PRINT_STATS("	Dropped: Map Errors = %d",
5627 			pdev->stats.tx_i.mcast_en.dropped_map_error);
5628 	DP_PRINT_STATS("	Dropped: Self Mac = %d",
5629 			pdev->stats.tx_i.mcast_en.dropped_self_mac);
5630 	DP_PRINT_STATS("	Dropped: Send Fail = %d",
5631 			pdev->stats.tx_i.mcast_en.dropped_send_fail);
5632 	DP_PRINT_STATS("	Unicast sent = %d",
5633 			pdev->stats.tx_i.mcast_en.ucast);
5634 	DP_PRINT_STATS("Raw:");
5635 	DP_PRINT_STATS("	Packets = %d",
5636 			pdev->stats.tx_i.raw.raw_pkt.num);
5637 	DP_PRINT_STATS("	Bytes = %llu",
5638 			pdev->stats.tx_i.raw.raw_pkt.bytes);
5639 	DP_PRINT_STATS("	DMA map error = %d",
5640 			pdev->stats.tx_i.raw.dma_map_error);
5641 	DP_PRINT_STATS("Reinjected:");
5642 	DP_PRINT_STATS("	Packets = %d",
5643 			pdev->stats.tx_i.reinject_pkts.num);
5644 	DP_PRINT_STATS("	Bytes = %llu\n",
5645 			pdev->stats.tx_i.reinject_pkts.bytes);
5646 	DP_PRINT_STATS("Inspected:");
5647 	DP_PRINT_STATS("	Packets = %d",
5648 			pdev->stats.tx_i.inspect_pkts.num);
5649 	DP_PRINT_STATS("	Bytes = %llu",
5650 			pdev->stats.tx_i.inspect_pkts.bytes);
5651 	DP_PRINT_STATS("Nawds Multicast:");
5652 	DP_PRINT_STATS("	Packets = %d",
5653 			pdev->stats.tx_i.nawds_mcast.num);
5654 	DP_PRINT_STATS("	Bytes = %llu",
5655 			pdev->stats.tx_i.nawds_mcast.bytes);
5656 	DP_PRINT_STATS("CCE Classified:");
5657 	DP_PRINT_STATS("	CCE Classified Packets: %u",
5658 			pdev->stats.tx_i.cce_classified);
5659 	DP_PRINT_STATS("	RAW CCE Classified Packets: %u",
5660 			pdev->stats.tx_i.cce_classified_raw);
5661 	DP_PRINT_STATS("Mesh stats:");
5662 	DP_PRINT_STATS("	frames to firmware: %u",
5663 			pdev->stats.tx_i.mesh.exception_fw);
5664 	DP_PRINT_STATS("	completions from fw: %u",
5665 			pdev->stats.tx_i.mesh.completion_fw);
5666 	DP_PRINT_STATS("PPDU stats counter");
5667 	for (index = 0; index < CDP_PPDU_STATS_MAX_TAG; index++) {
5668 		DP_PRINT_STATS("	Tag[%d] = %llu", index,
5669 				pdev->stats.ppdu_stats_counter[index]);
5670 	}
5671 }
5672 
5673 /**
5674  * dp_print_pdev_rx_stats(): Print Pdev level RX stats
5675  * @pdev: DP_PDEV Handle
5676  *
5677  * Return: void
5678  */
5679 static inline void
5680 dp_print_pdev_rx_stats(struct dp_pdev *pdev)
5681 {
5682 	DP_PRINT_STATS("PDEV Rx Stats:\n");
5683 	DP_PRINT_STATS("Received From HW (Per Rx Ring):");
5684 	DP_PRINT_STATS("	Packets = %d %d %d %d",
5685 			pdev->stats.rx.rcvd_reo[0].num,
5686 			pdev->stats.rx.rcvd_reo[1].num,
5687 			pdev->stats.rx.rcvd_reo[2].num,
5688 			pdev->stats.rx.rcvd_reo[3].num);
5689 	DP_PRINT_STATS("	Bytes = %llu %llu %llu %llu",
5690 			pdev->stats.rx.rcvd_reo[0].bytes,
5691 			pdev->stats.rx.rcvd_reo[1].bytes,
5692 			pdev->stats.rx.rcvd_reo[2].bytes,
5693 			pdev->stats.rx.rcvd_reo[3].bytes);
5694 	DP_PRINT_STATS("Replenished:");
5695 	DP_PRINT_STATS("	Packets = %d",
5696 			pdev->stats.replenish.pkts.num);
5697 	DP_PRINT_STATS("	Bytes = %llu",
5698 			pdev->stats.replenish.pkts.bytes);
5699 	DP_PRINT_STATS("	Buffers Added To Freelist = %d",
5700 			pdev->stats.buf_freelist);
5701 	DP_PRINT_STATS("	Low threshold intr = %d",
5702 			pdev->stats.replenish.low_thresh_intrs);
5703 	DP_PRINT_STATS("Dropped:");
5704 	DP_PRINT_STATS("	msdu_not_done = %d",
5705 			pdev->stats.dropped.msdu_not_done);
5706 	DP_PRINT_STATS("	mon_rx_drop = %d",
5707 			pdev->stats.dropped.mon_rx_drop);
5708 	DP_PRINT_STATS("Sent To Stack:");
5709 	DP_PRINT_STATS("	Packets = %d",
5710 			pdev->stats.rx.to_stack.num);
5711 	DP_PRINT_STATS("	Bytes = %llu",
5712 			pdev->stats.rx.to_stack.bytes);
5713 	DP_PRINT_STATS("Multicast/Broadcast:");
5714 	DP_PRINT_STATS("	Packets = %d",
5715 			(pdev->stats.rx.multicast.num +
5716 			pdev->stats.rx.bcast.num));
5717 	DP_PRINT_STATS("	Bytes = %llu",
5718 			(pdev->stats.rx.multicast.bytes +
5719 			pdev->stats.rx.bcast.bytes));
5720 	DP_PRINT_STATS("Errors:");
5721 	DP_PRINT_STATS("	Rxdma Ring Un-initialized = %d",
5722 			pdev->stats.replenish.rxdma_err);
5723 	DP_PRINT_STATS("	Desc Alloc Failed = %d",
5724 			pdev->stats.err.desc_alloc_fail);
5725 	DP_PRINT_STATS("	IP checksum error = %d",
5726 		       pdev->stats.err.ip_csum_err);
5727 	DP_PRINT_STATS("	TCP/UDP checksum error = %d",
5728 		       pdev->stats.err.tcp_udp_csum_err);
5729 
5730 	/* Get bar_recv_cnt */
5731 	dp_aggregate_pdev_ctrl_frames_stats(pdev);
5732 	DP_PRINT_STATS("BAR Received Count: = %d",
5733 			pdev->stats.rx.bar_recv_cnt);
5734 
5735 }
5736 
5737 /**
5738  * dp_print_pdev_rx_mon_stats(): Print Pdev level RX monitor stats
5739  * @pdev: DP_PDEV Handle
5740  *
5741  * Return: void
5742  */
5743 static inline void
5744 dp_print_pdev_rx_mon_stats(struct dp_pdev *pdev)
5745 {
5746 	struct cdp_pdev_mon_stats *rx_mon_stats;
5747 
5748 	rx_mon_stats = &pdev->rx_mon_stats;
5749 
5750 	DP_PRINT_STATS("PDEV Rx Monitor Stats:\n");
5751 
5752 	dp_rx_mon_print_dbg_ppdu_stats(rx_mon_stats);
5753 
5754 	DP_PRINT_STATS("status_ppdu_done_cnt = %d",
5755 		       rx_mon_stats->status_ppdu_done);
5756 	DP_PRINT_STATS("dest_ppdu_done_cnt = %d",
5757 		       rx_mon_stats->dest_ppdu_done);
5758 	DP_PRINT_STATS("dest_mpdu_done_cnt = %d",
5759 		       rx_mon_stats->dest_mpdu_done);
5760 	DP_PRINT_STATS("dest_mpdu_drop_cnt = %d",
5761 		       rx_mon_stats->dest_mpdu_drop);
5762 }
5763 
5764 /**
5765  * dp_print_soc_tx_stats(): Print SOC level Tx stats
5766  * @soc: DP_SOC Handle
5767  *
5768  * Return: void
5769  */
5770 static inline void
5771 dp_print_soc_tx_stats(struct dp_soc *soc)
5772 {
5773 	uint8_t desc_pool_id;
5774 	soc->stats.tx.desc_in_use = 0;
5775 
5776 	DP_PRINT_STATS("SOC Tx Stats:\n");
5777 
5778 	for (desc_pool_id = 0;
5779 	     desc_pool_id < wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
5780 	     desc_pool_id++)
5781 		soc->stats.tx.desc_in_use +=
5782 			soc->tx_desc[desc_pool_id].num_allocated;
5783 
5784 	DP_PRINT_STATS("Tx Descriptors In Use = %d",
5785 			soc->stats.tx.desc_in_use);
5786 	DP_PRINT_STATS("Invalid peer:");
5787 	DP_PRINT_STATS("	Packets = %d",
5788 			soc->stats.tx.tx_invalid_peer.num);
5789 	DP_PRINT_STATS("	Bytes = %llu",
5790 			soc->stats.tx.tx_invalid_peer.bytes);
5791 	DP_PRINT_STATS("Packets dropped due to TCL ring full = %d %d %d",
5792 			soc->stats.tx.tcl_ring_full[0],
5793 			soc->stats.tx.tcl_ring_full[1],
5794 			soc->stats.tx.tcl_ring_full[2]);
5795 
5796 }
5797 /**
5798  * dp_print_soc_rx_stats: Print SOC level Rx stats
5799  * @soc: DP_SOC Handle
5800  *
5801  * Return:void
5802  */
5803 static inline void
5804 dp_print_soc_rx_stats(struct dp_soc *soc)
5805 {
5806 	uint32_t i;
5807 	char reo_error[DP_REO_ERR_LENGTH];
5808 	char rxdma_error[DP_RXDMA_ERR_LENGTH];
5809 	uint8_t index = 0;
5810 
5811 	DP_PRINT_STATS("SOC Rx Stats:\n");
5812 	DP_PRINT_STATS("Fragmented packets: %u",
5813 		       soc->stats.rx.rx_frags);
5814 	DP_PRINT_STATS("Reo reinjected packets: %u",
5815 		       soc->stats.rx.reo_reinject);
5816 	DP_PRINT_STATS("Errors:\n");
5817 	DP_PRINT_STATS("Rx Decrypt Errors = %d",
5818 			(soc->stats.rx.err.rxdma_error[HAL_RXDMA_ERR_DECRYPT] +
5819 			soc->stats.rx.err.rxdma_error[HAL_RXDMA_ERR_TKIP_MIC]));
5820 	DP_PRINT_STATS("Invalid RBM = %d",
5821 			soc->stats.rx.err.invalid_rbm);
5822 	DP_PRINT_STATS("Invalid Vdev = %d",
5823 			soc->stats.rx.err.invalid_vdev);
5824 	DP_PRINT_STATS("Invalid Pdev = %d",
5825 			soc->stats.rx.err.invalid_pdev);
5826 	DP_PRINT_STATS("Invalid Peer = %d",
5827 			soc->stats.rx.err.rx_invalid_peer.num);
5828 	DP_PRINT_STATS("HAL Ring Access Fail = %d",
5829 			soc->stats.rx.err.hal_ring_access_fail);
5830 
5831 	for (i = 0; i < HAL_RXDMA_ERR_MAX; i++) {
5832 		index += qdf_snprint(&rxdma_error[index],
5833 				DP_RXDMA_ERR_LENGTH - index,
5834 				" %d", soc->stats.rx.err.rxdma_error[i]);
5835 	}
5836 	DP_PRINT_STATS("RXDMA Error (0-31):%s",
5837 			rxdma_error);
5838 
5839 	index = 0;
5840 	for (i = 0; i < HAL_REO_ERR_MAX; i++) {
5841 		index += qdf_snprint(&reo_error[index],
5842 				DP_REO_ERR_LENGTH - index,
5843 				" %d", soc->stats.rx.err.reo_error[i]);
5844 	}
5845 	DP_PRINT_STATS("REO Error(0-14):%s",
5846 			reo_error);
5847 }
5848 
5849 
5850 /**
5851  * dp_print_ring_stat_from_hal(): Print hal level ring stats
5852  * @soc: DP_SOC handle
5853  * @srng: DP_SRNG handle
5854  * @ring_name: SRNG name
5855  *
5856  * Return: void
5857  */
5858 static inline void
5859 dp_print_ring_stat_from_hal(struct dp_soc *soc,  struct dp_srng *srng,
5860 	char *ring_name)
5861 {
5862 	uint32_t tailp;
5863 	uint32_t headp;
5864 
5865 	if (srng->hal_srng != NULL) {
5866 		hal_api_get_tphp(soc->hal_soc, srng->hal_srng, &tailp, &headp);
5867 		DP_PRINT_STATS("%s : Head pointer = %d  Tail Pointer = %d\n",
5868 				ring_name, headp, tailp);
5869 	}
5870 }
5871 
5872 /**
5873  * dp_print_ring_stats(): Print tail and head pointer
5874  * @pdev: DP_PDEV handle
5875  *
5876  * Return:void
5877  */
5878 static inline void
5879 dp_print_ring_stats(struct dp_pdev *pdev)
5880 {
5881 	uint32_t i;
5882 	char ring_name[STR_MAXLEN + 1];
5883 	int mac_id;
5884 
5885 	dp_print_ring_stat_from_hal(pdev->soc,
5886 			&pdev->soc->reo_exception_ring,
5887 			"Reo Exception Ring");
5888 	dp_print_ring_stat_from_hal(pdev->soc,
5889 			&pdev->soc->reo_reinject_ring,
5890 			"Reo Inject Ring");
5891 	dp_print_ring_stat_from_hal(pdev->soc,
5892 			&pdev->soc->reo_cmd_ring,
5893 			"Reo Command Ring");
5894 	dp_print_ring_stat_from_hal(pdev->soc,
5895 			&pdev->soc->reo_status_ring,
5896 			"Reo Status Ring");
5897 	dp_print_ring_stat_from_hal(pdev->soc,
5898 			&pdev->soc->rx_rel_ring,
5899 			"Rx Release ring");
5900 	dp_print_ring_stat_from_hal(pdev->soc,
5901 			&pdev->soc->tcl_cmd_ring,
5902 			"Tcl command Ring");
5903 	dp_print_ring_stat_from_hal(pdev->soc,
5904 			&pdev->soc->tcl_status_ring,
5905 			"Tcl Status Ring");
5906 	dp_print_ring_stat_from_hal(pdev->soc,
5907 			&pdev->soc->wbm_desc_rel_ring,
5908 			"Wbm Desc Rel Ring");
5909 	for (i = 0; i < MAX_REO_DEST_RINGS; i++) {
5910 		snprintf(ring_name, STR_MAXLEN, "Reo Dest Ring %d", i);
5911 		dp_print_ring_stat_from_hal(pdev->soc,
5912 				&pdev->soc->reo_dest_ring[i],
5913 				ring_name);
5914 	}
5915 	for (i = 0; i < pdev->soc->num_tcl_data_rings; i++) {
5916 		snprintf(ring_name, STR_MAXLEN, "Tcl Data Ring %d", i);
5917 		dp_print_ring_stat_from_hal(pdev->soc,
5918 				&pdev->soc->tcl_data_ring[i],
5919 				ring_name);
5920 	}
5921 	for (i = 0; i < MAX_TCL_DATA_RINGS; i++) {
5922 		snprintf(ring_name, STR_MAXLEN, "Tx Comp Ring %d", i);
5923 		dp_print_ring_stat_from_hal(pdev->soc,
5924 				&pdev->soc->tx_comp_ring[i],
5925 				ring_name);
5926 	}
5927 	dp_print_ring_stat_from_hal(pdev->soc,
5928 			&pdev->rx_refill_buf_ring,
5929 			"Rx Refill Buf Ring");
5930 
5931 	dp_print_ring_stat_from_hal(pdev->soc,
5932 			&pdev->rx_refill_buf_ring2,
5933 			"Second Rx Refill Buf Ring");
5934 
5935 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
5936 		dp_print_ring_stat_from_hal(pdev->soc,
5937 				&pdev->rxdma_mon_buf_ring[mac_id],
5938 				"Rxdma Mon Buf Ring");
5939 		dp_print_ring_stat_from_hal(pdev->soc,
5940 				&pdev->rxdma_mon_dst_ring[mac_id],
5941 				"Rxdma Mon Dst Ring");
5942 		dp_print_ring_stat_from_hal(pdev->soc,
5943 				&pdev->rxdma_mon_status_ring[mac_id],
5944 				"Rxdma Mon Status Ring");
5945 		dp_print_ring_stat_from_hal(pdev->soc,
5946 				&pdev->rxdma_mon_desc_ring[mac_id],
5947 				"Rxdma mon desc Ring");
5948 	}
5949 
5950 	for (i = 0; i < NUM_RXDMA_RINGS_PER_PDEV; i++) {
5951 		snprintf(ring_name, STR_MAXLEN, "Rxdma err dst ring %d", i);
5952 		dp_print_ring_stat_from_hal(pdev->soc,
5953 			&pdev->rxdma_err_dst_ring[i],
5954 			ring_name);
5955 	}
5956 
5957 	for (i = 0; i < MAX_RX_MAC_RINGS; i++) {
5958 		snprintf(ring_name, STR_MAXLEN, "Rx mac buf ring %d", i);
5959 		dp_print_ring_stat_from_hal(pdev->soc,
5960 				&pdev->rx_mac_buf_ring[i],
5961 				ring_name);
5962 	}
5963 }
5964 
5965 /**
5966  * dp_txrx_host_stats_clr(): Reinitialize the txrx stats
5967  * @vdev: DP_VDEV handle
5968  *
5969  * Return:void
5970  */
5971 static inline void
5972 dp_txrx_host_stats_clr(struct dp_vdev *vdev)
5973 {
5974 	struct dp_peer *peer = NULL;
5975 	struct dp_soc *soc = (struct dp_soc *)vdev->pdev->soc;
5976 
5977 	DP_STATS_CLR(vdev->pdev);
5978 	DP_STATS_CLR(vdev->pdev->soc);
5979 	DP_STATS_CLR(vdev);
5980 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
5981 		if (!peer)
5982 			return;
5983 		DP_STATS_CLR(peer);
5984 
5985 		if (soc->cdp_soc.ol_ops->update_dp_stats) {
5986 			soc->cdp_soc.ol_ops->update_dp_stats(
5987 					vdev->pdev->ctrl_pdev,
5988 					&peer->stats,
5989 					peer->peer_ids[0],
5990 					UPDATE_PEER_STATS);
5991 		}
5992 
5993 	}
5994 
5995 	if (soc->cdp_soc.ol_ops->update_dp_stats)
5996 		soc->cdp_soc.ol_ops->update_dp_stats(vdev->pdev->ctrl_pdev,
5997 				&vdev->stats, (uint16_t)vdev->vdev_id,
5998 				UPDATE_VDEV_STATS);
5999 }
6000 
6001 /**
6002  * dp_print_common_rates_info(): Print common rate for tx or rx
6003  * @pkt_type_array: rate type array contains rate info
6004  *
6005  * Return:void
6006  */
6007 static inline void
6008 dp_print_common_rates_info(struct cdp_pkt_type *pkt_type_array)
6009 {
6010 	uint8_t mcs, pkt_type;
6011 
6012 	for (pkt_type = 0; pkt_type < DOT11_MAX; pkt_type++) {
6013 		for (mcs = 0; mcs < MAX_MCS; mcs++) {
6014 			if (!dp_rate_string[pkt_type][mcs].valid)
6015 				continue;
6016 
6017 			DP_PRINT_STATS("	%s = %d",
6018 				       dp_rate_string[pkt_type][mcs].mcs_type,
6019 				       pkt_type_array[pkt_type].mcs_count[mcs]);
6020 		}
6021 
6022 		DP_PRINT_STATS("\n");
6023 	}
6024 }
6025 
6026 /**
6027  * dp_print_rx_rates(): Print Rx rate stats
6028  * @vdev: DP_VDEV handle
6029  *
6030  * Return:void
6031  */
6032 static inline void
6033 dp_print_rx_rates(struct dp_vdev *vdev)
6034 {
6035 	struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;
6036 	uint8_t i;
6037 	uint8_t index = 0;
6038 	char nss[DP_NSS_LENGTH];
6039 
6040 	DP_PRINT_STATS("Rx Rate Info:\n");
6041 	dp_print_common_rates_info(pdev->stats.rx.pkt_type);
6042 
6043 
6044 	index = 0;
6045 	for (i = 0; i < SS_COUNT; i++) {
6046 		index += qdf_snprint(&nss[index], DP_NSS_LENGTH - index,
6047 				" %d", pdev->stats.rx.nss[i]);
6048 	}
6049 	DP_PRINT_STATS("NSS(1-8) = %s",
6050 			nss);
6051 
6052 	DP_PRINT_STATS("SGI ="
6053 			" 0.8us %d,"
6054 			" 0.4us %d,"
6055 			" 1.6us %d,"
6056 			" 3.2us %d,",
6057 			pdev->stats.rx.sgi_count[0],
6058 			pdev->stats.rx.sgi_count[1],
6059 			pdev->stats.rx.sgi_count[2],
6060 			pdev->stats.rx.sgi_count[3]);
6061 	DP_PRINT_STATS("BW Counts = 20MHZ %d, 40MHZ %d, 80MHZ %d, 160MHZ %d",
6062 			pdev->stats.rx.bw[0], pdev->stats.rx.bw[1],
6063 			pdev->stats.rx.bw[2], pdev->stats.rx.bw[3]);
6064 	DP_PRINT_STATS("Reception Type ="
6065 			" SU: %d,"
6066 			" MU_MIMO:%d,"
6067 			" MU_OFDMA:%d,"
6068 			" MU_OFDMA_MIMO:%d\n",
6069 			pdev->stats.rx.reception_type[0],
6070 			pdev->stats.rx.reception_type[1],
6071 			pdev->stats.rx.reception_type[2],
6072 			pdev->stats.rx.reception_type[3]);
6073 	DP_PRINT_STATS("Aggregation:\n");
6074 	DP_PRINT_STATS("Number of Msdu's Part of Ampdus = %d",
6075 			pdev->stats.rx.ampdu_cnt);
6076 	DP_PRINT_STATS("Number of Msdu's With No Mpdu Level Aggregation : %d",
6077 			pdev->stats.rx.non_ampdu_cnt);
6078 	DP_PRINT_STATS("Number of Msdu's Part of Amsdu: %d",
6079 			pdev->stats.rx.amsdu_cnt);
6080 	DP_PRINT_STATS("Number of Msdu's With No Msdu Level Aggregation: %d",
6081 			pdev->stats.rx.non_amsdu_cnt);
6082 }
6083 
6084 /**
6085  * dp_print_tx_rates(): Print tx rates
6086  * @vdev: DP_VDEV handle
6087  *
6088  * Return:void
6089  */
6090 static inline void
6091 dp_print_tx_rates(struct dp_vdev *vdev)
6092 {
6093 	struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;
6094 	uint8_t index;
6095 	char nss[DP_NSS_LENGTH];
6096 	int nss_index;
6097 
6098 	DP_PRINT_STATS("Tx Rate Info:\n");
6099 	dp_print_common_rates_info(pdev->stats.tx.pkt_type);
6100 
6101 	DP_PRINT_STATS("SGI ="
6102 			" 0.8us %d"
6103 			" 0.4us %d"
6104 			" 1.6us %d"
6105 			" 3.2us %d",
6106 			pdev->stats.tx.sgi_count[0],
6107 			pdev->stats.tx.sgi_count[1],
6108 			pdev->stats.tx.sgi_count[2],
6109 			pdev->stats.tx.sgi_count[3]);
6110 
6111 	DP_PRINT_STATS("BW Counts = 20MHZ %d, 40MHZ %d, 80MHZ %d, 160MHZ %d",
6112 			pdev->stats.tx.bw[0], pdev->stats.tx.bw[1],
6113 			pdev->stats.tx.bw[2], pdev->stats.tx.bw[3]);
6114 
6115 	index = 0;
6116 	for (nss_index = 0; nss_index < SS_COUNT; nss_index++) {
6117 		index += qdf_snprint(&nss[index], DP_NSS_LENGTH - index,
6118 				" %d", pdev->stats.tx.nss[nss_index]);
6119 	}
6120 
6121 	DP_PRINT_STATS("NSS(1-8) = %s", nss);
6122 	DP_PRINT_STATS("OFDMA = %d", pdev->stats.tx.ofdma);
6123 	DP_PRINT_STATS("STBC = %d", pdev->stats.tx.stbc);
6124 	DP_PRINT_STATS("LDPC = %d", pdev->stats.tx.ldpc);
6125 	DP_PRINT_STATS("Retries = %d", pdev->stats.tx.retries);
6126 	DP_PRINT_STATS("Last ack rssi = %d\n", pdev->stats.tx.last_ack_rssi);
6127 
6128 	DP_PRINT_STATS("Aggregation:\n");
6129 	DP_PRINT_STATS("Number of Msdu's Part of Amsdu = %d",
6130 			pdev->stats.tx.amsdu_cnt);
6131 	DP_PRINT_STATS("Number of Msdu's With No Msdu Level Aggregation = %d",
6132 			pdev->stats.tx.non_amsdu_cnt);
6133 }
6134 
6135 /**
6136  * dp_print_peer_stats():print peer stats
6137  * @peer: DP_PEER handle
6138  *
6139  * return void
6140  */
6141 static inline void dp_print_peer_stats(struct dp_peer *peer)
6142 {
6143 	uint8_t i;
6144 	uint32_t index;
6145 	char nss[DP_NSS_LENGTH];
6146 	DP_PRINT_STATS("Node Tx Stats:\n");
6147 	DP_PRINT_STATS("Total Packet Completions = %d",
6148 			peer->stats.tx.comp_pkt.num);
6149 	DP_PRINT_STATS("Total Bytes Completions = %llu",
6150 			peer->stats.tx.comp_pkt.bytes);
6151 	DP_PRINT_STATS("Success Packets = %d",
6152 			peer->stats.tx.tx_success.num);
6153 	DP_PRINT_STATS("Success Bytes = %llu",
6154 			peer->stats.tx.tx_success.bytes);
6155 	DP_PRINT_STATS("Unicast Success Packets = %d",
6156 			peer->stats.tx.ucast.num);
6157 	DP_PRINT_STATS("Unicast Success Bytes = %llu",
6158 			peer->stats.tx.ucast.bytes);
6159 	DP_PRINT_STATS("Multicast Success Packets = %d",
6160 			peer->stats.tx.mcast.num);
6161 	DP_PRINT_STATS("Multicast Success Bytes = %llu",
6162 			peer->stats.tx.mcast.bytes);
6163 	DP_PRINT_STATS("Broadcast Success Packets = %d",
6164 			peer->stats.tx.bcast.num);
6165 	DP_PRINT_STATS("Broadcast Success Bytes = %llu",
6166 			peer->stats.tx.bcast.bytes);
6167 	DP_PRINT_STATS("Packets Failed = %d",
6168 			peer->stats.tx.tx_failed);
6169 	DP_PRINT_STATS("Packets In OFDMA = %d",
6170 			peer->stats.tx.ofdma);
6171 	DP_PRINT_STATS("Packets In STBC = %d",
6172 			peer->stats.tx.stbc);
6173 	DP_PRINT_STATS("Packets In LDPC = %d",
6174 			peer->stats.tx.ldpc);
6175 	DP_PRINT_STATS("Packet Retries = %d",
6176 			peer->stats.tx.retries);
6177 	DP_PRINT_STATS("MSDU's Part of AMSDU = %d",
6178 			peer->stats.tx.amsdu_cnt);
6179 	DP_PRINT_STATS("Last Packet RSSI = %d",
6180 			peer->stats.tx.last_ack_rssi);
6181 	DP_PRINT_STATS("Dropped At FW: Removed = %d",
6182 			peer->stats.tx.dropped.fw_rem);
6183 	DP_PRINT_STATS("Dropped At FW: Removed transmitted = %d",
6184 			peer->stats.tx.dropped.fw_rem_tx);
6185 	DP_PRINT_STATS("Dropped At FW: Removed Untransmitted = %d",
6186 			peer->stats.tx.dropped.fw_rem_notx);
6187 	DP_PRINT_STATS("Dropped : Age Out = %d",
6188 			peer->stats.tx.dropped.age_out);
6189 	DP_PRINT_STATS("NAWDS : ");
6190 	DP_PRINT_STATS("	Nawds multicast Drop Tx Packet = %d",
6191 			peer->stats.tx.nawds_mcast_drop);
6192 	DP_PRINT_STATS("	Nawds multicast  Tx Packet Count = %d",
6193 			peer->stats.tx.nawds_mcast.num);
6194 	DP_PRINT_STATS("	Nawds multicast  Tx Packet Bytes = %llu",
6195 			peer->stats.tx.nawds_mcast.bytes);
6196 
6197 	DP_PRINT_STATS("Rate Info:");
6198 	dp_print_common_rates_info(peer->stats.tx.pkt_type);
6199 
6200 
6201 	DP_PRINT_STATS("SGI = "
6202 			" 0.8us %d"
6203 			" 0.4us %d"
6204 			" 1.6us %d"
6205 			" 3.2us %d",
6206 			peer->stats.tx.sgi_count[0],
6207 			peer->stats.tx.sgi_count[1],
6208 			peer->stats.tx.sgi_count[2],
6209 			peer->stats.tx.sgi_count[3]);
6210 	DP_PRINT_STATS("Excess Retries per AC ");
6211 	DP_PRINT_STATS("	 Best effort = %d",
6212 			peer->stats.tx.excess_retries_per_ac[0]);
6213 	DP_PRINT_STATS("	 Background = %d",
6214 			peer->stats.tx.excess_retries_per_ac[1]);
6215 	DP_PRINT_STATS("	 Video = %d",
6216 			peer->stats.tx.excess_retries_per_ac[2]);
6217 	DP_PRINT_STATS("	 Voice = %d",
6218 			peer->stats.tx.excess_retries_per_ac[3]);
6219 	DP_PRINT_STATS("BW Counts = 20MHZ %d 40MHZ %d 80MHZ %d 160MHZ %d\n",
6220 			peer->stats.tx.bw[2], peer->stats.tx.bw[3],
6221 			peer->stats.tx.bw[4], peer->stats.tx.bw[5]);
6222 
6223 	index = 0;
6224 	for (i = 0; i < SS_COUNT; i++) {
6225 		index += qdf_snprint(&nss[index], DP_NSS_LENGTH - index,
6226 				" %d", peer->stats.tx.nss[i]);
6227 	}
6228 	DP_PRINT_STATS("NSS(1-8) = %s",
6229 			nss);
6230 
6231 	DP_PRINT_STATS("Aggregation:");
6232 	DP_PRINT_STATS("	Number of Msdu's Part of Amsdu = %d",
6233 			peer->stats.tx.amsdu_cnt);
6234 	DP_PRINT_STATS("	Number of Msdu's With No Msdu Level Aggregation = %d\n",
6235 			peer->stats.tx.non_amsdu_cnt);
6236 
6237 	DP_PRINT_STATS("Bytes and Packets transmitted  in last one sec:");
6238 	DP_PRINT_STATS("	Bytes transmitted in last sec: %d",
6239 		       peer->stats.tx.tx_byte_rate);
6240 	DP_PRINT_STATS("	Data transmitted in last sec: %d",
6241 		       peer->stats.tx.tx_data_rate);
6242 
6243 	DP_PRINT_STATS("Node Rx Stats:");
6244 	DP_PRINT_STATS("Packets Sent To Stack = %d",
6245 			peer->stats.rx.to_stack.num);
6246 	DP_PRINT_STATS("Bytes Sent To Stack = %llu",
6247 			peer->stats.rx.to_stack.bytes);
6248 	for (i = 0; i <  CDP_MAX_RX_RINGS; i++) {
6249 		DP_PRINT_STATS("Ring Id = %d", i);
6250 		DP_PRINT_STATS("	Packets Received = %d",
6251 				peer->stats.rx.rcvd_reo[i].num);
6252 		DP_PRINT_STATS("	Bytes Received = %llu",
6253 				peer->stats.rx.rcvd_reo[i].bytes);
6254 	}
6255 	DP_PRINT_STATS("Multicast Packets Received = %d",
6256 			peer->stats.rx.multicast.num);
6257 	DP_PRINT_STATS("Multicast Bytes Received = %llu",
6258 			peer->stats.rx.multicast.bytes);
6259 	DP_PRINT_STATS("Broadcast Packets Received = %d",
6260 			peer->stats.rx.bcast.num);
6261 	DP_PRINT_STATS("Broadcast Bytes Received = %llu",
6262 			peer->stats.rx.bcast.bytes);
6263 	DP_PRINT_STATS("Intra BSS Packets Received = %d",
6264 			peer->stats.rx.intra_bss.pkts.num);
6265 	DP_PRINT_STATS("Intra BSS Bytes Received = %llu",
6266 			peer->stats.rx.intra_bss.pkts.bytes);
6267 	DP_PRINT_STATS("Raw Packets Received = %d",
6268 			peer->stats.rx.raw.num);
6269 	DP_PRINT_STATS("Raw Bytes Received = %llu",
6270 			peer->stats.rx.raw.bytes);
6271 	DP_PRINT_STATS("Errors: MIC Errors = %d",
6272 			peer->stats.rx.err.mic_err);
6273 	DP_PRINT_STATS("Errors: Decryption Errors = %d",
6274 			peer->stats.rx.err.decrypt_err);
6275 	DP_PRINT_STATS("Msdu's Received As Part of Ampdu = %d",
6276 	DP_PRINT_STATS("Msdu's Received Not Part of Ampdu = %d",
6277 			peer->stats.rx.non_ampdu_cnt);
6278 	DP_PRINT_STATS("Msdu's Received As Part of Ampdu = %d",
6279 	DP_PRINT_STATS("Msdu's Received Not Part of Amsdu's = %d",
6280 			peer->stats.rx.non_amsdu_cnt);
6281 	DP_PRINT_STATS("MSDUs Received As Part of Amsdu = %d",
6282 			peer->stats.rx.amsdu_cnt);
6283 	DP_PRINT_STATS("NAWDS : ");
6284 	DP_PRINT_STATS("	Nawds multicast Drop Rx Packet = %d",
6285 			peer->stats.rx.nawds_mcast_drop);
6286 	DP_PRINT_STATS("SGI ="
6287 			" 0.8us %d"
6288 			" 0.4us %d"
6289 			" 1.6us %d"
6290 			" 3.2us %d",
6291 			peer->stats.rx.sgi_count[0],
6292 			peer->stats.rx.sgi_count[1],
6293 			peer->stats.rx.sgi_count[2],
6294 			peer->stats.rx.sgi_count[3]);
6295 	DP_PRINT_STATS("BW Counts = 20MHZ %d 40MHZ %d 80MHZ %d 160MHZ %d",
6296 			peer->stats.rx.bw[0], peer->stats.rx.bw[1],
6297 			peer->stats.rx.bw[2], peer->stats.rx.bw[3]);
6298 	DP_PRINT_STATS("Reception Type ="
6299 			" SU %d,"
6300 			" MU_MIMO %d,"
6301 			" MU_OFDMA %d,"
6302 			" MU_OFDMA_MIMO %d",
6303 			peer->stats.rx.reception_type[0],
6304 			peer->stats.rx.reception_type[1],
6305 			peer->stats.rx.reception_type[2],
6306 			peer->stats.rx.reception_type[3]);
6307 
6308 	dp_print_common_rates_info(peer->stats.rx.pkt_type);
6309 
6310 	index = 0;
6311 	for (i = 0; i < SS_COUNT; i++) {
6312 		index += qdf_snprint(&nss[index], DP_NSS_LENGTH - index,
6313 				" %d", peer->stats.rx.nss[i]);
6314 	}
6315 	DP_PRINT_STATS("NSS(1-8) = %s",
6316 			nss);
6317 
6318 	DP_PRINT_STATS("Aggregation:");
6319 	DP_PRINT_STATS("	Msdu's Part of Ampdu = %d",
6320 			peer->stats.rx.ampdu_cnt);
6321 	DP_PRINT_STATS("	Msdu's With No Mpdu Level Aggregation = %d",
6322 			peer->stats.rx.non_ampdu_cnt);
6323 	DP_PRINT_STATS("	Msdu's Part of Amsdu = %d",
6324 			peer->stats.rx.amsdu_cnt);
6325 	DP_PRINT_STATS("	Msdu's With No Msdu Level Aggregation = %d",
6326 			peer->stats.rx.non_amsdu_cnt);
6327 
6328 	DP_PRINT_STATS("Bytes and Packets received in last one sec:");
6329 	DP_PRINT_STATS("	Bytes received in last sec: %d",
6330 		       peer->stats.rx.rx_byte_rate);
6331 	DP_PRINT_STATS("	Data received in last sec: %d",
6332 		       peer->stats.rx.rx_data_rate);
6333 }
6334 
6335 /*
6336  * dp_get_host_peer_stats()- function to print peer stats
6337  * @pdev_handle: DP_PDEV handle
6338  * @mac_addr: mac address of the peer
6339  *
6340  * Return: void
6341  */
6342 static void
6343 dp_get_host_peer_stats(struct cdp_pdev *pdev_handle, char *mac_addr)
6344 {
6345 	struct dp_peer *peer;
6346 	uint8_t local_id;
6347 
6348 	peer = (struct dp_peer *)dp_find_peer_by_addr(pdev_handle, mac_addr,
6349 			&local_id);
6350 
6351 	if (!peer) {
6352 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
6353 			  "%s: Invalid peer\n", __func__);
6354 		return;
6355 	}
6356 
6357 	dp_print_peer_stats(peer);
6358 	dp_peer_rxtid_stats(peer, dp_rx_tid_stats_cb, NULL);
6359 }
6360 
6361 /**
6362  * dp_print_host_stats()- Function to print the stats aggregated at host
6363  * @vdev_handle: DP_VDEV handle
6364  * @type: host stats type
6365  *
6366  * Available Stat types
6367  * TXRX_CLEAR_STATS: Clear the stats
6368  * TXRX_RX_RATE_STATS: Print Rx Rate Info
6369  * TXRX_TX_RATE_STATS: Print Tx Rate Info
6370  * TXRX_TX_HOST_STATS: Print Tx Stats
6371  * TXRX_RX_HOST_STATS: Print Rx Stats
6372  * TXRX_AST_STATS: Print AST Stats
6373  * TXRX_SRNG_PTR_STATS: Print SRNG ring pointer stats
 * TXRX_RX_MON_STATS: Print monitor mode Rx stats
 * TXRX_REO_QUEUE_STATS: Print peer REO queue (TID) stats
6374  *
6375  * Return: 0 on success, print error message in case of failure
6376  */
6377 static int
6378 dp_print_host_stats(struct cdp_vdev *vdev_handle,
6379 		    struct cdp_txrx_stats_req *req)
6380 {
6381 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
6382 	struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;
6383 	enum cdp_host_txrx_stats type =
6384 			dp_stats_mapping_table[req->stats][STATS_HOST];
6385 
6386 	dp_aggregate_pdev_stats(pdev);
6387 
6388 	switch (type) {
6389 	case TXRX_CLEAR_STATS:
6390 		dp_txrx_host_stats_clr(vdev);
6391 		break;
6392 	case TXRX_RX_RATE_STATS:
6393 		dp_print_rx_rates(vdev);
6394 		break;
6395 	case TXRX_TX_RATE_STATS:
6396 		dp_print_tx_rates(vdev);
6397 		break;
6398 	case TXRX_TX_HOST_STATS:
6399 		dp_print_pdev_tx_stats(pdev);
6400 		dp_print_soc_tx_stats(pdev->soc);
6401 		break;
6402 	case TXRX_RX_HOST_STATS:
6403 		dp_print_pdev_rx_stats(pdev);
6404 		dp_print_soc_rx_stats(pdev->soc);
6405 		break;
6406 	case TXRX_AST_STATS:
6407 		dp_print_ast_stats(pdev->soc);
6408 		dp_print_peer_table(vdev);
6409 		break;
6410 	case TXRX_SRNG_PTR_STATS:
6411 		dp_print_ring_stats(pdev);
6412 		break;
6413 	case TXRX_RX_MON_STATS:
6414 		dp_print_pdev_rx_mon_stats(pdev);
6415 		break;
6416 	case TXRX_REO_QUEUE_STATS:
6417 		dp_get_host_peer_stats((struct cdp_pdev *)pdev, req->peer_addr);
6418 		break;
6419 	default:
6420 		DP_TRACE(FATAL, "Wrong Input For TxRx Host Stats");
6421 		break;
6422 	}
6423 	return 0;
6424 }
6425 
6426 /*
6427  * dp_ppdu_ring_reset()- Reset PPDU Stats ring
6428  * @pdev: DP_PDEV handle
6429  *
6430  * Return: void
6431  */
6432 static void
6433 dp_ppdu_ring_reset(struct dp_pdev *pdev)
6434 {
6435 	struct htt_rx_ring_tlv_filter htt_tlv_filter;
6436 	int mac_id;
6437 
6438 	qdf_mem_set(&(htt_tlv_filter), sizeof(htt_tlv_filter), 0x0);
6439 
6440 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
6441 		int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id,
6442 							pdev->pdev_id);
6443 
6444 		htt_h2t_rx_ring_cfg(pdev->soc->htt_handle, mac_for_pdev,
6445 			pdev->rxdma_mon_status_ring[mac_id].hal_srng,
6446 			RXDMA_MONITOR_STATUS, RX_BUFFER_SIZE, &htt_tlv_filter);
6447 	}
6448 }
6449 
6450 /*
6451  * dp_ppdu_ring_cfg()- Configure PPDU Stats ring
6452  * @pdev: DP_PDEV handle
6453  *
6454  * Return: void
6455  */
6456 static void
6457 dp_ppdu_ring_cfg(struct dp_pdev *pdev)
6458 {
6459 	struct htt_rx_ring_tlv_filter htt_tlv_filter = {0};
6460 	int mac_id;
6461 
6462 	htt_tlv_filter.mpdu_start = 1;
6463 	htt_tlv_filter.msdu_start = 0;
6464 	htt_tlv_filter.packet = 0;
6465 	htt_tlv_filter.msdu_end = 0;
6466 	htt_tlv_filter.mpdu_end = 0;
6467 	htt_tlv_filter.attention = 0;
6468 	htt_tlv_filter.ppdu_start = 1;
6469 	htt_tlv_filter.ppdu_end = 1;
6470 	htt_tlv_filter.ppdu_end_user_stats = 1;
6471 	htt_tlv_filter.ppdu_end_user_stats_ext = 1;
6472 	htt_tlv_filter.ppdu_end_status_done = 1;
6473 	htt_tlv_filter.enable_fp = 1;
6474 	htt_tlv_filter.enable_md = 0;
6475 	if (pdev->neighbour_peers_added &&
6476 	    pdev->soc->hw_nac_monitor_support) {
6477 		htt_tlv_filter.enable_md = 1;
6478 		htt_tlv_filter.packet_header = 1;
6479 	}
6480 	if (pdev->mcopy_mode) {
6481 		htt_tlv_filter.packet_header = 1;
6482 		htt_tlv_filter.enable_mo = 1;
6483 	}
6484 	htt_tlv_filter.fp_mgmt_filter = FILTER_MGMT_ALL;
6485 	htt_tlv_filter.fp_ctrl_filter = FILTER_CTRL_ALL;
6486 	htt_tlv_filter.fp_data_filter = FILTER_DATA_ALL;
6487 	htt_tlv_filter.mo_mgmt_filter = FILTER_MGMT_ALL;
6488 	htt_tlv_filter.mo_ctrl_filter = FILTER_CTRL_ALL;
6489 	htt_tlv_filter.mo_data_filter = FILTER_DATA_ALL;
6490 	if (pdev->neighbour_peers_added &&
6491 	    pdev->soc->hw_nac_monitor_support)
6492 		htt_tlv_filter.md_data_filter = FILTER_DATA_ALL;
6493 
6494 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
6495 		int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id,
6496 						pdev->pdev_id);
6497 
6498 		htt_h2t_rx_ring_cfg(pdev->soc->htt_handle, mac_for_pdev,
6499 			pdev->rxdma_mon_status_ring[mac_id].hal_srng,
6500 			RXDMA_MONITOR_STATUS, RX_BUFFER_SIZE, &htt_tlv_filter);
6501 	}
6502 }
6503 
6504 /*
6505  * is_ppdu_txrx_capture_enabled() - API to check whether any of the pktlog,
6506  *                              tx sniffer or mcopy capture modes is enabled.
6507  * @pdev: dp pdev handle.
6508  *
6509  * Return: true if none of the capture modes is enabled, false otherwise
6510  */
6511 static inline bool is_ppdu_txrx_capture_enabled(struct dp_pdev *pdev)
6512 {
6513 	if (!pdev->pktlog_ppdu_stats && !pdev->tx_sniffer_enable &&
6514 	    !pdev->mcopy_mode)
6515 		return true;
6516 	else
6517 		return false;
6518 }
6519 
6520 /*
6521  * dp_set_bpr_enable() - API to enable/disable bpr feature
6522  * @pdev_handle: DP_PDEV handle.
6523  * @val: Provided value.
6524  *
6525  * Return: void
6526  */
6527 static void
6528 dp_set_bpr_enable(struct cdp_pdev *pdev_handle, int val)
6529 {
6530 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
6531 
6532 	switch (val) {
6533 	case CDP_BPR_DISABLE:
6534 		pdev->bpr_enable = CDP_BPR_DISABLE;
6535 		if (!pdev->pktlog_ppdu_stats && !pdev->enhanced_stats_en &&
6536 		    !pdev->tx_sniffer_enable && !pdev->mcopy_mode) {
6537 			dp_h2t_cfg_stats_msg_send(pdev, 0, pdev->pdev_id);
6538 		} else if (pdev->enhanced_stats_en &&
6539 			   !pdev->tx_sniffer_enable && !pdev->mcopy_mode &&
6540 			   !pdev->pktlog_ppdu_stats) {
6541 			dp_h2t_cfg_stats_msg_send(pdev,
6542 						  DP_PPDU_STATS_CFG_ENH_STATS,
6543 						  pdev->pdev_id);
6544 		}
6545 		break;
6546 	case CDP_BPR_ENABLE:
6547 		pdev->bpr_enable = CDP_BPR_ENABLE;
6548 		if (!pdev->enhanced_stats_en && !pdev->tx_sniffer_enable &&
6549 		    !pdev->mcopy_mode && !pdev->pktlog_ppdu_stats) {
6550 			dp_h2t_cfg_stats_msg_send(pdev,
6551 						  DP_PPDU_STATS_CFG_BPR,
6552 						  pdev->pdev_id);
6553 		} else if (pdev->enhanced_stats_en &&
6554 			   !pdev->tx_sniffer_enable && !pdev->mcopy_mode &&
6555 			   !pdev->pktlog_ppdu_stats) {
6556 			dp_h2t_cfg_stats_msg_send(pdev,
6557 						  DP_PPDU_STATS_CFG_BPR_ENH,
6558 						  pdev->pdev_id);
6559 		} else if (pdev->pktlog_ppdu_stats) {
6560 			dp_h2t_cfg_stats_msg_send(pdev,
6561 						  DP_PPDU_STATS_CFG_BPR_PKTLOG,
6562 						  pdev->pdev_id);
6563 		}
6564 		break;
6565 	default:
6566 		break;
6567 	}
6568 }
6569 
6570 /*
6571  * dp_config_debug_sniffer()- API to enable/disable debug sniffer
6572  * @pdev_handle: DP_PDEV handle
6573  * @val: user provided value
6574  *
6575  * Return: void
6576  */
6577 static void
6578 dp_config_debug_sniffer(struct cdp_pdev *pdev_handle, int val)
6579 {
6580 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
6581 
6582 	switch (val) {
6583 	case 0:
6584 		pdev->tx_sniffer_enable = 0;
6585 		pdev->mcopy_mode = 0;
6586 
6587 		if (!pdev->pktlog_ppdu_stats && !pdev->enhanced_stats_en &&
6588 		    !pdev->bpr_enable) {
6589 			dp_h2t_cfg_stats_msg_send(pdev, 0, pdev->pdev_id);
6590 			dp_ppdu_ring_reset(pdev);
6591 		} else if (pdev->enhanced_stats_en && !pdev->bpr_enable) {
6592 			dp_h2t_cfg_stats_msg_send(pdev,
6593 				DP_PPDU_STATS_CFG_ENH_STATS, pdev->pdev_id);
6594 		} else if (!pdev->enhanced_stats_en && pdev->bpr_enable) {
6595 			dp_h2t_cfg_stats_msg_send(pdev,
6596 						  DP_PPDU_STATS_CFG_BPR_ENH,
6597 						  pdev->pdev_id);
6598 		} else {
6599 			dp_h2t_cfg_stats_msg_send(pdev,
6600 						  DP_PPDU_STATS_CFG_BPR,
6601 						  pdev->pdev_id);
6602 		}
6603 		break;
6604 
6605 	case 1:
6606 		pdev->tx_sniffer_enable = 1;
6607 		pdev->mcopy_mode = 0;
6608 
6609 		if (!pdev->pktlog_ppdu_stats)
6610 			dp_h2t_cfg_stats_msg_send(pdev,
6611 				DP_PPDU_STATS_CFG_SNIFFER, pdev->pdev_id);
6612 		break;
6613 	case 2:
6614 		pdev->mcopy_mode = 1;
6615 		pdev->tx_sniffer_enable = 0;
6616 		dp_ppdu_ring_cfg(pdev);
6617 
6618 		if (!pdev->pktlog_ppdu_stats)
6619 			dp_h2t_cfg_stats_msg_send(pdev,
6620 				DP_PPDU_STATS_CFG_SNIFFER, pdev->pdev_id);
6621 		break;
6622 	default:
6623 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
6624 			"Invalid value");
6625 		break;
6626 	}
6627 }
6628 
6629 /*
6630  * dp_enable_enhanced_stats() - API to enable enhanced statistics
6631  * @pdev_handle: DP_PDEV handle
6632  *
6633  * Return: void
6634  */
6635 static void
6636 dp_enable_enhanced_stats(struct cdp_pdev *pdev_handle)
6637 {
6638 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
6639 
6640 	if (pdev->enhanced_stats_en == 0)
6641 		dp_cal_client_timer_start(pdev->cal_client_ctx);
6642 
6643 	pdev->enhanced_stats_en = 1;
6644 
6645 	if (!pdev->mcopy_mode && !pdev->neighbour_peers_added)
6646 		dp_ppdu_ring_cfg(pdev);
6647 
6648 	if (is_ppdu_txrx_capture_enabled(pdev) && !pdev->bpr_enable) {
6649 		dp_h2t_cfg_stats_msg_send(pdev, DP_PPDU_STATS_CFG_ENH_STATS, pdev->pdev_id);
6650 	} else if (is_ppdu_txrx_capture_enabled(pdev) && pdev->bpr_enable) {
6651 		dp_h2t_cfg_stats_msg_send(pdev,
6652 					  DP_PPDU_STATS_CFG_BPR_ENH,
6653 					  pdev->pdev_id);
6654 	}
6655 }
6656 
6657 /*
6658  * dp_disable_enhanced_stats() - API to disable enhanced statistics
6659  * @pdev_handle: DP_PDEV handle
6660  *
6661  * Return: void
6662  */
6663 static void
6664 dp_disable_enhanced_stats(struct cdp_pdev *pdev_handle)
6665 {
6666 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
6667 
6668 	if (pdev->enhanced_stats_en == 1)
6669 		dp_cal_client_timer_stop(pdev->cal_client_ctx);
6670 
6671 	pdev->enhanced_stats_en = 0;
6672 
6673 	if (is_ppdu_txrx_capture_enabled(pdev) && !pdev->bpr_enable) {
6674 		dp_h2t_cfg_stats_msg_send(pdev, 0, pdev->pdev_id);
6675 	} else if (is_ppdu_txrx_capture_enabled(pdev) && pdev->bpr_enable) {
6676 		dp_h2t_cfg_stats_msg_send(pdev,
6677 					  DP_PPDU_STATS_CFG_BPR,
6678 					  pdev->pdev_id);
6679 	}
6680 
6681 	if (!pdev->mcopy_mode && !pdev->neighbour_peers_added)
6682 		dp_ppdu_ring_reset(pdev);
6683 }
6684 
6685 /*
6686  * dp_get_fw_peer_stats() - function to request peer stats from the FW
6687  * @pdev_handle: DP_PDEV handle
6688  * @mac_addr: mac address of the peer
6689  * @cap: Type of htt stats requested
6690  *
6691  * Currently supporting only MAC ID based requests:
6692  *	1: HTT_PEER_STATS_REQ_MODE_NO_QUERY
6693  *	2: HTT_PEER_STATS_REQ_MODE_QUERY_TQM
6694  *	3: HTT_PEER_STATS_REQ_MODE_FLUSH_TQM
6695  *
6696  * Return: void
6697  */
6698 static void
6699 dp_get_fw_peer_stats(struct cdp_pdev *pdev_handle, uint8_t *mac_addr,
6700 		uint32_t cap)
6701 {
6702 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
6703 	int i;
6704 	uint32_t config_param0 = 0;
6705 	uint32_t config_param1 = 0;
6706 	uint32_t config_param2 = 0;
6707 	uint32_t config_param3 = 0;
6708 
6709 	HTT_DBG_EXT_STATS_PEER_INFO_IS_MAC_ADDR_SET(config_param0, 1);
6710 	config_param0 |= (1 << (cap + 1));
6711 
6712 	for (i = 0; i < HTT_PEER_STATS_MAX_TLV; i++) {
6713 		config_param1 |= (1 << i);
6714 	}
6715 
6716 	config_param2 |= (mac_addr[0] & 0x000000ff);
6717 	config_param2 |= ((mac_addr[1] << 8) & 0x0000ff00);
6718 	config_param2 |= ((mac_addr[2] << 16) & 0x00ff0000);
6719 	config_param2 |= ((mac_addr[3] << 24) & 0xff000000);
6720 
6721 	config_param3 |= (mac_addr[4] & 0x000000ff);
6722 	config_param3 |= ((mac_addr[5] << 8) & 0x0000ff00);
6723 
6724 	dp_h2t_ext_stats_msg_send(pdev, HTT_DBG_EXT_STATS_PEER_INFO,
6725 			config_param0, config_param1, config_param2,
6726 			config_param3, 0, 0, 0);
6727 
6728 }
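
/*
 * Worked example (hypothetical MAC address, for illustration only): for the
 * peer 00:11:22:33:44:55 the packing above yields
 *
 *	config_param2 = 0x33221100	(mac[0..3], byte 0 in the LSB)
 *	config_param3 = 0x00005544	(mac[4..5] in the two low bytes)
 *
 * and with cap = HTT_PEER_STATS_REQ_MODE_NO_QUERY (1) the request-mode bit
 * set in config_param0 is (1 << (cap + 1)) = 0x4, in addition to the
 * IS_MAC_ADDR_SET flag.
 */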
6729 
6730 /* This struct definition will be removed from here
6731  * once it gets added in FW headers */
6732 struct httstats_cmd_req {
6733     uint32_t    config_param0;
6734     uint32_t    config_param1;
6735     uint32_t    config_param2;
6736     uint32_t    config_param3;
6737     int cookie;
6738     u_int8_t    stats_id;
6739 };
6740 
6741 /*
6742  * dp_get_htt_stats: function to process the httstats request
6743  * @pdev_handle: DP pdev handle
6744  * @data: pointer to request data
6745  * @data_len: length of request data
6746  *
6747  * return: void
6748  */
6749 static void
6750 dp_get_htt_stats(struct cdp_pdev *pdev_handle, void *data, uint32_t data_len)
6751 {
6752 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
6753 	struct httstats_cmd_req *req = (struct httstats_cmd_req *)data;
6754 
6755 	QDF_ASSERT(data_len == sizeof(struct httstats_cmd_req));
6756 	dp_h2t_ext_stats_msg_send(pdev, req->stats_id,
6757 				req->config_param0, req->config_param1,
6758 				req->config_param2, req->config_param3,
6759 				req->cookie, 0, 0);
6760 }
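
/*
 * Minimal usage sketch for the raw HTT stats path (illustrative only; the
 * stats_id shown is one assumed choice and any HTT_DBG_EXT_STATS_* id may
 * be used):
 *
 *	struct httstats_cmd_req req = {0};
 *
 *	req.stats_id = HTT_DBG_EXT_STATS_PDEV_TX;
 *	req.config_param1 = 0xFFFFFFFF;
 *	req.cookie = 0;
 *	dp_get_htt_stats((struct cdp_pdev *)pdev, &req, sizeof(req));
 *
 * The QDF_ASSERT on data_len above expects the caller to pass exactly a
 * struct httstats_cmd_req.
 */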
6761 
6762 /*
6763  * dp_set_pdev_param: function to set parameters in pdev
6764  * @pdev_handle: DP pdev handle
6765  * @param: parameter type to be set
6766  * @val: value of parameter to be set
6767  *
6768  * return: void
6769  */
6770 static void dp_set_pdev_param(struct cdp_pdev *pdev_handle,
6771 		enum cdp_pdev_param_type param, uint8_t val)
6772 {
6773 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
6774 	switch (param) {
6775 	case CDP_CONFIG_DEBUG_SNIFFER:
6776 		dp_config_debug_sniffer(pdev_handle, val);
6777 		break;
6778 	case CDP_CONFIG_BPR_ENABLE:
6779 		dp_set_bpr_enable(pdev_handle, val);
6780 		break;
6781 	case CDP_CONFIG_PRIMARY_RADIO:
6782 		pdev->is_primary = val;
6783 		break;
6784 	default:
6785 		break;
6786 	}
6787 }
6788 
6789 /*
6790  * dp_set_vdev_param: function to set parameters in vdev
 * @vdev_handle: DP vdev handle
6791  * @param: parameter type to be set
6792  * @val: value of parameter to be set
6793  *
6794  * return: void
6795  */
6796 static void dp_set_vdev_param(struct cdp_vdev *vdev_handle,
6797 		enum cdp_vdev_param_type param, uint32_t val)
6798 {
6799 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
6800 	switch (param) {
6801 	case CDP_ENABLE_WDS:
6802 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
6803 			  "wds_enable %d for vdev(%p) id(%d)\n",
6804 			  val, vdev, vdev->vdev_id);
6805 		vdev->wds_enabled = val;
6806 		break;
6807 	case CDP_ENABLE_NAWDS:
6808 		vdev->nawds_enabled = val;
6809 		break;
6810 	case CDP_ENABLE_MCAST_EN:
6811 		vdev->mcast_enhancement_en = val;
6812 		break;
6813 	case CDP_ENABLE_PROXYSTA:
6814 		vdev->proxysta_vdev = val;
6815 		break;
6816 	case CDP_UPDATE_TDLS_FLAGS:
6817 		vdev->tdls_link_connected = val;
6818 		break;
6819 	case CDP_CFG_WDS_AGING_TIMER:
6820 		if (val == 0)
6821 			qdf_timer_stop(&vdev->pdev->soc->wds_aging_timer);
6822 		else if (val != vdev->wds_aging_timer_val)
6823 			qdf_timer_mod(&vdev->pdev->soc->wds_aging_timer, val);
6824 
6825 		vdev->wds_aging_timer_val = val;
6826 		break;
6827 	case CDP_ENABLE_AP_BRIDGE:
6828 		if (wlan_op_mode_sta != vdev->opmode)
6829 			vdev->ap_bridge_enabled = val;
6830 		else
6831 			vdev->ap_bridge_enabled = false;
6832 		break;
6833 	case CDP_ENABLE_CIPHER:
6834 		vdev->sec_type = val;
6835 		break;
6836 	case CDP_ENABLE_QWRAP_ISOLATION:
6837 		vdev->isolation_vdev = val;
6838 		break;
6839 	default:
6840 		break;
6841 	}
6842 
6843 	dp_tx_vdev_update_search_flags(vdev);
6844 }
6845 
6846 /**
6847  * dp_peer_set_nawds: set nawds bit in peer
6848  * @peer_handle: pointer to peer
6849  * @value: enable/disable nawds
6850  *
6851  * return: void
6852  */
6853 static void dp_peer_set_nawds(struct cdp_peer *peer_handle, uint8_t value)
6854 {
6855 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
6856 	peer->nawds_enabled = value;
6857 }
6858 
6859 /*
6860  * dp_set_vdev_dscp_tid_map_wifi3(): Update Map ID selected for particular vdev
6861  * @vdev_handle: DP_VDEV handle
6862  * @map_id:ID of map that needs to be updated
6863  *
6864  * Return: void
6865  */
6866 static void dp_set_vdev_dscp_tid_map_wifi3(struct cdp_vdev *vdev_handle,
6867 		uint8_t map_id)
6868 {
6869 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
6870 	vdev->dscp_tid_map_id = map_id;
6871 	return;
6872 }
6873 
6874 /* dp_txrx_get_peer_stats - will return cdp_peer_stats
6875  * @peer_handle: DP_PEER handle
6876  *
6877  * return : cdp_peer_stats pointer
6878  */
6879 static struct cdp_peer_stats*
6880 		dp_txrx_get_peer_stats(struct cdp_peer *peer_handle)
6881 {
6882 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
6883 
6884 	qdf_assert(peer);
6885 
6886 	return &peer->stats;
6887 }
6888 
6889 /* dp_txrx_reset_peer_stats - reset cdp_peer_stats for particular peer
6890  * @peer_handle: DP_PEER handle
6891  *
6892  * return : void
6893  */
6894 static void dp_txrx_reset_peer_stats(struct cdp_peer *peer_handle)
6895 {
6896 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
6897 
6898 	qdf_assert(peer);
6899 
6900 	qdf_mem_set(&peer->stats, sizeof(peer->stats), 0);
6901 }
6902 
6903 /* dp_txrx_get_vdev_stats - Update buffer with cdp_vdev_stats
6904  * @vdev_handle: DP_VDEV handle
6905  * @buf: buffer for vdev stats
 * @is_aggregate: if true, aggregate the per-peer stats into @buf
6906  *
6907  * return : int
6908  */
6909 static int  dp_txrx_get_vdev_stats(struct cdp_vdev *vdev_handle, void *buf,
6910 				   bool is_aggregate)
6911 {
6912 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
6913 	struct cdp_vdev_stats *vdev_stats = (struct cdp_vdev_stats *)buf;
6914 
6915 	if (is_aggregate)
6916 		dp_aggregate_vdev_stats(vdev, buf);
6917 	else
6918 		qdf_mem_copy(vdev_stats, &vdev->stats, sizeof(vdev->stats));
6919 
6920 	return 0;
6921 }
6922 
6923 /*
6924  * dp_txrx_stats_publish(): publish pdev stats into a buffer
6925  * @pdev_handle: DP_PDEV handle
6926  * @buf: to hold pdev_stats
6927  *
6928  * Return: int
6929  */
6930 static int
6931 dp_txrx_stats_publish(struct cdp_pdev *pdev_handle, void *buf)
6932 {
6933 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
6934 	struct cdp_pdev_stats *buffer = (struct cdp_pdev_stats *) buf;
6935 	struct cdp_txrx_stats_req req = {0,};
6936 
6937 	dp_aggregate_pdev_stats(pdev);
6938 	req.stats = HTT_DBG_EXT_STATS_PDEV_TX;
6939 	req.cookie_val = 1;
6940 	dp_h2t_ext_stats_msg_send(pdev, req.stats, req.param0,
6941 				req.param1, req.param2, req.param3, 0,
6942 				req.cookie_val, 0);
6943 
6944 	msleep(DP_MAX_SLEEP_TIME);
6945 
6946 	req.stats = HTT_DBG_EXT_STATS_PDEV_RX;
6947 	req.cookie_val = 1;
6948 	dp_h2t_ext_stats_msg_send(pdev, req.stats, req.param0,
6949 				req.param1, req.param2, req.param3, 0,
6950 				req.cookie_val, 0);
6951 
6952 	msleep(DP_MAX_SLEEP_TIME);
6953 	qdf_mem_copy(buffer, &pdev->stats, sizeof(pdev->stats));
6954 
6955 	return TXRX_STATS_LEVEL;
6956 }
6957 
6958 /**
6959  * dp_set_pdev_dscp_tid_map_wifi3(): update dscp tid map in pdev
6960  * @pdev_handle: DP_PDEV handle
6961  * @map_id: ID of map that needs to be updated
6962  * @tos: index value in map
6963  * @tid: tid value passed by the user
6964  *
6965  * Return: void
6966  */
6967 static void dp_set_pdev_dscp_tid_map_wifi3(struct cdp_pdev *pdev_handle,
6968 		uint8_t map_id, uint8_t tos, uint8_t tid)
6969 {
6970 	uint8_t dscp;
6971 	struct dp_pdev *pdev = (struct dp_pdev *) pdev_handle;
6972 	dscp = (tos >> DP_IP_DSCP_SHIFT) & DP_IP_DSCP_MASK;
6973 	pdev->dscp_tid_map[map_id][dscp] = tid;
6974 	if (map_id < HAL_MAX_HW_DSCP_TID_MAPS)
6975 		hal_tx_update_dscp_tid(pdev->soc->hal_soc, tid,
6976 			map_id, dscp);
6977 	return;
6978 }
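
/*
 * Worked example (assuming DP_IP_DSCP_SHIFT is 2 and DP_IP_DSCP_MASK is
 * 0x3f): an IP TOS of 0xB8 (DSCP 46, Expedited Forwarding) gives
 * dscp = (0xB8 >> 2) & 0x3f = 46, so
 *
 *	dp_set_pdev_dscp_tid_map_wifi3(pdev_handle, 0, 0xB8, 6);
 *
 * maps EF traffic to TID 6 in map 0 and, as long as map_id 0 is below
 * HAL_MAX_HW_DSCP_TID_MAPS, also pushes the update to HW via
 * hal_tx_update_dscp_tid().
 */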
6979 
6980 /**
6981  * dp_fw_stats_process(): Process TxRX FW stats request
6982  * @vdev_handle: DP VDEV handle
6983  * @req: stats request
6984  *
6985  * return: int
6986  */
6987 static int dp_fw_stats_process(struct cdp_vdev *vdev_handle,
6988 		struct cdp_txrx_stats_req *req)
6989 {
6990 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
6991 	struct dp_pdev *pdev = NULL;
6992 	uint32_t stats = req->stats;
6993 	uint8_t mac_id = req->mac_id;
6994 
6995 	if (!vdev) {
6996 		DP_TRACE(NONE, "VDEV not found");
6997 		return 1;
6998 	}
6999 	pdev = vdev->pdev;
7000 
7001 	/*
7002 	 * For the HTT_DBG_EXT_STATS_RESET command, the FW needs config_param0
7003 	 * to config_param3 to be set according to the rule below:
7004 	 *
7005 	 * PARAM:
7006 	 *   - config_param0 : start_offset (stats type)
7007 	 *   - config_param1 : stats bmask from start offset
7008 	 *   - config_param2 : stats bmask from start offset + 32
7009 	 *   - config_param3 : stats bmask from start offset + 64
7010 	 */
7011 	if (req->stats == CDP_TXRX_STATS_0) {
7012 		req->param0 = HTT_DBG_EXT_STATS_PDEV_TX;
7013 		req->param1 = 0xFFFFFFFF;
7014 		req->param2 = 0xFFFFFFFF;
7015 		req->param3 = 0xFFFFFFFF;
7016 	}
7017 
7018 	return dp_h2t_ext_stats_msg_send(pdev, stats, req->param0,
7019 				req->param1, req->param2, req->param3,
7020 				0, 0, mac_id);
7021 }
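
/*
 * Illustrative sketch (editorial example, not part of the driver): for the
 * CDP_TXRX_STATS_0 case handled above, param0 carries the stats type and
 * param1..param3 carry all-ones bitmasks, so a caller only needs to fill
 * the stats id and mac_id. "vdev_handle" is a hypothetical vdev handle.
 *
 *	struct cdp_txrx_stats_req req = {0};
 *
 *	req.stats = CDP_TXRX_STATS_0;
 *	req.mac_id = 0;
 *	dp_fw_stats_process(vdev_handle, &req);
 */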
7022 
7023 /**
7024  * dp_txrx_stats_request - map a stats request to firmware and/or host stats
7025  * @vdev: virtual handle
7026  * @req: stats request
7027  *
7028  * Return: QDF_STATUS
7029  */
7030 static
7031 QDF_STATUS dp_txrx_stats_request(struct cdp_vdev *vdev,
7032 				 struct cdp_txrx_stats_req *req)
7033 {
7034 	int host_stats;
7035 	int fw_stats;
7036 	enum cdp_stats stats;
7037 	int num_stats;
7038 
7039 	if (!vdev || !req) {
7040 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
7041 				"Invalid vdev/req instance");
7042 		return QDF_STATUS_E_INVAL;
7043 	}
7044 
7045 	stats = req->stats;
7046 	if (stats >= CDP_TXRX_MAX_STATS)
7047 		return QDF_STATUS_E_INVAL;
7048 
7049 	/*
7050 	 * DP_CURR_FW_STATS_AVAIL: number of FW stats currently available;
7051 	 *			must be updated when new FW HTT stats are added
7052 	 */
7053 	if (stats > CDP_TXRX_STATS_HTT_MAX)
7054 		stats = stats + DP_CURR_FW_STATS_AVAIL - DP_HTT_DBG_EXT_STATS_MAX;
7055 
7056 	num_stats  = QDF_ARRAY_SIZE(dp_stats_mapping_table);
7057 
7058 	if (stats >= num_stats) {
7059 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
7060 			  "%s: Invalid stats option: %d", __func__, stats);
7061 		return QDF_STATUS_E_INVAL;
7062 	}
7063 
7064 	req->stats = stats;
7065 	fw_stats = dp_stats_mapping_table[stats][STATS_FW];
7066 	host_stats = dp_stats_mapping_table[stats][STATS_HOST];
7067 
7068 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
7069 		 "stats: %u fw_stats_type: %d host_stats: %d",
7070 		  stats, fw_stats, host_stats);
7071 
7072 	if (fw_stats != TXRX_FW_STATS_INVALID) {
7073 		/* update request with FW stats type */
7074 		req->stats = fw_stats;
7075 		return dp_fw_stats_process(vdev, req);
7076 	}
7077 
7078 	if ((host_stats != TXRX_HOST_STATS_INVALID) &&
7079 			(host_stats <= TXRX_HOST_STATS_MAX))
7080 		return dp_print_host_stats(vdev, req);
7081 	else
7082 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
7083 				"Wrong Input for TxRx Stats");
7084 
7085 	return QDF_STATUS_SUCCESS;
7086 }
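
/*
 * Illustrative sketch (editorial example, not part of the driver): going
 * through the cdp common ops table registered below (dp_ops_cmn), an upper
 * layer would issue a stats request roughly as follows; "soc" and "vdev"
 * are assumed to come from the caller's attach path.
 *
 *	struct cdp_txrx_stats_req req = {0};
 *
 *	req.stats = CDP_TXRX_STATS_1;
 *	soc->ops->cmn_drv_ops->txrx_stats_request(vdev, &req);
 */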
7087 
7088 /*
7089  * dp_print_napi_stats(): NAPI stats
7090  * @soc - soc handle
7091  */
7092 static void dp_print_napi_stats(struct dp_soc *soc)
7093 {
7094 	hif_print_napi_stats(soc->hif_handle);
7095 }
7096 
7097 /*
7098  * dp_print_per_ring_stats(): Packet count per ring
7099  * @soc - soc handle
7100  */
7101 static void dp_print_per_ring_stats(struct dp_soc *soc)
7102 {
7103 	uint8_t ring;
7104 	uint16_t core;
7105 	uint64_t total_packets;
7106 
7107 	DP_TRACE_STATS(INFO_HIGH, "Reo packets per ring:");
7108 	for (ring = 0; ring < MAX_REO_DEST_RINGS; ring++) {
7109 		total_packets = 0;
7110 		DP_TRACE_STATS(INFO_HIGH,
7111 			       "Packets on ring %u:", ring);
7112 		for (core = 0; core < NR_CPUS; core++) {
7113 			DP_TRACE_STATS(INFO_HIGH,
7114 				       "Packets arriving on core %u: %llu",
7115 				       core,
7116 				       soc->stats.rx.ring_packets[core][ring]);
7117 			total_packets += soc->stats.rx.ring_packets[core][ring];
7118 		}
7119 		DP_TRACE_STATS(INFO_HIGH,
7120 			       "Total packets on ring %u: %llu",
7121 			       ring, total_packets);
7122 	}
7123 }
7124 
7125 /*
7126  * dp_txrx_path_stats() - Function to dump Tx/Rx path statistics
7127  * @soc - soc handle
7128  *
7129  * return: none
7130  */
7131 static void dp_txrx_path_stats(struct dp_soc *soc)
7132 {
7133 	uint8_t error_code;
7134 	uint8_t loop_pdev;
7135 	struct dp_pdev *pdev;
7136 	uint8_t i;
7137 
7138 	if (!soc) {
7139 		DP_TRACE(ERROR, "%s: Invalid access",
7140 			 __func__);
7141 		return;
7142 	}
7143 
7144 	for (loop_pdev = 0; loop_pdev < soc->pdev_count; loop_pdev++) {
7145 
7146 		pdev = soc->pdev_list[loop_pdev];
7147 		dp_aggregate_pdev_stats(pdev);
7148 		DP_TRACE_STATS(INFO_HIGH, "Tx path Statistics:");
7149 		DP_TRACE_STATS(INFO_HIGH, "from stack: %u msdus (%llu bytes)",
7150 			       pdev->stats.tx_i.rcvd.num,
7151 			       pdev->stats.tx_i.rcvd.bytes);
7152 		DP_TRACE_STATS(INFO_HIGH,
7153 			       "processed from host: %u msdus (%llu bytes)",
7154 			       pdev->stats.tx_i.processed.num,
7155 			       pdev->stats.tx_i.processed.bytes);
7156 		DP_TRACE_STATS(INFO_HIGH,
7157 			       "successfully transmitted: %u msdus (%llu bytes)",
7158 			       pdev->stats.tx.tx_success.num,
7159 			       pdev->stats.tx.tx_success.bytes);
7160 
7161 		DP_TRACE_STATS(INFO_HIGH, "Dropped in host:");
7162 		DP_TRACE_STATS(INFO_HIGH, "Total packets dropped: %u,",
7163 			       pdev->stats.tx_i.dropped.dropped_pkt.num);
7164 		DP_TRACE_STATS(INFO_HIGH, "Descriptor not available: %u",
7165 			       pdev->stats.tx_i.dropped.desc_na.num);
7166 		DP_TRACE_STATS(INFO_HIGH, "Ring full: %u",
7167 			       pdev->stats.tx_i.dropped.ring_full);
7168 		DP_TRACE_STATS(INFO_HIGH, "Enqueue fail: %u",
7169 			       pdev->stats.tx_i.dropped.enqueue_fail);
7170 		DP_TRACE_STATS(INFO_HIGH, "DMA Error: %u",
7171 			       pdev->stats.tx_i.dropped.dma_error);
7172 
7173 		DP_TRACE_STATS(INFO_HIGH, "Dropped in hardware:");
7174 		DP_TRACE_STATS(INFO_HIGH, "total packets dropped: %u",
7175 			       pdev->stats.tx.tx_failed);
7176 		DP_TRACE_STATS(INFO_HIGH, "mpdu age out: %u",
7177 			       pdev->stats.tx.dropped.age_out);
7178 		DP_TRACE_STATS(INFO_HIGH, "firmware removed: %u",
7179 			       pdev->stats.tx.dropped.fw_rem);
7180 		DP_TRACE_STATS(INFO_HIGH, "firmware removed tx: %u",
7181 			       pdev->stats.tx.dropped.fw_rem_tx);
7182 		DP_TRACE_STATS(INFO_HIGH, "firmware removed notx: %u",
7183 			       pdev->stats.tx.dropped.fw_rem_notx);
7184 		DP_TRACE_STATS(INFO_HIGH, "peer_invalid: %u",
7185 			       pdev->soc->stats.tx.tx_invalid_peer.num);
7186 
7187 		DP_TRACE_STATS(INFO_HIGH, "Tx packets sent per interrupt:");
7188 		DP_TRACE_STATS(INFO_HIGH, "Single Packet: %u",
7189 			       pdev->stats.tx_comp_histogram.pkts_1);
7190 		DP_TRACE_STATS(INFO_HIGH, "2-20 Packets:  %u",
7191 			       pdev->stats.tx_comp_histogram.pkts_2_20);
7192 		DP_TRACE_STATS(INFO_HIGH, "21-40 Packets: %u",
7193 			       pdev->stats.tx_comp_histogram.pkts_21_40);
7194 		DP_TRACE_STATS(INFO_HIGH, "41-60 Packets: %u",
7195 			       pdev->stats.tx_comp_histogram.pkts_41_60);
7196 		DP_TRACE_STATS(INFO_HIGH, "61-80 Packets: %u",
7197 			       pdev->stats.tx_comp_histogram.pkts_61_80);
7198 		DP_TRACE_STATS(INFO_HIGH, "81-100 Packets: %u",
7199 			       pdev->stats.tx_comp_histogram.pkts_81_100);
7200 		DP_TRACE_STATS(INFO_HIGH, "101-200 Packets: %u",
7201 			       pdev->stats.tx_comp_histogram.pkts_101_200);
7202 		DP_TRACE_STATS(INFO_HIGH, "   201+ Packets: %u",
7203 			       pdev->stats.tx_comp_histogram.pkts_201_plus);
7204 
7205 		DP_TRACE_STATS(INFO_HIGH, "Rx path statistics");
7206 
7207 		DP_TRACE_STATS(INFO_HIGH,
7208 			       "delivered %u msdus ( %llu bytes),",
7209 			       pdev->stats.rx.to_stack.num,
7210 			       pdev->stats.rx.to_stack.bytes);
7211 		for (i = 0; i <  CDP_MAX_RX_RINGS; i++)
7212 			DP_TRACE_STATS(INFO_HIGH,
7213 				       "received on reo[%d] %u msdus( %llu bytes),",
7214 				       i, pdev->stats.rx.rcvd_reo[i].num,
7215 				       pdev->stats.rx.rcvd_reo[i].bytes);
7216 		DP_TRACE_STATS(INFO_HIGH,
7217 			       "intra-bss packets %u msdus ( %llu bytes),",
7218 			       pdev->stats.rx.intra_bss.pkts.num,
7219 			       pdev->stats.rx.intra_bss.pkts.bytes);
7220 		DP_TRACE_STATS(INFO_HIGH,
7221 			       "intra-bss fails %u msdus ( %llu bytes),",
7222 			       pdev->stats.rx.intra_bss.fail.num,
7223 			       pdev->stats.rx.intra_bss.fail.bytes);
7224 		DP_TRACE_STATS(INFO_HIGH,
7225 			       "raw packets %u msdus ( %llu bytes),",
7226 			       pdev->stats.rx.raw.num,
7227 			       pdev->stats.rx.raw.bytes);
7228 		DP_TRACE_STATS(INFO_HIGH, "dropped: error %u msdus",
7229 			       pdev->stats.rx.err.mic_err);
7230 		DP_TRACE_STATS(INFO_HIGH, "peer invalid %u",
7231 			       pdev->soc->stats.rx.err.rx_invalid_peer.num);
7232 
7233 		DP_TRACE_STATS(INFO_HIGH, "Reo Statistics");
7234 		DP_TRACE_STATS(INFO_HIGH, "rbm error: %u msdus",
7235 			       pdev->soc->stats.rx.err.invalid_rbm);
7236 		DP_TRACE_STATS(INFO_HIGH, "hal ring access fail: %u msdus",
7237 			       pdev->soc->stats.rx.err.hal_ring_access_fail);
7238 
7239 		for (error_code = 0; error_code < HAL_REO_ERR_MAX;
7240 				error_code++) {
7241 			if (!pdev->soc->stats.rx.err.reo_error[error_code])
7242 				continue;
7243 			DP_TRACE_STATS(INFO_HIGH,
7244 				       "Reo error number (%u): %u msdus",
7245 				       error_code,
7246 				       pdev->soc->stats.rx.err
7247 				       .reo_error[error_code]);
7248 		}
7249 
7250 		for (error_code = 0; error_code < HAL_RXDMA_ERR_MAX;
7251 				error_code++) {
7252 			if (!pdev->soc->stats.rx.err.rxdma_error[error_code])
7253 				continue;
7254 			DP_TRACE_STATS(INFO_HIGH,
7255 				       "Rxdma error number (%u): %u msdus",
7256 				       error_code,
7257 				       pdev->soc->stats.rx.err
7258 				       .rxdma_error[error_code]);
7259 		}
7260 
7261 		DP_TRACE_STATS(INFO_HIGH, "Rx packets reaped per interrupt:");
7262 		DP_TRACE_STATS(INFO_HIGH, "Single Packet: %u",
7263 			       pdev->stats.rx_ind_histogram.pkts_1);
7264 		DP_TRACE_STATS(INFO_HIGH, "2-20 Packets:  %u",
7265 			       pdev->stats.rx_ind_histogram.pkts_2_20);
7266 		DP_TRACE_STATS(INFO_HIGH, "21-40 Packets: %u",
7267 			       pdev->stats.rx_ind_histogram.pkts_21_40);
7268 		DP_TRACE_STATS(INFO_HIGH, "41-60 Packets: %u",
7269 			       pdev->stats.rx_ind_histogram.pkts_41_60);
7270 		DP_TRACE_STATS(INFO_HIGH, "61-80 Packets: %u",
7271 			       pdev->stats.rx_ind_histogram.pkts_61_80);
7272 		DP_TRACE_STATS(INFO_HIGH, "81-100 Packets: %u",
7273 			       pdev->stats.rx_ind_histogram.pkts_81_100);
7274 		DP_TRACE_STATS(INFO_HIGH, "101-200 Packets: %u",
7275 			       pdev->stats.rx_ind_histogram.pkts_101_200);
7276 		DP_TRACE_STATS(INFO_HIGH, "   201+ Packets: %u",
7277 			       pdev->stats.rx_ind_histogram.pkts_201_plus);
7278 
7279 		DP_TRACE_STATS(INFO_HIGH, "%s: tso_enable: %u lro_enable: %u rx_hash: %u napi_enable: %u",
7280 			       __func__,
7281 			       pdev->soc->wlan_cfg_ctx
7282 			       ->tso_enabled,
7283 			       pdev->soc->wlan_cfg_ctx
7284 			       ->lro_enabled,
7285 			       pdev->soc->wlan_cfg_ctx
7286 			       ->rx_hash,
7287 			       pdev->soc->wlan_cfg_ctx
7288 			       ->napi_enabled);
7289 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
7290 		DP_TRACE_STATS(INFO_HIGH, "%s: Tx flow stop queue: %u tx flow start queue offset: %u",
7291 			       __func__,
7292 			       pdev->soc->wlan_cfg_ctx
7293 			       ->tx_flow_stop_queue_threshold,
7294 			       pdev->soc->wlan_cfg_ctx
7295 			       ->tx_flow_start_queue_offset);
7296 #endif
7297 	}
7298 }
7299 
7300 /*
7301  * dp_txrx_dump_stats() - Dump statistics
7302  * @psoc - soc handle
 * @value - statistics option
 * @level - verbosity level
7303  */
7304 static QDF_STATUS dp_txrx_dump_stats(void *psoc, uint16_t value,
7305 				     enum qdf_stats_verbosity_level level)
7306 {
7307 	struct dp_soc *soc =
7308 		(struct dp_soc *)psoc;
7309 	QDF_STATUS status = QDF_STATUS_SUCCESS;
7310 
7311 	if (!soc) {
7312 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
7313 			"%s: soc is NULL", __func__);
7314 		return QDF_STATUS_E_INVAL;
7315 	}
7316 
7317 	switch (value) {
7318 	case CDP_TXRX_PATH_STATS:
7319 		dp_txrx_path_stats(soc);
7320 		break;
7321 
7322 	case CDP_RX_RING_STATS:
7323 		dp_print_per_ring_stats(soc);
7324 		break;
7325 
7326 	case CDP_TXRX_TSO_STATS:
7327 		/* TODO: NOT IMPLEMENTED */
7328 		break;
7329 
7330 	case CDP_DUMP_TX_FLOW_POOL_INFO:
7331 		cdp_dump_flow_pool_info((struct cdp_soc_t *)soc);
7332 		break;
7333 
7334 	case CDP_DP_NAPI_STATS:
7335 		dp_print_napi_stats(soc);
7336 		break;
7337 
7338 	case CDP_TXRX_DESC_STATS:
7339 		/* TODO: NOT IMPLEMENTED */
7340 		break;
7341 
7342 	default:
7343 		status = QDF_STATUS_E_INVAL;
7344 		break;
7345 	}
7346 
7347 	return status;
7348 
7349 }
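
/*
 * Illustrative sketch (editorial example, not part of the driver): the
 * dispatcher above is typically reached with one of the CDP_* options,
 * e.g. (the verbosity enum value shown is only an assumed member name):
 *
 *	dp_txrx_dump_stats(soc, CDP_TXRX_PATH_STATS,
 *			   QDF_STATS_VERBOSITY_LEVEL_HIGH);
 */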
7350 
7351 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
7352 /**
7353  * dp_update_flow_control_parameters() - store datapath tx flow control
7354  *                            config parameters
7355  * @soc: soc handle
7356  * @params: datapath config parameters
7357  *
7358  * Return: void
7359  */
7360 static inline
7361 void dp_update_flow_control_parameters(struct dp_soc *soc,
7362 				struct cdp_config_params *params)
7363 {
7364 	soc->wlan_cfg_ctx->tx_flow_stop_queue_threshold =
7365 					params->tx_flow_stop_queue_threshold;
7366 	soc->wlan_cfg_ctx->tx_flow_start_queue_offset =
7367 					params->tx_flow_start_queue_offset;
7368 }
7369 #else
7370 static inline
7371 void dp_update_flow_control_parameters(struct dp_soc *soc,
7372 				struct cdp_config_params *params)
7373 {
7374 }
7375 #endif
7376 
7377 /**
7378  * dp_update_config_parameters() - API to store datapath
7379  *                            config parameters
7380  * @psoc: soc handle
7381  * @params: datapath config parameters
7382  *
7383  * Return: status
7384  */
7385 static
7386 QDF_STATUS dp_update_config_parameters(struct cdp_soc *psoc,
7387 				struct cdp_config_params *params)
7388 {
7389 	struct dp_soc *soc = (struct dp_soc *)psoc;
7390 
7391 	if (!(soc)) {
7392 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
7393 				"%s: Invalid handle", __func__);
7394 		return QDF_STATUS_E_INVAL;
7395 	}
7396 
7397 	soc->wlan_cfg_ctx->tso_enabled = params->tso_enable;
7398 	soc->wlan_cfg_ctx->lro_enabled = params->lro_enable;
7399 	soc->wlan_cfg_ctx->rx_hash = params->flow_steering_enable;
7400 	soc->wlan_cfg_ctx->tcp_udp_checksumoffload =
7401 				params->tcp_udp_checksumoffload;
7402 	soc->wlan_cfg_ctx->napi_enabled = params->napi_enable;
7403 	dp_update_flow_control_parameters(soc, params);
7404 
7405 	return QDF_STATUS_SUCCESS;
7406 }
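
/*
 * Illustrative sketch (editorial example, not part of the driver): the
 * ini-derived values are packed into a cdp_config_params block and pushed
 * through this op; "psoc" is the caller's opaque cdp soc handle.
 *
 *	struct cdp_config_params params = {0};
 *
 *	params.tso_enable = true;
 *	params.lro_enable = true;
 *	params.flow_steering_enable = true;
 *	params.napi_enable = true;
 *	dp_update_config_parameters(psoc, &params);
 */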
7407 
7408 /**
7409  * dp_txrx_set_wds_rx_policy() - API to set the WDS rx policy for a vdev
7410  * @vdev_handle - datapath vdev handle
7411  * @val - WDS rx policy flags (4-address unicast/multicast bits)
7412  *
7413  * Return: void
7415  */
7416 #ifdef WDS_VENDOR_EXTENSION
7417 void
7418 dp_txrx_set_wds_rx_policy(
7419 		struct cdp_vdev *vdev_handle,
7420 		u_int32_t val)
7421 {
7422 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
7423 	struct dp_peer *peer;
7424 	if (vdev->opmode == wlan_op_mode_ap) {
7425 		/* for ap, set it on bss_peer */
7426 		TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
7427 			if (peer->bss_peer) {
7428 				peer->wds_ecm.wds_rx_filter = 1;
7429 				peer->wds_ecm.wds_rx_ucast_4addr = (val & WDS_POLICY_RX_UCAST_4ADDR) ? 1:0;
7430 				peer->wds_ecm.wds_rx_mcast_4addr = (val & WDS_POLICY_RX_MCAST_4ADDR) ? 1:0;
7431 				break;
7432 			}
7433 		}
7434 	} else if (vdev->opmode == wlan_op_mode_sta) {
7435 		peer = TAILQ_FIRST(&vdev->peer_list);
		/* guard against an empty peer list on the STA vdev */
		if (!peer)
			return;
7436 		peer->wds_ecm.wds_rx_filter = 1;
7437 		peer->wds_ecm.wds_rx_ucast_4addr = (val & WDS_POLICY_RX_UCAST_4ADDR) ? 1:0;
7438 		peer->wds_ecm.wds_rx_mcast_4addr = (val & WDS_POLICY_RX_MCAST_4ADDR) ? 1:0;
7439 	}
7440 }
7441 
7442 /**
7443  * dp_txrx_peer_wds_tx_policy_update() - API to set tx wds policy
7444  *
7445  * @peer_handle - datapath peer handle
7446  * @wds_tx_ucast: policy for unicast transmission
7447  * @wds_tx_mcast: policy for multicast transmission
7448  *
7449  * Return: void
7450  */
7451 void
7452 dp_txrx_peer_wds_tx_policy_update(struct cdp_peer *peer_handle,
7453 		int wds_tx_ucast, int wds_tx_mcast)
7454 {
7455 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
7456 	if (wds_tx_ucast || wds_tx_mcast) {
7457 		peer->wds_enabled = 1;
7458 		peer->wds_ecm.wds_tx_ucast_4addr = wds_tx_ucast;
7459 		peer->wds_ecm.wds_tx_mcast_4addr = wds_tx_mcast;
7460 	} else {
7461 		peer->wds_enabled = 0;
7462 		peer->wds_ecm.wds_tx_ucast_4addr = 0;
7463 		peer->wds_ecm.wds_tx_mcast_4addr = 0;
7464 	}
7465 
7466 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
7467 			FL("Policy Update set to: peer->wds_enabled %d "
7468 			   "peer->wds_ecm.wds_tx_ucast_4addr %d "
7469 			   "peer->wds_ecm.wds_tx_mcast_4addr %d"),
7471 				peer->wds_enabled, peer->wds_ecm.wds_tx_ucast_4addr,
7472 				peer->wds_ecm.wds_tx_mcast_4addr);
7473 	return;
7474 }
7475 #endif
7476 
7477 static struct cdp_wds_ops dp_ops_wds = {
7478 	.vdev_set_wds = dp_vdev_set_wds,
7479 #ifdef WDS_VENDOR_EXTENSION
7480 	.txrx_set_wds_rx_policy = dp_txrx_set_wds_rx_policy,
7481 	.txrx_wds_peer_tx_policy_update = dp_txrx_peer_wds_tx_policy_update,
7482 #endif
7483 };
7484 
7485 /*
7486  * dp_txrx_data_tx_cb_set(): set the callback for non standard tx
7487  * @vdev_handle - datapath vdev handle
7488  * @callback - callback function
7489  * @ctxt: callback context
7490  *
7491  */
7492 static void
7493 dp_txrx_data_tx_cb_set(struct cdp_vdev *vdev_handle,
7494 		       ol_txrx_data_tx_cb callback, void *ctxt)
7495 {
7496 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
7497 
7498 	vdev->tx_non_std_data_callback.func = callback;
7499 	vdev->tx_non_std_data_callback.ctxt = ctxt;
7500 }
7501 
7502 /**
7503  * dp_pdev_get_dp_txrx_handle() - get dp handle from pdev
7504  * @pdev_hdl: datapath pdev handle
7505  *
7506  * Return: opaque pointer to dp txrx handle
7507  */
7508 static void *dp_pdev_get_dp_txrx_handle(struct cdp_pdev *pdev_hdl)
7509 {
7510 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_hdl;
7511 
7512 	return pdev->dp_txrx_handle;
7513 }
7514 
7515 /**
7516  * dp_pdev_set_dp_txrx_handle() - set dp handle in pdev
7517  * @pdev_hdl: datapath pdev handle
7518  * @dp_txrx_hdl: opaque pointer for dp_txrx_handle
7519  *
7520  * Return: void
7521  */
7522 static void
7523 dp_pdev_set_dp_txrx_handle(struct cdp_pdev *pdev_hdl, void *dp_txrx_hdl)
7524 {
7525 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_hdl;
7526 
7527 	pdev->dp_txrx_handle = dp_txrx_hdl;
7528 }
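
/*
 * Illustrative sketch (editorial example, not part of the driver): these
 * accessors only stash and return an opaque pointer, so an upper layer can
 * attach its own per-pdev context; "my_ctx" is a hypothetical pointer.
 *
 *	dp_pdev_set_dp_txrx_handle(pdev_hdl, my_ctx);
 *	...
 *	void *ctx = dp_pdev_get_dp_txrx_handle(pdev_hdl);
 */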
7529 
7530 /**
7531  * dp_soc_get_dp_txrx_handle() - get context for external-dp from dp soc
7532  * @soc_handle: datapath soc handle
7533  *
7534  * Return: opaque pointer to external dp (non-core DP)
7535  */
7536 static void *dp_soc_get_dp_txrx_handle(struct cdp_soc *soc_handle)
7537 {
7538 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
7539 
7540 	return soc->external_txrx_handle;
7541 }
7542 
7543 /**
7544  * dp_soc_set_dp_txrx_handle() - set external dp handle in soc
7545  * @soc_handle: datapath soc handle
7546  * @txrx_handle: opaque pointer to external dp (non-core DP)
7547  *
7548  * Return: void
7549  */
7550 static void
7551 dp_soc_set_dp_txrx_handle(struct cdp_soc *soc_handle, void *txrx_handle)
7552 {
7553 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
7554 
7555 	soc->external_txrx_handle = txrx_handle;
7556 }
7557 
7558 #ifdef FEATURE_AST
7559 static void dp_peer_teardown_wifi3(struct cdp_vdev *vdev_hdl, void *peer_hdl)
7560 {
7561 	struct dp_vdev *vdev = (struct dp_vdev *) vdev_hdl;
7562 	struct dp_peer *peer = (struct dp_peer *) peer_hdl;
7563 	struct dp_soc *soc = (struct dp_soc *) vdev->pdev->soc;
7564 
7565 	/*
7566 	 * For a BSS peer, a new peer is not created on alloc_node if a
7567 	 * peer with the same address already exists; instead the refcnt of
7568 	 * the existing peer is increased. Correspondingly, in the delete
7569 	 * path only the refcnt is decreased and the peer is deleted only
7570 	 * when all references are released. So delete_in_progress should
7571 	 * not be set for the bss_peer unless only 2 references remain
7572 	 * (peer map reference and peer hash table reference).
7573 	 */
7574 	if (peer->bss_peer && (qdf_atomic_read(&peer->ref_cnt) > 2)) {
7575 		return;
7576 	}
7577 
7578 	peer->delete_in_progress = true;
7579 	dp_peer_delete_ast_entries(soc, peer);
7580 }
7581 #endif
7582 
7583 #ifdef ATH_SUPPORT_NAC_RSSI
7584 /**
7585  * dp_vdev_get_neighbour_rssi(): Get the stored RSSI for a configured NAC
7586  * @vdev_hdl: DP vdev handle
7587  * @mac_addr: neighbour peer mac address
7588  * @rssi: buffer to return the rssi value
7589  *
 * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_FAILURE otherwise
7590  */
7591 QDF_STATUS  dp_vdev_get_neighbour_rssi(struct cdp_vdev *vdev_hdl,
7592 				       char *mac_addr,
7593 				       uint8_t *rssi)
7594 {
7595 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_hdl;
7596 	struct dp_pdev *pdev = vdev->pdev;
7597 	struct dp_neighbour_peer *peer = NULL;
7598 	QDF_STATUS status = QDF_STATUS_E_FAILURE;
7599 
7600 	*rssi = 0;
7601 	qdf_spin_lock_bh(&pdev->neighbour_peer_mutex);
7602 	TAILQ_FOREACH(peer, &pdev->neighbour_peers_list,
7603 		      neighbour_peer_list_elem) {
7604 		if (qdf_mem_cmp(&peer->neighbour_peers_macaddr.raw[0],
7605 				mac_addr, DP_MAC_ADDR_LEN) == 0) {
7606 			*rssi = peer->rssi;
7607 			status = QDF_STATUS_SUCCESS;
7608 			break;
7609 		}
7610 	}
7611 	qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex);
7612 	return status;
7613 }
7614 
7615 static QDF_STATUS dp_config_for_nac_rssi(struct cdp_vdev *vdev_handle,
7616 		enum cdp_nac_param_cmd cmd, char *bssid, char *client_macaddr,
7617 		uint8_t chan_num)
7618 {
7619 
7620 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
7621 	struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;
7622 	struct dp_soc *soc = (struct dp_soc *) vdev->pdev->soc;
7623 
7624 	pdev->nac_rssi_filtering = 1;
7625 	/* Store address of NAC (neighbour peer) which will be checked
7626 	 * against TA of received packets.
7627 	 */
7628 
7629 	if (cmd == CDP_NAC_PARAM_ADD) {
7630 		dp_update_filter_neighbour_peers(vdev_handle, DP_NAC_PARAM_ADD,
7631 						 client_macaddr);
7632 	} else if (cmd == CDP_NAC_PARAM_DEL) {
7633 		dp_update_filter_neighbour_peers(vdev_handle,
7634 						 DP_NAC_PARAM_DEL,
7635 						 client_macaddr);
7636 	}
7637 
7638 	if (soc->cdp_soc.ol_ops->config_bssid_in_fw_for_nac_rssi)
7639 		soc->cdp_soc.ol_ops->config_bssid_in_fw_for_nac_rssi
7640 			((void *)vdev->pdev->ctrl_pdev,
7641 			 vdev->vdev_id, cmd, bssid);
7642 
7643 	return QDF_STATUS_SUCCESS;
7644 }
7645 #endif
7646 
7647 static QDF_STATUS dp_peer_map_attach_wifi3(struct cdp_soc_t  *soc_hdl,
7648 		uint32_t max_peers)
7649 {
7650 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
7651 
7652 	soc->max_peers = max_peers;
7653 
7654 	qdf_print("%s max_peers %u\n", __func__, max_peers);
7655 
7656 	if (dp_peer_find_attach(soc))
7657 		return QDF_STATUS_E_FAILURE;
7658 
7659 	return QDF_STATUS_SUCCESS;
7660 }
7661 
7662 /**
7663  * dp_pdev_set_ctrl_pdev() - set ctrl pdev handle in dp pdev
7664  * @dp_pdev: dp pdev handle
7665  * @ctrl_pdev: UMAC ctrl pdev handle
7666  *
7667  * Return: void
7668  */
7669 static void dp_pdev_set_ctrl_pdev(struct cdp_pdev *dp_pdev,
7670 				  struct cdp_ctrl_objmgr_pdev *ctrl_pdev)
7671 {
7672 	struct dp_pdev *pdev = (struct dp_pdev *)dp_pdev;
7673 
7674 	pdev->ctrl_pdev = ctrl_pdev;
7675 }
7676 
7677 static struct cdp_cmn_ops dp_ops_cmn = {
7678 	.txrx_soc_attach_target = dp_soc_attach_target_wifi3,
7679 	.txrx_vdev_attach = dp_vdev_attach_wifi3,
7680 	.txrx_vdev_detach = dp_vdev_detach_wifi3,
7681 	.txrx_pdev_attach = dp_pdev_attach_wifi3,
7682 	.txrx_pdev_detach = dp_pdev_detach_wifi3,
7683 	.txrx_peer_create = dp_peer_create_wifi3,
7684 	.txrx_peer_setup = dp_peer_setup_wifi3,
7685 #ifdef FEATURE_AST
7686 	.txrx_peer_teardown = dp_peer_teardown_wifi3,
7687 #else
7688 	.txrx_peer_teardown = NULL,
7689 #endif
7690 	.txrx_peer_add_ast = dp_peer_add_ast_wifi3,
7691 	.txrx_peer_del_ast = dp_peer_del_ast_wifi3,
7692 	.txrx_peer_update_ast = dp_peer_update_ast_wifi3,
7693 	.txrx_peer_ast_hash_find = dp_peer_ast_hash_find_wifi3,
7694 	.txrx_peer_ast_get_pdev_id = dp_peer_ast_get_pdev_id_wifi3,
7695 	.txrx_peer_ast_get_next_hop = dp_peer_ast_get_next_hop_wifi3,
7696 	.txrx_peer_ast_set_type = dp_peer_ast_set_type_wifi3,
7697 	.txrx_peer_ast_get_type = dp_peer_ast_get_type_wifi3,
7698 	.txrx_peer_delete = dp_peer_delete_wifi3,
7699 	.txrx_vdev_register = dp_vdev_register_wifi3,
7700 	.txrx_soc_detach = dp_soc_detach_wifi3,
7701 	.txrx_get_vdev_mac_addr = dp_get_vdev_mac_addr_wifi3,
7702 	.txrx_get_vdev_from_vdev_id = dp_get_vdev_from_vdev_id_wifi3,
7703 	.txrx_get_ctrl_pdev_from_vdev = dp_get_ctrl_pdev_from_vdev_wifi3,
7704 	.txrx_ath_getstats = dp_get_device_stats,
7705 	.addba_requestprocess = dp_addba_requestprocess_wifi3,
7706 	.addba_responsesetup = dp_addba_responsesetup_wifi3,
7707 	.addba_resp_tx_completion = dp_addba_resp_tx_completion_wifi3,
7708 	.delba_process = dp_delba_process_wifi3,
7709 	.set_addba_response = dp_set_addba_response,
7710 	.get_peer_mac_addr_frm_id = dp_get_peer_mac_addr_frm_id,
7711 	.flush_cache_rx_queue = NULL,
7712 	/* TODO: get API's for dscp-tid need to be added*/
7713 	.set_vdev_dscp_tid_map = dp_set_vdev_dscp_tid_map_wifi3,
7714 	.set_pdev_dscp_tid_map = dp_set_pdev_dscp_tid_map_wifi3,
7715 	.txrx_stats_request = dp_txrx_stats_request,
7716 	.txrx_set_monitor_mode = dp_vdev_set_monitor_mode,
7717 	.txrx_get_pdev_id_frm_pdev = dp_get_pdev_id_frm_pdev,
7718 	.txrx_pdev_set_chan_noise_floor = dp_pdev_set_chan_noise_floor,
7719 	.txrx_set_nac = dp_set_nac,
7720 	.txrx_get_tx_pending = dp_get_tx_pending,
7721 	.txrx_set_pdev_tx_capture = dp_config_debug_sniffer,
7722 	.txrx_get_peer_mac_from_peer_id = dp_get_peer_mac_from_peer_id,
7723 	.display_stats = dp_txrx_dump_stats,
7724 	.txrx_soc_set_nss_cfg = dp_soc_set_nss_cfg_wifi3,
7725 	.txrx_soc_get_nss_cfg = dp_soc_get_nss_cfg_wifi3,
7726 	.txrx_intr_attach = dp_soc_interrupt_attach_wrapper,
7727 	.txrx_intr_detach = dp_soc_interrupt_detach,
7728 	.set_pn_check = dp_set_pn_check_wifi3,
7729 	.update_config_parameters = dp_update_config_parameters,
7730 	/* TODO: Add other functions */
7731 	.txrx_data_tx_cb_set = dp_txrx_data_tx_cb_set,
7732 	.get_dp_txrx_handle = dp_pdev_get_dp_txrx_handle,
7733 	.set_dp_txrx_handle = dp_pdev_set_dp_txrx_handle,
7734 	.get_soc_dp_txrx_handle = dp_soc_get_dp_txrx_handle,
7735 	.set_soc_dp_txrx_handle = dp_soc_set_dp_txrx_handle,
7736 	.txrx_set_ba_aging_timeout = dp_set_ba_aging_timeout,
7737 	.txrx_get_ba_aging_timeout = dp_get_ba_aging_timeout,
7738 	.tx_send = dp_tx_send,
7739 	.txrx_peer_reset_ast = dp_wds_reset_ast_wifi3,
7740 	.txrx_peer_reset_ast_table = dp_wds_reset_ast_table_wifi3,
7741 	.txrx_peer_flush_ast_table = dp_wds_flush_ast_table_wifi3,
7742 	.txrx_peer_map_attach = dp_peer_map_attach_wifi3,
7743 	.txrx_pdev_set_ctrl_pdev = dp_pdev_set_ctrl_pdev,
7744 	.txrx_get_os_rx_handles_from_vdev =
7745 					dp_get_os_rx_handles_from_vdev_wifi3,
7746 	.delba_tx_completion = dp_delba_tx_completion_wifi3,
7747 };
7748 
7749 static struct cdp_ctrl_ops dp_ops_ctrl = {
7750 	.txrx_peer_authorize = dp_peer_authorize,
7751 #ifdef QCA_SUPPORT_SON
7752 	.txrx_set_inact_params = dp_set_inact_params,
7753 	.txrx_start_inact_timer = dp_start_inact_timer,
7754 	.txrx_set_overload = dp_set_overload,
7755 	.txrx_peer_is_inact = dp_peer_is_inact,
7756 	.txrx_mark_peer_inact = dp_mark_peer_inact,
7757 #endif
7758 	.txrx_set_vdev_rx_decap_type = dp_set_vdev_rx_decap_type,
7759 	.txrx_set_tx_encap_type = dp_set_vdev_tx_encap_type,
7760 #ifdef MESH_MODE_SUPPORT
7761 	.txrx_set_mesh_mode  = dp_peer_set_mesh_mode,
7762 	.txrx_set_mesh_rx_filter = dp_peer_set_mesh_rx_filter,
7763 #endif
7764 	.txrx_set_vdev_param = dp_set_vdev_param,
7765 	.txrx_peer_set_nawds = dp_peer_set_nawds,
7766 	.txrx_set_pdev_reo_dest = dp_set_pdev_reo_dest,
7767 	.txrx_get_pdev_reo_dest = dp_get_pdev_reo_dest,
7768 	.txrx_set_filter_neighbour_peers = dp_set_filter_neighbour_peers,
7769 	.txrx_update_filter_neighbour_peers =
7770 		dp_update_filter_neighbour_peers,
7771 	.txrx_get_sec_type = dp_get_sec_type,
7772 	/* TODO: Add other functions */
7773 	.txrx_wdi_event_sub = dp_wdi_event_sub,
7774 	.txrx_wdi_event_unsub = dp_wdi_event_unsub,
7775 #ifdef WDI_EVENT_ENABLE
7776 	.txrx_get_pldev = dp_get_pldev,
7777 #endif
7778 	.txrx_set_pdev_param = dp_set_pdev_param,
7779 #ifdef ATH_SUPPORT_NAC_RSSI
7780 	.txrx_vdev_config_for_nac_rssi = dp_config_for_nac_rssi,
7781 	.txrx_vdev_get_neighbour_rssi = dp_vdev_get_neighbour_rssi,
7782 #endif
7783 	.set_key = dp_set_michael_key,
7784 };
7785 
7786 static struct cdp_me_ops dp_ops_me = {
7787 #ifdef ATH_SUPPORT_IQUE
7788 	.tx_me_alloc_descriptor = dp_tx_me_alloc_descriptor,
7789 	.tx_me_free_descriptor = dp_tx_me_free_descriptor,
7790 	.tx_me_convert_ucast = dp_tx_me_send_convert_ucast,
7791 #endif
7792 };
7793 
7794 static struct cdp_mon_ops dp_ops_mon = {
7795 	.txrx_monitor_set_filter_ucast_data = NULL,
7796 	.txrx_monitor_set_filter_mcast_data = NULL,
7797 	.txrx_monitor_set_filter_non_data = NULL,
7798 	.txrx_monitor_get_filter_ucast_data = dp_vdev_get_filter_ucast_data,
7799 	.txrx_monitor_get_filter_mcast_data = dp_vdev_get_filter_mcast_data,
7800 	.txrx_monitor_get_filter_non_data = dp_vdev_get_filter_non_data,
7801 	.txrx_reset_monitor_mode = dp_reset_monitor_mode,
7802 	/* Added support for HK advance filter */
7803 	.txrx_set_advance_monitor_filter = dp_pdev_set_advance_monitor_filter,
7804 };
7805 
7806 static struct cdp_host_stats_ops dp_ops_host_stats = {
7807 	.txrx_per_peer_stats = dp_get_host_peer_stats,
7808 	.get_fw_peer_stats = dp_get_fw_peer_stats,
7809 	.get_htt_stats = dp_get_htt_stats,
7810 	.txrx_enable_enhanced_stats = dp_enable_enhanced_stats,
7811 	.txrx_disable_enhanced_stats = dp_disable_enhanced_stats,
7812 	.txrx_stats_publish = dp_txrx_stats_publish,
7813 	.txrx_get_vdev_stats  = dp_txrx_get_vdev_stats,
7814 	.txrx_get_peer_stats = dp_txrx_get_peer_stats,
7815 	.txrx_reset_peer_stats = dp_txrx_reset_peer_stats,
7816 	/* TODO */
7817 };
7818 
7819 static struct cdp_raw_ops dp_ops_raw = {
7820 	/* TODO */
7821 };
7822 
7823 #ifdef CONFIG_WIN
7824 static struct cdp_pflow_ops dp_ops_pflow = {
7825 	/* TODO */
7826 };
7827 #endif /* CONFIG_WIN */
7828 
7829 #ifdef FEATURE_RUNTIME_PM
7830 /**
7831  * dp_runtime_suspend() - ensure DP is ready to runtime suspend
7832  * @opaque_pdev: DP pdev context
7833  *
7834  * DP is ready to runtime suspend if there are no pending TX packets.
7835  *
7836  * Return: QDF_STATUS
7837  */
7838 static QDF_STATUS dp_runtime_suspend(struct cdp_pdev *opaque_pdev)
7839 {
7840 	struct dp_pdev *pdev = (struct dp_pdev *)opaque_pdev;
7841 	struct dp_soc *soc = pdev->soc;
7842 
7843 	/* Abort if there are any pending TX packets */
7844 	if (dp_get_tx_pending(opaque_pdev) > 0) {
7845 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
7846 			  FL("Abort suspend due to pending TX packets"));
7847 		return QDF_STATUS_E_AGAIN;
7848 	}
7849 
7850 	if (soc->intr_mode == DP_INTR_POLL)
7851 		qdf_timer_stop(&soc->int_timer);
7852 
7853 	return QDF_STATUS_SUCCESS;
7854 }
7855 
7856 /**
7857  * dp_runtime_resume() - ensure DP is ready to runtime resume
7858  * @opaque_pdev: DP pdev context
7859  *
7860  * Resume DP for runtime PM.
7861  *
7862  * Return: QDF_STATUS
7863  */
7864 static QDF_STATUS dp_runtime_resume(struct cdp_pdev *opaque_pdev)
7865 {
7866 	struct dp_pdev *pdev = (struct dp_pdev *)opaque_pdev;
7867 	struct dp_soc *soc = pdev->soc;
7868 	void *hal_srng;
7869 	int i;
7870 
7871 	if (soc->intr_mode == DP_INTR_POLL)
7872 		qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
7873 
7874 	for (i = 0; i < MAX_TCL_DATA_RINGS; i++) {
7875 		hal_srng = soc->tcl_data_ring[i].hal_srng;
7876 		if (hal_srng) {
7877 			/* We actually only need to acquire the lock */
7878 			hal_srng_access_start(soc->hal_soc, hal_srng);
7879 			/* Update SRC ring head pointer for HW to send
7880 			 * all pending packets */
7881 			hal_srng_access_end(soc->hal_soc, hal_srng);
7882 		}
7883 	}
7884 
7885 	return QDF_STATUS_SUCCESS;
7886 }
7887 #endif /* FEATURE_RUNTIME_PM */
7888 
7889 static QDF_STATUS dp_bus_suspend(struct cdp_pdev *opaque_pdev)
7890 {
7891 	struct dp_pdev *pdev = (struct dp_pdev *)opaque_pdev;
7892 	struct dp_soc *soc = pdev->soc;
7893 
7894 	if (soc->intr_mode == DP_INTR_POLL)
7895 		qdf_timer_stop(&soc->int_timer);
7896 
7897 	return QDF_STATUS_SUCCESS;
7898 }
7899 
7900 static QDF_STATUS dp_bus_resume(struct cdp_pdev *opaque_pdev)
7901 {
7902 	struct dp_pdev *pdev = (struct dp_pdev *)opaque_pdev;
7903 	struct dp_soc *soc = pdev->soc;
7904 
7905 	if (soc->intr_mode == DP_INTR_POLL)
7906 		qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
7907 
7908 	return QDF_STATUS_SUCCESS;
7909 }
7910 
7911 #ifndef CONFIG_WIN
7912 static struct cdp_misc_ops dp_ops_misc = {
7913 	.tx_non_std = dp_tx_non_std,
7914 	.get_opmode = dp_get_opmode,
7915 #ifdef FEATURE_RUNTIME_PM
7916 	.runtime_suspend = dp_runtime_suspend,
7917 	.runtime_resume = dp_runtime_resume,
7918 #endif /* FEATURE_RUNTIME_PM */
7919 	.pkt_log_init = dp_pkt_log_init,
7920 	.pkt_log_con_service = dp_pkt_log_con_service,
7921 };
7922 
7923 static struct cdp_flowctl_ops dp_ops_flowctl = {
7924 	/* WIFI 3.0 DP implement as required. */
7925 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
7926 	.flow_pool_map_handler = dp_tx_flow_pool_map,
7927 	.flow_pool_unmap_handler = dp_tx_flow_pool_unmap,
7928 	.register_pause_cb = dp_txrx_register_pause_cb,
7929 	.dump_flow_pool_info = dp_tx_dump_flow_pool_info,
7930 #endif /* QCA_LL_TX_FLOW_CONTROL_V2 */
7931 };
7932 
7933 static struct cdp_lflowctl_ops dp_ops_l_flowctl = {
7934 	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
7935 };
7936 
7937 #ifdef IPA_OFFLOAD
7938 static struct cdp_ipa_ops dp_ops_ipa = {
7939 	.ipa_get_resource = dp_ipa_get_resource,
7940 	.ipa_set_doorbell_paddr = dp_ipa_set_doorbell_paddr,
7941 	.ipa_op_response = dp_ipa_op_response,
7942 	.ipa_register_op_cb = dp_ipa_register_op_cb,
7943 	.ipa_get_stat = dp_ipa_get_stat,
7944 	.ipa_tx_data_frame = dp_tx_send_ipa_data_frame,
7945 	.ipa_enable_autonomy = dp_ipa_enable_autonomy,
7946 	.ipa_disable_autonomy = dp_ipa_disable_autonomy,
7947 	.ipa_setup = dp_ipa_setup,
7948 	.ipa_cleanup = dp_ipa_cleanup,
7949 	.ipa_setup_iface = dp_ipa_setup_iface,
7950 	.ipa_cleanup_iface = dp_ipa_cleanup_iface,
7951 	.ipa_enable_pipes = dp_ipa_enable_pipes,
7952 	.ipa_disable_pipes = dp_ipa_disable_pipes,
7953 	.ipa_set_perf_level = dp_ipa_set_perf_level
7954 };
7955 #endif
7956 
7957 static struct cdp_bus_ops dp_ops_bus = {
7958 	.bus_suspend = dp_bus_suspend,
7959 	.bus_resume = dp_bus_resume
7960 };
7961 
7962 static struct cdp_ocb_ops dp_ops_ocb = {
7963 	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
7964 };
7965 
7966 
7967 static struct cdp_throttle_ops dp_ops_throttle = {
7968 	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
7969 };
7970 
7971 static struct cdp_mob_stats_ops dp_ops_mob_stats = {
7972 	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
7973 };
7974 
7975 static struct cdp_cfg_ops dp_ops_cfg = {
7976 	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
7977 };
7978 
7979 /*
7980  * dp_peer_get_ref_find_by_addr - find peer by mac address and take a reference
7981  * @dev: physical device instance
7982  * @peer_mac_addr: peer mac address
7983  * @local_id: local id for the peer
7984  * @debug_id: to track enum peer access
7985  *
7986  * Return: peer instance pointer
7987  */
7988 static inline void *
7989 dp_peer_get_ref_find_by_addr(struct cdp_pdev *dev, u8 *peer_mac_addr,
7990 			     u8 *local_id, enum peer_debug_id_type debug_id)
7991 {
7992 	struct dp_pdev *pdev = (struct dp_pdev *)dev;
7993 	struct dp_peer *peer;
7994 
7995 	peer = dp_peer_find_hash_find(pdev->soc, peer_mac_addr, 0, DP_VDEV_ALL);
7996 
7997 	if (!peer)
7998 		return NULL;
7999 
8000 	*local_id = peer->local_id;
8001 	DP_TRACE(INFO, "%s: peer %pK id %d", __func__, peer, *local_id);
8002 
8003 	return peer;
8004 }
8005 
8006 /*
8007  * dp_peer_release_ref - release peer ref count
8008  * @peer: peer handle
8009  * @debug_id: to track enum peer access
8010  *
8011  * Return: None
8012  */
8013 static inline
8014 void dp_peer_release_ref(void *peer, enum peer_debug_id_type debug_id)
8015 {
8016 	dp_peer_unref_delete(peer);
8017 }
8018 
8019 static struct cdp_peer_ops dp_ops_peer = {
8020 	.register_peer = dp_register_peer,
8021 	.clear_peer = dp_clear_peer,
8022 	.find_peer_by_addr = dp_find_peer_by_addr,
8023 	.find_peer_by_addr_and_vdev = dp_find_peer_by_addr_and_vdev,
8024 	.peer_get_ref_by_addr = dp_peer_get_ref_find_by_addr,
8025 	.peer_release_ref = dp_peer_release_ref,
8026 	.local_peer_id = dp_local_peer_id,
8027 	.peer_find_by_local_id = dp_peer_find_by_local_id,
8028 	.peer_state_update = dp_peer_state_update,
8029 	.get_vdevid = dp_get_vdevid,
8030 	.get_vdev_by_sta_id = dp_get_vdev_by_sta_id,
8031 	.peer_get_peer_mac_addr = dp_peer_get_peer_mac_addr,
8032 	.get_vdev_for_peer = dp_get_vdev_for_peer,
8033 	.get_peer_state = dp_get_peer_state,
8034 };
8035 #endif
8036 
8037 static struct cdp_ops dp_txrx_ops = {
8038 	.cmn_drv_ops = &dp_ops_cmn,
8039 	.ctrl_ops = &dp_ops_ctrl,
8040 	.me_ops = &dp_ops_me,
8041 	.mon_ops = &dp_ops_mon,
8042 	.host_stats_ops = &dp_ops_host_stats,
8043 	.wds_ops = &dp_ops_wds,
8044 	.raw_ops = &dp_ops_raw,
8045 #ifdef CONFIG_WIN
8046 	.pflow_ops = &dp_ops_pflow,
8047 #endif /* CONFIG_WIN */
8048 #ifndef CONFIG_WIN
8049 	.misc_ops = &dp_ops_misc,
8050 	.cfg_ops = &dp_ops_cfg,
8051 	.flowctl_ops = &dp_ops_flowctl,
8052 	.l_flowctl_ops = &dp_ops_l_flowctl,
8053 #ifdef IPA_OFFLOAD
8054 	.ipa_ops = &dp_ops_ipa,
8055 #endif
8056 	.bus_ops = &dp_ops_bus,
8057 	.ocb_ops = &dp_ops_ocb,
8058 	.peer_ops = &dp_ops_peer,
8059 	.throttle_ops = &dp_ops_throttle,
8060 	.mob_stats_ops = &dp_ops_mob_stats,
8061 #endif
8062 };
8063 
8064 /*
8065  * dp_soc_set_txrx_ring_map() - set the default tx ring map per interrupt context
8066  * @soc: DP soc handle
8067  *
8068  * Return: Void
8069  */
8070 static void dp_soc_set_txrx_ring_map(struct dp_soc *soc)
8071 {
8072 	uint32_t i;
8073 	for (i = 0; i < WLAN_CFG_INT_NUM_CONTEXTS; i++) {
8074 		soc->tx_ring_map[i] = dp_cpu_ring_map[DP_DEFAULT_MAP][i];
8075 	}
8076 }
8077 
8078 #ifdef QCA_WIFI_QCA8074
8079 /**
8080  * dp_soc_attach_wifi3() - Attach txrx SOC
8081  * @ctrl_psoc:	Opaque SOC handle from control plane
8082  * @hif_handle:	Opaque HIF handle
8083  * @htc_handle:	Opaque HTC handle
8084  * @qdf_osdev:	QDF device
8085  * @ol_ops:	Offload Operations
8086  * @device_id:	Device ID
8087  *
8088  * Return: DP SOC handle on success, NULL on failure
8089  */
8090 void *dp_soc_attach_wifi3(void *ctrl_psoc, void *hif_handle,
8091 			  HTC_HANDLE htc_handle, qdf_device_t qdf_osdev,
8092 			  struct ol_if_ops *ol_ops, uint16_t device_id)
8093 {
8094 	struct dp_soc *soc = qdf_mem_malloc(sizeof(*soc));
8095 	int target_type;
8096 
8097 	if (!soc) {
8098 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
8099 			FL("DP SOC memory allocation failed"));
8100 		goto fail0;
8101 	}
8102 
8103 	soc->device_id = device_id;
8104 	soc->cdp_soc.ops = &dp_txrx_ops;
8105 	soc->cdp_soc.ol_ops = ol_ops;
8106 	soc->ctrl_psoc = ctrl_psoc;
8107 	soc->osdev = qdf_osdev;
8108 	soc->hif_handle = hif_handle;
8109 
8110 	soc->hal_soc = hif_get_hal_handle(hif_handle);
8111 	soc->htt_handle = htt_soc_attach(soc, ctrl_psoc, htc_handle,
8112 		soc->hal_soc, qdf_osdev);
8113 	if (!soc->htt_handle) {
8114 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
8115 			FL("HTT attach failed"));
8116 		goto fail1;
8117 	}
8118 
8119 	soc->wlan_cfg_ctx = wlan_cfg_soc_attach(soc->ctrl_psoc);
8120 	if (!soc->wlan_cfg_ctx) {
8121 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
8122 			FL("wlan_cfg_soc_attach failed"));
8123 		goto fail2;
8124 	}
8125 	target_type = hal_get_target_type(soc->hal_soc);
8126 	switch (target_type) {
8127 	case TARGET_TYPE_QCA6290:
8128 #ifdef QCA_WIFI_QCA6390
8129 	case TARGET_TYPE_QCA6390:
8130 #endif
8131 		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
8132 					       REO_DST_RING_SIZE_QCA6290);
8133 		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, true);
8134 		break;
8135 	case TARGET_TYPE_QCA8074:
8136 		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
8137 					       REO_DST_RING_SIZE_QCA8074);
8138 		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, true);
8139 		soc->hw_nac_monitor_support = 1;
8140 		break;
8141 	case TARGET_TYPE_QCA8074V2:
8142 		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
8143 					       REO_DST_RING_SIZE_QCA8074);
8144 		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, false);
8145 		soc->hw_nac_monitor_support = 1;
8146 		break;
8147 	default:
8148 		qdf_print("%s: Unknown tgt type %d\n", __func__, target_type);
8149 		qdf_assert_always(0);
8150 		break;
8151 	}
8152 
8153 	wlan_cfg_set_rx_hash(soc->wlan_cfg_ctx,
8154 			     cfg_get(ctrl_psoc, CFG_DP_RX_HASH));
8155 	soc->cce_disable = false;
8156 
8157 	if (soc->cdp_soc.ol_ops->get_dp_cfg_param) {
8158 		int ret = soc->cdp_soc.ol_ops->get_dp_cfg_param(soc->ctrl_psoc,
8159 				CDP_CFG_MAX_PEER_ID);
8160 
8161 		if (ret != -EINVAL) {
8162 			wlan_cfg_set_max_peer_id(soc->wlan_cfg_ctx, ret);
8163 		}
8164 
8165 		ret = soc->cdp_soc.ol_ops->get_dp_cfg_param(soc->ctrl_psoc,
8166 				CDP_CFG_CCE_DISABLE);
8167 		if (ret == 1)
8168 			soc->cce_disable = true;
8169 	}
8170 
8171 	qdf_spinlock_create(&soc->peer_ref_mutex);
8172 
8173 	qdf_spinlock_create(&soc->reo_desc_freelist_lock);
8174 	qdf_list_create(&soc->reo_desc_freelist, REO_DESC_FREELIST_SIZE);
8175 
8176 	/* fill the tx/rx cpu ring map*/
8177 	dp_soc_set_txrx_ring_map(soc);
8178 
8179 	qdf_spinlock_create(&soc->htt_stats.lock);
8180 	/* initialize work queue for stats processing */
8181 	qdf_create_work(0, &soc->htt_stats.work, htt_t2h_stats_handler, soc);
8182 
8183 	/* Initialize inactivity timer for wifison */
8184 	dp_init_inact_timer(soc);
8185 
8186 	return (void *)soc;
8187 
8188 fail2:
8189 	htt_soc_detach(soc->htt_handle);
8190 fail1:
8191 	qdf_mem_free(soc);
8192 fail0:
8193 	return NULL;
8194 }
8195 #endif
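
/*
 * Illustrative sketch (editorial example, not part of the driver): the
 * attach sequence driven by the platform layer would look roughly like
 * this; all handles are assumed to come from the caller's init path.
 *
 *	void *dp_soc = dp_soc_attach_wifi3(ctrl_psoc, hif_handle, htc_handle,
 *					   qdf_dev, ol_ops, device_id);
 *	if (!dp_soc)
 *		return QDF_STATUS_E_FAILURE;
 */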
8196 
8197 /*
8198  * dp_get_pdev_for_mac_id() -  Return pdev for mac_id
8199  *
8200  * @soc: handle to DP soc
8201  * @mac_id: MAC id
8202  *
8203  * Return: Return pdev corresponding to MAC
8204  */
8205 void *dp_get_pdev_for_mac_id(struct dp_soc *soc, uint32_t mac_id)
8206 {
8207 	if (wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx))
8208 		return soc->pdev_list[mac_id];
8209 
8210 	/* Typically for MCL as there is only 1 PDEV */
8211 	return soc->pdev_list[0];
8212 }
8213 
8214 /*
8215  * dp_is_hw_dbs_enable() - Check if DBS is enabled and update MAC ring count
8216  * @soc:		DP SoC context
8217  * @max_mac_rings:	in/out - number of MAC rings, capped to 1 if DBS is off
8218  *
8219  * Return: None
8220  */
8221 static
8222 void dp_is_hw_dbs_enable(struct dp_soc *soc,
8223 				int *max_mac_rings)
8224 {
8225 	bool dbs_enable = false;
8226 	if (soc->cdp_soc.ol_ops->is_hw_dbs_2x2_capable)
8227 		dbs_enable = soc->cdp_soc.ol_ops->
8228 		is_hw_dbs_2x2_capable(soc->ctrl_psoc);
8229 
8230 	*max_mac_rings = (dbs_enable)?(*max_mac_rings):1;
8231 }
8232 
8233 /*
8234 * dp_set_pktlog_wifi3() - configure rx pktlog filters for a WDI event
8235 * @pdev: Datapath PDEV handle
8236 * @event: which event's notifications are being subscribed to
8237 * @enable: true to subscribe to the WDI event, false to unsubscribe
8238 *
8239 * Return: 0 on success
8240 */
8241 #ifdef WDI_EVENT_ENABLE
8242 int dp_set_pktlog_wifi3(struct dp_pdev *pdev, uint32_t event,
8243 	bool enable)
8244 {
8245 	struct dp_soc *soc = pdev->soc;
8246 	struct htt_rx_ring_tlv_filter htt_tlv_filter = {0};
8247 	int max_mac_rings = wlan_cfg_get_num_mac_rings
8248 					(pdev->wlan_cfg_ctx);
8249 	uint8_t mac_id = 0;
8250 
8251 	dp_is_hw_dbs_enable(soc, &max_mac_rings);
8252 
8253 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
8254 			FL("Max_mac_rings %d "),
8255 			max_mac_rings);
8256 
8257 	if (enable) {
8258 		switch (event) {
8259 		case WDI_EVENT_RX_DESC:
8260 			if (pdev->monitor_vdev) {
8261 				/* Nothing needs to be done if monitor mode is
8262 				 * enabled
8263 				 */
8264 				return 0;
8265 			}
8266 			if (pdev->rx_pktlog_mode != DP_RX_PKTLOG_FULL) {
8267 				pdev->rx_pktlog_mode = DP_RX_PKTLOG_FULL;
8268 				htt_tlv_filter.mpdu_start = 1;
8269 				htt_tlv_filter.msdu_start = 1;
8270 				htt_tlv_filter.msdu_end = 1;
8271 				htt_tlv_filter.mpdu_end = 1;
8272 				htt_tlv_filter.packet_header = 1;
8273 				htt_tlv_filter.attention = 1;
8274 				htt_tlv_filter.ppdu_start = 1;
8275 				htt_tlv_filter.ppdu_end = 1;
8276 				htt_tlv_filter.ppdu_end_user_stats = 1;
8277 				htt_tlv_filter.ppdu_end_user_stats_ext = 1;
8278 				htt_tlv_filter.ppdu_end_status_done = 1;
8279 				htt_tlv_filter.enable_fp = 1;
8280 				htt_tlv_filter.fp_mgmt_filter = FILTER_MGMT_ALL;
8281 				htt_tlv_filter.fp_ctrl_filter = FILTER_CTRL_ALL;
8282 				htt_tlv_filter.fp_data_filter = FILTER_DATA_ALL;
8283 				htt_tlv_filter.mo_mgmt_filter = FILTER_MGMT_ALL;
8284 				htt_tlv_filter.mo_ctrl_filter = FILTER_CTRL_ALL;
8285 				htt_tlv_filter.mo_data_filter = FILTER_DATA_ALL;
8286 
8287 				for (mac_id = 0; mac_id < max_mac_rings;
8288 								mac_id++) {
8289 					int mac_for_pdev =
8290 						dp_get_mac_id_for_pdev(mac_id,
8291 								pdev->pdev_id);
8292 
8293 					htt_h2t_rx_ring_cfg(soc->htt_handle,
8294 					 mac_for_pdev,
8295 					 pdev->rxdma_mon_status_ring[mac_id]
8296 					 .hal_srng,
8297 					 RXDMA_MONITOR_STATUS,
8298 					 RX_BUFFER_SIZE,
8299 					 &htt_tlv_filter);
8300 
8301 				}
8302 
8303 				if (soc->reap_timer_init)
8304 					qdf_timer_mod(&soc->mon_reap_timer,
8305 					DP_INTR_POLL_TIMER_MS);
8306 			}
8307 			break;
8308 
8309 		case WDI_EVENT_LITE_RX:
8310 			if (pdev->monitor_vdev) {
8311 				/* Nothing needs to be done if monitor mode is
8312 				 * enabled
8313 				 */
8314 				return 0;
8315 			}
8316 
8317 			if (pdev->rx_pktlog_mode != DP_RX_PKTLOG_LITE) {
8318 				pdev->rx_pktlog_mode = DP_RX_PKTLOG_LITE;
8319 
8320 				htt_tlv_filter.ppdu_start = 1;
8321 				htt_tlv_filter.ppdu_end = 1;
8322 				htt_tlv_filter.ppdu_end_user_stats = 1;
8323 				htt_tlv_filter.ppdu_end_user_stats_ext = 1;
8324 				htt_tlv_filter.ppdu_end_status_done = 1;
8325 				htt_tlv_filter.mpdu_start = 1;
8326 				htt_tlv_filter.enable_fp = 1;
8327 				htt_tlv_filter.fp_mgmt_filter = FILTER_MGMT_ALL;
8328 				htt_tlv_filter.fp_ctrl_filter = FILTER_CTRL_ALL;
8329 				htt_tlv_filter.fp_data_filter = FILTER_DATA_ALL;
8330 				htt_tlv_filter.mo_mgmt_filter = FILTER_MGMT_ALL;
8331 				htt_tlv_filter.mo_ctrl_filter = FILTER_CTRL_ALL;
8332 				htt_tlv_filter.mo_data_filter = FILTER_DATA_ALL;
8333 
8334 				for (mac_id = 0; mac_id < max_mac_rings;
8335 								mac_id++) {
8336 					int mac_for_pdev =
8337 						dp_get_mac_id_for_pdev(mac_id,
8338 								pdev->pdev_id);
8339 
8340 					htt_h2t_rx_ring_cfg(soc->htt_handle,
8341 					mac_for_pdev,
8342 					pdev->rxdma_mon_status_ring[mac_id]
8343 					.hal_srng,
8344 					RXDMA_MONITOR_STATUS,
8345 					RX_BUFFER_SIZE_PKTLOG_LITE,
8346 					&htt_tlv_filter);
8347 				}
8348 
8349 				if (soc->reap_timer_init)
8350 					qdf_timer_mod(&soc->mon_reap_timer,
8351 					DP_INTR_POLL_TIMER_MS);
8352 			}
8353 			break;
8354 
8355 		case WDI_EVENT_LITE_T2H:
8356 			if (pdev->monitor_vdev) {
8357 				/* Nothing needs to be done if monitor mode is
8358 				 * enabled
8359 				 */
8360 				return 0;
8361 			}
8362 
8363 			for (mac_id = 0; mac_id < max_mac_rings; mac_id++) {
8364 				int mac_for_pdev = dp_get_mac_id_for_pdev(
8365 							mac_id,	pdev->pdev_id);
8366 
8367 				pdev->pktlog_ppdu_stats = true;
8368 				dp_h2t_cfg_stats_msg_send(pdev,
8369 					DP_PPDU_TXLITE_STATS_BITMASK_CFG,
8370 					mac_for_pdev);
8371 			}
8372 			break;
8373 
8374 		default:
8375 			/* Nothing needs to be done for other pktlog types */
8376 			break;
8377 		}
8378 	} else {
8379 		switch (event) {
8380 		case WDI_EVENT_RX_DESC:
8381 		case WDI_EVENT_LITE_RX:
8382 			if (pdev->monitor_vdev) {
8383 				/* Nothing needs to be done if monitor mode is
8384 				 * enabled
8385 				 */
8386 				return 0;
8387 			}
8388 			if (pdev->rx_pktlog_mode != DP_RX_PKTLOG_DISABLED) {
8389 				pdev->rx_pktlog_mode = DP_RX_PKTLOG_DISABLED;
8390 
8391 				for (mac_id = 0; mac_id < max_mac_rings;
8392 								mac_id++) {
8393 					int mac_for_pdev =
8394 						dp_get_mac_id_for_pdev(mac_id,
8395 								pdev->pdev_id);
8396 
8397 					htt_h2t_rx_ring_cfg(soc->htt_handle,
8398 					  mac_for_pdev,
8399 					  pdev->rxdma_mon_status_ring[mac_id]
8400 					  .hal_srng,
8401 					  RXDMA_MONITOR_STATUS,
8402 					  RX_BUFFER_SIZE,
8403 					  &htt_tlv_filter);
8404 				}
8405 
8406 				if (soc->reap_timer_init)
8407 					qdf_timer_stop(&soc->mon_reap_timer);
8408 			}
8409 			break;
8410 		case WDI_EVENT_LITE_T2H:
8411 			if (pdev->monitor_vdev) {
8412 				/* Nothing needs to be done if monitor mode is
8413 				 * enabled
8414 				 */
8415 				return 0;
8416 			}
8417 			/* To disable HTT_H2T_MSG_TYPE_PPDU_STATS_CFG in FW,
8418 			 * pass the value 0. Once these macros are defined in
8419 			 * the htt header file, the proper macros will be used.
8420 			 */
8421 			for (mac_id = 0; mac_id < max_mac_rings; mac_id++) {
8422 				int mac_for_pdev =
8423 						dp_get_mac_id_for_pdev(mac_id,
8424 								pdev->pdev_id);
8425 
8426 				pdev->pktlog_ppdu_stats = false;
8427 				if (!pdev->enhanced_stats_en && !pdev->tx_sniffer_enable && !pdev->mcopy_mode) {
8428 					dp_h2t_cfg_stats_msg_send(pdev, 0,
8429 								mac_for_pdev);
8430 				} else if (pdev->tx_sniffer_enable || pdev->mcopy_mode) {
8431 					dp_h2t_cfg_stats_msg_send(pdev, DP_PPDU_STATS_CFG_SNIFFER,
8432 								mac_for_pdev);
8433 				} else if (pdev->enhanced_stats_en) {
8434 					dp_h2t_cfg_stats_msg_send(pdev, DP_PPDU_STATS_CFG_ENH_STATS,
8435 								mac_for_pdev);
8436 				}
8437 			}
8438 
8439 			break;
8440 		default:
8441 			/* Nothing needs to be done for other pktlog types */
8442 			break;
8443 		}
8444 	}
8445 	return 0;
8446 }
8447 #endif
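
/*
 * Illustrative sketch (editorial example, not part of the driver): the
 * pktlog layer would toggle lite rx logging on a pdev roughly like this;
 * "pdev" is the datapath pdev obtained at attach time.
 *
 *	dp_set_pktlog_wifi3(pdev, WDI_EVENT_LITE_RX, true);
 *	...
 *	dp_set_pktlog_wifi3(pdev, WDI_EVENT_LITE_RX, false);
 */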
8448