xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/dp_main.c (revision 27d564647e9b50e713c60b0d7e5ea2a9b0a3ae74)
1 /*
2  * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include <qdf_types.h>
20 #include <qdf_lock.h>
21 #include <qdf_net_types.h>
22 #include <qdf_lro.h>
23 #include <qdf_module.h>
24 #include <hal_hw_headers.h>
25 #include <hal_api.h>
26 #include <hif.h>
27 #include <htt.h>
28 #include <wdi_event.h>
29 #include <queue.h>
30 #include "dp_htt.h"
31 #include "dp_types.h"
32 #include "dp_internal.h"
33 #include "dp_tx.h"
34 #include "dp_tx_desc.h"
35 #include "dp_rx.h"
36 #include <cdp_txrx_handle.h>
37 #include <wlan_cfg.h>
38 #include "cdp_txrx_cmn_struct.h"
39 #include "cdp_txrx_stats_struct.h"
40 #include "cdp_txrx_cmn_reg.h"
41 #include <qdf_util.h>
42 #include "dp_peer.h"
43 #include "dp_rx_mon.h"
44 #include "htt_stats.h"
45 #include "qdf_mem.h"   /* qdf_mem_malloc,free */
46 #include "cfg_ucfg_api.h"
47 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
48 #include "cdp_txrx_flow_ctrl_v2.h"
49 #else
50 static inline void
51 cdp_dump_flow_pool_info(struct cdp_soc_t *soc)
52 {
53 	return;
54 }
55 #endif
56 #include "dp_ipa.h"
57 
58 #include "dp_cal_client_api.h"
59 
60 #ifdef CONFIG_MCL
61 #ifndef REMOVE_PKT_LOG
62 #include <pktlog_ac_api.h>
63 #include <pktlog_ac.h>
64 #endif
65 #endif
66 static void dp_pktlogmod_exit(struct dp_pdev *handle);
67 static void *dp_peer_create_wifi3(struct cdp_vdev *vdev_handle,
68 				uint8_t *peer_mac_addr,
69 				struct cdp_ctrl_objmgr_peer *ctrl_peer);
70 static void dp_peer_delete_wifi3(void *peer_handle, uint32_t bitmap);
71 static void dp_ppdu_ring_reset(struct dp_pdev *pdev);
72 static void dp_ppdu_ring_cfg(struct dp_pdev *pdev);
73 
74 #define DP_INTR_POLL_TIMER_MS	10
75 #define DP_WDS_AGING_TIMER_DEFAULT_MS	120000
76 #define DP_MCS_LENGTH (6*MAX_MCS)
77 #define DP_NSS_LENGTH (6*SS_COUNT)
78 #define DP_RXDMA_ERR_LENGTH (6*HAL_RXDMA_ERR_MAX)
79 #define DP_REO_ERR_LENGTH (6*HAL_REO_ERR_MAX)
80 #define DP_MAX_MCS_STRING_LEN 30
81 #define DP_CURR_FW_STATS_AVAIL 19
82 #define DP_HTT_DBG_EXT_STATS_MAX 256
83 #define DP_MAX_SLEEP_TIME 100
84 
85 #ifdef IPA_OFFLOAD
86 /* Exclude IPA rings from the interrupt context */
87 #define TX_RING_MASK_VAL	0xb
88 #define RX_RING_MASK_VAL	0x7
89 #else
90 #define TX_RING_MASK_VAL	0xF
91 #define RX_RING_MASK_VAL	0xF
92 #endif
93 
94 #define STR_MAXLEN	64
95 
96 #define DP_PPDU_STATS_CFG_ALL 0xFFFF
97 
98 /* PPDU stats mask sent to FW to enable enhanced stats */
99 #define DP_PPDU_STATS_CFG_ENH_STATS 0xE67
100 /* PPDU stats mask sent to FW to support debug sniffer feature */
101 #define DP_PPDU_STATS_CFG_SNIFFER 0x2FFF
102 /* PPDU stats mask sent to FW to support BPR feature*/
103 #define DP_PPDU_STATS_CFG_BPR 0x2000
104 /* PPDU stats mask sent to FW to support BPR and enhanced stats feature */
105 #define DP_PPDU_STATS_CFG_BPR_ENH (DP_PPDU_STATS_CFG_BPR | \
106 				   DP_PPDU_STATS_CFG_ENH_STATS)
107 /* PPDU stats mask sent to FW to support BPR and pcktlog stats feature */
108 #define DP_PPDU_STATS_CFG_BPR_PKTLOG (DP_PPDU_STATS_CFG_BPR | \
109 				      DP_PPDU_TXLITE_STATS_BITMASK_CFG)
110 
111 #define RNG_ERR		"SRNG setup failed for"
112 /**
113  * default_dscp_tid_map - Default DSCP-TID mapping
114  *
115  * DSCP        TID
116  * 000000      0
117  * 001000      1
118  * 010000      2
119  * 011000      3
120  * 100000      4
121  * 101000      5
122  * 110000      6
123  * 111000      7
124  */
125 static uint8_t default_dscp_tid_map[DSCP_TID_MAP_MAX] = {
126 	0, 0, 0, 0, 0, 0, 0, 0,
127 	1, 1, 1, 1, 1, 1, 1, 1,
128 	2, 2, 2, 2, 2, 2, 2, 2,
129 	3, 3, 3, 3, 3, 3, 3, 3,
130 	4, 4, 4, 4, 4, 4, 4, 4,
131 	5, 5, 5, 5, 5, 5, 5, 5,
132 	6, 6, 6, 6, 6, 6, 6, 6,
133 	7, 7, 7, 7, 7, 7, 7, 7,
134 };
135 
136 /*
137  * struct dp_rate_debug
138  *
139  * @mcs_type: print string for a given mcs
140  * @valid: valid mcs rate?
141  */
142 struct dp_rate_debug {
143 	char mcs_type[DP_MAX_MCS_STRING_LEN];
144 	uint8_t valid;
145 };
146 
147 #define MCS_VALID 1
148 #define MCS_INVALID 0
149 
150 static const struct dp_rate_debug dp_rate_string[DOT11_MAX][MAX_MCS] = {
151 
152 	{
153 		{"OFDM 48 Mbps", MCS_VALID},
154 		{"OFDM 24 Mbps", MCS_VALID},
155 		{"OFDM 12 Mbps", MCS_VALID},
156 		{"OFDM 6 Mbps ", MCS_VALID},
157 		{"OFDM 54 Mbps", MCS_VALID},
158 		{"OFDM 36 Mbps", MCS_VALID},
159 		{"OFDM 18 Mbps", MCS_VALID},
160 		{"OFDM 9 Mbps ", MCS_VALID},
161 		{"INVALID ", MCS_INVALID},
162 		{"INVALID ", MCS_INVALID},
163 		{"INVALID ", MCS_INVALID},
164 		{"INVALID ", MCS_INVALID},
165 		{"INVALID ", MCS_VALID},
166 	},
167 	{
168 		{"CCK 11 Mbps Long  ", MCS_VALID},
169 		{"CCK 5.5 Mbps Long ", MCS_VALID},
170 		{"CCK 2 Mbps Long   ", MCS_VALID},
171 		{"CCK 1 Mbps Long   ", MCS_VALID},
172 		{"CCK 11 Mbps Short ", MCS_VALID},
173 		{"CCK 5.5 Mbps Short", MCS_VALID},
174 		{"CCK 2 Mbps Short  ", MCS_VALID},
175 		{"INVALID ", MCS_INVALID},
176 		{"INVALID ", MCS_INVALID},
177 		{"INVALID ", MCS_INVALID},
178 		{"INVALID ", MCS_INVALID},
179 		{"INVALID ", MCS_INVALID},
180 		{"INVALID ", MCS_VALID},
181 	},
182 	{
183 		{"HT MCS 0 (BPSK 1/2)  ", MCS_VALID},
184 		{"HT MCS 1 (QPSK 1/2)  ", MCS_VALID},
185 		{"HT MCS 2 (QPSK 3/4)  ", MCS_VALID},
186 		{"HT MCS 3 (16-QAM 1/2)", MCS_VALID},
187 		{"HT MCS 4 (16-QAM 3/4)", MCS_VALID},
188 		{"HT MCS 5 (64-QAM 2/3)", MCS_VALID},
189 		{"HT MCS 6 (64-QAM 3/4)", MCS_VALID},
190 		{"HT MCS 7 (64-QAM 5/6)", MCS_VALID},
191 		{"INVALID ", MCS_INVALID},
192 		{"INVALID ", MCS_INVALID},
193 		{"INVALID ", MCS_INVALID},
194 		{"INVALID ", MCS_INVALID},
195 		{"INVALID ", MCS_VALID},
196 	},
197 	{
198 		{"VHT MCS 0 (BPSK 1/2)     ", MCS_VALID},
199 		{"VHT MCS 1 (QPSK 1/2)     ", MCS_VALID},
200 		{"VHT MCS 2 (QPSK 3/4)     ", MCS_VALID},
201 		{"VHT MCS 3 (16-QAM 1/2)   ", MCS_VALID},
202 		{"VHT MCS 4 (16-QAM 3/4)   ", MCS_VALID},
203 		{"VHT MCS 5 (64-QAM 2/3)   ", MCS_VALID},
204 		{"VHT MCS 6 (64-QAM 3/4)   ", MCS_VALID},
205 		{"VHT MCS 7 (64-QAM 5/6)   ", MCS_VALID},
206 		{"VHT MCS 8 (256-QAM 3/4)  ", MCS_VALID},
207 		{"VHT MCS 9 (256-QAM 5/6)  ", MCS_VALID},
208 		{"VHT MCS 10 (1024-QAM 3/4)", MCS_VALID},
209 		{"VHT MCS 11 (1024-QAM 5/6)", MCS_VALID},
210 		{"INVALID ", MCS_VALID},
211 	},
212 	{
213 		{"HE MCS 0 (BPSK 1/2)     ", MCS_VALID},
214 		{"HE MCS 1 (QPSK 1/2)     ", MCS_VALID},
215 		{"HE MCS 2 (QPSK 3/4)     ", MCS_VALID},
216 		{"HE MCS 3 (16-QAM 1/2)   ", MCS_VALID},
217 		{"HE MCS 4 (16-QAM 3/4)   ", MCS_VALID},
218 		{"HE MCS 5 (64-QAM 2/3)   ", MCS_VALID},
219 		{"HE MCS 6 (64-QAM 3/4)   ", MCS_VALID},
220 		{"HE MCS 7 (64-QAM 5/6)   ", MCS_VALID},
221 		{"HE MCS 8 (256-QAM 3/4)  ", MCS_VALID},
222 		{"HE MCS 9 (256-QAM 5/6)  ", MCS_VALID},
223 		{"HE MCS 10 (1024-QAM 3/4)", MCS_VALID},
224 		{"HE MCS 11 (1024-QAM 5/6)", MCS_VALID},
225 		{"INVALID ", MCS_VALID},
226 	}
227 };
228 
229 /**
230  * @brief Cpu ring map types
231  */
232 enum dp_cpu_ring_map_types {
233 	DP_DEFAULT_MAP,
234 	DP_NSS_FIRST_RADIO_OFFLOADED_MAP,
235 	DP_NSS_SECOND_RADIO_OFFLOADED_MAP,
236 	DP_NSS_ALL_RADIO_OFFLOADED_MAP,
237 	DP_CPU_RING_MAP_MAX
238 };
239 
240 /**
241  * @brief Cpu to tx ring map
242  */
243 static uint8_t dp_cpu_ring_map[DP_CPU_RING_MAP_MAX][WLAN_CFG_INT_NUM_CONTEXTS] = {
244 	{0x0, 0x1, 0x2, 0x0},
245 	{0x1, 0x2, 0x1, 0x2},
246 	{0x0, 0x2, 0x0, 0x2},
247 	{0x2, 0x2, 0x2, 0x2}
248 };
249 
250 /**
251  * @brief Select the type of statistics
252  */
253 enum dp_stats_type {
254 	STATS_FW = 0,
255 	STATS_HOST = 1,
256 	STATS_TYPE_MAX = 2,
257 };
258 
259 /**
260  * @brief General Firmware statistics options
261  *
262  */
263 enum dp_fw_stats {
264 	TXRX_FW_STATS_INVALID	= -1,
265 };
266 
267 /**
268  * dp_stats_mapping_table - Firmware and Host statistics
269  * currently supported
270  */
271 const int dp_stats_mapping_table[][STATS_TYPE_MAX] = {
272 	{HTT_DBG_EXT_STATS_RESET, TXRX_HOST_STATS_INVALID},
273 	{HTT_DBG_EXT_STATS_PDEV_TX, TXRX_HOST_STATS_INVALID},
274 	{HTT_DBG_EXT_STATS_PDEV_RX, TXRX_HOST_STATS_INVALID},
275 	{HTT_DBG_EXT_STATS_PDEV_TX_HWQ, TXRX_HOST_STATS_INVALID},
276 	{HTT_DBG_EXT_STATS_PDEV_TX_SCHED, TXRX_HOST_STATS_INVALID},
277 	{HTT_DBG_EXT_STATS_PDEV_ERROR, TXRX_HOST_STATS_INVALID},
278 	{HTT_DBG_EXT_STATS_PDEV_TQM, TXRX_HOST_STATS_INVALID},
279 	{HTT_DBG_EXT_STATS_TQM_CMDQ, TXRX_HOST_STATS_INVALID},
280 	{HTT_DBG_EXT_STATS_TX_DE_INFO, TXRX_HOST_STATS_INVALID},
281 	{HTT_DBG_EXT_STATS_PDEV_TX_RATE, TXRX_HOST_STATS_INVALID},
282 	{HTT_DBG_EXT_STATS_PDEV_RX_RATE, TXRX_HOST_STATS_INVALID},
283 	{TXRX_FW_STATS_INVALID, TXRX_HOST_STATS_INVALID},
284 	{HTT_DBG_EXT_STATS_TX_SELFGEN_INFO, TXRX_HOST_STATS_INVALID},
285 	{HTT_DBG_EXT_STATS_TX_MU_HWQ, TXRX_HOST_STATS_INVALID},
286 	{HTT_DBG_EXT_STATS_RING_IF_INFO, TXRX_HOST_STATS_INVALID},
287 	{HTT_DBG_EXT_STATS_SRNG_INFO, TXRX_HOST_STATS_INVALID},
288 	{HTT_DBG_EXT_STATS_SFM_INFO, TXRX_HOST_STATS_INVALID},
289 	{HTT_DBG_EXT_STATS_PDEV_TX_MU, TXRX_HOST_STATS_INVALID},
290 	{HTT_DBG_EXT_STATS_ACTIVE_PEERS_LIST, TXRX_HOST_STATS_INVALID},
291 	/* Last ENUM for HTT FW STATS */
292 	{DP_HTT_DBG_EXT_STATS_MAX, TXRX_HOST_STATS_INVALID},
293 	{TXRX_FW_STATS_INVALID, TXRX_CLEAR_STATS},
294 	{TXRX_FW_STATS_INVALID, TXRX_RX_RATE_STATS},
295 	{TXRX_FW_STATS_INVALID, TXRX_TX_RATE_STATS},
296 	{TXRX_FW_STATS_INVALID, TXRX_TX_HOST_STATS},
297 	{TXRX_FW_STATS_INVALID, TXRX_RX_HOST_STATS},
298 	{TXRX_FW_STATS_INVALID, TXRX_AST_STATS},
299 	{TXRX_FW_STATS_INVALID, TXRX_SRNG_PTR_STATS},
300 	{TXRX_FW_STATS_INVALID, TXRX_RX_MON_STATS},
301 	{TXRX_FW_STATS_INVALID, TXRX_REO_QUEUE_STATS},
302 };
303 
304 /* MCL specific functions */
305 #ifdef CONFIG_MCL
306 /**
307  * dp_soc_get_mon_mask_for_interrupt_mode() - get mon mode mask for intr mode
308  * @soc: pointer to dp_soc handle
309  * @intr_ctx_num: interrupt context number for which mon mask is needed
310  *
311  * For MCL, monitor mode rings are being processed in timer contexts (polled).
312  * This function is returning 0, since in interrupt mode(softirq based RX),
313  * we donot want to process monitor mode rings in a softirq.
314  *
315  * So, in case packet log is enabled for SAP/STA/P2P modes,
316  * regular interrupt processing will not process monitor mode rings. It would be
317  * done in a separate timer context.
318  *
319  * Return: 0
320  */
321 static inline
322 uint32_t dp_soc_get_mon_mask_for_interrupt_mode(struct dp_soc *soc, int intr_ctx_num)
323 {
324 	return 0;
325 }
326 
327 /*
328  * dp_service_mon_rings()- timer to reap monitor rings
329  * reqd as we are not getting ppdu end interrupts
330  * @arg: SoC Handle
331  *
332  * Return:
333  *
334  */
335 static void dp_service_mon_rings(void *arg)
336 {
337 	struct dp_soc *soc = (struct dp_soc *)arg;
338 	int ring = 0, work_done, mac_id;
339 	struct dp_pdev *pdev = NULL;
340 
341 	for  (ring = 0 ; ring < MAX_PDEV_CNT; ring++) {
342 		pdev = soc->pdev_list[ring];
343 		if (!pdev)
344 			continue;
345 		for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
346 			int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id,
347 								pdev->pdev_id);
348 			work_done = dp_mon_process(soc, mac_for_pdev,
349 						   QCA_NAPI_BUDGET);
350 
351 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
352 				  FL("Reaped %d descs from Monitor rings"),
353 				  work_done);
354 		}
355 	}
356 
357 	qdf_timer_mod(&soc->mon_reap_timer, DP_INTR_POLL_TIMER_MS);
358 }
359 
360 #ifndef REMOVE_PKT_LOG
361 /**
362  * dp_pkt_log_init() - API to initialize packet log
363  * @ppdev: physical device handle
364  * @scn: HIF context
365  *
366  * Return: none
367  */
368 void dp_pkt_log_init(struct cdp_pdev *ppdev, void *scn)
369 {
370 	struct dp_pdev *handle = (struct dp_pdev *)ppdev;
371 
372 	if (handle->pkt_log_init) {
373 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
374 			  "%s: Packet log not initialized", __func__);
375 		return;
376 	}
377 
378 	pktlog_sethandle(&handle->pl_dev, scn);
379 	pktlog_set_callback_regtype(PKTLOG_LITE_CALLBACK_REGISTRATION);
380 
381 	if (pktlogmod_init(scn)) {
382 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
383 			  "%s: pktlogmod_init failed", __func__);
384 		handle->pkt_log_init = false;
385 	} else {
386 		handle->pkt_log_init = true;
387 	}
388 }
389 
390 /**
391  * dp_pkt_log_con_service() - connect packet log service
392  * @ppdev: physical device handle
393  * @scn: device context
394  *
395  * Return: none
396  */
397 static void dp_pkt_log_con_service(struct cdp_pdev *ppdev, void *scn)
398 {
399 	struct dp_pdev *pdev = (struct dp_pdev *)ppdev;
400 
401 	dp_pkt_log_init((struct cdp_pdev *)pdev, scn);
402 	pktlog_htc_attach();
403 }
404 
405 /**
406  * dp_pktlogmod_exit() - API to cleanup pktlog info
407  * @handle: Pdev handle
408  *
409  * Return: none
410  */
411 static void dp_pktlogmod_exit(struct dp_pdev *handle)
412 {
413 	void *scn = (void *)handle->soc->hif_handle;
414 
415 	if (!scn) {
416 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
417 			  "%s: Invalid hif(scn) handle", __func__);
418 		return;
419 	}
420 
421 	pktlogmod_exit(scn);
422 	handle->pkt_log_init = false;
423 }
424 #endif
425 #else
426 static void dp_pktlogmod_exit(struct dp_pdev *handle) { }
427 
428 /**
429  * dp_soc_get_mon_mask_for_interrupt_mode() - get mon mode mask for intr mode
430  * @soc: pointer to dp_soc handle
431  * @intr_ctx_num: interrupt context number for which mon mask is needed
432  *
433  * Return: mon mask value
434  */
435 static inline
436 uint32_t dp_soc_get_mon_mask_for_interrupt_mode(struct dp_soc *soc, int intr_ctx_num)
437 {
438 	return wlan_cfg_get_rx_mon_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
439 }
440 #endif
441 
442 /**
443  * dp_get_dp_vdev_from_cdp_vdev() - get dp_vdev from cdp_vdev by type-casting
444  * @cdp_opaque_vdev: pointer to cdp_vdev
445  *
446  * Return: pointer to dp_vdev
447  */
448 static
449 struct dp_vdev * dp_get_dp_vdev_from_cdp_vdev(struct cdp_vdev *cdp_opaque_vdev)
450 {
451 	return (struct dp_vdev *)cdp_opaque_vdev;
452 }
453 
454 
455 static int dp_peer_add_ast_wifi3(struct cdp_soc_t *soc_hdl,
456 					struct cdp_peer *peer_hdl,
457 					uint8_t *mac_addr,
458 					enum cdp_txrx_ast_entry_type type,
459 					uint32_t flags)
460 {
461 
462 	return dp_peer_add_ast((struct dp_soc *)soc_hdl,
463 				(struct dp_peer *)peer_hdl,
464 				mac_addr,
465 				type,
466 				flags);
467 }
468 
469 static void dp_peer_del_ast_wifi3(struct cdp_soc_t *soc_hdl,
470 					 void *ast_entry_hdl)
471 {
472 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
473 	qdf_spin_lock_bh(&soc->ast_lock);
474 	dp_peer_del_ast((struct dp_soc *)soc_hdl,
475 			(struct dp_ast_entry *)ast_entry_hdl);
476 	qdf_spin_unlock_bh(&soc->ast_lock);
477 }
478 
479 
480 static int dp_peer_update_ast_wifi3(struct cdp_soc_t *soc_hdl,
481 						struct cdp_peer *peer_hdl,
482 						uint8_t *wds_macaddr,
483 						uint32_t flags)
484 {
485 	int status = -1;
486 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
487 	struct dp_ast_entry  *ast_entry = NULL;
488 	struct dp_peer *peer = (struct dp_peer *)peer_hdl;
489 
490 	qdf_spin_lock_bh(&soc->ast_lock);
491 	ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, wds_macaddr,
492 						    peer->vdev->pdev->pdev_id);
493 
494 	if (ast_entry) {
495 		status = dp_peer_update_ast(soc,
496 					    peer,
497 					    ast_entry, flags);
498 	}
499 
500 	qdf_spin_unlock_bh(&soc->ast_lock);
501 
502 	return status;
503 }
504 
505 /*
506  * dp_wds_reset_ast_wifi3() - Reset the is_active param for ast entry
507  * @soc_handle:		Datapath SOC handle
508  * @wds_macaddr:	WDS entry MAC Address
509  * Return: None
510  */
511 static void dp_wds_reset_ast_wifi3(struct cdp_soc_t *soc_hdl,
512 				   uint8_t *wds_macaddr, void *vdev_handle)
513 {
514 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
515 	struct dp_ast_entry *ast_entry = NULL;
516 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
517 
518 	qdf_spin_lock_bh(&soc->ast_lock);
519 	ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, wds_macaddr,
520 						    vdev->pdev->pdev_id);
521 
522 	if (ast_entry) {
523 		if ((ast_entry->type != CDP_TXRX_AST_TYPE_STATIC) &&
524 			(ast_entry->type != CDP_TXRX_AST_TYPE_SELF) &&
525 			(ast_entry->type != CDP_TXRX_AST_TYPE_STA_BSS)) {
526 			ast_entry->is_active = TRUE;
527 		}
528 	}
529 
530 	qdf_spin_unlock_bh(&soc->ast_lock);
531 }
532 
533 /*
534  * dp_wds_reset_ast_table_wifi3() - Reset the is_active param for all ast entry
535  * @soc:		Datapath SOC handle
536  *
537  * Return: None
538  */
539 static void dp_wds_reset_ast_table_wifi3(struct cdp_soc_t  *soc_hdl,
540 					 void *vdev_hdl)
541 {
542 	struct dp_soc *soc = (struct dp_soc *) soc_hdl;
543 	struct dp_pdev *pdev;
544 	struct dp_vdev *vdev;
545 	struct dp_peer *peer;
546 	struct dp_ast_entry *ase, *temp_ase;
547 	int i;
548 
549 	qdf_spin_lock_bh(&soc->ast_lock);
550 
551 	for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) {
552 		pdev = soc->pdev_list[i];
553 		qdf_spin_lock_bh(&pdev->vdev_list_lock);
554 		DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
555 			DP_VDEV_ITERATE_PEER_LIST(vdev, peer) {
556 				DP_PEER_ITERATE_ASE_LIST(peer, ase, temp_ase) {
557 					if ((ase->type ==
558 						CDP_TXRX_AST_TYPE_STATIC) ||
559 						(ase->type ==
560 						CDP_TXRX_AST_TYPE_SELF) ||
561 						(ase->type ==
562 						CDP_TXRX_AST_TYPE_STA_BSS))
563 						continue;
564 					ase->is_active = TRUE;
565 				}
566 			}
567 		}
568 		qdf_spin_unlock_bh(&pdev->vdev_list_lock);
569 	}
570 
571 	qdf_spin_unlock_bh(&soc->ast_lock);
572 }
573 
574 /*
575  * dp_wds_flush_ast_table_wifi3() - Delete all wds and hmwds ast entry
576  * @soc:		Datapath SOC handle
577  *
578  * Return: None
579  */
580 static void dp_wds_flush_ast_table_wifi3(struct cdp_soc_t  *soc_hdl)
581 {
582 	struct dp_soc *soc = (struct dp_soc *) soc_hdl;
583 	struct dp_pdev *pdev;
584 	struct dp_vdev *vdev;
585 	struct dp_peer *peer;
586 	struct dp_ast_entry *ase, *temp_ase;
587 	int i;
588 
589 	qdf_spin_lock_bh(&soc->ast_lock);
590 
591 	for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) {
592 		pdev = soc->pdev_list[i];
593 		qdf_spin_lock_bh(&pdev->vdev_list_lock);
594 		DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
595 			DP_VDEV_ITERATE_PEER_LIST(vdev, peer) {
596 				DP_PEER_ITERATE_ASE_LIST(peer, ase, temp_ase) {
597 					if ((ase->type ==
598 						CDP_TXRX_AST_TYPE_STATIC) ||
599 						(ase->type ==
600 						 CDP_TXRX_AST_TYPE_SELF) ||
601 						(ase->type ==
602 						 CDP_TXRX_AST_TYPE_STA_BSS))
603 						continue;
604 					dp_peer_del_ast(soc, ase);
605 				}
606 			}
607 		}
608 		qdf_spin_unlock_bh(&pdev->vdev_list_lock);
609 	}
610 
611 	qdf_spin_unlock_bh(&soc->ast_lock);
612 }
613 
614 static void *dp_peer_ast_hash_find_soc_wifi3(struct cdp_soc_t *soc_hdl,
615 					     uint8_t *ast_mac_addr)
616 {
617 	struct dp_ast_entry *ast_entry;
618 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
619 
620 	qdf_spin_lock_bh(&soc->ast_lock);
621 	ast_entry = dp_peer_ast_hash_find_soc(soc, ast_mac_addr);
622 	qdf_spin_unlock_bh(&soc->ast_lock);
623 	return (void *)ast_entry;
624 }
625 
626 static void *dp_peer_ast_hash_find_by_pdevid_wifi3(struct cdp_soc_t *soc_hdl,
627 						   uint8_t *ast_mac_addr,
628 						   uint8_t pdev_id)
629 {
630 	struct dp_ast_entry *ast_entry;
631 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
632 
633 	qdf_spin_lock_bh(&soc->ast_lock);
634 	ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, ast_mac_addr, pdev_id);
635 	qdf_spin_unlock_bh(&soc->ast_lock);
636 	return (void *)ast_entry;
637 }
638 
639 static uint8_t dp_peer_ast_get_pdev_id_wifi3(struct cdp_soc_t *soc_hdl,
640 							void *ast_entry_hdl)
641 {
642 	return dp_peer_ast_get_pdev_id((struct dp_soc *)soc_hdl,
643 					(struct dp_ast_entry *)ast_entry_hdl);
644 }
645 
646 static uint8_t dp_peer_ast_get_next_hop_wifi3(struct cdp_soc_t *soc_hdl,
647 							void *ast_entry_hdl)
648 {
649 	return dp_peer_ast_get_next_hop((struct dp_soc *)soc_hdl,
650 					(struct dp_ast_entry *)ast_entry_hdl);
651 }
652 
653 static void dp_peer_ast_set_type_wifi3(
654 					struct cdp_soc_t *soc_hdl,
655 					void *ast_entry_hdl,
656 					enum cdp_txrx_ast_entry_type type)
657 {
658 	dp_peer_ast_set_type((struct dp_soc *)soc_hdl,
659 				(struct dp_ast_entry *)ast_entry_hdl,
660 				type);
661 }
662 
663 static enum cdp_txrx_ast_entry_type dp_peer_ast_get_type_wifi3(
664 					struct cdp_soc_t *soc_hdl,
665 					void *ast_entry_hdl)
666 {
667 	return ((struct dp_ast_entry *)ast_entry_hdl)->type;
668 }
669 
670 #if defined(FEATURE_AST) && defined(AST_HKV1_WORKAROUND)
671 void dp_peer_ast_set_cp_ctx_wifi3(struct cdp_soc_t *soc_handle,
672 				  void *ast_entry,
673 				  void *cp_ctx)
674 {
675 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
676 
677 	qdf_spin_lock_bh(&soc->ast_lock);
678 	dp_peer_ast_set_cp_ctx(soc,
679 			       (struct dp_ast_entry *)ast_entry, cp_ctx);
680 	qdf_spin_unlock_bh(&soc->ast_lock);
681 }
682 
683 void *dp_peer_ast_get_cp_ctx_wifi3(struct cdp_soc_t *soc_handle,
684 				   void *ast_entry)
685 {
686 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
687 	void *cp_ctx = NULL;
688 
689 	qdf_spin_lock_bh(&soc->ast_lock);
690 	cp_ctx = dp_peer_ast_get_cp_ctx(soc,
691 					(struct dp_ast_entry *)ast_entry);
692 	qdf_spin_unlock_bh(&soc->ast_lock);
693 
694 	return cp_ctx;
695 }
696 
697 bool dp_peer_ast_get_wmi_sent_wifi3(struct cdp_soc_t *soc_handle,
698 				    void *ast_entry)
699 {
700 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
701 	bool wmi_sent = false;
702 
703 	qdf_spin_lock_bh(&soc->ast_lock);
704 	wmi_sent = dp_peer_ast_get_wmi_sent(soc,
705 					    (struct dp_ast_entry *)ast_entry);
706 	qdf_spin_unlock_bh(&soc->ast_lock);
707 
708 	return wmi_sent;
709 }
710 
711 void dp_peer_ast_free_entry_wifi3(struct cdp_soc_t *soc_handle,
712 				  void *ast_entry)
713 {
714 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
715 
716 	qdf_spin_lock_bh(&soc->ast_lock);
717 	dp_peer_ast_free_entry(soc, (struct dp_ast_entry *)ast_entry);
718 	qdf_spin_unlock_bh(&soc->ast_lock);
719 }
720 #endif
721 
722 static struct cdp_peer *dp_peer_ast_get_peer_wifi3(
723 					struct cdp_soc_t *soc_hdl,
724 					void *ast_entry_hdl)
725 {
726 	return (struct cdp_peer *)((struct dp_ast_entry *)ast_entry_hdl)->peer;
727 }
728 
729 static uint32_t dp_peer_ast_get_nexhop_peer_id_wifi3(
730 					struct cdp_soc_t *soc_hdl,
731 					void *ast_entry_hdl)
732 {
733 	return ((struct dp_ast_entry *)ast_entry_hdl)->peer->peer_ids[0];
734 }
735 /**
736  * dp_srng_find_ring_in_mask() - find which ext_group a ring belongs
737  * @ring_num: ring num of the ring being queried
738  * @grp_mask: the grp_mask array for the ring type in question.
739  *
740  * The grp_mask array is indexed by group number and the bit fields correspond
741  * to ring numbers.  We are finding which interrupt group a ring belongs to.
742  *
743  * Return: the index in the grp_mask array with the ring number.
744  * -QDF_STATUS_E_NOENT if no entry is found
745  */
746 static int dp_srng_find_ring_in_mask(int ring_num, int *grp_mask)
747 {
748 	int ext_group_num;
749 	int mask = 1 << ring_num;
750 
751 	for (ext_group_num = 0; ext_group_num < WLAN_CFG_INT_NUM_CONTEXTS;
752 	     ext_group_num++) {
753 		if (mask & grp_mask[ext_group_num])
754 			return ext_group_num;
755 	}
756 
757 	return -QDF_STATUS_E_NOENT;
758 }
759 
760 static int dp_srng_calculate_msi_group(struct dp_soc *soc,
761 				       enum hal_ring_type ring_type,
762 				       int ring_num)
763 {
764 	int *grp_mask;
765 
766 	switch (ring_type) {
767 	case WBM2SW_RELEASE:
768 		/* dp_tx_comp_handler - soc->tx_comp_ring */
769 		if (ring_num < 3)
770 			grp_mask = &soc->wlan_cfg_ctx->int_tx_ring_mask[0];
771 
772 		/* dp_rx_wbm_err_process - soc->rx_rel_ring */
773 		else if (ring_num == 3) {
774 			/* sw treats this as a separate ring type */
775 			grp_mask = &soc->wlan_cfg_ctx->
776 				int_rx_wbm_rel_ring_mask[0];
777 			ring_num = 0;
778 		} else {
779 			qdf_assert(0);
780 			return -QDF_STATUS_E_NOENT;
781 		}
782 	break;
783 
784 	case REO_EXCEPTION:
785 		/* dp_rx_err_process - &soc->reo_exception_ring */
786 		grp_mask = &soc->wlan_cfg_ctx->int_rx_err_ring_mask[0];
787 	break;
788 
789 	case REO_DST:
790 		/* dp_rx_process - soc->reo_dest_ring */
791 		grp_mask = &soc->wlan_cfg_ctx->int_rx_ring_mask[0];
792 	break;
793 
794 	case REO_STATUS:
795 		/* dp_reo_status_ring_handler - soc->reo_status_ring */
796 		grp_mask = &soc->wlan_cfg_ctx->int_reo_status_ring_mask[0];
797 	break;
798 
799 	/* dp_rx_mon_status_srng_process - pdev->rxdma_mon_status_ring*/
800 	case RXDMA_MONITOR_STATUS:
801 	/* dp_rx_mon_dest_process - pdev->rxdma_mon_dst_ring */
802 	case RXDMA_MONITOR_DST:
803 		/* dp_mon_process */
804 		grp_mask = &soc->wlan_cfg_ctx->int_rx_mon_ring_mask[0];
805 	break;
806 	case RXDMA_DST:
807 		/* dp_rxdma_err_process */
808 		grp_mask = &soc->wlan_cfg_ctx->int_rxdma2host_ring_mask[0];
809 	break;
810 
811 	case RXDMA_BUF:
812 		grp_mask = &soc->wlan_cfg_ctx->int_host2rxdma_ring_mask[0];
813 	break;
814 
815 	case RXDMA_MONITOR_BUF:
816 		/* TODO: support low_thresh interrupt */
817 		return -QDF_STATUS_E_NOENT;
818 	break;
819 
820 	case TCL_DATA:
821 	case TCL_CMD:
822 	case REO_CMD:
823 	case SW2WBM_RELEASE:
824 	case WBM_IDLE_LINK:
825 		/* normally empty SW_TO_HW rings */
826 		return -QDF_STATUS_E_NOENT;
827 	break;
828 
829 	case TCL_STATUS:
830 	case REO_REINJECT:
831 		/* misc unused rings */
832 		return -QDF_STATUS_E_NOENT;
833 	break;
834 
835 	case CE_SRC:
836 	case CE_DST:
837 	case CE_DST_STATUS:
838 		/* CE_rings - currently handled by hif */
839 	default:
840 		return -QDF_STATUS_E_NOENT;
841 	break;
842 	}
843 
844 	return dp_srng_find_ring_in_mask(ring_num, grp_mask);
845 }
846 
847 static void dp_srng_msi_setup(struct dp_soc *soc, struct hal_srng_params
848 			      *ring_params, int ring_type, int ring_num)
849 {
850 	int msi_group_number;
851 	int msi_data_count;
852 	int ret;
853 	uint32_t msi_data_start, msi_irq_start, addr_low, addr_high;
854 
855 	ret = pld_get_user_msi_assignment(soc->osdev->dev, "DP",
856 					    &msi_data_count, &msi_data_start,
857 					    &msi_irq_start);
858 
859 	if (ret)
860 		return;
861 
862 	msi_group_number = dp_srng_calculate_msi_group(soc, ring_type,
863 						       ring_num);
864 	if (msi_group_number < 0) {
865 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW,
866 			FL("ring not part of an ext_group; ring_type: %d,ring_num %d"),
867 			ring_type, ring_num);
868 		ring_params->msi_addr = 0;
869 		ring_params->msi_data = 0;
870 		return;
871 	}
872 
873 	if (msi_group_number > msi_data_count) {
874 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_WARN,
875 			FL("2 msi_groups will share an msi; msi_group_num %d"),
876 			msi_group_number);
877 
878 		QDF_ASSERT(0);
879 	}
880 
881 	pld_get_msi_address(soc->osdev->dev, &addr_low, &addr_high);
882 
883 	ring_params->msi_addr = addr_low;
884 	ring_params->msi_addr |= (qdf_dma_addr_t)(((uint64_t)addr_high) << 32);
885 	ring_params->msi_data = (msi_group_number % msi_data_count)
886 		+ msi_data_start;
887 	ring_params->flags |= HAL_SRNG_MSI_INTR;
888 }
889 
890 /**
891  * dp_print_ast_stats() - Dump AST table contents
892  * @soc: Datapath soc handle
893  *
894  * return void
895  */
896 #ifdef FEATURE_AST
897 static void dp_print_ast_stats(struct dp_soc *soc)
898 {
899 	uint8_t i;
900 	uint8_t num_entries = 0;
901 	struct dp_vdev *vdev;
902 	struct dp_pdev *pdev;
903 	struct dp_peer *peer;
904 	struct dp_ast_entry *ase, *tmp_ase;
905 	char type[CDP_TXRX_AST_TYPE_MAX][10] = {
906 			"NONE", "STATIC", "SELF", "WDS", "MEC", "HMWDS", "BSS",
907 			"DA", "HMWDS_SEC"};
908 
909 	DP_PRINT_STATS("AST Stats:");
910 	DP_PRINT_STATS("	Entries Added   = %d", soc->stats.ast.added);
911 	DP_PRINT_STATS("	Entries Deleted = %d", soc->stats.ast.deleted);
912 	DP_PRINT_STATS("	Entries Agedout = %d", soc->stats.ast.aged_out);
913 	DP_PRINT_STATS("AST Table:");
914 
915 	qdf_spin_lock_bh(&soc->ast_lock);
916 	for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) {
917 		pdev = soc->pdev_list[i];
918 		qdf_spin_lock_bh(&pdev->vdev_list_lock);
919 		DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
920 			DP_VDEV_ITERATE_PEER_LIST(vdev, peer) {
921 				DP_PEER_ITERATE_ASE_LIST(peer, ase, tmp_ase) {
922 					DP_PRINT_STATS("%6d mac_addr = %pM"
923 							" peer_mac_addr = %pM"
924 							" type = %s"
925 							" next_hop = %d"
926 							" is_active = %d"
927 							" is_bss = %d"
928 							" ast_idx = %d"
929 							" ast_hash = %d"
930 							" pdev_id = %d"
931 							" vdev_id = %d",
932 							++num_entries,
933 							ase->mac_addr.raw,
934 							ase->peer->mac_addr.raw,
935 							type[ase->type],
936 							ase->next_hop,
937 							ase->is_active,
938 							ase->is_bss,
939 							ase->ast_idx,
940 							ase->ast_hash_value,
941 							ase->pdev_id,
942 							ase->vdev_id);
943 				}
944 			}
945 		}
946 		qdf_spin_unlock_bh(&pdev->vdev_list_lock);
947 	}
948 	qdf_spin_unlock_bh(&soc->ast_lock);
949 }
950 #else
951 static void dp_print_ast_stats(struct dp_soc *soc)
952 {
953 	DP_PRINT_STATS("AST Stats not available.Enable FEATURE_AST");
954 	return;
955 }
956 #endif
957 
958 static void dp_print_peer_table(struct dp_vdev *vdev)
959 {
960 	struct dp_peer *peer = NULL;
961 
962 	DP_PRINT_STATS("Dumping Peer Table  Stats:");
963 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
964 		if (!peer) {
965 			DP_PRINT_STATS("Invalid Peer");
966 			return;
967 		}
968 		DP_PRINT_STATS("    peer_mac_addr = %pM"
969 			" nawds_enabled = %d"
970 			" bss_peer = %d"
971 			" wapi = %d"
972 			" wds_enabled = %d"
973 			" delete in progress = %d",
974 			peer->mac_addr.raw,
975 			peer->nawds_enabled,
976 			peer->bss_peer,
977 			peer->wapi,
978 			peer->wds_enabled,
979 			peer->delete_in_progress);
980 	}
981 }
982 
983 /*
984  * dp_setup_srng - Internal function to setup SRNG rings used by data path
985  */
986 static int dp_srng_setup(struct dp_soc *soc, struct dp_srng *srng,
987 	int ring_type, int ring_num, int mac_id, uint32_t num_entries)
988 {
989 	void *hal_soc = soc->hal_soc;
990 	uint32_t entry_size = hal_srng_get_entrysize(hal_soc, ring_type);
991 	/* TODO: See if we should get align size from hal */
992 	uint32_t ring_base_align = 8;
993 	struct hal_srng_params ring_params;
994 	uint32_t max_entries = hal_srng_max_entries(hal_soc, ring_type);
995 
996 	/* TODO: Currently hal layer takes care of endianness related settings.
997 	 * See if these settings need to passed from DP layer
998 	 */
999 	ring_params.flags = 0;
1000 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW,
1001 		  FL("Ring type: %d, num:%d"), ring_type, ring_num);
1002 
1003 	num_entries = (num_entries > max_entries) ? max_entries : num_entries;
1004 	srng->hal_srng = NULL;
1005 	srng->alloc_size = (num_entries * entry_size) + ring_base_align - 1;
1006 	srng->num_entries = num_entries;
1007 	srng->base_vaddr_unaligned = qdf_mem_alloc_consistent(
1008 		soc->osdev, soc->osdev->dev, srng->alloc_size,
1009 		&(srng->base_paddr_unaligned));
1010 
1011 	if (!srng->base_vaddr_unaligned) {
1012 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1013 			FL("alloc failed - ring_type: %d, ring_num %d"),
1014 			ring_type, ring_num);
1015 		return QDF_STATUS_E_NOMEM;
1016 	}
1017 
1018 	ring_params.ring_base_vaddr = srng->base_vaddr_unaligned +
1019 		((unsigned long)srng->base_vaddr_unaligned % ring_base_align);
1020 	ring_params.ring_base_paddr = srng->base_paddr_unaligned +
1021 		((unsigned long)(ring_params.ring_base_vaddr) -
1022 		(unsigned long)srng->base_vaddr_unaligned);
1023 	ring_params.num_entries = num_entries;
1024 
1025 	if (soc->intr_mode == DP_INTR_MSI) {
1026 		dp_srng_msi_setup(soc, &ring_params, ring_type, ring_num);
1027 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
1028 			  FL("Using MSI for ring_type: %d, ring_num %d"),
1029 			  ring_type, ring_num);
1030 
1031 	} else {
1032 		ring_params.msi_data = 0;
1033 		ring_params.msi_addr = 0;
1034 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
1035 			  FL("Skipping MSI for ring_type: %d, ring_num %d"),
1036 			  ring_type, ring_num);
1037 	}
1038 
1039 	/*
1040 	 * Setup interrupt timer and batch counter thresholds for
1041 	 * interrupt mitigation based on ring type
1042 	 */
1043 	if (ring_type == REO_DST) {
1044 		ring_params.intr_timer_thres_us =
1045 			wlan_cfg_get_int_timer_threshold_rx(soc->wlan_cfg_ctx);
1046 		ring_params.intr_batch_cntr_thres_entries =
1047 			wlan_cfg_get_int_batch_threshold_rx(soc->wlan_cfg_ctx);
1048 	} else if (ring_type == WBM2SW_RELEASE && (ring_num < 3)) {
1049 		ring_params.intr_timer_thres_us =
1050 			wlan_cfg_get_int_timer_threshold_tx(soc->wlan_cfg_ctx);
1051 		ring_params.intr_batch_cntr_thres_entries =
1052 			wlan_cfg_get_int_batch_threshold_tx(soc->wlan_cfg_ctx);
1053 	} else {
1054 		ring_params.intr_timer_thres_us =
1055 			wlan_cfg_get_int_timer_threshold_other(soc->wlan_cfg_ctx);
1056 		ring_params.intr_batch_cntr_thres_entries =
1057 			wlan_cfg_get_int_batch_threshold_other(soc->wlan_cfg_ctx);
1058 	}
1059 
1060 	/* Enable low threshold interrupts for rx buffer rings (regular and
1061 	 * monitor buffer rings.
1062 	 * TODO: See if this is required for any other ring
1063 	 */
1064 	if ((ring_type == RXDMA_BUF) || (ring_type == RXDMA_MONITOR_BUF) ||
1065 		(ring_type == RXDMA_MONITOR_STATUS)) {
1066 		/* TODO: Setting low threshold to 1/8th of ring size
1067 		 * see if this needs to be configurable
1068 		 */
1069 		ring_params.low_threshold = num_entries >> 3;
1070 		ring_params.flags |= HAL_SRNG_LOW_THRES_INTR_ENABLE;
1071 		ring_params.intr_timer_thres_us =
1072 			wlan_cfg_get_int_timer_threshold_rx(soc->wlan_cfg_ctx);
1073 		ring_params.intr_batch_cntr_thres_entries = 0;
1074 	}
1075 
1076 	srng->hal_srng = hal_srng_setup(hal_soc, ring_type, ring_num,
1077 		mac_id, &ring_params);
1078 
1079 	if (!srng->hal_srng) {
1080 		qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
1081 				srng->alloc_size,
1082 				srng->base_vaddr_unaligned,
1083 				srng->base_paddr_unaligned, 0);
1084 	}
1085 
1086 	return 0;
1087 }
1088 
1089 /**
1090  * dp_srng_cleanup - Internal function to cleanup SRNG rings used by data path
1091  * Any buffers allocated and attached to ring entries are expected to be freed
1092  * before calling this function.
1093  */
1094 static void dp_srng_cleanup(struct dp_soc *soc, struct dp_srng *srng,
1095 	int ring_type, int ring_num)
1096 {
1097 	if (!srng->hal_srng) {
1098 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1099 			FL("Ring type: %d, num:%d not setup"),
1100 			ring_type, ring_num);
1101 		return;
1102 	}
1103 
1104 	hal_srng_cleanup(soc->hal_soc, srng->hal_srng);
1105 
1106 	qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
1107 				srng->alloc_size,
1108 				srng->base_vaddr_unaligned,
1109 				srng->base_paddr_unaligned, 0);
1110 	srng->hal_srng = NULL;
1111 }
1112 
1113 /* TODO: Need this interface from HIF */
1114 void *hif_get_hal_handle(void *hif_handle);
1115 
1116 /*
1117  * dp_service_srngs() - Top level interrupt handler for DP Ring interrupts
1118  * @dp_ctx: DP SOC handle
1119  * @budget: Number of frames/descriptors that can be processed in one shot
1120  *
1121  * Return: remaining budget/quota for the soc device
1122  */
1123 static uint32_t dp_service_srngs(void *dp_ctx, uint32_t dp_budget)
1124 {
1125 	struct dp_intr *int_ctx = (struct dp_intr *)dp_ctx;
1126 	struct dp_soc *soc = int_ctx->soc;
1127 	int ring = 0;
1128 	uint32_t work_done  = 0;
1129 	int budget = dp_budget;
1130 	uint8_t tx_mask = int_ctx->tx_ring_mask;
1131 	uint8_t rx_mask = int_ctx->rx_ring_mask;
1132 	uint8_t rx_err_mask = int_ctx->rx_err_ring_mask;
1133 	uint8_t rx_wbm_rel_mask = int_ctx->rx_wbm_rel_ring_mask;
1134 	uint8_t reo_status_mask = int_ctx->reo_status_ring_mask;
1135 	uint32_t remaining_quota = dp_budget;
1136 	struct dp_pdev *pdev = NULL;
1137 	int mac_id;
1138 
1139 	/* Process Tx completion interrupts first to return back buffers */
1140 	while (tx_mask) {
1141 		if (tx_mask & 0x1) {
1142 			work_done = dp_tx_comp_handler(soc,
1143 					soc->tx_comp_ring[ring].hal_srng,
1144 					remaining_quota);
1145 
1146 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
1147 				"tx mask 0x%x ring %d, budget %d, work_done %d",
1148 				tx_mask, ring, budget, work_done);
1149 
1150 			budget -= work_done;
1151 			if (budget <= 0)
1152 				goto budget_done;
1153 
1154 			remaining_quota = budget;
1155 		}
1156 		tx_mask = tx_mask >> 1;
1157 		ring++;
1158 	}
1159 
1160 
1161 	/* Process REO Exception ring interrupt */
1162 	if (rx_err_mask) {
1163 		work_done = dp_rx_err_process(soc,
1164 				soc->reo_exception_ring.hal_srng,
1165 				remaining_quota);
1166 
1167 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
1168 			"REO Exception Ring: work_done %d budget %d",
1169 			work_done, budget);
1170 
1171 		budget -=  work_done;
1172 		if (budget <= 0) {
1173 			goto budget_done;
1174 		}
1175 		remaining_quota = budget;
1176 	}
1177 
1178 	/* Process Rx WBM release ring interrupt */
1179 	if (rx_wbm_rel_mask) {
1180 		work_done = dp_rx_wbm_err_process(soc,
1181 				soc->rx_rel_ring.hal_srng, remaining_quota);
1182 
1183 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
1184 			"WBM Release Ring: work_done %d budget %d",
1185 			work_done, budget);
1186 
1187 		budget -=  work_done;
1188 		if (budget <= 0) {
1189 			goto budget_done;
1190 		}
1191 		remaining_quota = budget;
1192 	}
1193 
1194 	/* Process Rx interrupts */
1195 	if (rx_mask) {
1196 		for (ring = 0; ring < soc->num_reo_dest_rings; ring++) {
1197 			if (rx_mask & (1 << ring)) {
1198 				work_done = dp_rx_process(int_ctx,
1199 					    soc->reo_dest_ring[ring].hal_srng,
1200 					    ring,
1201 					    remaining_quota);
1202 
1203 				QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
1204 					"rx mask 0x%x ring %d, work_done %d budget %d",
1205 					rx_mask, ring, work_done, budget);
1206 
1207 				budget -=  work_done;
1208 				if (budget <= 0)
1209 					goto budget_done;
1210 				remaining_quota = budget;
1211 			}
1212 		}
1213 		for (ring = 0; ring < MAX_RX_MAC_RINGS; ring++) {
1214 			work_done = dp_rxdma_err_process(soc, ring,
1215 						remaining_quota);
1216 			budget -= work_done;
1217 		}
1218 	}
1219 
1220 	if (reo_status_mask)
1221 		dp_reo_status_ring_handler(soc);
1222 
1223 	/* Process LMAC interrupts */
1224 	for  (ring = 0 ; ring < MAX_PDEV_CNT; ring++) {
1225 		pdev = soc->pdev_list[ring];
1226 		if (pdev == NULL)
1227 			continue;
1228 		for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
1229 			int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id,
1230 								pdev->pdev_id);
1231 
1232 			if (int_ctx->rx_mon_ring_mask & (1 << mac_for_pdev)) {
1233 				work_done = dp_mon_process(soc, mac_for_pdev,
1234 						remaining_quota);
1235 				budget -= work_done;
1236 				if (budget <= 0)
1237 					goto budget_done;
1238 				remaining_quota = budget;
1239 			}
1240 
1241 			if (int_ctx->rxdma2host_ring_mask &
1242 					(1 << mac_for_pdev)) {
1243 				work_done = dp_rxdma_err_process(soc,
1244 							mac_for_pdev,
1245 							remaining_quota);
1246 				budget -=  work_done;
1247 				if (budget <= 0)
1248 					goto budget_done;
1249 				remaining_quota = budget;
1250 			}
1251 
1252 			if (int_ctx->host2rxdma_ring_mask &
1253 						(1 << mac_for_pdev)) {
1254 				union dp_rx_desc_list_elem_t *desc_list = NULL;
1255 				union dp_rx_desc_list_elem_t *tail = NULL;
1256 				struct dp_srng *rx_refill_buf_ring =
1257 					&pdev->rx_refill_buf_ring;
1258 
1259 				DP_STATS_INC(pdev, replenish.low_thresh_intrs,
1260 						1);
1261 				dp_rx_buffers_replenish(soc, mac_for_pdev,
1262 					rx_refill_buf_ring,
1263 					&soc->rx_desc_buf[mac_for_pdev], 0,
1264 					&desc_list, &tail);
1265 			}
1266 		}
1267 	}
1268 
1269 	qdf_lro_flush(int_ctx->lro_ctx);
1270 
1271 budget_done:
1272 	return dp_budget - budget;
1273 }
1274 
1275 /* dp_interrupt_timer()- timer poll for interrupts
1276  *
1277  * @arg: SoC Handle
1278  *
1279  * Return:
1280  *
1281  */
1282 static void dp_interrupt_timer(void *arg)
1283 {
1284 	struct dp_soc *soc = (struct dp_soc *) arg;
1285 	int i;
1286 
1287 	if (qdf_atomic_read(&soc->cmn_init_done)) {
1288 		for (i = 0;
1289 			i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++)
1290 			dp_service_srngs(&soc->intr_ctx[i], 0xffff);
1291 
1292 		qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
1293 	}
1294 }
1295 
1296 /*
1297  * dp_soc_interrupt_attach_poll() - Register handlers for DP interrupts
1298  * @txrx_soc: DP SOC handle
1299  *
1300  * Host driver will register for “DP_NUM_INTERRUPT_CONTEXTS” number of NAPI
1301  * contexts. Each NAPI context will have a tx_ring_mask , rx_ring_mask ,and
1302  * rx_monitor_ring mask to indicate the rings that are processed by the handler.
1303  *
1304  * Return: 0 for success. nonzero for failure.
1305  */
1306 static QDF_STATUS dp_soc_attach_poll(void *txrx_soc)
1307 {
1308 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
1309 	int i;
1310 
1311 	soc->intr_mode = DP_INTR_POLL;
1312 
1313 	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
1314 		soc->intr_ctx[i].dp_intr_id = i;
1315 		soc->intr_ctx[i].tx_ring_mask =
1316 			wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, i);
1317 		soc->intr_ctx[i].rx_ring_mask =
1318 			wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, i);
1319 		soc->intr_ctx[i].rx_mon_ring_mask =
1320 			wlan_cfg_get_rx_mon_ring_mask(soc->wlan_cfg_ctx, i);
1321 		soc->intr_ctx[i].rx_err_ring_mask =
1322 			wlan_cfg_get_rx_err_ring_mask(soc->wlan_cfg_ctx, i);
1323 		soc->intr_ctx[i].rx_wbm_rel_ring_mask =
1324 			wlan_cfg_get_rx_wbm_rel_ring_mask(soc->wlan_cfg_ctx, i);
1325 		soc->intr_ctx[i].reo_status_ring_mask =
1326 			wlan_cfg_get_reo_status_ring_mask(soc->wlan_cfg_ctx, i);
1327 		soc->intr_ctx[i].rxdma2host_ring_mask =
1328 			wlan_cfg_get_rxdma2host_ring_mask(soc->wlan_cfg_ctx, i);
1329 		soc->intr_ctx[i].soc = soc;
1330 		soc->intr_ctx[i].lro_ctx = qdf_lro_init();
1331 	}
1332 
1333 	qdf_timer_init(soc->osdev, &soc->int_timer,
1334 			dp_interrupt_timer, (void *)soc,
1335 			QDF_TIMER_TYPE_WAKE_APPS);
1336 
1337 	return QDF_STATUS_SUCCESS;
1338 }
1339 
1340 static QDF_STATUS dp_soc_interrupt_attach(void *txrx_soc);
1341 #if defined(CONFIG_MCL)
1342 extern int con_mode_monitor;
1343 /*
1344  * dp_soc_interrupt_attach_wrapper() - Register handlers for DP interrupts
1345  * @txrx_soc: DP SOC handle
1346  *
1347  * Call the appropriate attach function based on the mode of operation.
1348  * This is a WAR for enabling monitor mode.
1349  *
1350  * Return: 0 for success. nonzero for failure.
1351  */
1352 static QDF_STATUS dp_soc_interrupt_attach_wrapper(void *txrx_soc)
1353 {
1354 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
1355 
1356 	if (!(soc->wlan_cfg_ctx->napi_enabled) ||
1357 	     con_mode_monitor == QDF_GLOBAL_MONITOR_MODE) {
1358 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
1359 				  "%s: Poll mode", __func__);
1360 		return dp_soc_attach_poll(txrx_soc);
1361 	} else {
1362 
1363 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
1364 				  "%s: Interrupt  mode", __func__);
1365 		return dp_soc_interrupt_attach(txrx_soc);
1366 	}
1367 }
1368 #else
1369 #if defined(DP_INTR_POLL_BASED) && DP_INTR_POLL_BASED
1370 static QDF_STATUS dp_soc_interrupt_attach_wrapper(void *txrx_soc)
1371 {
1372 	return dp_soc_attach_poll(txrx_soc);
1373 }
1374 #else
1375 static QDF_STATUS dp_soc_interrupt_attach_wrapper(void *txrx_soc)
1376 {
1377 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
1378 
1379 	if (hif_is_polled_mode_enabled(soc->hif_handle))
1380 		return dp_soc_attach_poll(txrx_soc);
1381 	else
1382 		return dp_soc_interrupt_attach(txrx_soc);
1383 }
1384 #endif
1385 #endif
1386 
1387 static void dp_soc_interrupt_map_calculate_integrated(struct dp_soc *soc,
1388 		int intr_ctx_num, int *irq_id_map, int *num_irq_r)
1389 {
1390 	int j;
1391 	int num_irq = 0;
1392 
1393 	int tx_mask =
1394 		wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
1395 	int rx_mask =
1396 		wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
1397 	int rx_mon_mask =
1398 		wlan_cfg_get_rx_mon_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
1399 	int rx_err_ring_mask = wlan_cfg_get_rx_err_ring_mask(
1400 					soc->wlan_cfg_ctx, intr_ctx_num);
1401 	int rx_wbm_rel_ring_mask = wlan_cfg_get_rx_wbm_rel_ring_mask(
1402 					soc->wlan_cfg_ctx, intr_ctx_num);
1403 	int reo_status_ring_mask = wlan_cfg_get_reo_status_ring_mask(
1404 					soc->wlan_cfg_ctx, intr_ctx_num);
1405 	int rxdma2host_ring_mask = wlan_cfg_get_rxdma2host_ring_mask(
1406 					soc->wlan_cfg_ctx, intr_ctx_num);
1407 	int host2rxdma_ring_mask = wlan_cfg_get_host2rxdma_ring_mask(
1408 					soc->wlan_cfg_ctx, intr_ctx_num);
1409 
1410 	for (j = 0; j < HIF_MAX_GRP_IRQ; j++) {
1411 
1412 		if (tx_mask & (1 << j)) {
1413 			irq_id_map[num_irq++] =
1414 				(wbm2host_tx_completions_ring1 - j);
1415 		}
1416 
1417 		if (rx_mask & (1 << j)) {
1418 			irq_id_map[num_irq++] =
1419 				(reo2host_destination_ring1 - j);
1420 		}
1421 
1422 		if (rxdma2host_ring_mask & (1 << j)) {
1423 			irq_id_map[num_irq++] =
1424 				rxdma2host_destination_ring_mac1 -
1425 				wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, j);
1426 		}
1427 
1428 		if (host2rxdma_ring_mask & (1 << j)) {
1429 			irq_id_map[num_irq++] =
1430 				host2rxdma_host_buf_ring_mac1 -
1431 				wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, j);
1432 		}
1433 
1434 		if (rx_mon_mask & (1 << j)) {
1435 			irq_id_map[num_irq++] =
1436 				ppdu_end_interrupts_mac1 -
1437 				wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, j);
1438 			irq_id_map[num_irq++] =
1439 				rxdma2host_monitor_status_ring_mac1 -
1440 				wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, j);
1441 		}
1442 
1443 		if (rx_wbm_rel_ring_mask & (1 << j))
1444 			irq_id_map[num_irq++] = wbm2host_rx_release;
1445 
1446 		if (rx_err_ring_mask & (1 << j))
1447 			irq_id_map[num_irq++] = reo2host_exception;
1448 
1449 		if (reo_status_ring_mask & (1 << j))
1450 			irq_id_map[num_irq++] = reo2host_status;
1451 
1452 	}
1453 	*num_irq_r = num_irq;
1454 }
1455 
1456 static void dp_soc_interrupt_map_calculate_msi(struct dp_soc *soc,
1457 		int intr_ctx_num, int *irq_id_map, int *num_irq_r,
1458 		int msi_vector_count, int msi_vector_start)
1459 {
1460 	int tx_mask = wlan_cfg_get_tx_ring_mask(
1461 					soc->wlan_cfg_ctx, intr_ctx_num);
1462 	int rx_mask = wlan_cfg_get_rx_ring_mask(
1463 					soc->wlan_cfg_ctx, intr_ctx_num);
1464 	int rx_mon_mask = wlan_cfg_get_rx_mon_ring_mask(
1465 					soc->wlan_cfg_ctx, intr_ctx_num);
1466 	int rx_err_ring_mask = wlan_cfg_get_rx_err_ring_mask(
1467 					soc->wlan_cfg_ctx, intr_ctx_num);
1468 	int rx_wbm_rel_ring_mask = wlan_cfg_get_rx_wbm_rel_ring_mask(
1469 					soc->wlan_cfg_ctx, intr_ctx_num);
1470 	int reo_status_ring_mask = wlan_cfg_get_reo_status_ring_mask(
1471 					soc->wlan_cfg_ctx, intr_ctx_num);
1472 	int rxdma2host_ring_mask = wlan_cfg_get_rxdma2host_ring_mask(
1473 					soc->wlan_cfg_ctx, intr_ctx_num);
1474 
1475 	unsigned int vector =
1476 		(intr_ctx_num % msi_vector_count) + msi_vector_start;
1477 	int num_irq = 0;
1478 
1479 	soc->intr_mode = DP_INTR_MSI;
1480 
1481 	if (tx_mask | rx_mask | rx_mon_mask | rx_err_ring_mask |
1482 	    rx_wbm_rel_ring_mask | reo_status_ring_mask | rxdma2host_ring_mask)
1483 		irq_id_map[num_irq++] =
1484 			pld_get_msi_irq(soc->osdev->dev, vector);
1485 
1486 	*num_irq_r = num_irq;
1487 }
1488 
1489 static void dp_soc_interrupt_map_calculate(struct dp_soc *soc, int intr_ctx_num,
1490 				    int *irq_id_map, int *num_irq)
1491 {
1492 	int msi_vector_count, ret;
1493 	uint32_t msi_base_data, msi_vector_start;
1494 
1495 	ret = pld_get_user_msi_assignment(soc->osdev->dev, "DP",
1496 					    &msi_vector_count,
1497 					    &msi_base_data,
1498 					    &msi_vector_start);
1499 	if (ret)
1500 		return dp_soc_interrupt_map_calculate_integrated(soc,
1501 				intr_ctx_num, irq_id_map, num_irq);
1502 
1503 	else
1504 		dp_soc_interrupt_map_calculate_msi(soc,
1505 				intr_ctx_num, irq_id_map, num_irq,
1506 				msi_vector_count, msi_vector_start);
1507 }
1508 
1509 /*
1510  * dp_soc_interrupt_attach() - Register handlers for DP interrupts
1511  * @txrx_soc: DP SOC handle
1512  *
1513  * Host driver will register for “DP_NUM_INTERRUPT_CONTEXTS” number of NAPI
1514  * contexts. Each NAPI context will have a tx_ring_mask , rx_ring_mask ,and
1515  * rx_monitor_ring mask to indicate the rings that are processed by the handler.
1516  *
1517  * Return: 0 for success. nonzero for failure.
1518  */
1519 static QDF_STATUS dp_soc_interrupt_attach(void *txrx_soc)
1520 {
1521 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
1522 
1523 	int i = 0;
1524 	int num_irq = 0;
1525 
1526 	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
1527 		int ret = 0;
1528 
1529 		/* Map of IRQ ids registered with one interrupt context */
1530 		int irq_id_map[HIF_MAX_GRP_IRQ];
1531 
1532 		int tx_mask =
1533 			wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, i);
1534 		int rx_mask =
1535 			wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, i);
1536 		int rx_mon_mask =
1537 			dp_soc_get_mon_mask_for_interrupt_mode(soc, i);
1538 		int rx_err_ring_mask =
1539 			wlan_cfg_get_rx_err_ring_mask(soc->wlan_cfg_ctx, i);
1540 		int rx_wbm_rel_ring_mask =
1541 			wlan_cfg_get_rx_wbm_rel_ring_mask(soc->wlan_cfg_ctx, i);
1542 		int reo_status_ring_mask =
1543 			wlan_cfg_get_reo_status_ring_mask(soc->wlan_cfg_ctx, i);
1544 		int rxdma2host_ring_mask =
1545 			wlan_cfg_get_rxdma2host_ring_mask(soc->wlan_cfg_ctx, i);
1546 		int host2rxdma_ring_mask =
1547 			wlan_cfg_get_host2rxdma_ring_mask(soc->wlan_cfg_ctx, i);
1548 
1549 
1550 		soc->intr_ctx[i].dp_intr_id = i;
1551 		soc->intr_ctx[i].tx_ring_mask = tx_mask;
1552 		soc->intr_ctx[i].rx_ring_mask = rx_mask;
1553 		soc->intr_ctx[i].rx_mon_ring_mask = rx_mon_mask;
1554 		soc->intr_ctx[i].rx_err_ring_mask = rx_err_ring_mask;
1555 		soc->intr_ctx[i].rxdma2host_ring_mask = rxdma2host_ring_mask;
1556 		soc->intr_ctx[i].host2rxdma_ring_mask = host2rxdma_ring_mask;
1557 		soc->intr_ctx[i].rx_wbm_rel_ring_mask = rx_wbm_rel_ring_mask;
1558 		soc->intr_ctx[i].reo_status_ring_mask = reo_status_ring_mask;
1559 
1560 		soc->intr_ctx[i].soc = soc;
1561 
1562 		num_irq = 0;
1563 
1564 		dp_soc_interrupt_map_calculate(soc, i, &irq_id_map[0],
1565 					       &num_irq);
1566 
1567 		ret = hif_register_ext_group(soc->hif_handle,
1568 				num_irq, irq_id_map, dp_service_srngs,
1569 				&soc->intr_ctx[i], "dp_intr",
1570 				HIF_EXEC_NAPI_TYPE, QCA_NAPI_DEF_SCALE_BIN_SHIFT);
1571 
1572 		if (ret) {
1573 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1574 			FL("failed, ret = %d"), ret);
1575 
1576 			return QDF_STATUS_E_FAILURE;
1577 		}
1578 		soc->intr_ctx[i].lro_ctx = qdf_lro_init();
1579 	}
1580 
1581 	hif_configure_ext_group_interrupts(soc->hif_handle);
1582 
1583 	return QDF_STATUS_SUCCESS;
1584 }
1585 
1586 /*
1587  * dp_soc_interrupt_detach() - Deregister any allocations done for interrupts
1588  * @txrx_soc: DP SOC handle
1589  *
1590  * Return: void
1591  */
1592 static void dp_soc_interrupt_detach(void *txrx_soc)
1593 {
1594 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
1595 	int i;
1596 
1597 	if (soc->intr_mode == DP_INTR_POLL) {
1598 		qdf_timer_stop(&soc->int_timer);
1599 		qdf_timer_free(&soc->int_timer);
1600 	} else {
1601 		hif_deregister_exec_group(soc->hif_handle, "dp_intr");
1602 	}
1603 
1604 	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
1605 		soc->intr_ctx[i].tx_ring_mask = 0;
1606 		soc->intr_ctx[i].rx_ring_mask = 0;
1607 		soc->intr_ctx[i].rx_mon_ring_mask = 0;
1608 		soc->intr_ctx[i].rx_err_ring_mask = 0;
1609 		soc->intr_ctx[i].rx_wbm_rel_ring_mask = 0;
1610 		soc->intr_ctx[i].reo_status_ring_mask = 0;
1611 		soc->intr_ctx[i].rxdma2host_ring_mask = 0;
1612 		soc->intr_ctx[i].host2rxdma_ring_mask = 0;
1613 
1614 		qdf_lro_deinit(soc->intr_ctx[i].lro_ctx);
1615 	}
1616 }
1617 
1618 #define AVG_MAX_MPDUS_PER_TID 128
1619 #define AVG_TIDS_PER_CLIENT 2
1620 #define AVG_FLOWS_PER_TID 2
1621 #define AVG_MSDUS_PER_FLOW 128
1622 #define AVG_MSDUS_PER_MPDU 4
1623 
1624 /*
1625  * Allocate and setup link descriptor pool that will be used by HW for
1626  * various link and queue descriptors and managed by WBM
1627  */
1628 static int dp_hw_link_desc_pool_setup(struct dp_soc *soc)
1629 {
1630 	int link_desc_size = hal_get_link_desc_size(soc->hal_soc);
1631 	int link_desc_align = hal_get_link_desc_align(soc->hal_soc);
1632 	uint32_t max_clients = wlan_cfg_get_max_clients(soc->wlan_cfg_ctx);
1633 	uint32_t num_mpdus_per_link_desc =
1634 		hal_num_mpdus_per_link_desc(soc->hal_soc);
1635 	uint32_t num_msdus_per_link_desc =
1636 		hal_num_msdus_per_link_desc(soc->hal_soc);
1637 	uint32_t num_mpdu_links_per_queue_desc =
1638 		hal_num_mpdu_links_per_queue_desc(soc->hal_soc);
1639 	uint32_t max_alloc_size = wlan_cfg_max_alloc_size(soc->wlan_cfg_ctx);
1640 	uint32_t total_link_descs, total_mem_size;
1641 	uint32_t num_mpdu_link_descs, num_mpdu_queue_descs;
1642 	uint32_t num_tx_msdu_link_descs, num_rx_msdu_link_descs;
1643 	uint32_t num_link_desc_banks;
1644 	uint32_t last_bank_size = 0;
1645 	uint32_t entry_size, num_entries;
1646 	int i;
1647 	uint32_t desc_id = 0;
1648 
1649 	/* Only Tx queue descriptors are allocated from common link descriptor
1650 	 * pool Rx queue descriptors are not included in this because (REO queue
1651 	 * extension descriptors) they are expected to be allocated contiguously
1652 	 * with REO queue descriptors
1653 	 */
1654 	num_mpdu_link_descs = (max_clients * AVG_TIDS_PER_CLIENT *
1655 		AVG_MAX_MPDUS_PER_TID) / num_mpdus_per_link_desc;
1656 
1657 	num_mpdu_queue_descs = num_mpdu_link_descs /
1658 		num_mpdu_links_per_queue_desc;
1659 
1660 	num_tx_msdu_link_descs = (max_clients * AVG_TIDS_PER_CLIENT *
1661 		AVG_FLOWS_PER_TID * AVG_MSDUS_PER_FLOW) /
1662 		num_msdus_per_link_desc;
1663 
1664 	num_rx_msdu_link_descs = (max_clients * AVG_TIDS_PER_CLIENT *
1665 		AVG_MAX_MPDUS_PER_TID * AVG_MSDUS_PER_MPDU) / 6;
1666 
1667 	num_entries = num_mpdu_link_descs + num_mpdu_queue_descs +
1668 		num_tx_msdu_link_descs + num_rx_msdu_link_descs;
1669 
1670 	/* Round up to power of 2 */
1671 	total_link_descs = 1;
1672 	while (total_link_descs < num_entries)
1673 		total_link_descs <<= 1;
1674 
1675 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
1676 		FL("total_link_descs: %u, link_desc_size: %d"),
1677 		total_link_descs, link_desc_size);
1678 	total_mem_size =  total_link_descs * link_desc_size;
1679 
1680 	total_mem_size += link_desc_align;
1681 
1682 	if (total_mem_size <= max_alloc_size) {
1683 		num_link_desc_banks = 0;
1684 		last_bank_size = total_mem_size;
1685 	} else {
1686 		num_link_desc_banks = (total_mem_size) /
1687 			(max_alloc_size - link_desc_align);
1688 		last_bank_size = total_mem_size %
1689 			(max_alloc_size - link_desc_align);
1690 	}
1691 
1692 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
1693 		FL("total_mem_size: %d, num_link_desc_banks: %u"),
1694 		total_mem_size, num_link_desc_banks);
1695 
1696 	for (i = 0; i < num_link_desc_banks; i++) {
1697 		soc->link_desc_banks[i].base_vaddr_unaligned =
1698 			qdf_mem_alloc_consistent(soc->osdev, soc->osdev->dev,
1699 			max_alloc_size,
1700 			&(soc->link_desc_banks[i].base_paddr_unaligned));
1701 		soc->link_desc_banks[i].size = max_alloc_size;
1702 
1703 		soc->link_desc_banks[i].base_vaddr = (void *)((unsigned long)(
1704 			soc->link_desc_banks[i].base_vaddr_unaligned) +
1705 			((unsigned long)(
1706 			soc->link_desc_banks[i].base_vaddr_unaligned) %
1707 			link_desc_align));
1708 
1709 		soc->link_desc_banks[i].base_paddr = (unsigned long)(
1710 			soc->link_desc_banks[i].base_paddr_unaligned) +
1711 			((unsigned long)(soc->link_desc_banks[i].base_vaddr) -
1712 			(unsigned long)(
1713 			soc->link_desc_banks[i].base_vaddr_unaligned));
1714 
1715 		if (!soc->link_desc_banks[i].base_vaddr_unaligned) {
1716 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1717 				FL("Link descriptor memory alloc failed"));
1718 			goto fail;
1719 		}
1720 	}
1721 
1722 	if (last_bank_size) {
1723 		/* Allocate last bank in case total memory required is not exact
1724 		 * multiple of max_alloc_size
1725 		 */
1726 		soc->link_desc_banks[i].base_vaddr_unaligned =
1727 			qdf_mem_alloc_consistent(soc->osdev, soc->osdev->dev,
1728 			last_bank_size,
1729 			&(soc->link_desc_banks[i].base_paddr_unaligned));
1730 		soc->link_desc_banks[i].size = last_bank_size;
1731 
1732 		soc->link_desc_banks[i].base_vaddr = (void *)((unsigned long)
1733 			(soc->link_desc_banks[i].base_vaddr_unaligned) +
1734 			((unsigned long)(
1735 			soc->link_desc_banks[i].base_vaddr_unaligned) %
1736 			link_desc_align));
1737 
1738 		soc->link_desc_banks[i].base_paddr =
1739 			(unsigned long)(
1740 			soc->link_desc_banks[i].base_paddr_unaligned) +
1741 			((unsigned long)(soc->link_desc_banks[i].base_vaddr) -
1742 			(unsigned long)(
1743 			soc->link_desc_banks[i].base_vaddr_unaligned));
1744 	}
1745 
1746 
1747 	/* Allocate and setup link descriptor idle list for HW internal use */
1748 	entry_size = hal_srng_get_entrysize(soc->hal_soc, WBM_IDLE_LINK);
1749 	total_mem_size = entry_size * total_link_descs;
1750 
1751 	if (total_mem_size <= max_alloc_size) {
1752 		void *desc;
1753 
1754 		if (dp_srng_setup(soc, &soc->wbm_idle_link_ring,
1755 			WBM_IDLE_LINK, 0, 0, total_link_descs)) {
1756 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1757 				FL("Link desc idle ring setup failed"));
1758 			goto fail;
1759 		}
1760 
1761 		hal_srng_access_start_unlocked(soc->hal_soc,
1762 			soc->wbm_idle_link_ring.hal_srng);
1763 
1764 		for (i = 0; i < MAX_LINK_DESC_BANKS &&
1765 			soc->link_desc_banks[i].base_paddr; i++) {
1766 			uint32_t num_entries = (soc->link_desc_banks[i].size -
1767 				((unsigned long)(
1768 				soc->link_desc_banks[i].base_vaddr) -
1769 				(unsigned long)(
1770 				soc->link_desc_banks[i].base_vaddr_unaligned)))
1771 				/ link_desc_size;
1772 			unsigned long paddr = (unsigned long)(
1773 				soc->link_desc_banks[i].base_paddr);
1774 
1775 			while (num_entries && (desc = hal_srng_src_get_next(
1776 				soc->hal_soc,
1777 				soc->wbm_idle_link_ring.hal_srng))) {
1778 				hal_set_link_desc_addr(desc,
1779 					LINK_DESC_COOKIE(desc_id, i), paddr);
1780 				num_entries--;
1781 				desc_id++;
1782 				paddr += link_desc_size;
1783 			}
1784 		}
1785 		hal_srng_access_end_unlocked(soc->hal_soc,
1786 			soc->wbm_idle_link_ring.hal_srng);
1787 	} else {
1788 		uint32_t num_scatter_bufs;
1789 		uint32_t num_entries_per_buf;
1790 		uint32_t rem_entries;
1791 		uint8_t *scatter_buf_ptr;
1792 		uint16_t scatter_buf_num;
1793 
1794 		soc->wbm_idle_scatter_buf_size =
1795 			hal_idle_list_scatter_buf_size(soc->hal_soc);
1796 		num_entries_per_buf = hal_idle_scatter_buf_num_entries(
1797 			soc->hal_soc, soc->wbm_idle_scatter_buf_size);
1798 		num_scatter_bufs = hal_idle_list_num_scatter_bufs(
1799 					soc->hal_soc, total_mem_size,
1800 					soc->wbm_idle_scatter_buf_size);
1801 
1802 		if (num_scatter_bufs > MAX_IDLE_SCATTER_BUFS) {
1803 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1804 					FL("scatter bufs size out of bounds"));
1805 			goto fail;
1806 		}
1807 
1808 		for (i = 0; i < num_scatter_bufs; i++) {
1809 			soc->wbm_idle_scatter_buf_base_vaddr[i] =
1810 				qdf_mem_alloc_consistent(soc->osdev,
1811 							soc->osdev->dev,
1812 				soc->wbm_idle_scatter_buf_size,
1813 				&(soc->wbm_idle_scatter_buf_base_paddr[i]));
1814 			if (soc->wbm_idle_scatter_buf_base_vaddr[i] == NULL) {
1815 				QDF_TRACE(QDF_MODULE_ID_DP,
1816 						QDF_TRACE_LEVEL_ERROR,
1817 					FL("Scatter list memory alloc failed"));
1818 				goto fail;
1819 			}
1820 		}
1821 
1822 		/* Populate idle list scatter buffers with link descriptor
1823 		 * pointers
1824 		 */
1825 		scatter_buf_num = 0;
1826 		scatter_buf_ptr = (uint8_t *)(
1827 			soc->wbm_idle_scatter_buf_base_vaddr[scatter_buf_num]);
1828 		rem_entries = num_entries_per_buf;
1829 
1830 		for (i = 0; i < MAX_LINK_DESC_BANKS &&
1831 			soc->link_desc_banks[i].base_paddr; i++) {
1832 			uint32_t num_link_descs =
1833 				(soc->link_desc_banks[i].size -
1834 				((unsigned long)(
1835 				soc->link_desc_banks[i].base_vaddr) -
1836 				(unsigned long)(
1837 				soc->link_desc_banks[i].base_vaddr_unaligned)))
1838 				/ link_desc_size;
1839 			unsigned long paddr = (unsigned long)(
1840 				soc->link_desc_banks[i].base_paddr);
1841 
1842 			while (num_link_descs) {
1843 				hal_set_link_desc_addr((void *)scatter_buf_ptr,
1844 					LINK_DESC_COOKIE(desc_id, i), paddr);
1845 				num_link_descs--;
1846 				desc_id++;
1847 				paddr += link_desc_size;
1848 				rem_entries--;
1849 				if (rem_entries) {
1850 					scatter_buf_ptr += entry_size;
1851 				} else {
1852 					rem_entries = num_entries_per_buf;
1853 					scatter_buf_num++;
1854 
1855 					if (scatter_buf_num >= num_scatter_bufs)
1856 						break;
1857 
1858 					scatter_buf_ptr = (uint8_t *)(
1859 						soc->wbm_idle_scatter_buf_base_vaddr[
1860 						scatter_buf_num]);
1861 				}
1862 			}
1863 		}
1864 		/* Setup link descriptor idle list in HW */
1865 		hal_setup_link_idle_list(soc->hal_soc,
1866 			soc->wbm_idle_scatter_buf_base_paddr,
1867 			soc->wbm_idle_scatter_buf_base_vaddr,
1868 			num_scatter_bufs, soc->wbm_idle_scatter_buf_size,
1869 			(uint32_t)(scatter_buf_ptr -
1870 			(uint8_t *)(soc->wbm_idle_scatter_buf_base_vaddr[
1871 			scatter_buf_num-1])), total_link_descs);
1872 	}
1873 	return 0;
1874 
1875 fail:
1876 	if (soc->wbm_idle_link_ring.hal_srng) {
1877 		dp_srng_cleanup(soc, &soc->wbm_idle_link_ring,
1878 				WBM_IDLE_LINK, 0);
1879 	}
1880 
1881 	for (i = 0; i < MAX_IDLE_SCATTER_BUFS; i++) {
1882 		if (soc->wbm_idle_scatter_buf_base_vaddr[i]) {
1883 			qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
1884 				soc->wbm_idle_scatter_buf_size,
1885 				soc->wbm_idle_scatter_buf_base_vaddr[i],
1886 				soc->wbm_idle_scatter_buf_base_paddr[i], 0);
1887 			soc->wbm_idle_scatter_buf_base_vaddr[i] = NULL;
1888 		}
1889 	}
1890 
1891 	for (i = 0; i < MAX_LINK_DESC_BANKS; i++) {
1892 		if (soc->link_desc_banks[i].base_vaddr_unaligned) {
1893 			qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
1894 				soc->link_desc_banks[i].size,
1895 				soc->link_desc_banks[i].base_vaddr_unaligned,
1896 				soc->link_desc_banks[i].base_paddr_unaligned,
1897 				0);
1898 			soc->link_desc_banks[i].base_vaddr_unaligned = NULL;
1899 		}
1900 	}
1901 	return QDF_STATUS_E_FAILURE;
1902 }
1903 
1904 /*
1905  * Free link descriptor pool that was setup HW
1906  */
1907 static void dp_hw_link_desc_pool_cleanup(struct dp_soc *soc)
1908 {
1909 	int i;
1910 
1911 	if (soc->wbm_idle_link_ring.hal_srng) {
1912 		dp_srng_cleanup(soc, &soc->wbm_idle_link_ring,
1913 			WBM_IDLE_LINK, 0);
1914 	}
1915 
1916 	for (i = 0; i < MAX_IDLE_SCATTER_BUFS; i++) {
1917 		if (soc->wbm_idle_scatter_buf_base_vaddr[i]) {
1918 			qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
1919 				soc->wbm_idle_scatter_buf_size,
1920 				soc->wbm_idle_scatter_buf_base_vaddr[i],
1921 				soc->wbm_idle_scatter_buf_base_paddr[i], 0);
1922 			soc->wbm_idle_scatter_buf_base_vaddr[i] = NULL;
1923 		}
1924 	}
1925 
1926 	for (i = 0; i < MAX_LINK_DESC_BANKS; i++) {
1927 		if (soc->link_desc_banks[i].base_vaddr_unaligned) {
1928 			qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
1929 				soc->link_desc_banks[i].size,
1930 				soc->link_desc_banks[i].base_vaddr_unaligned,
1931 				soc->link_desc_banks[i].base_paddr_unaligned,
1932 				0);
1933 			soc->link_desc_banks[i].base_vaddr_unaligned = NULL;
1934 		}
1935 	}
1936 }
1937 
1938 #define REO_DST_RING_SIZE_QCA6290 1024
1939 #ifndef QCA_WIFI_QCA8074_VP
1940 #define REO_DST_RING_SIZE_QCA8074 2048
1941 #else
1942 #define REO_DST_RING_SIZE_QCA8074 8
1943 #endif
1944 
1945 /*
1946  * dp_wds_aging_timer_fn() - Timer callback function for WDS aging
1947  * @soc: Datapath SOC handle
1948  *
1949  * This is a timer function used to age out stale AST nodes from
1950  * AST table
1951  */
1952 #ifdef FEATURE_WDS
1953 static void dp_wds_aging_timer_fn(void *soc_hdl)
1954 {
1955 	struct dp_soc *soc = (struct dp_soc *) soc_hdl;
1956 	struct dp_pdev *pdev;
1957 	struct dp_vdev *vdev;
1958 	struct dp_peer *peer;
1959 	struct dp_ast_entry *ase, *temp_ase;
1960 	int i;
1961 
1962 	qdf_spin_lock_bh(&soc->ast_lock);
1963 
1964 	for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) {
1965 		pdev = soc->pdev_list[i];
1966 		qdf_spin_lock_bh(&pdev->vdev_list_lock);
1967 		DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
1968 			DP_VDEV_ITERATE_PEER_LIST(vdev, peer) {
1969 				DP_PEER_ITERATE_ASE_LIST(peer, ase, temp_ase) {
1970 					/*
1971 					 * Do not expire static ast entries
1972 					 * and HM WDS entries
1973 					 */
1974 					if (ase->type != CDP_TXRX_AST_TYPE_WDS)
1975 						continue;
1976 
1977 					if (ase->is_active) {
1978 						ase->is_active = FALSE;
1979 						continue;
1980 					}
1981 
1982 					DP_STATS_INC(soc, ast.aged_out, 1);
1983 					dp_peer_del_ast(soc, ase);
1984 				}
1985 			}
1986 		}
1987 		qdf_spin_unlock_bh(&pdev->vdev_list_lock);
1988 	}
1989 
1990 	qdf_spin_unlock_bh(&soc->ast_lock);
1991 
1992 	if (qdf_atomic_read(&soc->cmn_init_done))
1993 		qdf_timer_mod(&soc->wds_aging_timer, DP_WDS_AGING_TIMER_DEFAULT_MS);
1994 }
1995 
1996 
1997 /*
1998  * dp_soc_wds_attach() - Setup WDS timer and AST table
1999  * @soc:		Datapath SOC handle
2000  *
2001  * Return: None
2002  */
2003 static void dp_soc_wds_attach(struct dp_soc *soc)
2004 {
2005 	qdf_timer_init(soc->osdev, &soc->wds_aging_timer,
2006 			dp_wds_aging_timer_fn, (void *)soc,
2007 			QDF_TIMER_TYPE_WAKE_APPS);
2008 
2009 	qdf_timer_mod(&soc->wds_aging_timer, DP_WDS_AGING_TIMER_DEFAULT_MS);
2010 }
2011 
2012 /*
2013  * dp_soc_wds_detach() - Detach WDS data structures and timers
2014  * @txrx_soc: DP SOC handle
2015  *
2016  * Return: None
2017  */
2018 static void dp_soc_wds_detach(struct dp_soc *soc)
2019 {
2020 	qdf_timer_stop(&soc->wds_aging_timer);
2021 	qdf_timer_free(&soc->wds_aging_timer);
2022 }
2023 #else
2024 static void dp_soc_wds_attach(struct dp_soc *soc)
2025 {
2026 }
2027 
2028 static void dp_soc_wds_detach(struct dp_soc *soc)
2029 {
2030 }
2031 #endif
2032 
2033 /*
2034  * dp_soc_reset_ring_map() - Reset cpu ring map
2035  * @soc: Datapath soc handler
2036  *
2037  * This api resets the default cpu ring map
2038  */
2039 
2040 static void dp_soc_reset_cpu_ring_map(struct dp_soc *soc)
2041 {
2042 	uint8_t i;
2043 	int nss_config = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
2044 
2045 	for (i = 0; i < WLAN_CFG_INT_NUM_CONTEXTS; i++) {
2046 		if (nss_config == 1) {
2047 			/*
2048 			 * Setting Tx ring map for one nss offloaded radio
2049 			 */
2050 			soc->tx_ring_map[i] = dp_cpu_ring_map[DP_NSS_FIRST_RADIO_OFFLOADED_MAP][i];
2051 		} else if (nss_config == 2) {
2052 			/*
2053 			 * Setting Tx ring for two nss offloaded radios
2054 			 */
2055 			soc->tx_ring_map[i] = dp_cpu_ring_map[DP_NSS_SECOND_RADIO_OFFLOADED_MAP][i];
2056 		} else {
2057 			/*
2058 			 * Setting Tx ring map for all nss offloaded radios
2059 			 */
2060 			soc->tx_ring_map[i] = dp_cpu_ring_map[DP_NSS_ALL_RADIO_OFFLOADED_MAP][i];
2061 		}
2062 	}
2063 }
2064 
2065 /*
2066  * dp_soc_ring_if_nss_offloaded() - find if ring is offloaded to NSS
2067  * @dp_soc - DP soc handle
2068  * @ring_type - ring type
2069  * @ring_num - ring_num
2070  *
2071  * return 0 or 1
2072  */
2073 static uint8_t dp_soc_ring_if_nss_offloaded(struct dp_soc *soc, enum hal_ring_type ring_type, int ring_num)
2074 {
2075 	uint8_t nss_config = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
2076 	uint8_t status = 0;
2077 
2078 	switch (ring_type) {
2079 	case WBM2SW_RELEASE:
2080 	case REO_DST:
2081 	case RXDMA_BUF:
2082 		status = ((nss_config) & (1 << ring_num));
2083 		break;
2084 	default:
2085 		break;
2086 	}
2087 
2088 	return status;
2089 }
2090 
2091 /*
2092  * dp_soc_reset_intr_mask() - reset interrupt mask
2093  * @dp_soc - DP Soc handle
2094  *
2095  * Return: Return void
2096  */
2097 static void dp_soc_reset_intr_mask(struct dp_soc *soc)
2098 {
2099 	uint8_t j;
2100 	int *grp_mask = NULL;
2101 	int group_number, mask, num_ring;
2102 
2103 	/* number of tx ring */
2104 	num_ring = wlan_cfg_num_tcl_data_rings(soc->wlan_cfg_ctx);
2105 
2106 	/*
2107 	 * group mask for tx completion  ring.
2108 	 */
2109 	grp_mask =  &soc->wlan_cfg_ctx->int_tx_ring_mask[0];
2110 
2111 	/* loop and reset the mask for only offloaded ring */
2112 	for (j = 0; j < num_ring; j++) {
2113 		if (!dp_soc_ring_if_nss_offloaded(soc, WBM2SW_RELEASE, j)) {
2114 			continue;
2115 		}
2116 
2117 		/*
2118 		 * Group number corresponding to tx offloaded ring.
2119 		 */
2120 		group_number = dp_srng_find_ring_in_mask(j, grp_mask);
2121 		if (group_number < 0) {
2122 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2123 					FL("ring not part of any group; ring_type: %d,ring_num %d"),
2124 					WBM2SW_RELEASE, j);
2125 			return;
2126 		}
2127 
2128 		/* reset the tx mask for offloaded ring */
2129 		mask = wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, group_number);
2130 		mask &= (~(1 << j));
2131 
2132 		/*
2133 		 * reset the interrupt mask for offloaded ring.
2134 		 */
2135 		wlan_cfg_set_tx_ring_mask(soc->wlan_cfg_ctx, group_number, mask);
2136 	}
2137 
2138 	/* number of rx rings */
2139 	num_ring = wlan_cfg_num_reo_dest_rings(soc->wlan_cfg_ctx);
2140 
2141 	/*
2142 	 * group mask for reo destination ring.
2143 	 */
2144 	grp_mask = &soc->wlan_cfg_ctx->int_rx_ring_mask[0];
2145 
2146 	/* loop and reset the mask for only offloaded ring */
2147 	for (j = 0; j < num_ring; j++) {
2148 		if (!dp_soc_ring_if_nss_offloaded(soc, REO_DST, j)) {
2149 			continue;
2150 		}
2151 
2152 		/*
2153 		 * Group number corresponding to rx offloaded ring.
2154 		 */
2155 		group_number = dp_srng_find_ring_in_mask(j, grp_mask);
2156 		if (group_number < 0) {
2157 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2158 					FL("ring not part of any group; ring_type: %d,ring_num %d"),
2159 					REO_DST, j);
2160 			return;
2161 		}
2162 
2163 		/* set the interrupt mask for offloaded ring */
2164 		mask =  wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, group_number);
2165 		mask &= (~(1 << j));
2166 
2167 		/*
2168 		 * set the interrupt mask to zero for rx offloaded radio.
2169 		 */
2170 		wlan_cfg_set_rx_ring_mask(soc->wlan_cfg_ctx, group_number, mask);
2171 	}
2172 
2173 	/*
2174 	 * group mask for Rx buffer refill ring
2175 	 */
2176 	grp_mask = &soc->wlan_cfg_ctx->int_host2rxdma_ring_mask[0];
2177 
2178 	/* loop and reset the mask for only offloaded ring */
2179 	for (j = 0; j < MAX_PDEV_CNT; j++) {
2180 		if (!dp_soc_ring_if_nss_offloaded(soc, RXDMA_BUF, j)) {
2181 			continue;
2182 		}
2183 
2184 		/*
2185 		 * Group number corresponding to rx offloaded ring.
2186 		 */
2187 		group_number = dp_srng_find_ring_in_mask(j, grp_mask);
2188 		if (group_number < 0) {
2189 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2190 					FL("ring not part of any group; ring_type: %d,ring_num %d"),
2191 					REO_DST, j);
2192 			return;
2193 		}
2194 
2195 		/* set the interrupt mask for offloaded ring */
2196 		mask =  wlan_cfg_get_host2rxdma_ring_mask(soc->wlan_cfg_ctx,
2197 				group_number);
2198 		mask &= (~(1 << j));
2199 
2200 		/*
2201 		 * set the interrupt mask to zero for rx offloaded radio.
2202 		 */
2203 		wlan_cfg_set_host2rxdma_ring_mask(soc->wlan_cfg_ctx,
2204 			group_number, mask);
2205 	}
2206 }
2207 
2208 #ifdef IPA_OFFLOAD
2209 /**
2210  * dp_reo_remap_config() - configure reo remap register value based
2211  *                         nss configuration.
2212  *		based on offload_radio value below remap configuration
2213  *		get applied.
2214  *		0 - both Radios handled by host (remap rings 1, 2, 3 & 4)
2215  *		1 - 1st Radio handled by NSS (remap rings 2, 3 & 4)
2216  *		2 - 2nd Radio handled by NSS (remap rings 1, 2 & 4)
2217  *		3 - both Radios handled by NSS (remap not required)
2218  *		4 - IPA OFFLOAD enabled (remap rings 1,2 & 3)
2219  *
2220  * @remap1: output parameter indicates reo remap 1 register value
2221  * @remap2: output parameter indicates reo remap 2 register value
2222  * Return: bool type, true if remap is configured else false.
2223  */
2224 static bool dp_reo_remap_config(struct dp_soc *soc,
2225 				uint32_t *remap1,
2226 				uint32_t *remap2)
2227 {
2228 
2229 	*remap1 = ((0x1 << 0) | (0x2 << 3) | (0x3 << 6) | (0x1 << 9) |
2230 		(0x2 << 12) | (0x3 << 15) | (0x1 << 18) | (0x2 << 21)) << 8;
2231 
2232 	*remap2 = ((0x3 << 0) | (0x1 << 3) | (0x2 << 6) | (0x3 << 9) |
2233 		(0x1 << 12) | (0x2 << 15) | (0x3 << 18) | (0x1 << 21)) << 8;
2234 
2235 	return true;
2236 }
2237 #else
2238 static bool dp_reo_remap_config(struct dp_soc *soc,
2239 				uint32_t *remap1,
2240 				uint32_t *remap2)
2241 {
2242 	uint8_t offload_radio = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
2243 
2244 	switch (offload_radio) {
2245 	case 0:
2246 		*remap1 = ((0x1 << 0) | (0x2 << 3) | (0x3 << 6) |
2247 			(0x4 << 9) | (0x1 << 12) | (0x2 << 15) |
2248 			(0x3 << 18) | (0x4 << 21)) << 8;
2249 
2250 		*remap2 = ((0x1 << 0) | (0x2 << 3) | (0x3 << 6) |
2251 			(0x4 << 9) | (0x1 << 12) | (0x2 << 15) |
2252 			(0x3 << 18) | (0x4 << 21)) << 8;
2253 		break;
2254 
2255 	case 1:
2256 		*remap1 = ((0x2 << 0) | (0x3 << 3) | (0x4 << 6) |
2257 			(0x2 << 9) | (0x3 << 12) | (0x4 << 15) |
2258 			(0x2 << 18) | (0x3 << 21)) << 8;
2259 
2260 		*remap2 = ((0x4 << 0) | (0x2 << 3) | (0x3 << 6) |
2261 			(0x4 << 9) | (0x2 << 12) | (0x3 << 15) |
2262 			(0x4 << 18) | (0x2 << 21)) << 8;
2263 		break;
2264 
2265 	case 2:
2266 		*remap1 = ((0x1 << 0) | (0x3 << 3) | (0x4 << 6) |
2267 			(0x1 << 9) | (0x3 << 12) | (0x4 << 15) |
2268 			(0x1 << 18) | (0x3 << 21)) << 8;
2269 
2270 		*remap2 = ((0x4 << 0) | (0x1 << 3) | (0x3 << 6) |
2271 			(0x4 << 9) | (0x1 << 12) | (0x3 << 15) |
2272 			(0x4 << 18) | (0x1 << 21)) << 8;
2273 		break;
2274 
2275 	case 3:
2276 		/* return false if both radios are offloaded to NSS */
2277 		return false;
2278 	}
2279 	return true;
2280 }
2281 #endif
2282 
2283 /*
2284  * dp_reo_frag_dst_set() - configure reo register to set the
2285  *                        fragment destination ring
2286  * @soc : Datapath soc
2287  * @frag_dst_ring : output parameter to set fragment destination ring
2288  *
2289  * Based on offload_radio below fragment destination rings is selected
2290  * 0 - TCL
2291  * 1 - SW1
2292  * 2 - SW2
2293  * 3 - SW3
2294  * 4 - SW4
2295  * 5 - Release
2296  * 6 - FW
2297  * 7 - alternate select
2298  *
2299  * return: void
2300  */
2301 static void dp_reo_frag_dst_set(struct dp_soc *soc, uint8_t *frag_dst_ring)
2302 {
2303 	uint8_t offload_radio = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
2304 
2305 	switch (offload_radio) {
2306 	case 0:
2307 		*frag_dst_ring = HAL_SRNG_REO_EXCEPTION;
2308 		break;
2309 	case 3:
2310 		*frag_dst_ring = HAL_SRNG_REO_ALTERNATE_SELECT;
2311 		break;
2312 	default:
2313 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2314 				FL("dp_reo_frag_dst_set invalid offload radio config"));
2315 		break;
2316 	}
2317 }
2318 
2319 /*
2320  * dp_soc_cmn_setup() - Common SoC level initializion
2321  * @soc:		Datapath SOC handle
2322  *
2323  * This is an internal function used to setup common SOC data structures,
2324  * to be called from PDEV attach after receiving HW mode capabilities from FW
2325  */
2326 static int dp_soc_cmn_setup(struct dp_soc *soc)
2327 {
2328 	int i;
2329 	struct hal_reo_params reo_params;
2330 	int tx_ring_size;
2331 	int tx_comp_ring_size;
2332 	int reo_dst_ring_size;
2333 	uint32_t entries;
2334 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
2335 
2336 	if (qdf_atomic_read(&soc->cmn_init_done))
2337 		return 0;
2338 
2339 	if (dp_hw_link_desc_pool_setup(soc))
2340 		goto fail1;
2341 
2342 	soc_cfg_ctx = soc->wlan_cfg_ctx;
2343 	/* Setup SRNG rings */
2344 	/* Common rings */
2345 	if (dp_srng_setup(soc, &soc->wbm_desc_rel_ring, SW2WBM_RELEASE, 0, 0,
2346 		wlan_cfg_get_dp_soc_wbm_release_ring_size(soc_cfg_ctx))) {
2347 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2348 			FL("dp_srng_setup failed for wbm_desc_rel_ring"));
2349 		goto fail1;
2350 	}
2351 
2352 
2353 	soc->num_tcl_data_rings = 0;
2354 	/* Tx data rings */
2355 	if (!wlan_cfg_per_pdev_tx_ring(soc_cfg_ctx)) {
2356 		soc->num_tcl_data_rings =
2357 			wlan_cfg_num_tcl_data_rings(soc_cfg_ctx);
2358 		tx_comp_ring_size =
2359 			wlan_cfg_tx_comp_ring_size(soc_cfg_ctx);
2360 		tx_ring_size =
2361 			wlan_cfg_tx_ring_size(soc_cfg_ctx);
2362 		for (i = 0; i < soc->num_tcl_data_rings; i++) {
2363 			if (dp_srng_setup(soc, &soc->tcl_data_ring[i],
2364 				TCL_DATA, i, 0, tx_ring_size)) {
2365 				QDF_TRACE(QDF_MODULE_ID_DP,
2366 					QDF_TRACE_LEVEL_ERROR,
2367 					FL("dp_srng_setup failed for tcl_data_ring[%d]"), i);
2368 				goto fail1;
2369 			}
2370 			/*
2371 			 * TBD: Set IPA WBM ring size with ini IPA UC tx buffer
2372 			 * count
2373 			 */
2374 			if (dp_srng_setup(soc, &soc->tx_comp_ring[i],
2375 				WBM2SW_RELEASE, i, 0, tx_comp_ring_size)) {
2376 				QDF_TRACE(QDF_MODULE_ID_DP,
2377 					QDF_TRACE_LEVEL_ERROR,
2378 					FL("dp_srng_setup failed for tx_comp_ring[%d]"), i);
2379 				goto fail1;
2380 			}
2381 		}
2382 	} else {
2383 		/* This will be incremented during per pdev ring setup */
2384 		soc->num_tcl_data_rings = 0;
2385 	}
2386 
2387 	if (dp_tx_soc_attach(soc)) {
2388 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2389 				FL("dp_tx_soc_attach failed"));
2390 		goto fail1;
2391 	}
2392 
2393 	entries = wlan_cfg_get_dp_soc_tcl_cmd_ring_size(soc_cfg_ctx);
2394 	/* TCL command and status rings */
2395 	if (dp_srng_setup(soc, &soc->tcl_cmd_ring, TCL_CMD, 0, 0,
2396 			  entries)) {
2397 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2398 			FL("dp_srng_setup failed for tcl_cmd_ring"));
2399 		goto fail1;
2400 	}
2401 
2402 	entries = wlan_cfg_get_dp_soc_tcl_status_ring_size(soc_cfg_ctx);
2403 	if (dp_srng_setup(soc, &soc->tcl_status_ring, TCL_STATUS, 0, 0,
2404 			  entries)) {
2405 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2406 			FL("dp_srng_setup failed for tcl_status_ring"));
2407 		goto fail1;
2408 	}
2409 
2410 	reo_dst_ring_size = wlan_cfg_get_reo_dst_ring_size(soc->wlan_cfg_ctx);
2411 
2412 	/* TBD: call dp_tx_init to setup Tx SW descriptors and MSDU extension
2413 	 * descriptors
2414 	 */
2415 
2416 	/* Rx data rings */
2417 	if (!wlan_cfg_per_pdev_rx_ring(soc_cfg_ctx)) {
2418 		soc->num_reo_dest_rings =
2419 			wlan_cfg_num_reo_dest_rings(soc_cfg_ctx);
2420 		QDF_TRACE(QDF_MODULE_ID_DP,
2421 			QDF_TRACE_LEVEL_INFO,
2422 			FL("num_reo_dest_rings %d"), soc->num_reo_dest_rings);
2423 		for (i = 0; i < soc->num_reo_dest_rings; i++) {
2424 			if (dp_srng_setup(soc, &soc->reo_dest_ring[i], REO_DST,
2425 				i, 0, reo_dst_ring_size)) {
2426 				QDF_TRACE(QDF_MODULE_ID_DP,
2427 					  QDF_TRACE_LEVEL_ERROR,
2428 					  FL(RNG_ERR "reo_dest_ring [%d]"), i);
2429 				goto fail1;
2430 			}
2431 		}
2432 	} else {
2433 		/* This will be incremented during per pdev ring setup */
2434 		soc->num_reo_dest_rings = 0;
2435 	}
2436 
2437 	entries = wlan_cfg_get_dp_soc_rxdma_err_dst_ring_size(soc_cfg_ctx);
2438 	/* LMAC RxDMA to SW Rings configuration */
2439 	if (!wlan_cfg_per_pdev_lmac_ring(soc_cfg_ctx)) {
2440 		/* Only valid for MCL */
2441 		struct dp_pdev *pdev = soc->pdev_list[0];
2442 
2443 		for (i = 0; i < MAX_RX_MAC_RINGS; i++) {
2444 			if (dp_srng_setup(soc, &pdev->rxdma_err_dst_ring[i],
2445 					  RXDMA_DST, 0, i,
2446 					  entries)) {
2447 				QDF_TRACE(QDF_MODULE_ID_DP,
2448 					  QDF_TRACE_LEVEL_ERROR,
2449 					  FL(RNG_ERR "rxdma_err_dst_ring"));
2450 				goto fail1;
2451 			}
2452 		}
2453 	}
2454 	/* TBD: call dp_rx_init to setup Rx SW descriptors */
2455 
2456 	/* REO reinjection ring */
2457 	entries = wlan_cfg_get_dp_soc_reo_reinject_ring_size(soc_cfg_ctx);
2458 	if (dp_srng_setup(soc, &soc->reo_reinject_ring, REO_REINJECT, 0, 0,
2459 			  entries)) {
2460 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2461 			  FL("dp_srng_setup failed for reo_reinject_ring"));
2462 		goto fail1;
2463 	}
2464 
2465 
2466 	/* Rx release ring */
2467 	if (dp_srng_setup(soc, &soc->rx_rel_ring, WBM2SW_RELEASE, 3, 0,
2468 		wlan_cfg_get_dp_soc_rx_release_ring_size(soc_cfg_ctx))) {
2469 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2470 			  FL("dp_srng_setup failed for rx_rel_ring"));
2471 		goto fail1;
2472 	}
2473 
2474 
2475 	/* Rx exception ring */
2476 	entries = wlan_cfg_get_dp_soc_reo_exception_ring_size(soc_cfg_ctx);
2477 	if (dp_srng_setup(soc, &soc->reo_exception_ring,
2478 			  REO_EXCEPTION, 0, MAX_REO_DEST_RINGS, entries)) {
2479 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2480 			  FL("dp_srng_setup failed for reo_exception_ring"));
2481 		goto fail1;
2482 	}
2483 
2484 
2485 	/* REO command and status rings */
2486 	if (dp_srng_setup(soc, &soc->reo_cmd_ring, REO_CMD, 0, 0,
2487 		wlan_cfg_get_dp_soc_reo_cmd_ring_size(soc_cfg_ctx))) {
2488 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2489 			FL("dp_srng_setup failed for reo_cmd_ring"));
2490 		goto fail1;
2491 	}
2492 
2493 	hal_reo_init_cmd_ring(soc->hal_soc, soc->reo_cmd_ring.hal_srng);
2494 	TAILQ_INIT(&soc->rx.reo_cmd_list);
2495 	qdf_spinlock_create(&soc->rx.reo_cmd_lock);
2496 
2497 	if (dp_srng_setup(soc, &soc->reo_status_ring, REO_STATUS, 0, 0,
2498 		wlan_cfg_get_dp_soc_reo_status_ring_size(soc_cfg_ctx))) {
2499 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2500 			FL("dp_srng_setup failed for reo_status_ring"));
2501 		goto fail1;
2502 	}
2503 
2504 	qdf_spinlock_create(&soc->ast_lock);
2505 	dp_soc_wds_attach(soc);
2506 
2507 	/* Reset the cpu ring map if radio is NSS offloaded */
2508 	if (wlan_cfg_get_dp_soc_nss_cfg(soc_cfg_ctx)) {
2509 		dp_soc_reset_cpu_ring_map(soc);
2510 		dp_soc_reset_intr_mask(soc);
2511 	}
2512 
2513 	/* Setup HW REO */
2514 	qdf_mem_zero(&reo_params, sizeof(reo_params));
2515 
2516 	if (wlan_cfg_is_rx_hash_enabled(soc_cfg_ctx)) {
2517 
2518 		/*
2519 		 * Reo ring remap is not required if both radios
2520 		 * are offloaded to NSS
2521 		 */
2522 		if (!dp_reo_remap_config(soc,
2523 					&reo_params.remap1,
2524 					&reo_params.remap2))
2525 			goto out;
2526 
2527 		reo_params.rx_hash_enabled = true;
2528 	}
2529 
2530 	/* setup the global rx defrag waitlist */
2531 	TAILQ_INIT(&soc->rx.defrag.waitlist);
2532 	soc->rx.defrag.timeout_ms =
2533 		wlan_cfg_get_rx_defrag_min_timeout(soc_cfg_ctx);
2534 	soc->rx.flags.defrag_timeout_check =
2535 		wlan_cfg_get_defrag_timeout_check(soc_cfg_ctx);
2536 	qdf_spinlock_create(&soc->rx.defrag.defrag_lock);
2537 
2538 out:
2539 	/*
2540 	 * set the fragment destination ring
2541 	 */
2542 	dp_reo_frag_dst_set(soc, &reo_params.frag_dst_ring);
2543 
2544 	hal_reo_setup(soc->hal_soc, &reo_params);
2545 
2546 	qdf_atomic_set(&soc->cmn_init_done, 1);
2547 	qdf_nbuf_queue_init(&soc->htt_stats.msg);
2548 	return 0;
2549 fail1:
2550 	/*
2551 	 * Cleanup will be done as part of soc_detach, which will
2552 	 * be called on pdev attach failure
2553 	 */
2554 	return QDF_STATUS_E_FAILURE;
2555 }
2556 
2557 static void dp_pdev_detach_wifi3(struct cdp_pdev *txrx_pdev, int force);
2558 
2559 static void dp_lro_hash_setup(struct dp_soc *soc, struct dp_pdev *pdev)
2560 {
2561 	struct cdp_lro_hash_config lro_hash;
2562 
2563 	if (!wlan_cfg_is_lro_enabled(soc->wlan_cfg_ctx) &&
2564 		!wlan_cfg_is_rx_hash_enabled(soc->wlan_cfg_ctx)) {
2565 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2566 			 FL("LRO disabled RX hash disabled"));
2567 		return;
2568 	}
2569 
2570 	qdf_mem_zero(&lro_hash, sizeof(lro_hash));
2571 
2572 	if (wlan_cfg_is_lro_enabled(soc->wlan_cfg_ctx)) {
2573 		lro_hash.lro_enable = 1;
2574 		lro_hash.tcp_flag = QDF_TCPHDR_ACK;
2575 		lro_hash.tcp_flag_mask = QDF_TCPHDR_FIN | QDF_TCPHDR_SYN |
2576 			 QDF_TCPHDR_RST | QDF_TCPHDR_ACK | QDF_TCPHDR_URG |
2577 			 QDF_TCPHDR_ECE | QDF_TCPHDR_CWR;
2578 	}
2579 
2580 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW, FL("enabled"));
2581 	qdf_get_random_bytes(lro_hash.toeplitz_hash_ipv4,
2582 		 (sizeof(lro_hash.toeplitz_hash_ipv4[0]) *
2583 		 LRO_IPV4_SEED_ARR_SZ));
2584 	qdf_get_random_bytes(lro_hash.toeplitz_hash_ipv6,
2585 		 (sizeof(lro_hash.toeplitz_hash_ipv6[0]) *
2586 		 LRO_IPV6_SEED_ARR_SZ));
2587 
2588 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW,
2589 		 "lro_hash: lro_enable: 0x%x tcp_flag 0x%x tcp_flag_mask 0x%x",
2590 		 lro_hash.lro_enable, lro_hash.tcp_flag,
2591 		 lro_hash.tcp_flag_mask);
2592 
2593 	qdf_trace_hex_dump(QDF_MODULE_ID_DP,
2594 		 QDF_TRACE_LEVEL_ERROR,
2595 		 (void *)lro_hash.toeplitz_hash_ipv4,
2596 		 (sizeof(lro_hash.toeplitz_hash_ipv4[0]) *
2597 		 LRO_IPV4_SEED_ARR_SZ));
2598 
2599 	qdf_trace_hex_dump(QDF_MODULE_ID_DP,
2600 		 QDF_TRACE_LEVEL_ERROR,
2601 		 (void *)lro_hash.toeplitz_hash_ipv6,
2602 		 (sizeof(lro_hash.toeplitz_hash_ipv6[0]) *
2603 		 LRO_IPV6_SEED_ARR_SZ));
2604 
2605 	qdf_assert(soc->cdp_soc.ol_ops->lro_hash_config);
2606 
2607 	if (soc->cdp_soc.ol_ops->lro_hash_config)
2608 		(void)soc->cdp_soc.ol_ops->lro_hash_config
2609 			(pdev->ctrl_pdev, &lro_hash);
2610 }
2611 
2612 /*
2613 * dp_rxdma_ring_setup() - configure the RX DMA rings
2614 * @soc: data path SoC handle
2615 * @pdev: Physical device handle
2616 *
2617 * Return: 0 - success, > 0 - failure
2618 */
2619 #ifdef QCA_HOST2FW_RXBUF_RING
2620 static int dp_rxdma_ring_setup(struct dp_soc *soc,
2621 	 struct dp_pdev *pdev)
2622 {
2623 	struct wlan_cfg_dp_pdev_ctxt *pdev_cfg_ctx;
2624 	int max_mac_rings;
2625 	int i;
2626 
2627 	pdev_cfg_ctx = pdev->wlan_cfg_ctx;
2628 	max_mac_rings = wlan_cfg_get_num_mac_rings(pdev_cfg_ctx);
2629 
2630 	for (i = 0; i < max_mac_rings; i++) {
2631 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
2632 			 "%s: pdev_id %d mac_id %d",
2633 			 __func__, pdev->pdev_id, i);
2634 		if (dp_srng_setup(soc, &pdev->rx_mac_buf_ring[i],
2635 			RXDMA_BUF, 1, i,
2636 			wlan_cfg_get_rx_dma_buf_ring_size(pdev_cfg_ctx))) {
2637 			QDF_TRACE(QDF_MODULE_ID_DP,
2638 				 QDF_TRACE_LEVEL_ERROR,
2639 				 FL("failed rx mac ring setup"));
2640 			return QDF_STATUS_E_FAILURE;
2641 		}
2642 	}
2643 	return QDF_STATUS_SUCCESS;
2644 }
2645 #else
2646 static int dp_rxdma_ring_setup(struct dp_soc *soc,
2647 	 struct dp_pdev *pdev)
2648 {
2649 	return QDF_STATUS_SUCCESS;
2650 }
2651 #endif
2652 
2653 /**
2654  * dp_dscp_tid_map_setup(): Initialize the dscp-tid maps
2655  * @pdev - DP_PDEV handle
2656  *
2657  * Return: void
2658  */
2659 static inline void
2660 dp_dscp_tid_map_setup(struct dp_pdev *pdev)
2661 {
2662 	uint8_t map_id;
2663 	struct dp_soc *soc = pdev->soc;
2664 
2665 	if (!soc)
2666 		return;
2667 
2668 	for (map_id = 0; map_id < DP_MAX_TID_MAPS; map_id++) {
2669 		qdf_mem_copy(pdev->dscp_tid_map[map_id],
2670 			     default_dscp_tid_map,
2671 			     sizeof(default_dscp_tid_map));
2672 	}
2673 
2674 	for (map_id = 0; map_id < soc->num_hw_dscp_tid_map; map_id++) {
2675 		hal_tx_set_dscp_tid_map(soc->hal_soc,
2676 					default_dscp_tid_map,
2677 					map_id);
2678 	}
2679 }
2680 
2681 #ifdef IPA_OFFLOAD
2682 /**
2683  * dp_setup_ipa_rx_refill_buf_ring - Setup second Rx refill buffer ring
2684  * @soc: data path instance
2685  * @pdev: core txrx pdev context
2686  *
2687  * Return: QDF_STATUS_SUCCESS: success
2688  *         QDF_STATUS_E_RESOURCES: Error return
2689  */
2690 static int dp_setup_ipa_rx_refill_buf_ring(struct dp_soc *soc,
2691 					   struct dp_pdev *pdev)
2692 {
2693 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
2694 	int entries;
2695 
2696 	soc_cfg_ctx = soc->wlan_cfg_ctx;
2697 	entries = wlan_cfg_get_dp_soc_rxdma_refill_ring_size(soc_cfg_ctx);
2698 
2699 	/* Setup second Rx refill buffer ring */
2700 	if (dp_srng_setup(soc, &pdev->rx_refill_buf_ring2, RXDMA_BUF,
2701 			  IPA_RX_REFILL_BUF_RING_IDX,
2702 			  pdev->pdev_id,
2703 			  entries)) {
2704 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2705 			FL("dp_srng_setup failed second rx refill ring"));
2706 		return QDF_STATUS_E_FAILURE;
2707 	}
2708 	return QDF_STATUS_SUCCESS;
2709 }
2710 
2711 /**
2712  * dp_cleanup_ipa_rx_refill_buf_ring - Cleanup second Rx refill buffer ring
2713  * @soc: data path instance
2714  * @pdev: core txrx pdev context
2715  *
2716  * Return: void
2717  */
2718 static void dp_cleanup_ipa_rx_refill_buf_ring(struct dp_soc *soc,
2719 					      struct dp_pdev *pdev)
2720 {
2721 	dp_srng_cleanup(soc, &pdev->rx_refill_buf_ring2, RXDMA_BUF,
2722 			IPA_RX_REFILL_BUF_RING_IDX);
2723 }
2724 
2725 #else
2726 static int dp_setup_ipa_rx_refill_buf_ring(struct dp_soc *soc,
2727 					   struct dp_pdev *pdev)
2728 {
2729 	return QDF_STATUS_SUCCESS;
2730 }
2731 
2732 static void dp_cleanup_ipa_rx_refill_buf_ring(struct dp_soc *soc,
2733 					      struct dp_pdev *pdev)
2734 {
2735 }
2736 #endif
2737 
2738 #if !defined(QCA_WIFI_QCA6390) && !defined(DISABLE_MON_CONFIG)
2739 static
2740 QDF_STATUS dp_mon_rings_setup(struct dp_soc *soc, struct dp_pdev *pdev)
2741 {
2742 	int mac_id = 0;
2743 	int pdev_id = pdev->pdev_id;
2744 	int entries;
2745 	struct wlan_cfg_dp_pdev_ctxt *pdev_cfg_ctx;
2746 
2747 	pdev_cfg_ctx = pdev->wlan_cfg_ctx;
2748 
2749 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
2750 		int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, pdev_id);
2751 
2752 		entries = wlan_cfg_get_dma_mon_buf_ring_size(pdev_cfg_ctx);
2753 		if (dp_srng_setup(soc, &pdev->rxdma_mon_buf_ring[mac_id],
2754 				  RXDMA_MONITOR_BUF, 0, mac_for_pdev,
2755 				  entries)) {
2756 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2757 				  FL(RNG_ERR "rxdma_mon_buf_ring "));
2758 			return QDF_STATUS_E_NOMEM;
2759 		}
2760 
2761 		entries = wlan_cfg_get_dma_mon_dest_ring_size(pdev_cfg_ctx);
2762 		if (dp_srng_setup(soc, &pdev->rxdma_mon_dst_ring[mac_id],
2763 				  RXDMA_MONITOR_DST, 0, mac_for_pdev,
2764 				  entries)) {
2765 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2766 				  FL(RNG_ERR "rxdma_mon_dst_ring"));
2767 			return QDF_STATUS_E_NOMEM;
2768 		}
2769 
2770 		entries = wlan_cfg_get_dma_mon_stat_ring_size(pdev_cfg_ctx);
2771 		if (dp_srng_setup(soc, &pdev->rxdma_mon_status_ring[mac_id],
2772 				  RXDMA_MONITOR_STATUS, 0, mac_for_pdev,
2773 				  entries)) {
2774 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2775 				  FL(RNG_ERR "rxdma_mon_status_ring"));
2776 			return QDF_STATUS_E_NOMEM;
2777 		}
2778 
2779 		entries = wlan_cfg_get_dma_mon_desc_ring_size(pdev_cfg_ctx);
2780 		if (dp_srng_setup(soc, &pdev->rxdma_mon_desc_ring[mac_id],
2781 				  RXDMA_MONITOR_DESC, 0, mac_for_pdev,
2782 				  entries)) {
2783 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2784 				  FL(RNG_ERR "rxdma_mon_desc_ring"));
2785 			return QDF_STATUS_E_NOMEM;
2786 		}
2787 	}
2788 	return QDF_STATUS_SUCCESS;
2789 }
2790 #else
2791 static QDF_STATUS dp_mon_rings_setup(struct dp_soc *soc, struct dp_pdev *pdev)
2792 {
2793 	return QDF_STATUS_SUCCESS;
2794 }
2795 #endif
2796 
2797 /*dp_iterate_update_peer_list - update peer stats on cal client timer
2798  * @pdev_hdl: pdev handle
2799  */
2800 #ifdef ATH_SUPPORT_EXT_STAT
2801 void  dp_iterate_update_peer_list(void *pdev_hdl)
2802 {
2803 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_hdl;
2804 	struct dp_vdev *vdev = NULL;
2805 	struct dp_peer *peer = NULL;
2806 
2807 	DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
2808 		DP_VDEV_ITERATE_PEER_LIST(vdev, peer) {
2809 			dp_cal_client_update_peer_stats(&peer->stats);
2810 		}
2811 	}
2812 }
2813 #else
2814 void  dp_iterate_update_peer_list(void *pdev_hdl)
2815 {
2816 }
2817 #endif
2818 
2819 /*
2820 * dp_pdev_attach_wifi3() - attach txrx pdev
2821 * @ctrl_pdev: Opaque PDEV object
2822 * @txrx_soc: Datapath SOC handle
2823 * @htc_handle: HTC handle for host-target interface
2824 * @qdf_osdev: QDF OS device
2825 * @pdev_id: PDEV ID
2826 *
2827 * Return: DP PDEV handle on success, NULL on failure
2828 */
2829 static struct cdp_pdev *dp_pdev_attach_wifi3(struct cdp_soc_t *txrx_soc,
2830 	struct cdp_ctrl_objmgr_pdev *ctrl_pdev,
2831 	HTC_HANDLE htc_handle, qdf_device_t qdf_osdev, uint8_t pdev_id)
2832 {
2833 	int tx_ring_size;
2834 	int tx_comp_ring_size;
2835 	int reo_dst_ring_size;
2836 	int entries;
2837 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
2838 	int nss_cfg;
2839 
2840 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
2841 	struct dp_pdev *pdev = qdf_mem_malloc(sizeof(*pdev));
2842 
2843 	if (!pdev) {
2844 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2845 			FL("DP PDEV memory allocation failed"));
2846 		goto fail0;
2847 	}
2848 
2849 	soc_cfg_ctx = soc->wlan_cfg_ctx;
2850 	pdev->wlan_cfg_ctx = wlan_cfg_pdev_attach(soc->ctrl_psoc);
2851 
2852 	if (!pdev->wlan_cfg_ctx) {
2853 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2854 			FL("pdev cfg_attach failed"));
2855 
2856 		qdf_mem_free(pdev);
2857 		goto fail0;
2858 	}
2859 
2860 	/*
2861 	 * set nss pdev config based on soc config
2862 	 */
2863 	nss_cfg = wlan_cfg_get_dp_soc_nss_cfg(soc_cfg_ctx);
2864 	wlan_cfg_set_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx,
2865 			(nss_cfg & (1 << pdev_id)));
2866 
2867 	pdev->soc = soc;
2868 	pdev->ctrl_pdev = ctrl_pdev;
2869 	pdev->pdev_id = pdev_id;
2870 	pdev->lmac_id = wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, pdev_id);
2871 	soc->pdev_list[pdev_id] = pdev;
2872 	soc->pdev_count++;
2873 
2874 	TAILQ_INIT(&pdev->vdev_list);
2875 	qdf_spinlock_create(&pdev->vdev_list_lock);
2876 	pdev->vdev_count = 0;
2877 
2878 	qdf_spinlock_create(&pdev->tx_mutex);
2879 	qdf_spinlock_create(&pdev->neighbour_peer_mutex);
2880 	TAILQ_INIT(&pdev->neighbour_peers_list);
2881 	pdev->neighbour_peers_added = false;
2882 
2883 	if (dp_soc_cmn_setup(soc)) {
2884 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2885 			FL("dp_soc_cmn_setup failed"));
2886 		goto fail1;
2887 	}
2888 
2889 	/* Setup per PDEV TCL rings if configured */
2890 	if (wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
2891 		tx_ring_size =
2892 			wlan_cfg_tx_ring_size(soc_cfg_ctx);
2893 		tx_comp_ring_size =
2894 			wlan_cfg_tx_comp_ring_size(soc_cfg_ctx);
2895 
2896 		if (dp_srng_setup(soc, &soc->tcl_data_ring[pdev_id], TCL_DATA,
2897 			pdev_id, pdev_id, tx_ring_size)) {
2898 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2899 				FL("dp_srng_setup failed for tcl_data_ring"));
2900 			goto fail1;
2901 		}
2902 		if (dp_srng_setup(soc, &soc->tx_comp_ring[pdev_id],
2903 			WBM2SW_RELEASE, pdev_id, pdev_id, tx_comp_ring_size)) {
2904 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2905 				FL("dp_srng_setup failed for tx_comp_ring"));
2906 			goto fail1;
2907 		}
2908 		soc->num_tcl_data_rings++;
2909 	}
2910 
2911 	/* Tx specific init */
2912 	if (dp_tx_pdev_attach(pdev)) {
2913 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2914 			FL("dp_tx_pdev_attach failed"));
2915 		goto fail1;
2916 	}
2917 
2918 	reo_dst_ring_size = wlan_cfg_get_reo_dst_ring_size(soc->wlan_cfg_ctx);
2919 	/* Setup per PDEV REO rings if configured */
2920 	if (wlan_cfg_per_pdev_rx_ring(soc_cfg_ctx)) {
2921 		if (dp_srng_setup(soc, &soc->reo_dest_ring[pdev_id], REO_DST,
2922 			pdev_id, pdev_id, reo_dst_ring_size)) {
2923 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2924 				FL("dp_srng_setup failed for reo_dest_ringn"));
2925 			goto fail1;
2926 		}
2927 		soc->num_reo_dest_rings++;
2928 
2929 	}
2930 	if (dp_srng_setup(soc, &pdev->rx_refill_buf_ring, RXDMA_BUF, 0, pdev_id,
2931 		wlan_cfg_get_dp_soc_rxdma_refill_ring_size(soc_cfg_ctx))) {
2932 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2933 			 FL("dp_srng_setup failed rx refill ring"));
2934 		goto fail1;
2935 	}
2936 
2937 	if (dp_rxdma_ring_setup(soc, pdev)) {
2938 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2939 			 FL("RXDMA ring config failed"));
2940 		goto fail1;
2941 	}
2942 
2943 	if (dp_mon_rings_setup(soc, pdev)) {
2944 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2945 			  FL("MONITOR rings setup failed"));
2946 		goto fail1;
2947 	}
2948 
2949 	entries = wlan_cfg_get_dp_soc_rxdma_err_dst_ring_size(soc_cfg_ctx);
2950 	if (wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx)) {
2951 		if (dp_srng_setup(soc, &pdev->rxdma_err_dst_ring[0], RXDMA_DST,
2952 				  0, pdev_id,
2953 				  entries)) {
2954 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2955 				  FL(RNG_ERR "rxdma_err_dst_ring"));
2956 			goto fail1;
2957 		}
2958 	}
2959 
2960 	if (dp_setup_ipa_rx_refill_buf_ring(soc, pdev))
2961 		goto fail1;
2962 
2963 	if (dp_ipa_ring_resource_setup(soc, pdev))
2964 		goto fail1;
2965 
2966 	if (dp_ipa_uc_attach(soc, pdev) != QDF_STATUS_SUCCESS) {
2967 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2968 			FL("dp_ipa_uc_attach failed"));
2969 		goto fail1;
2970 	}
2971 
2972 	/* Rx specific init */
2973 	if (dp_rx_pdev_attach(pdev)) {
2974 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2975 			FL("dp_rx_pdev_attach failed"));
2976 		goto fail0;
2977 	}
2978 	DP_STATS_INIT(pdev);
2979 
2980 	/* Monitor filter init */
2981 	pdev->mon_filter_mode = MON_FILTER_ALL;
2982 	pdev->fp_mgmt_filter = FILTER_MGMT_ALL;
2983 	pdev->fp_ctrl_filter = FILTER_CTRL_ALL;
2984 	pdev->fp_data_filter = FILTER_DATA_ALL;
2985 	pdev->mo_mgmt_filter = FILTER_MGMT_ALL;
2986 	pdev->mo_ctrl_filter = FILTER_CTRL_ALL;
2987 	pdev->mo_data_filter = FILTER_DATA_ALL;
2988 
2989 	dp_local_peer_id_pool_init(pdev);
2990 
2991 	dp_dscp_tid_map_setup(pdev);
2992 
2993 	/* Rx monitor mode specific init */
2994 	if (dp_rx_pdev_mon_attach(pdev)) {
2995 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
2996 				"dp_rx_pdev_attach failed");
2997 		goto fail1;
2998 	}
2999 
3000 	if (dp_wdi_event_attach(pdev)) {
3001 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
3002 				"dp_wdi_evet_attach failed");
3003 		goto fail1;
3004 	}
3005 
3006 	/* set the reo destination during initialization */
3007 	pdev->reo_dest = pdev->pdev_id + 1;
3008 
3009 	/*
3010 	 * initialize ppdu tlv list
3011 	 */
3012 	TAILQ_INIT(&pdev->ppdu_info_list);
3013 	pdev->tlv_count = 0;
3014 	pdev->list_depth = 0;
3015 
3016 	qdf_mem_zero(&pdev->sojourn_stats, sizeof(struct cdp_tx_sojourn_stats));
3017 
3018 	pdev->sojourn_buf = qdf_nbuf_alloc(pdev->soc->osdev,
3019 			      sizeof(struct cdp_tx_sojourn_stats), 0, 4,
3020 			      TRUE);
3021 
3022 	/* initlialize cal client timer */
3023 	dp_cal_client_attach(&pdev->cal_client_ctx, pdev, pdev->soc->osdev,
3024 			     &dp_iterate_update_peer_list);
3025 
3026 	return (struct cdp_pdev *)pdev;
3027 
3028 fail1:
3029 	dp_pdev_detach_wifi3((struct cdp_pdev *)pdev, 0);
3030 
3031 fail0:
3032 	return NULL;
3033 }
3034 
3035 /*
3036 * dp_rxdma_ring_cleanup() - configure the RX DMA rings
3037 * @soc: data path SoC handle
3038 * @pdev: Physical device handle
3039 *
3040 * Return: void
3041 */
3042 #ifdef QCA_HOST2FW_RXBUF_RING
3043 static void dp_rxdma_ring_cleanup(struct dp_soc *soc,
3044 	 struct dp_pdev *pdev)
3045 {
3046 	int max_mac_rings =
3047 		 wlan_cfg_get_num_mac_rings(pdev->wlan_cfg_ctx);
3048 	int i;
3049 
3050 	max_mac_rings = max_mac_rings < MAX_RX_MAC_RINGS ?
3051 				max_mac_rings : MAX_RX_MAC_RINGS;
3052 	for (i = 0; i < MAX_RX_MAC_RINGS; i++)
3053 		dp_srng_cleanup(soc, &pdev->rx_mac_buf_ring[i],
3054 			 RXDMA_BUF, 1);
3055 
3056 	qdf_timer_free(&soc->mon_reap_timer);
3057 }
3058 #else
3059 static void dp_rxdma_ring_cleanup(struct dp_soc *soc,
3060 	 struct dp_pdev *pdev)
3061 {
3062 }
3063 #endif
3064 
3065 /*
3066  * dp_neighbour_peers_detach() - Detach neighbour peers(nac clients)
3067  * @pdev: device object
3068  *
3069  * Return: void
3070  */
3071 static void dp_neighbour_peers_detach(struct dp_pdev *pdev)
3072 {
3073 	struct dp_neighbour_peer *peer = NULL;
3074 	struct dp_neighbour_peer *temp_peer = NULL;
3075 
3076 	TAILQ_FOREACH_SAFE(peer, &pdev->neighbour_peers_list,
3077 			neighbour_peer_list_elem, temp_peer) {
3078 		/* delete this peer from the list */
3079 		TAILQ_REMOVE(&pdev->neighbour_peers_list,
3080 				peer, neighbour_peer_list_elem);
3081 		qdf_mem_free(peer);
3082 	}
3083 
3084 	qdf_spinlock_destroy(&pdev->neighbour_peer_mutex);
3085 }
3086 
3087 /**
3088 * dp_htt_ppdu_stats_detach() - detach stats resources
3089 * @pdev: Datapath PDEV handle
3090 *
3091 * Return: void
3092 */
3093 static void dp_htt_ppdu_stats_detach(struct dp_pdev *pdev)
3094 {
3095 	struct ppdu_info *ppdu_info, *ppdu_info_next;
3096 
3097 	TAILQ_FOREACH_SAFE(ppdu_info, &pdev->ppdu_info_list,
3098 			ppdu_info_list_elem, ppdu_info_next) {
3099 		if (!ppdu_info)
3100 			break;
3101 		qdf_assert_always(ppdu_info->nbuf);
3102 		qdf_nbuf_free(ppdu_info->nbuf);
3103 		qdf_mem_free(ppdu_info);
3104 	}
3105 }
3106 
3107 #if !defined(QCA_WIFI_QCA6390) && !defined(DISABLE_MON_CONFIG)
3108 static
3109 void dp_mon_ring_deinit(struct dp_soc *soc, struct dp_pdev *pdev,
3110 			int mac_id)
3111 {
3112 		dp_srng_cleanup(soc, &pdev->rxdma_mon_buf_ring[mac_id],
3113 				RXDMA_MONITOR_BUF, 0);
3114 		dp_srng_cleanup(soc, &pdev->rxdma_mon_dst_ring[mac_id],
3115 				RXDMA_MONITOR_DST, 0);
3116 
3117 		dp_srng_cleanup(soc, &pdev->rxdma_mon_status_ring[mac_id],
3118 				RXDMA_MONITOR_STATUS, 0);
3119 
3120 		dp_srng_cleanup(soc, &pdev->rxdma_mon_desc_ring[mac_id],
3121 				RXDMA_MONITOR_DESC, 0);
3122 		dp_srng_cleanup(soc, &pdev->rxdma_err_dst_ring[mac_id],
3123 				RXDMA_DST, 0);
3124 }
3125 #else
3126 static void dp_mon_ring_deinit(struct dp_soc *soc, struct dp_pdev *pdev,
3127 			       int mac_id)
3128 {
3129 }
3130 #endif
3131 
3132 /*
3133 * dp_pdev_detach_wifi3() - detach txrx pdev
3134 * @txrx_pdev: Datapath PDEV handle
3135 * @force: Force detach
3136 *
3137 */
3138 static void dp_pdev_detach_wifi3(struct cdp_pdev *txrx_pdev, int force)
3139 {
3140 	struct dp_pdev *pdev = (struct dp_pdev *)txrx_pdev;
3141 	struct dp_soc *soc = pdev->soc;
3142 	qdf_nbuf_t curr_nbuf, next_nbuf;
3143 	int mac_id;
3144 
3145 	dp_wdi_event_detach(pdev);
3146 
3147 	dp_tx_pdev_detach(pdev);
3148 
3149 	if (wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
3150 		dp_srng_cleanup(soc, &soc->tcl_data_ring[pdev->pdev_id],
3151 			TCL_DATA, pdev->pdev_id);
3152 		dp_srng_cleanup(soc, &soc->tx_comp_ring[pdev->pdev_id],
3153 			WBM2SW_RELEASE, pdev->pdev_id);
3154 	}
3155 
3156 	dp_pktlogmod_exit(pdev);
3157 
3158 	dp_rx_pdev_detach(pdev);
3159 	dp_rx_pdev_mon_detach(pdev);
3160 	dp_neighbour_peers_detach(pdev);
3161 	qdf_spinlock_destroy(&pdev->tx_mutex);
3162 	qdf_spinlock_destroy(&pdev->vdev_list_lock);
3163 
3164 	dp_ipa_uc_detach(soc, pdev);
3165 
3166 	dp_cleanup_ipa_rx_refill_buf_ring(soc, pdev);
3167 
3168 	/* Cleanup per PDEV REO rings if configured */
3169 	if (wlan_cfg_per_pdev_rx_ring(soc->wlan_cfg_ctx)) {
3170 		dp_srng_cleanup(soc, &soc->reo_dest_ring[pdev->pdev_id],
3171 			REO_DST, pdev->pdev_id);
3172 	}
3173 
3174 	dp_srng_cleanup(soc, &pdev->rx_refill_buf_ring, RXDMA_BUF, 0);
3175 
3176 	dp_rxdma_ring_cleanup(soc, pdev);
3177 
3178 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
3179 		dp_mon_ring_deinit(soc, pdev, mac_id);
3180 		dp_srng_cleanup(soc, &pdev->rxdma_err_dst_ring[mac_id],
3181 			RXDMA_DST, 0);
3182 	}
3183 
3184 	curr_nbuf = pdev->invalid_peer_head_msdu;
3185 	while (curr_nbuf) {
3186 		next_nbuf = qdf_nbuf_next(curr_nbuf);
3187 		qdf_nbuf_free(curr_nbuf);
3188 		curr_nbuf = next_nbuf;
3189 	}
3190 
3191 	dp_htt_ppdu_stats_detach(pdev);
3192 
3193 	qdf_nbuf_free(pdev->sojourn_buf);
3194 
3195 	dp_cal_client_detach(&pdev->cal_client_ctx);
3196 	soc->pdev_list[pdev->pdev_id] = NULL;
3197 	soc->pdev_count--;
3198 	wlan_cfg_pdev_detach(pdev->wlan_cfg_ctx);
3199 	qdf_mem_free(pdev->dp_txrx_handle);
3200 	qdf_mem_free(pdev);
3201 }
3202 
3203 /*
3204  * dp_reo_desc_freelist_destroy() - Flush REO descriptors from deferred freelist
3205  * @soc: DP SOC handle
3206  */
3207 static inline void dp_reo_desc_freelist_destroy(struct dp_soc *soc)
3208 {
3209 	struct reo_desc_list_node *desc;
3210 	struct dp_rx_tid *rx_tid;
3211 
3212 	qdf_spin_lock_bh(&soc->reo_desc_freelist_lock);
3213 	while (qdf_list_remove_front(&soc->reo_desc_freelist,
3214 		(qdf_list_node_t **)&desc) == QDF_STATUS_SUCCESS) {
3215 		rx_tid = &desc->rx_tid;
3216 		qdf_mem_unmap_nbytes_single(soc->osdev,
3217 			rx_tid->hw_qdesc_paddr,
3218 			QDF_DMA_BIDIRECTIONAL,
3219 			rx_tid->hw_qdesc_alloc_size);
3220 		qdf_mem_free(rx_tid->hw_qdesc_vaddr_unaligned);
3221 		qdf_mem_free(desc);
3222 	}
3223 	qdf_spin_unlock_bh(&soc->reo_desc_freelist_lock);
3224 	qdf_list_destroy(&soc->reo_desc_freelist);
3225 	qdf_spinlock_destroy(&soc->reo_desc_freelist_lock);
3226 }
3227 
3228 /*
3229  * dp_soc_detach_wifi3() - Detach txrx SOC
3230  * @txrx_soc: DP SOC handle, struct cdp_soc_t is first element of struct dp_soc.
3231  */
3232 static void dp_soc_detach_wifi3(void *txrx_soc)
3233 {
3234 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
3235 	int i;
3236 
3237 	qdf_atomic_set(&soc->cmn_init_done, 0);
3238 
3239 	qdf_flush_work(&soc->htt_stats.work);
3240 	qdf_disable_work(&soc->htt_stats.work);
3241 
3242 	/* Free pending htt stats messages */
3243 	qdf_nbuf_queue_free(&soc->htt_stats.msg);
3244 
3245 	for (i = 0; i < MAX_PDEV_CNT; i++) {
3246 		if (soc->pdev_list[i])
3247 			dp_pdev_detach_wifi3(
3248 				(struct cdp_pdev *)soc->pdev_list[i], 1);
3249 	}
3250 
3251 	dp_peer_find_detach(soc);
3252 
3253 	/* TBD: Call Tx and Rx cleanup functions to free buffers and
3254 	 * SW descriptors
3255 	 */
3256 
3257 	/* Free the ring memories */
3258 	/* Common rings */
3259 	dp_srng_cleanup(soc, &soc->wbm_desc_rel_ring, SW2WBM_RELEASE, 0);
3260 
3261 	dp_tx_soc_detach(soc);
3262 	/* Tx data rings */
3263 	if (!wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
3264 		for (i = 0; i < soc->num_tcl_data_rings; i++) {
3265 			dp_srng_cleanup(soc, &soc->tcl_data_ring[i],
3266 				TCL_DATA, i);
3267 			dp_srng_cleanup(soc, &soc->tx_comp_ring[i],
3268 				WBM2SW_RELEASE, i);
3269 		}
3270 	}
3271 
3272 	/* TCL command and status rings */
3273 	dp_srng_cleanup(soc, &soc->tcl_cmd_ring, TCL_CMD, 0);
3274 	dp_srng_cleanup(soc, &soc->tcl_status_ring, TCL_STATUS, 0);
3275 
3276 	/* Rx data rings */
3277 	if (!wlan_cfg_per_pdev_rx_ring(soc->wlan_cfg_ctx)) {
3278 		soc->num_reo_dest_rings =
3279 			wlan_cfg_num_reo_dest_rings(soc->wlan_cfg_ctx);
3280 		for (i = 0; i < soc->num_reo_dest_rings; i++) {
3281 			/* TODO: Get number of rings and ring sizes
3282 			 * from wlan_cfg
3283 			 */
3284 			dp_srng_cleanup(soc, &soc->reo_dest_ring[i],
3285 				REO_DST, i);
3286 		}
3287 	}
3288 	/* REO reinjection ring */
3289 	dp_srng_cleanup(soc, &soc->reo_reinject_ring, REO_REINJECT, 0);
3290 
3291 	/* Rx release ring */
3292 	dp_srng_cleanup(soc, &soc->rx_rel_ring, WBM2SW_RELEASE, 0);
3293 
3294 	/* Rx exception ring */
3295 	/* TODO: Better to store ring_type and ring_num in
3296 	 * dp_srng during setup
3297 	 */
3298 	dp_srng_cleanup(soc, &soc->reo_exception_ring, REO_EXCEPTION, 0);
3299 
3300 	/* REO command and status rings */
3301 	dp_srng_cleanup(soc, &soc->reo_cmd_ring, REO_CMD, 0);
3302 	dp_srng_cleanup(soc, &soc->reo_status_ring, REO_STATUS, 0);
3303 	dp_hw_link_desc_pool_cleanup(soc);
3304 
3305 	qdf_spinlock_destroy(&soc->peer_ref_mutex);
3306 	qdf_spinlock_destroy(&soc->htt_stats.lock);
3307 
3308 	htt_soc_detach(soc->htt_handle);
3309 
3310 	qdf_spinlock_destroy(&soc->rx.defrag.defrag_lock);
3311 
3312 	dp_reo_cmdlist_destroy(soc);
3313 	qdf_spinlock_destroy(&soc->rx.reo_cmd_lock);
3314 	dp_reo_desc_freelist_destroy(soc);
3315 
3316 	wlan_cfg_soc_detach(soc->wlan_cfg_ctx);
3317 
3318 	dp_soc_wds_detach(soc);
3319 	qdf_spinlock_destroy(&soc->ast_lock);
3320 
3321 	qdf_mem_free(soc);
3322 }
3323 
3324 #if !defined(QCA_WIFI_QCA6390) && !defined(DISABLE_MON_CONFIG)
3325 static void dp_mon_htt_srng_setup(struct dp_soc *soc,
3326 				  struct dp_pdev *pdev,
3327 				  int mac_id,
3328 				  int mac_for_pdev)
3329 {
3330 	htt_srng_setup(soc->htt_handle, mac_for_pdev,
3331 		       pdev->rxdma_mon_buf_ring[mac_id].hal_srng,
3332 		       RXDMA_MONITOR_BUF);
3333 
3334 	htt_srng_setup(soc->htt_handle, mac_for_pdev,
3335 		       pdev->rxdma_mon_dst_ring[mac_id].hal_srng,
3336 		       RXDMA_MONITOR_DST);
3337 
3338 	htt_srng_setup(soc->htt_handle, mac_for_pdev,
3339 		       pdev->rxdma_mon_status_ring[mac_id].hal_srng,
3340 		       RXDMA_MONITOR_STATUS);
3341 
3342 	htt_srng_setup(soc->htt_handle, mac_for_pdev,
3343 		       pdev->rxdma_mon_desc_ring[mac_id].hal_srng,
3344 		       RXDMA_MONITOR_DESC);
3345 }
3346 #else
3347 static void dp_mon_htt_srng_setup(struct dp_soc *soc,
3348 				  struct dp_pdev *pdev,
3349 				  int mac_id,
3350 				  int mac_for_pdev)
3351 {
3352 }
3353 #endif
3354 /*
3355  * dp_rxdma_ring_config() - configure the RX DMA rings
3356  *
3357  * This function is used to configure the MAC rings.
3358  * On MCL host provides buffers in Host2FW ring
3359  * FW refills (copies) buffers to the ring and updates
3360  * ring_idx in register
3361  *
3362  * @soc: data path SoC handle
3363  *
3364  * Return: void
3365  */
3366 #ifdef QCA_HOST2FW_RXBUF_RING
3367 static void dp_rxdma_ring_config(struct dp_soc *soc)
3368 {
3369 	int i;
3370 
3371 	for (i = 0; i < MAX_PDEV_CNT; i++) {
3372 		struct dp_pdev *pdev = soc->pdev_list[i];
3373 
3374 		if (pdev) {
3375 			int mac_id;
3376 			bool dbs_enable = 0;
3377 			int max_mac_rings =
3378 				 wlan_cfg_get_num_mac_rings
3379 				(pdev->wlan_cfg_ctx);
3380 
3381 			htt_srng_setup(soc->htt_handle, 0,
3382 				 pdev->rx_refill_buf_ring.hal_srng,
3383 				 RXDMA_BUF);
3384 
3385 			if (pdev->rx_refill_buf_ring2.hal_srng)
3386 				htt_srng_setup(soc->htt_handle, 0,
3387 					pdev->rx_refill_buf_ring2.hal_srng,
3388 					RXDMA_BUF);
3389 
3390 			if (soc->cdp_soc.ol_ops->
3391 				is_hw_dbs_2x2_capable) {
3392 				dbs_enable = soc->cdp_soc.ol_ops->
3393 					is_hw_dbs_2x2_capable(soc->ctrl_psoc);
3394 			}
3395 
3396 			if (dbs_enable) {
3397 				QDF_TRACE(QDF_MODULE_ID_TXRX,
3398 				QDF_TRACE_LEVEL_ERROR,
3399 				FL("DBS enabled max_mac_rings %d"),
3400 					 max_mac_rings);
3401 			} else {
3402 				max_mac_rings = 1;
3403 				QDF_TRACE(QDF_MODULE_ID_TXRX,
3404 					 QDF_TRACE_LEVEL_ERROR,
3405 					 FL("DBS disabled, max_mac_rings %d"),
3406 					 max_mac_rings);
3407 			}
3408 
3409 			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
3410 					 FL("pdev_id %d max_mac_rings %d"),
3411 					 pdev->pdev_id, max_mac_rings);
3412 
3413 			for (mac_id = 0; mac_id < max_mac_rings; mac_id++) {
3414 				int mac_for_pdev = dp_get_mac_id_for_pdev(
3415 							mac_id, pdev->pdev_id);
3416 
3417 				QDF_TRACE(QDF_MODULE_ID_TXRX,
3418 					 QDF_TRACE_LEVEL_ERROR,
3419 					 FL("mac_id %d"), mac_for_pdev);
3420 				htt_srng_setup(soc->htt_handle, mac_for_pdev,
3421 					 pdev->rx_mac_buf_ring[mac_id]
3422 						.hal_srng,
3423 					 RXDMA_BUF);
3424 				htt_srng_setup(soc->htt_handle, mac_for_pdev,
3425 					pdev->rxdma_err_dst_ring[mac_id]
3426 						.hal_srng,
3427 					RXDMA_DST);
3428 
3429 				/* Configure monitor mode rings */
3430 				dp_mon_htt_srng_setup(soc, pdev, mac_id,
3431 						      mac_for_pdev);
3432 
3433 			}
3434 		}
3435 	}
3436 
3437 	/*
3438 	 * Timer to reap rxdma status rings.
3439 	 * Needed until we enable ppdu end interrupts
3440 	 */
3441 	qdf_timer_init(soc->osdev, &soc->mon_reap_timer,
3442 			dp_service_mon_rings, (void *)soc,
3443 			QDF_TIMER_TYPE_WAKE_APPS);
3444 	soc->reap_timer_init = 1;
3445 }
3446 #else
3447 /* This is only for WIN */
3448 static void dp_rxdma_ring_config(struct dp_soc *soc)
3449 {
3450 	int i;
3451 	int mac_id;
3452 
3453 	for (i = 0; i < MAX_PDEV_CNT; i++) {
3454 		struct dp_pdev *pdev = soc->pdev_list[i];
3455 
3456 		if (pdev == NULL)
3457 			continue;
3458 
3459 		for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
3460 			int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, i);
3461 
3462 			htt_srng_setup(soc->htt_handle, mac_for_pdev,
3463 				pdev->rx_refill_buf_ring.hal_srng, RXDMA_BUF);
3464 #ifndef DISABLE_MON_CONFIG
3465 			htt_srng_setup(soc->htt_handle, mac_for_pdev,
3466 				pdev->rxdma_mon_buf_ring[mac_id].hal_srng,
3467 				RXDMA_MONITOR_BUF);
3468 			htt_srng_setup(soc->htt_handle, mac_for_pdev,
3469 				pdev->rxdma_mon_dst_ring[mac_id].hal_srng,
3470 				RXDMA_MONITOR_DST);
3471 			htt_srng_setup(soc->htt_handle, mac_for_pdev,
3472 				pdev->rxdma_mon_status_ring[mac_id].hal_srng,
3473 				RXDMA_MONITOR_STATUS);
3474 			htt_srng_setup(soc->htt_handle, mac_for_pdev,
3475 				pdev->rxdma_mon_desc_ring[mac_id].hal_srng,
3476 				RXDMA_MONITOR_DESC);
3477 #endif
3478 			htt_srng_setup(soc->htt_handle, mac_for_pdev,
3479 				pdev->rxdma_err_dst_ring[mac_id].hal_srng,
3480 				RXDMA_DST);
3481 		}
3482 	}
3483 }
3484 #endif
3485 
3486 /*
3487  * dp_soc_attach_target_wifi3() - SOC initialization in the target
3488  * @txrx_soc: Datapath SOC handle
3489  */
3490 static int dp_soc_attach_target_wifi3(struct cdp_soc_t *cdp_soc)
3491 {
3492 	struct dp_soc *soc = (struct dp_soc *)cdp_soc;
3493 
3494 	htt_soc_attach_target(soc->htt_handle);
3495 
3496 	dp_rxdma_ring_config(soc);
3497 
3498 	DP_STATS_INIT(soc);
3499 
3500 	/* initialize work queue for stats processing */
3501 	qdf_create_work(0, &soc->htt_stats.work, htt_t2h_stats_handler, soc);
3502 
3503 	return 0;
3504 }
3505 
3506 /*
3507  * dp_soc_get_nss_cfg_wifi3() - SOC get nss config
3508  * @txrx_soc: Datapath SOC handle
3509  */
3510 static int dp_soc_get_nss_cfg_wifi3(struct cdp_soc_t *cdp_soc)
3511 {
3512 	struct dp_soc *dsoc = (struct dp_soc *)cdp_soc;
3513 	return wlan_cfg_get_dp_soc_nss_cfg(dsoc->wlan_cfg_ctx);
3514 }
3515 /*
3516  * dp_soc_set_nss_cfg_wifi3() - SOC set nss config
3517  * @txrx_soc: Datapath SOC handle
3518  * @nss_cfg: nss config
3519  */
3520 static void dp_soc_set_nss_cfg_wifi3(struct cdp_soc_t *cdp_soc, int config)
3521 {
3522 	struct dp_soc *dsoc = (struct dp_soc *)cdp_soc;
3523 	struct wlan_cfg_dp_soc_ctxt *wlan_cfg_ctx = dsoc->wlan_cfg_ctx;
3524 
3525 	wlan_cfg_set_dp_soc_nss_cfg(wlan_cfg_ctx, config);
3526 
3527 	/*
3528 	 * TODO: masked out based on the per offloaded radio
3529 	 */
3530 	if (config == dp_nss_cfg_dbdc) {
3531 		wlan_cfg_set_num_tx_desc_pool(wlan_cfg_ctx, 0);
3532 		wlan_cfg_set_num_tx_ext_desc_pool(wlan_cfg_ctx, 0);
3533 		wlan_cfg_set_num_tx_desc(wlan_cfg_ctx, 0);
3534 		wlan_cfg_set_num_tx_ext_desc(wlan_cfg_ctx, 0);
3535 	}
3536 
3537 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
3538 		  FL("nss-wifi<0> nss config is enabled"));
3539 }
3540 /*
3541 * dp_vdev_attach_wifi3() - attach txrx vdev
3542 * @txrx_pdev: Datapath PDEV handle
3543 * @vdev_mac_addr: MAC address of the virtual interface
3544 * @vdev_id: VDEV Id
3545 * @wlan_op_mode: VDEV operating mode
3546 *
3547 * Return: DP VDEV handle on success, NULL on failure
3548 */
3549 static struct cdp_vdev *dp_vdev_attach_wifi3(struct cdp_pdev *txrx_pdev,
3550 	uint8_t *vdev_mac_addr, uint8_t vdev_id, enum wlan_op_mode op_mode)
3551 {
3552 	struct dp_pdev *pdev = (struct dp_pdev *)txrx_pdev;
3553 	struct dp_soc *soc = pdev->soc;
3554 	struct dp_vdev *vdev = qdf_mem_malloc(sizeof(*vdev));
3555 
3556 	if (!vdev) {
3557 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3558 			FL("DP VDEV memory allocation failed"));
3559 		goto fail0;
3560 	}
3561 
3562 	vdev->pdev = pdev;
3563 	vdev->vdev_id = vdev_id;
3564 	vdev->opmode = op_mode;
3565 	vdev->osdev = soc->osdev;
3566 
3567 	vdev->osif_rx = NULL;
3568 	vdev->osif_rsim_rx_decap = NULL;
3569 	vdev->osif_get_key = NULL;
3570 	vdev->osif_rx_mon = NULL;
3571 	vdev->osif_tx_free_ext = NULL;
3572 	vdev->osif_vdev = NULL;
3573 
3574 	vdev->delete.pending = 0;
3575 	vdev->safemode = 0;
3576 	vdev->drop_unenc = 1;
3577 	vdev->sec_type = cdp_sec_type_none;
3578 #ifdef notyet
3579 	vdev->filters_num = 0;
3580 #endif
3581 
3582 	qdf_mem_copy(
3583 		&vdev->mac_addr.raw[0], vdev_mac_addr, OL_TXRX_MAC_ADDR_LEN);
3584 
3585 	vdev->tx_encap_type = wlan_cfg_pkt_type(soc->wlan_cfg_ctx);
3586 	vdev->rx_decap_type = wlan_cfg_pkt_type(soc->wlan_cfg_ctx);
3587 	vdev->dscp_tid_map_id = 0;
3588 	vdev->mcast_enhancement_en = 0;
3589 	vdev->raw_mode_war = wlan_cfg_get_raw_mode_war(soc->wlan_cfg_ctx);
3590 
3591 	/* TODO: Initialize default HTT meta data that will be used in
3592 	 * TCL descriptors for packets transmitted from this VDEV
3593 	 */
3594 
3595 	TAILQ_INIT(&vdev->peer_list);
3596 
3597 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
3598 	/* add this vdev into the pdev's list */
3599 	TAILQ_INSERT_TAIL(&pdev->vdev_list, vdev, vdev_list_elem);
3600 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
3601 	pdev->vdev_count++;
3602 
3603 	dp_tx_vdev_attach(vdev);
3604 
3605 
3606 	if ((soc->intr_mode == DP_INTR_POLL) &&
3607 			wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx) != 0) {
3608 		if (pdev->vdev_count == 1)
3609 			qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
3610 	}
3611 
3612 	if (pdev->vdev_count == 1)
3613 		dp_lro_hash_setup(soc, pdev);
3614 
3615 	/* LRO */
3616 	if (wlan_cfg_is_lro_enabled(soc->wlan_cfg_ctx) &&
3617 		wlan_op_mode_sta == vdev->opmode)
3618 		vdev->lro_enable = true;
3619 
3620 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
3621 		 "LRO: vdev_id %d lro_enable %d", vdev_id, vdev->lro_enable);
3622 
3623 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3624 		"Created vdev %pK (%pM)", vdev, vdev->mac_addr.raw);
3625 	DP_STATS_INIT(vdev);
3626 
3627 	if (wlan_op_mode_sta == vdev->opmode)
3628 		dp_peer_create_wifi3((struct cdp_vdev *)vdev,
3629 							vdev->mac_addr.raw,
3630 							NULL);
3631 
3632 	return (struct cdp_vdev *)vdev;
3633 
3634 fail0:
3635 	return NULL;
3636 }
3637 
3638 /**
3639  * dp_vdev_register_wifi3() - Register VDEV operations from osif layer
3640  * @vdev: Datapath VDEV handle
3641  * @osif_vdev: OSIF vdev handle
3642  * @ctrl_vdev: UMAC vdev handle
3643  * @txrx_ops: Tx and Rx operations
3644  *
3645  * Return: DP VDEV handle on success, NULL on failure
3646  */
3647 static void dp_vdev_register_wifi3(struct cdp_vdev *vdev_handle,
3648 	void *osif_vdev, struct cdp_ctrl_objmgr_vdev *ctrl_vdev,
3649 	struct ol_txrx_ops *txrx_ops)
3650 {
3651 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
3652 	vdev->osif_vdev = osif_vdev;
3653 	vdev->ctrl_vdev = ctrl_vdev;
3654 	vdev->osif_rx = txrx_ops->rx.rx;
3655 	vdev->osif_rx_stack = txrx_ops->rx.rx_stack;
3656 	vdev->osif_rsim_rx_decap = txrx_ops->rx.rsim_rx_decap;
3657 	vdev->osif_get_key = txrx_ops->get_key;
3658 	vdev->osif_rx_mon = txrx_ops->rx.mon;
3659 	vdev->osif_tx_free_ext = txrx_ops->tx.tx_free_ext;
3660 #ifdef notyet
3661 #if ATH_SUPPORT_WAPI
3662 	vdev->osif_check_wai = txrx_ops->rx.wai_check;
3663 #endif
3664 #endif
3665 #ifdef UMAC_SUPPORT_PROXY_ARP
3666 	vdev->osif_proxy_arp = txrx_ops->proxy_arp;
3667 #endif
3668 	vdev->me_convert = txrx_ops->me_convert;
3669 
3670 	/* TODO: Enable the following once Tx code is integrated */
3671 	if (vdev->mesh_vdev)
3672 		txrx_ops->tx.tx = dp_tx_send_mesh;
3673 	else
3674 		txrx_ops->tx.tx = dp_tx_send;
3675 
3676 	txrx_ops->tx.tx_exception = dp_tx_send_exception;
3677 
3678 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW,
3679 		"DP Vdev Register success");
3680 }
3681 
3682 /**
3683  * dp_vdev_flush_peers() - Forcibily Flush peers of vdev
3684  * @vdev: Datapath VDEV handle
3685  *
3686  * Return: void
3687  */
3688 static void dp_vdev_flush_peers(struct dp_vdev *vdev)
3689 {
3690 	struct dp_pdev *pdev = vdev->pdev;
3691 	struct dp_soc *soc = pdev->soc;
3692 	struct dp_peer *peer;
3693 	uint16_t *peer_ids;
3694 	uint8_t i = 0, j = 0;
3695 
3696 	peer_ids = qdf_mem_malloc(soc->max_peers * sizeof(peer_ids[0]));
3697 	if (!peer_ids) {
3698 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3699 			"DP alloc failure - unable to flush peers");
3700 		return;
3701 	}
3702 
3703 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
3704 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
3705 		for (i = 0; i < MAX_NUM_PEER_ID_PER_PEER; i++)
3706 			if (peer->peer_ids[i] != HTT_INVALID_PEER)
3707 				if (j < soc->max_peers)
3708 					peer_ids[j++] = peer->peer_ids[i];
3709 	}
3710 	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
3711 
3712 	for (i = 0; i < j ; i++)
3713 		dp_rx_peer_unmap_handler(soc, peer_ids[i], vdev->vdev_id,
3714 					 NULL, 0);
3715 
3716 	qdf_mem_free(peer_ids);
3717 
3718 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
3719 		FL("Flushed peers for vdev object %pK "), vdev);
3720 }
3721 
3722 /*
3723  * dp_vdev_detach_wifi3() - Detach txrx vdev
3724  * @txrx_vdev:		Datapath VDEV handle
3725  * @callback:		Callback OL_IF on completion of detach
3726  * @cb_context:	Callback context
3727  *
3728  */
3729 static void dp_vdev_detach_wifi3(struct cdp_vdev *vdev_handle,
3730 	ol_txrx_vdev_delete_cb callback, void *cb_context)
3731 {
3732 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
3733 	struct dp_pdev *pdev = vdev->pdev;
3734 	struct dp_soc *soc = pdev->soc;
3735 	struct dp_neighbour_peer *peer = NULL;
3736 
3737 	/* preconditions */
3738 	qdf_assert(vdev);
3739 
3740 
3741 	if (wlan_op_mode_sta == vdev->opmode)
3742 		dp_peer_delete_wifi3(vdev->vap_bss_peer, 0);
3743 
3744 	/*
3745 	 * If Target is hung, flush all peers before detaching vdev
3746 	 * this will free all references held due to missing
3747 	 * unmap commands from Target
3748 	 */
3749 	if (hif_get_target_status(soc->hif_handle) == TARGET_STATUS_RESET)
3750 		dp_vdev_flush_peers(vdev);
3751 
3752 	/*
3753 	 * Use peer_ref_mutex while accessing peer_list, in case
3754 	 * a peer is in the process of being removed from the list.
3755 	 */
3756 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
3757 	/* check that the vdev has no peers allocated */
3758 	if (!TAILQ_EMPTY(&vdev->peer_list)) {
3759 		/* debug print - will be removed later */
3760 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_WARN,
3761 			FL("not deleting vdev object %pK (%pM)"
3762 			"until deletion finishes for all its peers"),
3763 			vdev, vdev->mac_addr.raw);
3764 		/* indicate that the vdev needs to be deleted */
3765 		vdev->delete.pending = 1;
3766 		vdev->delete.callback = callback;
3767 		vdev->delete.context = cb_context;
3768 		qdf_spin_unlock_bh(&soc->peer_ref_mutex);
3769 		return;
3770 	}
3771 	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
3772 
3773 	qdf_spin_lock_bh(&pdev->neighbour_peer_mutex);
3774 	TAILQ_FOREACH(peer, &pdev->neighbour_peers_list,
3775 		      neighbour_peer_list_elem) {
3776 		QDF_ASSERT(peer->vdev != vdev);
3777 	}
3778 	qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex);
3779 
3780 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
3781 	dp_tx_vdev_detach(vdev);
3782 	/* remove the vdev from its parent pdev's list */
3783 	TAILQ_REMOVE(&pdev->vdev_list, vdev, vdev_list_elem);
3784 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
3785 		FL("deleting vdev object %pK (%pM)"), vdev, vdev->mac_addr.raw);
3786 	qdf_mem_free(vdev);
3787 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
3788 
3789 	if (callback)
3790 		callback(cb_context);
3791 }
3792 
3793 /*
3794  * dp_peer_delete_ast_entries(): Delete all AST entries for a peer
3795  * @soc - datapath soc handle
3796  * @peer - datapath peer handle
3797  *
3798  * Delete the AST entries belonging to a peer
3799  */
3800 #ifdef FEATURE_AST
3801 static inline void dp_peer_delete_ast_entries(struct dp_soc *soc,
3802 					      struct dp_peer *peer)
3803 {
3804 	struct dp_ast_entry *ast_entry, *temp_ast_entry;
3805 
3806 	qdf_spin_lock_bh(&soc->ast_lock);
3807 	DP_PEER_ITERATE_ASE_LIST(peer, ast_entry, temp_ast_entry)
3808 		dp_peer_del_ast(soc, ast_entry);
3809 
3810 	peer->self_ast_entry = NULL;
3811 	TAILQ_INIT(&peer->ast_entry_list);
3812 	qdf_spin_unlock_bh(&soc->ast_lock);
3813 }
3814 #else
3815 static inline void dp_peer_delete_ast_entries(struct dp_soc *soc,
3816 					      struct dp_peer *peer)
3817 {
3818 }
3819 #endif
3820 
3821 #if ATH_SUPPORT_WRAP
3822 static inline struct dp_peer *dp_peer_can_reuse(struct dp_vdev *vdev,
3823 						uint8_t *peer_mac_addr)
3824 {
3825 	struct dp_peer *peer;
3826 
3827 	peer = dp_peer_find_hash_find(vdev->pdev->soc, peer_mac_addr,
3828 				      0, vdev->vdev_id);
3829 	if (!peer)
3830 		return NULL;
3831 
3832 	if (peer->bss_peer)
3833 		return peer;
3834 
3835 	dp_peer_unref_delete(peer);
3836 	return NULL;
3837 }
3838 #else
3839 static inline struct dp_peer *dp_peer_can_reuse(struct dp_vdev *vdev,
3840 						uint8_t *peer_mac_addr)
3841 {
3842 	struct dp_peer *peer;
3843 
3844 	peer = dp_peer_find_hash_find(vdev->pdev->soc, peer_mac_addr,
3845 				      0, vdev->vdev_id);
3846 	if (!peer)
3847 		return NULL;
3848 
3849 	if (peer->bss_peer && (peer->vdev->vdev_id == vdev->vdev_id))
3850 		return peer;
3851 
3852 	dp_peer_unref_delete(peer);
3853 	return NULL;
3854 }
3855 #endif
3856 
3857 #if defined(FEATURE_AST) && !defined(AST_HKV1_WORKAROUND)
3858 static inline void dp_peer_ast_handle_roam_del(struct dp_soc *soc,
3859 					       uint8_t *peer_mac_addr)
3860 {
3861 	struct dp_ast_entry *ast_entry;
3862 
3863 	qdf_spin_lock_bh(&soc->ast_lock);
3864 	ast_entry = dp_peer_ast_hash_find_soc(soc, peer_mac_addr);
3865 	if (ast_entry && ast_entry->next_hop)
3866 		dp_peer_del_ast(soc, ast_entry);
3867 	qdf_spin_unlock_bh(&soc->ast_lock);
3868 }
3869 #else
3870 static inline void dp_peer_ast_handle_roam_del(struct dp_soc *soc,
3871 					       uint8_t *peer_mac_addr)
3872 {
3873 }
3874 #endif
3875 
3876 /*
3877  * dp_peer_create_wifi3() - attach txrx peer
3878  * @txrx_vdev: Datapath VDEV handle
3879  * @peer_mac_addr: Peer MAC address
3880  *
3881  * Return: DP peeer handle on success, NULL on failure
3882  */
3883 static void *dp_peer_create_wifi3(struct cdp_vdev *vdev_handle,
3884 		uint8_t *peer_mac_addr, struct cdp_ctrl_objmgr_peer *ctrl_peer)
3885 {
3886 	struct dp_peer *peer;
3887 	int i;
3888 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
3889 	struct dp_pdev *pdev;
3890 	struct dp_soc *soc;
3891 	enum cdp_txrx_ast_entry_type ast_type = CDP_TXRX_AST_TYPE_STATIC;
3892 
3893 	/* preconditions */
3894 	qdf_assert(vdev);
3895 	qdf_assert(peer_mac_addr);
3896 
3897 	pdev = vdev->pdev;
3898 	soc = pdev->soc;
3899 
3900 	/*
3901 	 * If a peer entry with given MAC address already exists,
3902 	 * reuse the peer and reset the state of peer.
3903 	 */
3904 	peer = dp_peer_can_reuse(vdev, peer_mac_addr);
3905 
3906 	if (peer) {
3907 		qdf_atomic_init(&peer->is_default_route_set);
3908 		dp_peer_cleanup(vdev, peer);
3909 
3910 		peer->delete_in_progress = false;
3911 
3912 		dp_peer_delete_ast_entries(soc, peer);
3913 
3914 		if ((vdev->opmode == wlan_op_mode_sta) &&
3915 		    !qdf_mem_cmp(peer_mac_addr, &vdev->mac_addr.raw[0],
3916 		     DP_MAC_ADDR_LEN)) {
3917 			ast_type = CDP_TXRX_AST_TYPE_SELF;
3918 		}
3919 
3920 		dp_peer_add_ast(soc, peer, peer_mac_addr, ast_type, 0);
3921 
3922 		/*
3923 		* Control path maintains a node count which is incremented
3924 		* for every new peer create command. Since new peer is not being
3925 		* created and earlier reference is reused here,
3926 		* peer_unref_delete event is sent to control path to
3927 		* increment the count back.
3928 		*/
3929 		if (soc->cdp_soc.ol_ops->peer_unref_delete) {
3930 			soc->cdp_soc.ol_ops->peer_unref_delete(pdev->ctrl_pdev,
3931 				vdev->vdev_id, peer->mac_addr.raw);
3932 		}
3933 		peer->ctrl_peer = ctrl_peer;
3934 
3935 		dp_local_peer_id_alloc(pdev, peer);
3936 		DP_STATS_INIT(peer);
3937 
3938 		return (void *)peer;
3939 	} else {
3940 		/*
3941 		 * When a STA roams from RPTR AP to ROOT AP and vice versa, we
3942 		 * need to remove the AST entry which was earlier added as a WDS
3943 		 * entry.
3944 		 * If an AST entry exists, but no peer entry exists with a given
3945 		 * MAC addresses, we could deduce it as a WDS entry
3946 		 */
3947 		dp_peer_ast_handle_roam_del(soc, peer_mac_addr);
3948 	}
3949 
3950 #ifdef notyet
3951 	peer = (struct dp_peer *)qdf_mempool_alloc(soc->osdev,
3952 		soc->mempool_ol_ath_peer);
3953 #else
3954 	peer = (struct dp_peer *)qdf_mem_malloc(sizeof(*peer));
3955 #endif
3956 
3957 	if (!peer)
3958 		return NULL; /* failure */
3959 
3960 	qdf_mem_zero(peer, sizeof(struct dp_peer));
3961 
3962 	TAILQ_INIT(&peer->ast_entry_list);
3963 
3964 	/* store provided params */
3965 	peer->vdev = vdev;
3966 	peer->ctrl_peer = ctrl_peer;
3967 
3968 	if ((vdev->opmode == wlan_op_mode_sta) &&
3969 	    !qdf_mem_cmp(peer_mac_addr, &vdev->mac_addr.raw[0],
3970 			 DP_MAC_ADDR_LEN)) {
3971 		ast_type = CDP_TXRX_AST_TYPE_SELF;
3972 	}
3973 
3974 	dp_peer_add_ast(soc, peer, peer_mac_addr, ast_type, 0);
3975 
3976 	qdf_spinlock_create(&peer->peer_info_lock);
3977 
3978 	qdf_mem_copy(
3979 		&peer->mac_addr.raw[0], peer_mac_addr, OL_TXRX_MAC_ADDR_LEN);
3980 
3981 	/* TODO: See of rx_opt_proc is really required */
3982 	peer->rx_opt_proc = soc->rx_opt_proc;
3983 
3984 	/* initialize the peer_id */
3985 	for (i = 0; i < MAX_NUM_PEER_ID_PER_PEER; i++)
3986 		peer->peer_ids[i] = HTT_INVALID_PEER;
3987 
3988 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
3989 
3990 	qdf_atomic_init(&peer->ref_cnt);
3991 
3992 	/* keep one reference for attach */
3993 	qdf_atomic_inc(&peer->ref_cnt);
3994 
3995 	/* add this peer into the vdev's list */
3996 	if (wlan_op_mode_sta == vdev->opmode)
3997 		TAILQ_INSERT_HEAD(&vdev->peer_list, peer, peer_list_elem);
3998 	else
3999 		TAILQ_INSERT_TAIL(&vdev->peer_list, peer, peer_list_elem);
4000 
4001 	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
4002 
4003 	/* TODO: See if hash based search is required */
4004 	dp_peer_find_hash_add(soc, peer);
4005 
4006 	/* Initialize the peer state */
4007 	peer->state = OL_TXRX_PEER_STATE_DISC;
4008 
4009 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
4010 		"vdev %pK created peer %pK (%pM) ref_cnt: %d",
4011 		vdev, peer, peer->mac_addr.raw,
4012 		qdf_atomic_read(&peer->ref_cnt));
4013 	/*
4014 	 * For every peer MAp message search and set if bss_peer
4015 	 */
4016 	if (memcmp(peer->mac_addr.raw, vdev->mac_addr.raw, 6) == 0) {
4017 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
4018 			"vdev bss_peer!!!!");
4019 		peer->bss_peer = 1;
4020 		vdev->vap_bss_peer = peer;
4021 	}
4022 	for (i = 0; i < DP_MAX_TIDS; i++)
4023 		qdf_spinlock_create(&peer->rx_tid[i].tid_lock);
4024 
4025 	dp_local_peer_id_alloc(pdev, peer);
4026 	DP_STATS_INIT(peer);
4027 	return (void *)peer;
4028 }
4029 
4030 /*
4031  * dp_peer_setup_wifi3() - initialize the peer
4032  * @vdev_hdl: virtual device object
4033  * @peer: Peer object
4034  *
4035  * Return: void
4036  */
4037 static void dp_peer_setup_wifi3(struct cdp_vdev *vdev_hdl, void *peer_hdl)
4038 {
4039 	struct dp_peer *peer = (struct dp_peer *)peer_hdl;
4040 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_hdl;
4041 	struct dp_pdev *pdev;
4042 	struct dp_soc *soc;
4043 	bool hash_based = 0;
4044 	enum cdp_host_reo_dest_ring reo_dest;
4045 
4046 	/* preconditions */
4047 	qdf_assert(vdev);
4048 	qdf_assert(peer);
4049 
4050 	pdev = vdev->pdev;
4051 	soc = pdev->soc;
4052 
4053 	peer->last_assoc_rcvd = 0;
4054 	peer->last_disassoc_rcvd = 0;
4055 	peer->last_deauth_rcvd = 0;
4056 
4057 	/*
4058 	 * hash based steering is disabled for Radios which are offloaded
4059 	 * to NSS
4060 	 */
4061 	if (!wlan_cfg_get_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx))
4062 		hash_based = wlan_cfg_is_rx_hash_enabled(soc->wlan_cfg_ctx);
4063 
4064 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
4065 		FL("hash based steering for pdev: %d is %d"),
4066 		pdev->pdev_id, hash_based);
4067 
4068 	/*
4069 	 * Below line of code will ensure the proper reo_dest ring is chosen
4070 	 * for cases where toeplitz hash cannot be generated (ex: non TCP/UDP)
4071 	 */
4072 	reo_dest = pdev->reo_dest;
4073 
4074 	/*
4075 	 * There are corner cases where the AD1 = AD2 = "VAPs address"
4076 	 * i.e both the devices have same MAC address. In these
4077 	 * cases we want such pkts to be processed in NULL Q handler
4078 	 * which is REO2TCL ring. for this reason we should
4079 	 * not setup reo_queues and default route for bss_peer.
4080 	 */
4081 	if (peer->bss_peer && vdev->opmode == wlan_op_mode_ap)
4082 		return;
4083 
4084 	if (soc->cdp_soc.ol_ops->peer_set_default_routing) {
4085 		/* TODO: Check the destination ring number to be passed to FW */
4086 		soc->cdp_soc.ol_ops->peer_set_default_routing(
4087 				pdev->ctrl_pdev, peer->mac_addr.raw,
4088 				peer->vdev->vdev_id, hash_based, reo_dest);
4089 	}
4090 
4091 	qdf_atomic_set(&peer->is_default_route_set, 1);
4092 
4093 	dp_peer_rx_init(pdev, peer);
4094 	return;
4095 }
4096 
4097 /*
4098  * dp_set_vdev_tx_encap_type() - set the encap type of the vdev
4099  * @vdev_handle: virtual device object
4100  * @htt_pkt_type: type of pkt
4101  *
4102  * Return: void
4103  */
4104 static void dp_set_vdev_tx_encap_type(struct cdp_vdev *vdev_handle,
4105 	 enum htt_cmn_pkt_type val)
4106 {
4107 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
4108 	vdev->tx_encap_type = val;
4109 }
4110 
4111 /*
4112  * dp_set_vdev_rx_decap_type() - set the decap type of the vdev
4113  * @vdev_handle: virtual device object
4114  * @htt_pkt_type: type of pkt
4115  *
4116  * Return: void
4117  */
4118 static void dp_set_vdev_rx_decap_type(struct cdp_vdev *vdev_handle,
4119 	 enum htt_cmn_pkt_type val)
4120 {
4121 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
4122 	vdev->rx_decap_type = val;
4123 }
4124 
4125 /*
4126  * dp_set_ba_aging_timeout() - set ba aging timeout per AC
4127  * @txrx_soc: cdp soc handle
4128  * @ac: Access category
4129  * @value: timeout value in millisec
4130  *
4131  * Return: void
4132  */
4133 static void dp_set_ba_aging_timeout(struct cdp_soc_t *txrx_soc,
4134 				    uint8_t ac, uint32_t value)
4135 {
4136 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
4137 
4138 	hal_set_ba_aging_timeout(soc->hal_soc, ac, value);
4139 }
4140 
4141 /*
4142  * dp_get_ba_aging_timeout() - get ba aging timeout per AC
4143  * @txrx_soc: cdp soc handle
4144  * @ac: access category
4145  * @value: timeout value in millisec
4146  *
4147  * Return: void
4148  */
4149 static void dp_get_ba_aging_timeout(struct cdp_soc_t *txrx_soc,
4150 				    uint8_t ac, uint32_t *value)
4151 {
4152 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
4153 
4154 	hal_get_ba_aging_timeout(soc->hal_soc, ac, value);
4155 }
4156 
4157 /*
4158  * dp_set_pdev_reo_dest() - set the reo destination ring for this pdev
4159  * @pdev_handle: physical device object
4160  * @val: reo destination ring index (1 - 4)
4161  *
4162  * Return: void
4163  */
4164 static void dp_set_pdev_reo_dest(struct cdp_pdev *pdev_handle,
4165 	 enum cdp_host_reo_dest_ring val)
4166 {
4167 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
4168 
4169 	if (pdev)
4170 		pdev->reo_dest = val;
4171 }
4172 
4173 /*
4174  * dp_get_pdev_reo_dest() - get the reo destination for this pdev
4175  * @pdev_handle: physical device object
4176  *
4177  * Return: reo destination ring index
4178  */
4179 static enum cdp_host_reo_dest_ring
4180 dp_get_pdev_reo_dest(struct cdp_pdev *pdev_handle)
4181 {
4182 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
4183 
4184 	if (pdev)
4185 		return pdev->reo_dest;
4186 	else
4187 		return cdp_host_reo_dest_ring_unknown;
4188 }
4189 
4190 /*
4191  * dp_set_filter_neighbour_peers() - set filter neighbour peers for smart mesh
4192  * @pdev_handle: device object
4193  * @val: value to be set
4194  *
4195  * Return: void
4196  */
4197 static int dp_set_filter_neighbour_peers(struct cdp_pdev *pdev_handle,
4198 	 uint32_t val)
4199 {
4200 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
4201 
4202 	/* Enable/Disable smart mesh filtering. This flag will be checked
4203 	 * during rx processing to check if packets are from NAC clients.
4204 	 */
4205 	pdev->filter_neighbour_peers = val;
4206 	return 0;
4207 }
4208 
4209 /*
4210  * dp_update_filter_neighbour_peers() - set neighbour peers(nac clients)
4211  * address for smart mesh filtering
4212  * @vdev_handle: virtual device object
4213  * @cmd: Add/Del command
4214  * @macaddr: nac client mac address
4215  *
4216  * Return: void
4217  */
4218 static int dp_update_filter_neighbour_peers(struct cdp_vdev *vdev_handle,
4219 					    uint32_t cmd, uint8_t *macaddr)
4220 {
4221 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
4222 	struct dp_pdev *pdev = vdev->pdev;
4223 	struct dp_neighbour_peer *peer = NULL;
4224 
4225 	if (!macaddr)
4226 		goto fail0;
4227 
4228 	/* Store address of NAC (neighbour peer) which will be checked
4229 	 * against TA of received packets.
4230 	 */
4231 	if (cmd == DP_NAC_PARAM_ADD) {
4232 		peer = (struct dp_neighbour_peer *) qdf_mem_malloc(
4233 				sizeof(*peer));
4234 
4235 		if (!peer) {
4236 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
4237 				FL("DP neighbour peer node memory allocation failed"));
4238 			goto fail0;
4239 		}
4240 
4241 		qdf_mem_copy(&peer->neighbour_peers_macaddr.raw[0],
4242 			macaddr, DP_MAC_ADDR_LEN);
4243 		peer->vdev = vdev;
4244 
4245 		qdf_spin_lock_bh(&pdev->neighbour_peer_mutex);
4246 
4247 		/* add this neighbour peer into the list */
4248 		TAILQ_INSERT_TAIL(&pdev->neighbour_peers_list, peer,
4249 				neighbour_peer_list_elem);
4250 		qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex);
4251 
4252 		/* first neighbour */
4253 		if (!pdev->neighbour_peers_added) {
4254 			pdev->neighbour_peers_added = true;
4255 			dp_ppdu_ring_cfg(pdev);
4256 		}
4257 		return 1;
4258 
4259 	} else if (cmd == DP_NAC_PARAM_DEL) {
4260 		qdf_spin_lock_bh(&pdev->neighbour_peer_mutex);
4261 		TAILQ_FOREACH(peer, &pdev->neighbour_peers_list,
4262 				neighbour_peer_list_elem) {
4263 			if (!qdf_mem_cmp(&peer->neighbour_peers_macaddr.raw[0],
4264 				macaddr, DP_MAC_ADDR_LEN)) {
4265 				/* delete this peer from the list */
4266 				TAILQ_REMOVE(&pdev->neighbour_peers_list,
4267 					peer, neighbour_peer_list_elem);
4268 				qdf_mem_free(peer);
4269 				break;
4270 			}
4271 		}
4272 		/* last neighbour deleted */
4273 		if (TAILQ_EMPTY(&pdev->neighbour_peers_list))
4274 			pdev->neighbour_peers_added = false;
4275 
4276 		qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex);
4277 
4278 		if (!pdev->mcopy_mode && !pdev->neighbour_peers_added &&
4279 		    !pdev->enhanced_stats_en)
4280 			dp_ppdu_ring_reset(pdev);
4281 		return 1;
4282 
4283 	}
4284 
4285 fail0:
4286 	return 0;
4287 }
4288 
4289 /*
4290  * dp_get_sec_type() - Get the security type
4291  * @peer:		Datapath peer handle
4292  * @sec_idx:    Security id (mcast, ucast)
4293  *
4294  * return sec_type: Security type
4295  */
4296 static int dp_get_sec_type(struct cdp_peer *peer, uint8_t sec_idx)
4297 {
4298 	struct dp_peer *dpeer = (struct dp_peer *)peer;
4299 
4300 	return dpeer->security[sec_idx].sec_type;
4301 }
4302 
4303 /*
4304  * dp_peer_authorize() - authorize txrx peer
4305  * @peer_handle:		Datapath peer handle
4306  * @authorize
4307  *
4308  */
4309 static void dp_peer_authorize(struct cdp_peer *peer_handle, uint32_t authorize)
4310 {
4311 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
4312 	struct dp_soc *soc;
4313 
4314 	if (peer != NULL) {
4315 		soc = peer->vdev->pdev->soc;
4316 		qdf_spin_lock_bh(&soc->peer_ref_mutex);
4317 		peer->authorize = authorize ? 1 : 0;
4318 		qdf_spin_unlock_bh(&soc->peer_ref_mutex);
4319 	}
4320 }
4321 
4322 static void dp_reset_and_release_peer_mem(struct dp_soc *soc,
4323 					  struct dp_pdev *pdev,
4324 					  struct dp_peer *peer,
4325 					  uint32_t vdev_id)
4326 {
4327 	struct dp_vdev *vdev = NULL;
4328 	struct dp_peer *bss_peer = NULL;
4329 	uint8_t *m_addr = NULL;
4330 
4331 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
4332 	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
4333 		if (vdev->vdev_id == vdev_id)
4334 			break;
4335 	}
4336 	if (!vdev) {
4337 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
4338 			  "vdev is NULL");
4339 	} else {
4340 		if (vdev->vap_bss_peer == peer)
4341 		    vdev->vap_bss_peer = NULL;
4342 		m_addr = peer->mac_addr.raw;
4343 		if (soc->cdp_soc.ol_ops->peer_unref_delete)
4344 		    soc->cdp_soc.ol_ops->peer_unref_delete(pdev->ctrl_pdev,
4345 							   vdev_id, m_addr);
4346 		if (vdev && vdev->vap_bss_peer) {
4347 		    bss_peer = vdev->vap_bss_peer;
4348 		    DP_UPDATE_STATS(vdev, peer);
4349 		}
4350 	}
4351 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
4352 	qdf_mem_free(peer);
4353 }
4354 
4355 /**
4356  * dp_delete_pending_vdev() - check and process vdev delete
4357  * @pdev: DP specific pdev pointer
4358  * @vdev: DP specific vdev pointer
4359  * @vdev_id: vdev id corresponding to vdev
4360  *
4361  * This API does following:
4362  * 1) It releases tx flow pools buffers as vdev is
4363  *    going down and no peers are associated.
4364  * 2) It also detaches vdev before cleaning vdev (struct dp_vdev) memory
4365  */
4366 static void dp_delete_pending_vdev(struct dp_pdev *pdev, struct dp_vdev *vdev,
4367 				   uint8_t vdev_id)
4368 {
4369 	ol_txrx_vdev_delete_cb vdev_delete_cb = NULL;
4370 	void *vdev_delete_context = NULL;
4371 
4372 	vdev_delete_cb = vdev->delete.callback;
4373 	vdev_delete_context = vdev->delete.context;
4374 
4375 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
4376 		  FL("deleting vdev object %pK (%pM)- its last peer is done"),
4377 		  vdev, vdev->mac_addr.raw);
4378 	/* all peers are gone, go ahead and delete it */
4379 	dp_tx_flow_pool_unmap_handler(pdev, vdev_id,
4380 			FLOW_TYPE_VDEV, vdev_id);
4381 	dp_tx_vdev_detach(vdev);
4382 
4383 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
4384 	TAILQ_REMOVE(&pdev->vdev_list, vdev, vdev_list_elem);
4385 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
4386 
4387 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
4388 		  FL("deleting vdev object %pK (%pM)"),
4389 		  vdev, vdev->mac_addr.raw);
4390 	qdf_mem_free(vdev);
4391 	vdev = NULL;
4392 
4393 	if (vdev_delete_cb)
4394 		vdev_delete_cb(vdev_delete_context);
4395 }
4396 
4397 /*
4398  * dp_peer_unref_delete() - unref and delete peer
4399  * @peer_handle:		Datapath peer handle
4400  *
4401  */
4402 void dp_peer_unref_delete(void *peer_handle)
4403 {
4404 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
4405 	struct dp_vdev *vdev = peer->vdev;
4406 	struct dp_pdev *pdev = vdev->pdev;
4407 	struct dp_soc *soc = pdev->soc;
4408 	struct dp_peer *tmppeer;
4409 	int found = 0;
4410 	uint16_t peer_id;
4411 	uint16_t vdev_id;
4412 	bool delete_vdev;
4413 
4414 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
4415 		  "%s: peer %pK ref_cnt(before decrement): %d\n", __func__,
4416 		  peer, qdf_atomic_read(&peer->ref_cnt));
4417 	/*
4418 	 * Hold the lock all the way from checking if the peer ref count
4419 	 * is zero until the peer references are removed from the hash
4420 	 * table and vdev list (if the peer ref count is zero).
4421 	 * This protects against a new HL tx operation starting to use the
4422 	 * peer object just after this function concludes it's done being used.
4423 	 * Furthermore, the lock needs to be held while checking whether the
4424 	 * vdev's list of peers is empty, to make sure that list is not modified
4425 	 * concurrently with the empty check.
4426 	 */
4427 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
4428 	if (qdf_atomic_dec_and_test(&peer->ref_cnt)) {
4429 		peer_id = peer->peer_ids[0];
4430 		vdev_id = vdev->vdev_id;
4431 
4432 		/*
4433 		 * Make sure that the reference to the peer in
4434 		 * peer object map is removed
4435 		 */
4436 		if (peer_id != HTT_INVALID_PEER)
4437 			soc->peer_id_to_obj_map[peer_id] = NULL;
4438 
4439 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
4440 			"Deleting peer %pK (%pM)", peer, peer->mac_addr.raw);
4441 
4442 		/* remove the reference to the peer from the hash table */
4443 		dp_peer_find_hash_remove(soc, peer);
4444 
4445 		qdf_spin_lock_bh(&soc->ast_lock);
4446 		if (peer->self_ast_entry) {
4447 			dp_peer_del_ast(soc, peer->self_ast_entry);
4448 			peer->self_ast_entry = NULL;
4449 		}
4450 		qdf_spin_unlock_bh(&soc->ast_lock);
4451 
4452 		TAILQ_FOREACH(tmppeer, &peer->vdev->peer_list, peer_list_elem) {
4453 			if (tmppeer == peer) {
4454 				found = 1;
4455 				break;
4456 			}
4457 		}
4458 
4459 		if (found) {
4460 			TAILQ_REMOVE(&peer->vdev->peer_list, peer,
4461 				peer_list_elem);
4462 		} else {
4463 			/*Ignoring the remove operation as peer not found*/
4464 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
4465 				  "peer:%pK not found in vdev:%pK peerlist:%pK",
4466 				  peer, vdev, &peer->vdev->peer_list);
4467 		}
4468 
4469 		/* cleanup the peer data */
4470 		dp_peer_cleanup(vdev, peer);
4471 
4472 		/* check whether the parent vdev has no peers left */
4473 		if (TAILQ_EMPTY(&vdev->peer_list)) {
4474 			/*
4475 			 * capture vdev delete pending flag's status
4476 			 * while holding peer_ref_mutex lock
4477 			 */
4478 			delete_vdev = vdev->delete.pending;
4479 			/*
4480 			 * Now that there are no references to the peer, we can
4481 			 * release the peer reference lock.
4482 			 */
4483 			qdf_spin_unlock_bh(&soc->peer_ref_mutex);
4484 			/*
4485 			 * Check if the parent vdev was waiting for its peers
4486 			 * to be deleted, in order for it to be deleted too.
4487 			 */
4488 			if (delete_vdev)
4489 				dp_delete_pending_vdev(pdev, vdev, vdev_id);
4490 		} else {
4491 			qdf_spin_unlock_bh(&soc->peer_ref_mutex);
4492 		}
4493 		dp_reset_and_release_peer_mem(soc, pdev, peer, vdev_id);
4494 
4495 	} else {
4496 		qdf_spin_unlock_bh(&soc->peer_ref_mutex);
4497 	}
4498 }
4499 
4500 /*
4501  * dp_peer_detach_wifi3() – Detach txrx peer
4502  * @peer_handle: Datapath peer handle
4503  * @bitmap: bitmap indicating special handling of request.
4504  *
4505  */
4506 static void dp_peer_delete_wifi3(void *peer_handle, uint32_t bitmap)
4507 {
4508 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
4509 
4510 	/* redirect the peer's rx delivery function to point to a
4511 	 * discard func
4512 	 */
4513 
4514 	peer->rx_opt_proc = dp_rx_discard;
4515 	peer->ctrl_peer = NULL;
4516 
4517 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
4518 		FL("peer %pK (%pM)"),  peer, peer->mac_addr.raw);
4519 
4520 	dp_local_peer_id_free(peer->vdev->pdev, peer);
4521 	qdf_spinlock_destroy(&peer->peer_info_lock);
4522 
4523 	/*
4524 	 * Remove the reference added during peer_attach.
4525 	 * The peer will still be left allocated until the
4526 	 * PEER_UNMAP message arrives to remove the other
4527 	 * reference, added by the PEER_MAP message.
4528 	 */
4529 	dp_peer_unref_delete(peer_handle);
4530 }
4531 
4532 /*
4533  * dp_get_vdev_mac_addr_wifi3() – Detach txrx peer
4534  * @peer_handle:		Datapath peer handle
4535  *
4536  */
4537 static uint8 *dp_get_vdev_mac_addr_wifi3(struct cdp_vdev *pvdev)
4538 {
4539 	struct dp_vdev *vdev = (struct dp_vdev *)pvdev;
4540 	return vdev->mac_addr.raw;
4541 }
4542 
4543 /*
4544  * dp_vdev_set_wds() - Enable per packet stats
4545  * @vdev_handle: DP VDEV handle
4546  * @val: value
4547  *
4548  * Return: none
4549  */
4550 static int dp_vdev_set_wds(void *vdev_handle, uint32_t val)
4551 {
4552 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
4553 
4554 	vdev->wds_enabled = val;
4555 	return 0;
4556 }
4557 
4558 /*
4559  * dp_get_vdev_from_vdev_id_wifi3() – Detach txrx peer
4560  * @peer_handle:		Datapath peer handle
4561  *
4562  */
4563 static struct cdp_vdev *dp_get_vdev_from_vdev_id_wifi3(struct cdp_pdev *dev,
4564 						uint8_t vdev_id)
4565 {
4566 	struct dp_pdev *pdev = (struct dp_pdev *)dev;
4567 	struct dp_vdev *vdev = NULL;
4568 
4569 	if (qdf_unlikely(!pdev))
4570 		return NULL;
4571 
4572 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
4573 	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
4574 		if (vdev->vdev_id == vdev_id)
4575 			break;
4576 	}
4577 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
4578 
4579 	return (struct cdp_vdev *)vdev;
4580 }
4581 
4582 static int dp_get_opmode(struct cdp_vdev *vdev_handle)
4583 {
4584 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
4585 
4586 	return vdev->opmode;
4587 }
4588 
4589 static
4590 void dp_get_os_rx_handles_from_vdev_wifi3(struct cdp_vdev *pvdev,
4591 					  ol_txrx_rx_fp *stack_fn_p,
4592 					  ol_osif_vdev_handle *osif_vdev_p)
4593 {
4594 	struct dp_vdev *vdev = dp_get_dp_vdev_from_cdp_vdev(pvdev);
4595 
4596 	qdf_assert(vdev);
4597 	*stack_fn_p = vdev->osif_rx_stack;
4598 	*osif_vdev_p = vdev->osif_vdev;
4599 }
4600 
4601 static struct cdp_cfg *dp_get_ctrl_pdev_from_vdev_wifi3(struct cdp_vdev *pvdev)
4602 {
4603 	struct dp_vdev *vdev = (struct dp_vdev *)pvdev;
4604 	struct dp_pdev *pdev = vdev->pdev;
4605 
4606 	return (struct cdp_cfg *)pdev->wlan_cfg_ctx;
4607 }
4608 
4609 /**
4610  * dp_reset_monitor_mode() - Disable monitor mode
4611  * @pdev_handle: Datapath PDEV handle
4612  *
4613  * Return: 0 on success, not 0 on failure
4614  */
4615 static int dp_reset_monitor_mode(struct cdp_pdev *pdev_handle)
4616 {
4617 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
4618 	struct htt_rx_ring_tlv_filter htt_tlv_filter;
4619 	struct dp_soc *soc = pdev->soc;
4620 	uint8_t pdev_id;
4621 	int mac_id;
4622 
4623 	pdev_id = pdev->pdev_id;
4624 	soc = pdev->soc;
4625 
4626 	qdf_spin_lock_bh(&pdev->mon_lock);
4627 
4628 	qdf_mem_set(&(htt_tlv_filter), sizeof(htt_tlv_filter), 0x0);
4629 
4630 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
4631 		int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, pdev_id);
4632 
4633 		htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
4634 			pdev->rxdma_mon_buf_ring[mac_id].hal_srng,
4635 			RXDMA_MONITOR_BUF, RX_BUFFER_SIZE, &htt_tlv_filter);
4636 
4637 		htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
4638 			pdev->rxdma_mon_status_ring[mac_id].hal_srng,
4639 			RXDMA_MONITOR_STATUS, RX_BUFFER_SIZE, &htt_tlv_filter);
4640 	}
4641 
4642 	pdev->monitor_vdev = NULL;
4643 
4644 	qdf_spin_unlock_bh(&pdev->mon_lock);
4645 
4646 	return 0;
4647 }
4648 
4649 /**
4650  * dp_set_nac() - set peer_nac
4651  * @peer_handle: Datapath PEER handle
4652  *
4653  * Return: void
4654  */
4655 static void dp_set_nac(struct cdp_peer *peer_handle)
4656 {
4657 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
4658 
4659 	peer->nac = 1;
4660 }
4661 
4662 /**
4663  * dp_get_tx_pending() - read pending tx
4664  * @pdev_handle: Datapath PDEV handle
4665  *
4666  * Return: outstanding tx
4667  */
4668 static int dp_get_tx_pending(struct cdp_pdev *pdev_handle)
4669 {
4670 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
4671 
4672 	return qdf_atomic_read(&pdev->num_tx_outstanding);
4673 }
4674 
4675 /**
4676  * dp_get_peer_mac_from_peer_id() - get peer mac
4677  * @pdev_handle: Datapath PDEV handle
4678  * @peer_id: Peer ID
4679  * @peer_mac: MAC addr of PEER
4680  *
4681  * Return: void
4682  */
4683 static void dp_get_peer_mac_from_peer_id(struct cdp_pdev *pdev_handle,
4684 	uint32_t peer_id, uint8_t *peer_mac)
4685 {
4686 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
4687 	struct dp_peer *peer;
4688 
4689 	if (pdev && peer_mac) {
4690 		peer = dp_peer_find_by_id(pdev->soc, (uint16_t)peer_id);
4691 		if (peer) {
4692 			qdf_mem_copy(peer_mac, peer->mac_addr.raw,
4693 				     DP_MAC_ADDR_LEN);
4694 			dp_peer_unref_del_find_by_id(peer);
4695 		}
4696 	}
4697 }
4698 
4699 /**
4700  * dp_vdev_set_monitor_mode() - Set DP VDEV to monitor mode
4701  * @vdev_handle: Datapath VDEV handle
4702  * @smart_monitor: Flag to denote if its smart monitor mode
4703  *
4704  * Return: 0 on success, not 0 on failure
4705  */
4706 static int dp_vdev_set_monitor_mode(struct cdp_vdev *vdev_handle,
4707 		uint8_t smart_monitor)
4708 {
4709 	/* Many monitor VAPs can exists in a system but only one can be up at
4710 	 * anytime
4711 	 */
4712 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
4713 	struct dp_pdev *pdev;
4714 	struct htt_rx_ring_tlv_filter htt_tlv_filter;
4715 	struct dp_soc *soc;
4716 	uint8_t pdev_id;
4717 	int mac_id;
4718 
4719 	qdf_assert(vdev);
4720 
4721 	pdev = vdev->pdev;
4722 	pdev_id = pdev->pdev_id;
4723 	soc = pdev->soc;
4724 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_WARN,
4725 		"pdev=%pK, pdev_id=%d, soc=%pK vdev=%pK",
4726 		pdev, pdev_id, soc, vdev);
4727 
4728 	/*Check if current pdev's monitor_vdev exists */
4729 	if (pdev->monitor_vdev) {
4730 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
4731 			"vdev=%pK", vdev);
4732 		qdf_assert(vdev);
4733 	}
4734 
4735 	pdev->monitor_vdev = vdev;
4736 
4737 	/* If smart monitor mode, do not configure monitor ring */
4738 	if (smart_monitor)
4739 		return QDF_STATUS_SUCCESS;
4740 
4741 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
4742 		"MODE[%x] FP[%02x|%02x|%02x] MO[%02x|%02x|%02x]",
4743 		pdev->mon_filter_mode, pdev->fp_mgmt_filter,
4744 		pdev->fp_ctrl_filter, pdev->fp_data_filter,
4745 		pdev->mo_mgmt_filter, pdev->mo_ctrl_filter,
4746 		pdev->mo_data_filter);
4747 
4748 	qdf_mem_set(&(htt_tlv_filter), sizeof(htt_tlv_filter), 0x0);
4749 
4750 	htt_tlv_filter.mpdu_start = 1;
4751 	htt_tlv_filter.msdu_start = 1;
4752 	htt_tlv_filter.packet = 1;
4753 	htt_tlv_filter.msdu_end = 1;
4754 	htt_tlv_filter.mpdu_end = 1;
4755 	htt_tlv_filter.packet_header = 1;
4756 	htt_tlv_filter.attention = 1;
4757 	htt_tlv_filter.ppdu_start = 0;
4758 	htt_tlv_filter.ppdu_end = 0;
4759 	htt_tlv_filter.ppdu_end_user_stats = 0;
4760 	htt_tlv_filter.ppdu_end_user_stats_ext = 0;
4761 	htt_tlv_filter.ppdu_end_status_done = 0;
4762 	htt_tlv_filter.header_per_msdu = 1;
4763 	htt_tlv_filter.enable_fp =
4764 		(pdev->mon_filter_mode & MON_FILTER_PASS) ? 1 : 0;
4765 	htt_tlv_filter.enable_md = 0;
4766 	htt_tlv_filter.enable_mo =
4767 		(pdev->mon_filter_mode & MON_FILTER_OTHER) ? 1 : 0;
4768 	htt_tlv_filter.fp_mgmt_filter = pdev->fp_mgmt_filter;
4769 	htt_tlv_filter.fp_ctrl_filter = pdev->fp_ctrl_filter;
4770 	htt_tlv_filter.fp_data_filter = pdev->fp_data_filter;
4771 	htt_tlv_filter.mo_mgmt_filter = pdev->mo_mgmt_filter;
4772 	htt_tlv_filter.mo_ctrl_filter = pdev->mo_ctrl_filter;
4773 	htt_tlv_filter.mo_data_filter = pdev->mo_data_filter;
4774 
4775 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
4776 		int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, pdev_id);
4777 
4778 		htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
4779 			pdev->rxdma_mon_buf_ring[mac_id].hal_srng,
4780 			RXDMA_MONITOR_BUF, RX_BUFFER_SIZE, &htt_tlv_filter);
4781 	}
4782 
4783 	qdf_mem_set(&(htt_tlv_filter), sizeof(htt_tlv_filter), 0x0);
4784 
4785 	htt_tlv_filter.mpdu_start = 1;
4786 	htt_tlv_filter.msdu_start = 0;
4787 	htt_tlv_filter.packet = 0;
4788 	htt_tlv_filter.msdu_end = 0;
4789 	htt_tlv_filter.mpdu_end = 0;
4790 	htt_tlv_filter.attention = 0;
4791 	htt_tlv_filter.ppdu_start = 1;
4792 	htt_tlv_filter.ppdu_end = 1;
4793 	htt_tlv_filter.ppdu_end_user_stats = 1;
4794 	htt_tlv_filter.ppdu_end_user_stats_ext = 1;
4795 	htt_tlv_filter.ppdu_end_status_done = 1;
4796 	htt_tlv_filter.enable_fp = 1;
4797 	htt_tlv_filter.enable_md = 0;
4798 	htt_tlv_filter.enable_mo = 1;
4799 	if (pdev->mcopy_mode) {
4800 		htt_tlv_filter.packet_header = 1;
4801 	}
4802 	htt_tlv_filter.fp_mgmt_filter = FILTER_MGMT_ALL;
4803 	htt_tlv_filter.fp_ctrl_filter = FILTER_CTRL_ALL;
4804 	htt_tlv_filter.fp_data_filter = FILTER_DATA_ALL;
4805 	htt_tlv_filter.mo_mgmt_filter = FILTER_MGMT_ALL;
4806 	htt_tlv_filter.mo_ctrl_filter = FILTER_CTRL_ALL;
4807 	htt_tlv_filter.mo_data_filter = FILTER_DATA_ALL;
4808 
4809 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
4810 		int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id,
4811 						pdev->pdev_id);
4812 
4813 		htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
4814 			pdev->rxdma_mon_status_ring[mac_id].hal_srng,
4815 			RXDMA_MONITOR_STATUS, RX_BUFFER_SIZE, &htt_tlv_filter);
4816 	}
4817 
4818 	return QDF_STATUS_SUCCESS;
4819 }
4820 
4821 /**
4822  * dp_pdev_set_advance_monitor_filter() - Set DP PDEV monitor filter
4823  * @pdev_handle: Datapath PDEV handle
4824  * @filter_val: Flag to select Filter for monitor mode
4825  * Return: 0 on success, not 0 on failure
4826  */
4827 static int dp_pdev_set_advance_monitor_filter(struct cdp_pdev *pdev_handle,
4828 	struct cdp_monitor_filter *filter_val)
4829 {
4830 	/* Many monitor VAPs can exists in a system but only one can be up at
4831 	 * anytime
4832 	 */
4833 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
4834 	struct dp_vdev *vdev = pdev->monitor_vdev;
4835 	struct htt_rx_ring_tlv_filter htt_tlv_filter;
4836 	struct dp_soc *soc;
4837 	uint8_t pdev_id;
4838 	int mac_id;
4839 
4840 	pdev_id = pdev->pdev_id;
4841 	soc = pdev->soc;
4842 
4843 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_WARN,
4844 		"pdev=%pK, pdev_id=%d, soc=%pK vdev=%pK",
4845 		pdev, pdev_id, soc, vdev);
4846 
4847 	/*Check if current pdev's monitor_vdev exists */
4848 	if (!pdev->monitor_vdev) {
4849 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
4850 			"vdev=%pK", vdev);
4851 		qdf_assert(vdev);
4852 	}
4853 
4854 	/* update filter mode, type in pdev structure */
4855 	pdev->mon_filter_mode = filter_val->mode;
4856 	pdev->fp_mgmt_filter = filter_val->fp_mgmt;
4857 	pdev->fp_ctrl_filter = filter_val->fp_ctrl;
4858 	pdev->fp_data_filter = filter_val->fp_data;
4859 	pdev->mo_mgmt_filter = filter_val->mo_mgmt;
4860 	pdev->mo_ctrl_filter = filter_val->mo_ctrl;
4861 	pdev->mo_data_filter = filter_val->mo_data;
4862 
4863 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
4864 		"MODE[%x] FP[%02x|%02x|%02x] MO[%02x|%02x|%02x]",
4865 		pdev->mon_filter_mode, pdev->fp_mgmt_filter,
4866 		pdev->fp_ctrl_filter, pdev->fp_data_filter,
4867 		pdev->mo_mgmt_filter, pdev->mo_ctrl_filter,
4868 		pdev->mo_data_filter);
4869 
4870 	qdf_mem_set(&(htt_tlv_filter), sizeof(htt_tlv_filter), 0x0);
4871 
4872 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
4873 		int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, pdev_id);
4874 
4875 		htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
4876 			pdev->rxdma_mon_buf_ring[mac_id].hal_srng,
4877 			RXDMA_MONITOR_BUF, RX_BUFFER_SIZE, &htt_tlv_filter);
4878 
4879 		htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
4880 			pdev->rxdma_mon_status_ring[mac_id].hal_srng,
4881 			RXDMA_MONITOR_STATUS, RX_BUFFER_SIZE, &htt_tlv_filter);
4882 	}
4883 
4884 	htt_tlv_filter.mpdu_start = 1;
4885 	htt_tlv_filter.msdu_start = 1;
4886 	htt_tlv_filter.packet = 1;
4887 	htt_tlv_filter.msdu_end = 1;
4888 	htt_tlv_filter.mpdu_end = 1;
4889 	htt_tlv_filter.packet_header = 1;
4890 	htt_tlv_filter.attention = 1;
4891 	htt_tlv_filter.ppdu_start = 0;
4892 	htt_tlv_filter.ppdu_end = 0;
4893 	htt_tlv_filter.ppdu_end_user_stats = 0;
4894 	htt_tlv_filter.ppdu_end_user_stats_ext = 0;
4895 	htt_tlv_filter.ppdu_end_status_done = 0;
4896 	htt_tlv_filter.header_per_msdu = 1;
4897 	htt_tlv_filter.enable_fp =
4898 		(pdev->mon_filter_mode & MON_FILTER_PASS) ? 1 : 0;
4899 	htt_tlv_filter.enable_md = 0;
4900 	htt_tlv_filter.enable_mo =
4901 		(pdev->mon_filter_mode & MON_FILTER_OTHER) ? 1 : 0;
4902 	htt_tlv_filter.fp_mgmt_filter = pdev->fp_mgmt_filter;
4903 	htt_tlv_filter.fp_ctrl_filter = pdev->fp_ctrl_filter;
4904 	htt_tlv_filter.fp_data_filter = pdev->fp_data_filter;
4905 	htt_tlv_filter.mo_mgmt_filter = pdev->mo_mgmt_filter;
4906 	htt_tlv_filter.mo_ctrl_filter = pdev->mo_ctrl_filter;
4907 	htt_tlv_filter.mo_data_filter = pdev->mo_data_filter;
4908 
4909 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
4910 		int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, pdev_id);
4911 
4912 		htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
4913 			pdev->rxdma_mon_buf_ring[mac_id].hal_srng,
4914 			RXDMA_MONITOR_BUF, RX_BUFFER_SIZE, &htt_tlv_filter);
4915 	}
4916 
4917 	qdf_mem_set(&(htt_tlv_filter), sizeof(htt_tlv_filter), 0x0);
4918 
4919 	htt_tlv_filter.mpdu_start = 1;
4920 	htt_tlv_filter.msdu_start = 0;
4921 	htt_tlv_filter.packet = 0;
4922 	htt_tlv_filter.msdu_end = 0;
4923 	htt_tlv_filter.mpdu_end = 0;
4924 	htt_tlv_filter.attention = 0;
4925 	htt_tlv_filter.ppdu_start = 1;
4926 	htt_tlv_filter.ppdu_end = 1;
4927 	htt_tlv_filter.ppdu_end_user_stats = 1;
4928 	htt_tlv_filter.ppdu_end_user_stats_ext = 1;
4929 	htt_tlv_filter.ppdu_end_status_done = 1;
4930 	htt_tlv_filter.enable_fp = 1;
4931 	htt_tlv_filter.enable_md = 0;
4932 	htt_tlv_filter.enable_mo = 1;
4933 	if (pdev->mcopy_mode) {
4934 		htt_tlv_filter.packet_header = 1;
4935 	}
4936 	htt_tlv_filter.fp_mgmt_filter = FILTER_MGMT_ALL;
4937 	htt_tlv_filter.fp_ctrl_filter = FILTER_CTRL_ALL;
4938 	htt_tlv_filter.fp_data_filter = FILTER_DATA_ALL;
4939 	htt_tlv_filter.mo_mgmt_filter = FILTER_MGMT_ALL;
4940 	htt_tlv_filter.mo_ctrl_filter = FILTER_CTRL_ALL;
4941 	htt_tlv_filter.mo_data_filter = FILTER_DATA_ALL;
4942 
4943 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
4944 		int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id,
4945 						pdev->pdev_id);
4946 
4947 		htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
4948 			pdev->rxdma_mon_status_ring[mac_id].hal_srng,
4949 			RXDMA_MONITOR_STATUS, RX_BUFFER_SIZE, &htt_tlv_filter);
4950 	}
4951 
4952 	return QDF_STATUS_SUCCESS;
4953 }
4954 
4955 /**
4956  * dp_get_pdev_id_frm_pdev() - get pdev_id
4957  * @pdev_handle: Datapath PDEV handle
4958  *
4959  * Return: pdev_id
4960  */
4961 static
4962 uint8_t dp_get_pdev_id_frm_pdev(struct cdp_pdev *pdev_handle)
4963 {
4964 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
4965 
4966 	return pdev->pdev_id;
4967 }
4968 
4969 /**
4970  * dp_pdev_set_chan_noise_floor() - set channel noise floor
4971  * @pdev_handle: Datapath PDEV handle
4972  * @chan_noise_floor: Channel Noise Floor
4973  *
4974  * Return: void
4975  */
4976 static
4977 void dp_pdev_set_chan_noise_floor(struct cdp_pdev *pdev_handle,
4978 				  int16_t chan_noise_floor)
4979 {
4980 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
4981 
4982 	pdev->chan_noise_floor = chan_noise_floor;
4983 }
4984 
4985 /**
4986  * dp_vdev_get_filter_ucast_data() - get DP VDEV monitor ucast filter
4987  * @vdev_handle: Datapath VDEV handle
4988  * Return: true on ucast filter flag set
4989  */
4990 static bool dp_vdev_get_filter_ucast_data(struct cdp_vdev *vdev_handle)
4991 {
4992 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
4993 	struct dp_pdev *pdev;
4994 
4995 	pdev = vdev->pdev;
4996 
4997 	if ((pdev->fp_data_filter & FILTER_DATA_UCAST) ||
4998 	    (pdev->mo_data_filter & FILTER_DATA_UCAST))
4999 		return true;
5000 
5001 	return false;
5002 }
5003 
5004 /**
5005  * dp_vdev_get_filter_mcast_data() - get DP VDEV monitor mcast filter
5006  * @vdev_handle: Datapath VDEV handle
5007  * Return: true on mcast filter flag set
5008  */
5009 static bool dp_vdev_get_filter_mcast_data(struct cdp_vdev *vdev_handle)
5010 {
5011 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
5012 	struct dp_pdev *pdev;
5013 
5014 	pdev = vdev->pdev;
5015 
5016 	if ((pdev->fp_data_filter & FILTER_DATA_MCAST) ||
5017 	    (pdev->mo_data_filter & FILTER_DATA_MCAST))
5018 		return true;
5019 
5020 	return false;
5021 }
5022 
5023 /**
5024  * dp_vdev_get_filter_non_data() - get DP VDEV monitor non_data filter
5025  * @vdev_handle: Datapath VDEV handle
5026  * Return: true on non data filter flag set
5027  */
5028 static bool dp_vdev_get_filter_non_data(struct cdp_vdev *vdev_handle)
5029 {
5030 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
5031 	struct dp_pdev *pdev;
5032 
5033 	pdev = vdev->pdev;
5034 
5035 	if ((pdev->fp_mgmt_filter & FILTER_MGMT_ALL) ||
5036 	    (pdev->mo_mgmt_filter & FILTER_MGMT_ALL)) {
5037 		if ((pdev->fp_ctrl_filter & FILTER_CTRL_ALL) ||
5038 		    (pdev->mo_ctrl_filter & FILTER_CTRL_ALL)) {
5039 			return true;
5040 		}
5041 	}
5042 
5043 	return false;
5044 }
5045 
5046 #ifdef MESH_MODE_SUPPORT
5047 void dp_peer_set_mesh_mode(struct cdp_vdev *vdev_hdl, uint32_t val)
5048 {
5049 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_hdl;
5050 
5051 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
5052 		FL("val %d"), val);
5053 	vdev->mesh_vdev = val;
5054 }
5055 
5056 /*
5057  * dp_peer_set_mesh_rx_filter() - to set the mesh rx filter
5058  * @vdev_hdl: virtual device object
5059  * @val: value to be set
5060  *
5061  * Return: void
5062  */
5063 void dp_peer_set_mesh_rx_filter(struct cdp_vdev *vdev_hdl, uint32_t val)
5064 {
5065 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_hdl;
5066 
5067 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
5068 		FL("val %d"), val);
5069 	vdev->mesh_rx_filter = val;
5070 }
5071 #endif
5072 
5073 /*
5074  * dp_aggregate_pdev_ctrl_frames_stats()- function to agreegate peer stats
5075  * Current scope is bar received count
5076  *
5077  * @pdev_handle: DP_PDEV handle
5078  *
5079  * Return: void
5080  */
5081 #define STATS_PROC_TIMEOUT        (HZ/1000)
5082 
5083 static void
5084 dp_aggregate_pdev_ctrl_frames_stats(struct dp_pdev *pdev)
5085 {
5086 	struct dp_vdev *vdev;
5087 	struct dp_peer *peer;
5088 	uint32_t waitcnt;
5089 
5090 	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
5091 		TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
5092 			if (!peer) {
5093 				QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
5094 					FL("DP Invalid Peer refernce"));
5095 				return;
5096 			}
5097 
5098 			if (peer->delete_in_progress) {
5099 				QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
5100 					FL("DP Peer deletion in progress"));
5101 				continue;
5102 			}
5103 
5104 			qdf_atomic_inc(&peer->ref_cnt);
5105 			waitcnt = 0;
5106 			dp_peer_rxtid_stats(peer, dp_rx_bar_stats_cb, pdev);
5107 			while (!(qdf_atomic_read(&(pdev->stats_cmd_complete)))
5108 				&& waitcnt < 10) {
5109 				schedule_timeout_interruptible(
5110 						STATS_PROC_TIMEOUT);
5111 				waitcnt++;
5112 			}
5113 			qdf_atomic_set(&(pdev->stats_cmd_complete), 0);
5114 			dp_peer_unref_delete(peer);
5115 		}
5116 	}
5117 }
5118 
5119 /**
5120  * dp_rx_bar_stats_cb(): BAR received stats callback
5121  * @soc: SOC handle
5122  * @cb_ctxt: Call back context
5123  * @reo_status: Reo status
5124  *
5125  * return: void
5126  */
5127 void dp_rx_bar_stats_cb(struct dp_soc *soc, void *cb_ctxt,
5128 	union hal_reo_status *reo_status)
5129 {
5130 	struct dp_pdev *pdev = (struct dp_pdev *)cb_ctxt;
5131 	struct hal_reo_queue_status *queue_status = &(reo_status->queue_status);
5132 
5133 	if (queue_status->header.status != HAL_REO_CMD_SUCCESS) {
5134 		DP_TRACE_STATS(FATAL, "REO stats failure %d \n",
5135 			queue_status->header.status);
5136 		qdf_atomic_set(&(pdev->stats_cmd_complete), 1);
5137 		return;
5138 	}
5139 
5140 	pdev->stats.rx.bar_recv_cnt += queue_status->bar_rcvd_cnt;
5141 	qdf_atomic_set(&(pdev->stats_cmd_complete), 1);
5142 
5143 }
5144 
5145 /**
5146  * dp_aggregate_vdev_stats(): Consolidate stats at VDEV level
5147  * @vdev: DP VDEV handle
5148  *
5149  * return: void
5150  */
5151 void dp_aggregate_vdev_stats(struct dp_vdev *vdev,
5152 			     struct cdp_vdev_stats *vdev_stats)
5153 {
5154 	struct dp_peer *peer = NULL;
5155 	struct dp_soc *soc = vdev->pdev->soc;
5156 
5157 	qdf_mem_copy(vdev_stats, &vdev->stats, sizeof(vdev->stats));
5158 
5159 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem)
5160 		dp_update_vdev_stats(vdev_stats, peer);
5161 
5162 	if (soc->cdp_soc.ol_ops->update_dp_stats)
5163 		soc->cdp_soc.ol_ops->update_dp_stats(vdev->pdev->ctrl_pdev,
5164 			&vdev->stats, (uint16_t) vdev->vdev_id,
5165 			UPDATE_VDEV_STATS);
5166 
5167 }
5168 
5169 /**
5170  * dp_aggregate_pdev_stats(): Consolidate stats at PDEV level
5171  * @pdev: DP PDEV handle
5172  *
5173  * return: void
5174  */
5175 static inline void dp_aggregate_pdev_stats(struct dp_pdev *pdev)
5176 {
5177 	struct dp_vdev *vdev = NULL;
5178 	struct dp_soc *soc = pdev->soc;
5179 	struct cdp_vdev_stats *vdev_stats =
5180 			qdf_mem_malloc(sizeof(struct cdp_vdev_stats));
5181 
5182 	if (!vdev_stats) {
5183 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
5184 			  "DP alloc failure - unable to get alloc vdev stats");
5185 		return;
5186 	}
5187 
5188 	qdf_mem_set(&(pdev->stats.tx), sizeof(pdev->stats.tx), 0x0);
5189 	qdf_mem_set(&(pdev->stats.rx), sizeof(pdev->stats.rx), 0x0);
5190 	qdf_mem_set(&(pdev->stats.tx_i), sizeof(pdev->stats.tx_i), 0x0);
5191 
5192 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
5193 	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
5194 
5195 		dp_aggregate_vdev_stats(vdev, vdev_stats);
5196 		dp_update_pdev_stats(pdev, vdev_stats);
5197 
5198 		DP_STATS_AGGR_PKT(pdev, vdev, tx_i.nawds_mcast);
5199 
5200 		DP_STATS_AGGR_PKT(pdev, vdev, tx_i.rcvd);
5201 		DP_STATS_AGGR_PKT(pdev, vdev, tx_i.processed);
5202 		DP_STATS_AGGR_PKT(pdev, vdev, tx_i.reinject_pkts);
5203 		DP_STATS_AGGR_PKT(pdev, vdev, tx_i.inspect_pkts);
5204 		DP_STATS_AGGR_PKT(pdev, vdev, tx_i.raw.raw_pkt);
5205 		DP_STATS_AGGR(pdev, vdev, tx_i.raw.dma_map_error);
5206 		DP_STATS_AGGR_PKT(pdev, vdev, tx_i.tso.tso_pkt);
5207 		DP_STATS_AGGR(pdev, vdev, tx_i.tso.dropped_host.num);
5208 		DP_STATS_AGGR(pdev, vdev, tx_i.tso.dropped_target);
5209 		DP_STATS_AGGR(pdev, vdev, tx_i.sg.dropped_host.num);
5210 		DP_STATS_AGGR(pdev, vdev, tx_i.sg.dropped_target);
5211 		DP_STATS_AGGR_PKT(pdev, vdev, tx_i.sg.sg_pkt);
5212 		DP_STATS_AGGR_PKT(pdev, vdev, tx_i.mcast_en.mcast_pkt);
5213 		DP_STATS_AGGR(pdev, vdev,
5214 				tx_i.mcast_en.dropped_map_error);
5215 		DP_STATS_AGGR(pdev, vdev,
5216 				tx_i.mcast_en.dropped_self_mac);
5217 		DP_STATS_AGGR(pdev, vdev,
5218 				tx_i.mcast_en.dropped_send_fail);
5219 		DP_STATS_AGGR(pdev, vdev, tx_i.mcast_en.ucast);
5220 		DP_STATS_AGGR(pdev, vdev, tx_i.dropped.dma_error);
5221 		DP_STATS_AGGR(pdev, vdev, tx_i.dropped.ring_full);
5222 		DP_STATS_AGGR(pdev, vdev, tx_i.dropped.enqueue_fail);
5223 		DP_STATS_AGGR(pdev, vdev, tx_i.dropped.desc_na.num);
5224 		DP_STATS_AGGR(pdev, vdev, tx_i.dropped.res_full);
5225 		DP_STATS_AGGR(pdev, vdev, tx_i.dropped.headroom_insufficient);
5226 		DP_STATS_AGGR(pdev, vdev, tx_i.cce_classified);
5227 		DP_STATS_AGGR(pdev, vdev, tx_i.cce_classified_raw);
5228 		DP_STATS_AGGR(pdev, vdev, tx_i.mesh.exception_fw);
5229 		DP_STATS_AGGR(pdev, vdev, tx_i.mesh.completion_fw);
5230 
5231 		pdev->stats.tx_i.dropped.dropped_pkt.num =
5232 			pdev->stats.tx_i.dropped.dma_error +
5233 			pdev->stats.tx_i.dropped.ring_full +
5234 			pdev->stats.tx_i.dropped.enqueue_fail +
5235 			pdev->stats.tx_i.dropped.desc_na.num +
5236 			pdev->stats.tx_i.dropped.res_full;
5237 
5238 		pdev->stats.tx.last_ack_rssi =
5239 			vdev->stats.tx.last_ack_rssi;
5240 		pdev->stats.tx_i.tso.num_seg =
5241 			vdev->stats.tx_i.tso.num_seg;
5242 	}
5243 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
5244 	qdf_mem_free(vdev_stats);
5245 
5246 	if (soc->cdp_soc.ol_ops->update_dp_stats)
5247 		soc->cdp_soc.ol_ops->update_dp_stats(pdev->ctrl_pdev,
5248 				&pdev->stats, pdev->pdev_id, UPDATE_PDEV_STATS);
5249 
5250 }
5251 
5252 /**
5253  * dp_vdev_getstats() - get vdev packet level stats
5254  * @vdev_handle: Datapath VDEV handle
5255  * @stats: cdp network device stats structure
5256  *
5257  * Return: void
5258  */
5259 static void dp_vdev_getstats(void *vdev_handle,
5260 		struct cdp_dev_stats *stats)
5261 {
5262 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
5263 	struct cdp_vdev_stats *vdev_stats =
5264 			qdf_mem_malloc(sizeof(struct cdp_vdev_stats));
5265 
5266 	if (!vdev_stats) {
5267 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
5268 			  "DP alloc failure - unable to get alloc vdev stats");
5269 		return;
5270 	}
5271 
5272 	dp_aggregate_vdev_stats(vdev, vdev_stats);
5273 
5274 	stats->tx_packets = vdev_stats->tx_i.rcvd.num;
5275 	stats->tx_bytes = vdev_stats->tx_i.rcvd.bytes;
5276 
5277 	stats->tx_errors = vdev_stats->tx.tx_failed +
5278 		vdev_stats->tx_i.dropped.dropped_pkt.num;
5279 	stats->tx_dropped = stats->tx_errors;
5280 
5281 	stats->rx_packets = vdev_stats->rx.unicast.num +
5282 		vdev_stats->rx.multicast.num +
5283 		vdev_stats->rx.bcast.num;
5284 	stats->rx_bytes = vdev_stats->rx.unicast.bytes +
5285 		vdev_stats->rx.multicast.bytes +
5286 		vdev_stats->rx.bcast.bytes;
5287 
5288 }
5289 
5290 
5291 /**
5292  * dp_pdev_getstats() - get pdev packet level stats
5293  * @pdev_handle: Datapath PDEV handle
5294  * @stats: cdp network device stats structure
5295  *
5296  * Return: void
5297  */
5298 static void dp_pdev_getstats(void *pdev_handle,
5299 		struct cdp_dev_stats *stats)
5300 {
5301 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
5302 
5303 	dp_aggregate_pdev_stats(pdev);
5304 
5305 	stats->tx_packets = pdev->stats.tx_i.rcvd.num;
5306 	stats->tx_bytes = pdev->stats.tx_i.rcvd.bytes;
5307 
5308 	stats->tx_errors = pdev->stats.tx.tx_failed +
5309 		pdev->stats.tx_i.dropped.dropped_pkt.num;
5310 	stats->tx_dropped = stats->tx_errors;
5311 
5312 	stats->rx_packets = pdev->stats.rx.unicast.num +
5313 		pdev->stats.rx.multicast.num +
5314 		pdev->stats.rx.bcast.num;
5315 	stats->rx_bytes = pdev->stats.rx.unicast.bytes +
5316 		pdev->stats.rx.multicast.bytes +
5317 		pdev->stats.rx.bcast.bytes;
5318 }
5319 
5320 /**
5321  * dp_get_device_stats() - get interface level packet stats
5322  * @handle: device handle
5323  * @stats: cdp network device stats structure
5324  * @type: device type pdev/vdev
5325  *
5326  * Return: void
5327  */
5328 static void dp_get_device_stats(void *handle,
5329 		struct cdp_dev_stats *stats, uint8_t type)
5330 {
5331 	switch (type) {
5332 	case UPDATE_VDEV_STATS:
5333 		dp_vdev_getstats(handle, stats);
5334 		break;
5335 	case UPDATE_PDEV_STATS:
5336 		dp_pdev_getstats(handle, stats);
5337 		break;
5338 	default:
5339 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
5340 			"apstats cannot be updated for this input "
5341 			"type %d", type);
5342 		break;
5343 	}
5344 
5345 }
5346 
5347 
5348 /**
5349  * dp_print_pdev_tx_stats(): Print Pdev level TX stats
5350  * @pdev: DP_PDEV Handle
5351  *
5352  * Return:void
5353  */
5354 static inline void
5355 dp_print_pdev_tx_stats(struct dp_pdev *pdev)
5356 {
5357 	uint8_t index = 0;
5358 	DP_PRINT_STATS("PDEV Tx Stats:\n");
5359 	DP_PRINT_STATS("Received From Stack:");
5360 	DP_PRINT_STATS("	Packets = %d",
5361 			pdev->stats.tx_i.rcvd.num);
5362 	DP_PRINT_STATS("	Bytes = %llu",
5363 			pdev->stats.tx_i.rcvd.bytes);
5364 	DP_PRINT_STATS("Processed:");
5365 	DP_PRINT_STATS("	Packets = %d",
5366 			pdev->stats.tx_i.processed.num);
5367 	DP_PRINT_STATS("	Bytes = %llu",
5368 			pdev->stats.tx_i.processed.bytes);
5369 	DP_PRINT_STATS("Total Completions:");
5370 	DP_PRINT_STATS("	Packets = %u",
5371 			pdev->stats.tx.comp_pkt.num);
5372 	DP_PRINT_STATS("	Bytes = %llu",
5373 			pdev->stats.tx.comp_pkt.bytes);
5374 	DP_PRINT_STATS("Successful Completions:");
5375 	DP_PRINT_STATS("	Packets = %u",
5376 			pdev->stats.tx.tx_success.num);
5377 	DP_PRINT_STATS("	Bytes = %llu",
5378 			pdev->stats.tx.tx_success.bytes);
5379 	DP_PRINT_STATS("Dropped:");
5380 	DP_PRINT_STATS("	Total = %d",
5381 			pdev->stats.tx_i.dropped.dropped_pkt.num);
5382 	DP_PRINT_STATS("	Dma_map_error = %d",
5383 			pdev->stats.tx_i.dropped.dma_error);
5384 	DP_PRINT_STATS("	Ring Full = %d",
5385 			pdev->stats.tx_i.dropped.ring_full);
5386 	DP_PRINT_STATS("	Descriptor Not available = %d",
5387 			pdev->stats.tx_i.dropped.desc_na.num);
5388 	DP_PRINT_STATS("	HW enqueue failed= %d",
5389 			pdev->stats.tx_i.dropped.enqueue_fail);
5390 	DP_PRINT_STATS("	Resources Full = %d",
5391 			pdev->stats.tx_i.dropped.res_full);
5392 	DP_PRINT_STATS("	FW removed = %d",
5393 			pdev->stats.tx.dropped.fw_rem);
5394 	DP_PRINT_STATS("	FW removed transmitted = %d",
5395 			pdev->stats.tx.dropped.fw_rem_tx);
5396 	DP_PRINT_STATS("	FW removed untransmitted = %d",
5397 			pdev->stats.tx.dropped.fw_rem_notx);
5398 	DP_PRINT_STATS("	FW removed untransmitted fw_reason1 = %d",
5399 			pdev->stats.tx.dropped.fw_reason1);
5400 	DP_PRINT_STATS("	FW removed untransmitted fw_reason2 = %d",
5401 			pdev->stats.tx.dropped.fw_reason2);
5402 	DP_PRINT_STATS("	FW removed untransmitted fw_reason3 = %d",
5403 			pdev->stats.tx.dropped.fw_reason3);
5404 	DP_PRINT_STATS("	Aged Out from msdu/mpdu queues = %d",
5405 			pdev->stats.tx.dropped.age_out);
5406 	DP_PRINT_STATS("	headroom insufficient = %d",
5407 			pdev->stats.tx_i.dropped.headroom_insufficient);
5408 	DP_PRINT_STATS("	Multicast:");
5409 	DP_PRINT_STATS("	Packets: %u",
5410 		       pdev->stats.tx.mcast.num);
5411 	DP_PRINT_STATS("	Bytes: %llu",
5412 		       pdev->stats.tx.mcast.bytes);
5413 	DP_PRINT_STATS("Scatter Gather:");
5414 	DP_PRINT_STATS("	Packets = %d",
5415 			pdev->stats.tx_i.sg.sg_pkt.num);
5416 	DP_PRINT_STATS("	Bytes = %llu",
5417 			pdev->stats.tx_i.sg.sg_pkt.bytes);
5418 	DP_PRINT_STATS("	Dropped By Host = %d",
5419 			pdev->stats.tx_i.sg.dropped_host.num);
5420 	DP_PRINT_STATS("	Dropped By Target = %d",
5421 			pdev->stats.tx_i.sg.dropped_target);
5422 	DP_PRINT_STATS("TSO:");
5423 	DP_PRINT_STATS("	Number of Segments = %d",
5424 			pdev->stats.tx_i.tso.num_seg);
5425 	DP_PRINT_STATS("	Packets = %d",
5426 			pdev->stats.tx_i.tso.tso_pkt.num);
5427 	DP_PRINT_STATS("	Bytes = %llu",
5428 			pdev->stats.tx_i.tso.tso_pkt.bytes);
5429 	DP_PRINT_STATS("	Dropped By Host = %d",
5430 			pdev->stats.tx_i.tso.dropped_host.num);
5431 	DP_PRINT_STATS("Mcast Enhancement:");
5432 	DP_PRINT_STATS("	Packets = %d",
5433 			pdev->stats.tx_i.mcast_en.mcast_pkt.num);
5434 	DP_PRINT_STATS("	Bytes = %llu",
5435 			pdev->stats.tx_i.mcast_en.mcast_pkt.bytes);
5436 	DP_PRINT_STATS("	Dropped: Map Errors = %d",
5437 			pdev->stats.tx_i.mcast_en.dropped_map_error);
5438 	DP_PRINT_STATS("	Dropped: Self Mac = %d",
5439 			pdev->stats.tx_i.mcast_en.dropped_self_mac);
5440 	DP_PRINT_STATS("	Dropped: Send Fail = %d",
5441 			pdev->stats.tx_i.mcast_en.dropped_send_fail);
5442 	DP_PRINT_STATS("	Unicast sent = %d",
5443 			pdev->stats.tx_i.mcast_en.ucast);
5444 	DP_PRINT_STATS("Raw:");
5445 	DP_PRINT_STATS("	Packets = %d",
5446 			pdev->stats.tx_i.raw.raw_pkt.num);
5447 	DP_PRINT_STATS("	Bytes = %llu",
5448 			pdev->stats.tx_i.raw.raw_pkt.bytes);
5449 	DP_PRINT_STATS("	DMA map error = %d",
5450 			pdev->stats.tx_i.raw.dma_map_error);
5451 	DP_PRINT_STATS("Reinjected:");
5452 	DP_PRINT_STATS("	Packets = %d",
5453 			pdev->stats.tx_i.reinject_pkts.num);
5454 	DP_PRINT_STATS("	Bytes = %llu\n",
5455 			pdev->stats.tx_i.reinject_pkts.bytes);
5456 	DP_PRINT_STATS("Inspected:");
5457 	DP_PRINT_STATS("	Packets = %d",
5458 			pdev->stats.tx_i.inspect_pkts.num);
5459 	DP_PRINT_STATS("	Bytes = %llu",
5460 			pdev->stats.tx_i.inspect_pkts.bytes);
5461 	DP_PRINT_STATS("Nawds Multicast:");
5462 	DP_PRINT_STATS("	Packets = %d",
5463 			pdev->stats.tx_i.nawds_mcast.num);
5464 	DP_PRINT_STATS("	Bytes = %llu",
5465 			pdev->stats.tx_i.nawds_mcast.bytes);
5466 	DP_PRINT_STATS("CCE Classified:");
5467 	DP_PRINT_STATS("	CCE Classified Packets: %u",
5468 			pdev->stats.tx_i.cce_classified);
5469 	DP_PRINT_STATS("	RAW CCE Classified Packets: %u",
5470 			pdev->stats.tx_i.cce_classified_raw);
5471 	DP_PRINT_STATS("Mesh stats:");
5472 	DP_PRINT_STATS("	frames to firmware: %u",
5473 			pdev->stats.tx_i.mesh.exception_fw);
5474 	DP_PRINT_STATS("	completions from fw: %u",
5475 			pdev->stats.tx_i.mesh.completion_fw);
5476 	DP_PRINT_STATS("PPDU stats counter");
5477 	for (index = 0; index < CDP_PPDU_STATS_MAX_TAG; index++) {
5478 		DP_PRINT_STATS("	Tag[%d] = %llu", index,
5479 				pdev->stats.ppdu_stats_counter[index]);
5480 	}
5481 }
5482 
5483 /**
5484  * dp_print_pdev_rx_stats(): Print Pdev level RX stats
5485  * @pdev: DP_PDEV Handle
5486  *
5487  * Return: void
5488  */
5489 static inline void
5490 dp_print_pdev_rx_stats(struct dp_pdev *pdev)
5491 {
5492 	DP_PRINT_STATS("PDEV Rx Stats:\n");
5493 	DP_PRINT_STATS("Received From HW (Per Rx Ring):");
5494 	DP_PRINT_STATS("	Packets = %d %d %d %d",
5495 			pdev->stats.rx.rcvd_reo[0].num,
5496 			pdev->stats.rx.rcvd_reo[1].num,
5497 			pdev->stats.rx.rcvd_reo[2].num,
5498 			pdev->stats.rx.rcvd_reo[3].num);
5499 	DP_PRINT_STATS("	Bytes = %llu %llu %llu %llu",
5500 			pdev->stats.rx.rcvd_reo[0].bytes,
5501 			pdev->stats.rx.rcvd_reo[1].bytes,
5502 			pdev->stats.rx.rcvd_reo[2].bytes,
5503 			pdev->stats.rx.rcvd_reo[3].bytes);
5504 	DP_PRINT_STATS("Replenished:");
5505 	DP_PRINT_STATS("	Packets = %d",
5506 			pdev->stats.replenish.pkts.num);
5507 	DP_PRINT_STATS("	Bytes = %llu",
5508 			pdev->stats.replenish.pkts.bytes);
5509 	DP_PRINT_STATS("	Buffers Added To Freelist = %d",
5510 			pdev->stats.buf_freelist);
5511 	DP_PRINT_STATS("	Low threshold intr = %d",
5512 			pdev->stats.replenish.low_thresh_intrs);
5513 	DP_PRINT_STATS("Dropped:");
5514 	DP_PRINT_STATS("	msdu_not_done = %d",
5515 			pdev->stats.dropped.msdu_not_done);
5516 	DP_PRINT_STATS("        mon_rx_drop = %d",
5517 			pdev->stats.dropped.mon_rx_drop);
5518 	DP_PRINT_STATS("        mec_drop = %d",
5519 		       pdev->stats.rx.mec_drop.num);
5520 	DP_PRINT_STATS("	Bytes = %llu",
5521 		       pdev->stats.rx.mec_drop.bytes);
5522 	DP_PRINT_STATS("Sent To Stack:");
5523 	DP_PRINT_STATS("	Packets = %d",
5524 			pdev->stats.rx.to_stack.num);
5525 	DP_PRINT_STATS("	Bytes = %llu",
5526 			pdev->stats.rx.to_stack.bytes);
5527 	DP_PRINT_STATS("Multicast/Broadcast:");
5528 	DP_PRINT_STATS("	Packets = %d",
5529 			pdev->stats.rx.multicast.num);
5530 	DP_PRINT_STATS("	Bytes = %llu",
5531 			pdev->stats.rx.multicast.bytes);
5532 	DP_PRINT_STATS("Errors:");
5533 	DP_PRINT_STATS("	Rxdma Ring Un-inititalized = %d",
5534 			pdev->stats.replenish.rxdma_err);
5535 	DP_PRINT_STATS("	Desc Alloc Failed: = %d",
5536 			pdev->stats.err.desc_alloc_fail);
5537 	DP_PRINT_STATS("	IP checksum error = %d",
5538 		       pdev->stats.err.ip_csum_err);
5539 	DP_PRINT_STATS("	TCP/UDP checksum error = %d",
5540 		       pdev->stats.err.tcp_udp_csum_err);
5541 
5542 	/* Get bar_recv_cnt */
5543 	dp_aggregate_pdev_ctrl_frames_stats(pdev);
5544 	DP_PRINT_STATS("BAR Received Count: = %d",
5545 			pdev->stats.rx.bar_recv_cnt);
5546 
5547 }
5548 
5549 /**
5550  * dp_print_pdev_rx_mon_stats(): Print Pdev level RX monitor stats
5551  * @pdev: DP_PDEV Handle
5552  *
5553  * Return: void
5554  */
5555 static inline void
5556 dp_print_pdev_rx_mon_stats(struct dp_pdev *pdev)
5557 {
5558 	struct cdp_pdev_mon_stats *rx_mon_stats;
5559 
5560 	rx_mon_stats = &pdev->rx_mon_stats;
5561 
5562 	DP_PRINT_STATS("PDEV Rx Monitor Stats:\n");
5563 
5564 	dp_rx_mon_print_dbg_ppdu_stats(rx_mon_stats);
5565 
5566 	DP_PRINT_STATS("status_ppdu_done_cnt = %d",
5567 		       rx_mon_stats->status_ppdu_done);
5568 	DP_PRINT_STATS("dest_ppdu_done_cnt = %d",
5569 		       rx_mon_stats->dest_ppdu_done);
5570 	DP_PRINT_STATS("dest_mpdu_done_cnt = %d",
5571 		       rx_mon_stats->dest_mpdu_done);
5572 	DP_PRINT_STATS("dest_mpdu_drop_cnt = %d",
5573 		       rx_mon_stats->dest_mpdu_drop);
5574 }
5575 
5576 /**
5577  * dp_print_soc_tx_stats(): Print SOC level  stats
5578  * @soc DP_SOC Handle
5579  *
5580  * Return: void
5581  */
5582 static inline void
5583 dp_print_soc_tx_stats(struct dp_soc *soc)
5584 {
5585 	uint8_t desc_pool_id;
5586 	soc->stats.tx.desc_in_use = 0;
5587 
5588 	DP_PRINT_STATS("SOC Tx Stats:\n");
5589 
5590 	for (desc_pool_id = 0;
5591 	     desc_pool_id < wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
5592 	     desc_pool_id++)
5593 		soc->stats.tx.desc_in_use +=
5594 			soc->tx_desc[desc_pool_id].num_allocated;
5595 
5596 	DP_PRINT_STATS("Tx Descriptors In Use = %d",
5597 			soc->stats.tx.desc_in_use);
5598 	DP_PRINT_STATS("Invalid peer:");
5599 	DP_PRINT_STATS("	Packets = %d",
5600 			soc->stats.tx.tx_invalid_peer.num);
5601 	DP_PRINT_STATS("	Bytes = %llu",
5602 			soc->stats.tx.tx_invalid_peer.bytes);
5603 	DP_PRINT_STATS("Packets dropped due to TCL ring full = %d %d %d",
5604 			soc->stats.tx.tcl_ring_full[0],
5605 			soc->stats.tx.tcl_ring_full[1],
5606 			soc->stats.tx.tcl_ring_full[2]);
5607 
5608 }
5609 /**
5610  * dp_print_soc_rx_stats: Print SOC level Rx stats
5611  * @soc: DP_SOC Handle
5612  *
5613  * Return:void
5614  */
5615 static inline void
5616 dp_print_soc_rx_stats(struct dp_soc *soc)
5617 {
5618 	uint32_t i;
5619 	char reo_error[DP_REO_ERR_LENGTH];
5620 	char rxdma_error[DP_RXDMA_ERR_LENGTH];
5621 	uint8_t index = 0;
5622 
5623 	DP_PRINT_STATS("SOC Rx Stats:\n");
5624 	DP_PRINT_STATS("Fragmented packets: %u",
5625 		       soc->stats.rx.rx_frags);
5626 	DP_PRINT_STATS("Reo reinjected packets: %u",
5627 		       soc->stats.rx.reo_reinject);
5628 	DP_PRINT_STATS("Errors:\n");
5629 	DP_PRINT_STATS("Rx Decrypt Errors = %d",
5630 			(soc->stats.rx.err.rxdma_error[HAL_RXDMA_ERR_DECRYPT] +
5631 			soc->stats.rx.err.rxdma_error[HAL_RXDMA_ERR_TKIP_MIC]));
5632 	DP_PRINT_STATS("Invalid RBM = %d",
5633 			soc->stats.rx.err.invalid_rbm);
5634 	DP_PRINT_STATS("Invalid Vdev = %d",
5635 			soc->stats.rx.err.invalid_vdev);
5636 	DP_PRINT_STATS("Invalid Pdev = %d",
5637 			soc->stats.rx.err.invalid_pdev);
5638 	DP_PRINT_STATS("Invalid Peer = %d",
5639 			soc->stats.rx.err.rx_invalid_peer.num);
5640 	DP_PRINT_STATS("HAL Ring Access Fail = %d",
5641 			soc->stats.rx.err.hal_ring_access_fail);
5642 	DP_PRINT_STATS("RX frags: %d", soc->stats.rx.rx_frags);
5643 	DP_PRINT_STATS("RX HP out_of_sync: %d", soc->stats.rx.hp_oos);
5644 
5645 	for (i = 0; i < HAL_RXDMA_ERR_MAX; i++) {
5646 		index += qdf_snprint(&rxdma_error[index],
5647 				DP_RXDMA_ERR_LENGTH - index,
5648 				" %d", soc->stats.rx.err.rxdma_error[i]);
5649 	}
5650 	DP_PRINT_STATS("RXDMA Error (0-31):%s",
5651 			rxdma_error);
5652 
5653 	index = 0;
5654 	for (i = 0; i < HAL_REO_ERR_MAX; i++) {
5655 		index += qdf_snprint(&reo_error[index],
5656 				DP_REO_ERR_LENGTH - index,
5657 				" %d", soc->stats.rx.err.reo_error[i]);
5658 	}
5659 	DP_PRINT_STATS("REO Error(0-14):%s",
5660 			reo_error);
5661 }
5662 
5663 
5664 /**
5665  * dp_print_ring_stat_from_hal(): Print hal level ring stats
5666  * @soc: DP_SOC handle
5667  * @srng: DP_SRNG handle
5668  * @ring_name: SRNG name
5669  *
5670  * Return: void
5671  */
5672 static inline void
5673 dp_print_ring_stat_from_hal(struct dp_soc *soc,  struct dp_srng *srng,
5674 	char *ring_name)
5675 {
5676 	uint32_t tailp;
5677 	uint32_t headp;
5678 
5679 	if (srng->hal_srng != NULL) {
5680 		hal_api_get_tphp(soc->hal_soc, srng->hal_srng, &tailp, &headp);
5681 		DP_PRINT_STATS("%s : Head pointer = %d  Tail Pointer = %d\n",
5682 				ring_name, headp, tailp);
5683 	}
5684 }
5685 
5686 /**
5687  * dp_print_ring_stats(): Print tail and head pointer
5688  * @pdev: DP_PDEV handle
5689  *
5690  * Return:void
5691  */
5692 static inline void
5693 dp_print_ring_stats(struct dp_pdev *pdev)
5694 {
5695 	uint32_t i;
5696 	char ring_name[STR_MAXLEN + 1];
5697 	int mac_id;
5698 
5699 	dp_print_ring_stat_from_hal(pdev->soc,
5700 			&pdev->soc->reo_exception_ring,
5701 			"Reo Exception Ring");
5702 	dp_print_ring_stat_from_hal(pdev->soc,
5703 			&pdev->soc->reo_reinject_ring,
5704 			"Reo Inject Ring");
5705 	dp_print_ring_stat_from_hal(pdev->soc,
5706 			&pdev->soc->reo_cmd_ring,
5707 			"Reo Command Ring");
5708 	dp_print_ring_stat_from_hal(pdev->soc,
5709 			&pdev->soc->reo_status_ring,
5710 			"Reo Status Ring");
5711 	dp_print_ring_stat_from_hal(pdev->soc,
5712 			&pdev->soc->rx_rel_ring,
5713 			"Rx Release ring");
5714 	dp_print_ring_stat_from_hal(pdev->soc,
5715 			&pdev->soc->tcl_cmd_ring,
5716 			"Tcl command Ring");
5717 	dp_print_ring_stat_from_hal(pdev->soc,
5718 			&pdev->soc->tcl_status_ring,
5719 			"Tcl Status Ring");
5720 	dp_print_ring_stat_from_hal(pdev->soc,
5721 			&pdev->soc->wbm_desc_rel_ring,
5722 			"Wbm Desc Rel Ring");
5723 	for (i = 0; i < MAX_REO_DEST_RINGS; i++) {
5724 		snprintf(ring_name, STR_MAXLEN, "Reo Dest Ring %d", i);
5725 		dp_print_ring_stat_from_hal(pdev->soc,
5726 				&pdev->soc->reo_dest_ring[i],
5727 				ring_name);
5728 	}
5729 	for (i = 0; i < pdev->soc->num_tcl_data_rings; i++) {
5730 		snprintf(ring_name, STR_MAXLEN, "Tcl Data Ring %d", i);
5731 		dp_print_ring_stat_from_hal(pdev->soc,
5732 				&pdev->soc->tcl_data_ring[i],
5733 				ring_name);
5734 	}
5735 	for (i = 0; i < MAX_TCL_DATA_RINGS; i++) {
5736 		snprintf(ring_name, STR_MAXLEN, "Tx Comp Ring %d", i);
5737 		dp_print_ring_stat_from_hal(pdev->soc,
5738 				&pdev->soc->tx_comp_ring[i],
5739 				ring_name);
5740 	}
5741 	dp_print_ring_stat_from_hal(pdev->soc,
5742 			&pdev->rx_refill_buf_ring,
5743 			"Rx Refill Buf Ring");
5744 
5745 	dp_print_ring_stat_from_hal(pdev->soc,
5746 			&pdev->rx_refill_buf_ring2,
5747 			"Second Rx Refill Buf Ring");
5748 
5749 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
5750 		dp_print_ring_stat_from_hal(pdev->soc,
5751 				&pdev->rxdma_mon_buf_ring[mac_id],
5752 				"Rxdma Mon Buf Ring");
5753 		dp_print_ring_stat_from_hal(pdev->soc,
5754 				&pdev->rxdma_mon_dst_ring[mac_id],
5755 				"Rxdma Mon Dst Ring");
5756 		dp_print_ring_stat_from_hal(pdev->soc,
5757 				&pdev->rxdma_mon_status_ring[mac_id],
5758 				"Rxdma Mon Status Ring");
5759 		dp_print_ring_stat_from_hal(pdev->soc,
5760 				&pdev->rxdma_mon_desc_ring[mac_id],
5761 				"Rxdma mon desc Ring");
5762 	}
5763 
5764 	for (i = 0; i < NUM_RXDMA_RINGS_PER_PDEV; i++) {
5765 		snprintf(ring_name, STR_MAXLEN, "Rxdma err dst ring %d", i);
5766 		dp_print_ring_stat_from_hal(pdev->soc,
5767 			&pdev->rxdma_err_dst_ring[i],
5768 			ring_name);
5769 	}
5770 
5771 	for (i = 0; i < MAX_RX_MAC_RINGS; i++) {
5772 		snprintf(ring_name, STR_MAXLEN, "Rx mac buf ring %d", i);
5773 		dp_print_ring_stat_from_hal(pdev->soc,
5774 				&pdev->rx_mac_buf_ring[i],
5775 				ring_name);
5776 	}
5777 }
5778 
5779 /**
5780  * dp_txrx_host_stats_clr(): Reinitialize the txrx stats
5781  * @vdev: DP_VDEV handle
5782  *
5783  * Return:void
5784  */
5785 static inline void
5786 dp_txrx_host_stats_clr(struct dp_vdev *vdev)
5787 {
5788 	struct dp_peer *peer = NULL;
5789 	struct dp_soc *soc = (struct dp_soc *)vdev->pdev->soc;
5790 
5791 	DP_STATS_CLR(vdev->pdev);
5792 	DP_STATS_CLR(vdev->pdev->soc);
5793 	DP_STATS_CLR(vdev);
5794 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
5795 		if (!peer)
5796 			return;
5797 		DP_STATS_CLR(peer);
5798 
5799 		if (soc->cdp_soc.ol_ops->update_dp_stats) {
5800 			soc->cdp_soc.ol_ops->update_dp_stats(
5801 					vdev->pdev->ctrl_pdev,
5802 					&peer->stats,
5803 					peer->peer_ids[0],
5804 					UPDATE_PEER_STATS);
5805 		}
5806 
5807 	}
5808 
5809 	if (soc->cdp_soc.ol_ops->update_dp_stats)
5810 		soc->cdp_soc.ol_ops->update_dp_stats(vdev->pdev->ctrl_pdev,
5811 				&vdev->stats, (uint16_t)vdev->vdev_id,
5812 				UPDATE_VDEV_STATS);
5813 }
5814 
5815 /**
5816  * dp_print_common_rates_info(): Print common rate for tx or rx
5817  * @pkt_type_array: rate type array contains rate info
5818  *
5819  * Return:void
5820  */
5821 static inline void
5822 dp_print_common_rates_info(struct cdp_pkt_type *pkt_type_array)
5823 {
5824 	uint8_t mcs, pkt_type;
5825 
5826 	for (pkt_type = 0; pkt_type < DOT11_MAX; pkt_type++) {
5827 		for (mcs = 0; mcs < MAX_MCS; mcs++) {
5828 			if (!dp_rate_string[pkt_type][mcs].valid)
5829 				continue;
5830 
5831 			DP_PRINT_STATS("	%s = %d",
5832 				       dp_rate_string[pkt_type][mcs].mcs_type,
5833 				       pkt_type_array[pkt_type].mcs_count[mcs]);
5834 		}
5835 
5836 		DP_PRINT_STATS("\n");
5837 	}
5838 }
5839 
5840 /**
5841  * dp_print_rx_rates(): Print Rx rate stats
5842  * @vdev: DP_VDEV handle
5843  *
5844  * Return:void
5845  */
5846 static inline void
5847 dp_print_rx_rates(struct dp_vdev *vdev)
5848 {
5849 	struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;
5850 	uint8_t i;
5851 	uint8_t index = 0;
5852 	char nss[DP_NSS_LENGTH];
5853 
5854 	DP_PRINT_STATS("Rx Rate Info:\n");
5855 	dp_print_common_rates_info(pdev->stats.rx.pkt_type);
5856 
5857 
5858 	index = 0;
5859 	for (i = 0; i < SS_COUNT; i++) {
5860 		index += qdf_snprint(&nss[index], DP_NSS_LENGTH - index,
5861 				" %d", pdev->stats.rx.nss[i]);
5862 	}
5863 	DP_PRINT_STATS("NSS(1-8) = %s",
5864 			nss);
5865 
5866 	DP_PRINT_STATS("SGI ="
5867 			" 0.8us %d,"
5868 			" 0.4us %d,"
5869 			" 1.6us %d,"
5870 			" 3.2us %d,",
5871 			pdev->stats.rx.sgi_count[0],
5872 			pdev->stats.rx.sgi_count[1],
5873 			pdev->stats.rx.sgi_count[2],
5874 			pdev->stats.rx.sgi_count[3]);
5875 	DP_PRINT_STATS("BW Counts = 20MHZ %d, 40MHZ %d, 80MHZ %d, 160MHZ %d",
5876 			pdev->stats.rx.bw[0], pdev->stats.rx.bw[1],
5877 			pdev->stats.rx.bw[2], pdev->stats.rx.bw[3]);
5878 	DP_PRINT_STATS("Reception Type ="
5879 			" SU: %d,"
5880 			" MU_MIMO:%d,"
5881 			" MU_OFDMA:%d,"
5882 			" MU_OFDMA_MIMO:%d\n",
5883 			pdev->stats.rx.reception_type[0],
5884 			pdev->stats.rx.reception_type[1],
5885 			pdev->stats.rx.reception_type[2],
5886 			pdev->stats.rx.reception_type[3]);
5887 	DP_PRINT_STATS("Aggregation:\n");
5888 	DP_PRINT_STATS("Number of Msdu's Part of Ampdus = %d",
5889 			pdev->stats.rx.ampdu_cnt);
5890 	DP_PRINT_STATS("Number of Msdu's With No Mpdu Level Aggregation : %d",
5891 			pdev->stats.rx.non_ampdu_cnt);
5892 	DP_PRINT_STATS("Number of Msdu's Part of Amsdu: %d",
5893 			pdev->stats.rx.amsdu_cnt);
5894 	DP_PRINT_STATS("Number of Msdu's With No Msdu Level Aggregation: %d",
5895 			pdev->stats.rx.non_amsdu_cnt);
5896 }
5897 
5898 /**
5899  * dp_print_tx_rates(): Print tx rates
5900  * @vdev: DP_VDEV handle
5901  *
5902  * Return:void
5903  */
5904 static inline void
5905 dp_print_tx_rates(struct dp_vdev *vdev)
5906 {
5907 	struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;
5908 	uint8_t index;
5909 	char nss[DP_NSS_LENGTH];
5910 	int nss_index;
5911 
5912 	DP_PRINT_STATS("Tx Rate Info:\n");
5913 	dp_print_common_rates_info(pdev->stats.tx.pkt_type);
5914 
5915 	DP_PRINT_STATS("SGI ="
5916 			" 0.8us %d"
5917 			" 0.4us %d"
5918 			" 1.6us %d"
5919 			" 3.2us %d",
5920 			pdev->stats.tx.sgi_count[0],
5921 			pdev->stats.tx.sgi_count[1],
5922 			pdev->stats.tx.sgi_count[2],
5923 			pdev->stats.tx.sgi_count[3]);
5924 
5925 	DP_PRINT_STATS("BW Counts = 20MHZ %d, 40MHZ %d, 80MHZ %d, 160MHZ %d",
5926 			pdev->stats.tx.bw[0], pdev->stats.tx.bw[1],
5927 			pdev->stats.tx.bw[2], pdev->stats.tx.bw[3]);
5928 
5929 	index = 0;
5930 	for (nss_index = 0; nss_index < SS_COUNT; nss_index++) {
5931 		index += qdf_snprint(&nss[index], DP_NSS_LENGTH - index,
5932 				" %d", pdev->stats.tx.nss[nss_index]);
5933 	}
5934 
5935 	DP_PRINT_STATS("NSS(1-8) = %s", nss);
5936 	DP_PRINT_STATS("OFDMA = %d", pdev->stats.tx.ofdma);
5937 	DP_PRINT_STATS("STBC = %d", pdev->stats.tx.stbc);
5938 	DP_PRINT_STATS("LDPC = %d", pdev->stats.tx.ldpc);
5939 	DP_PRINT_STATS("Retries = %d", pdev->stats.tx.retries);
5940 	DP_PRINT_STATS("Last ack rssi = %d\n", pdev->stats.tx.last_ack_rssi);
5941 
5942 	DP_PRINT_STATS("Aggregation:\n");
5943 	DP_PRINT_STATS("Number of Msdu's Part of Amsdu = %d",
5944 			pdev->stats.tx.amsdu_cnt);
5945 	DP_PRINT_STATS("Number of Msdu's With No Msdu Level Aggregation = %d",
5946 			pdev->stats.tx.non_amsdu_cnt);
5947 }
5948 
5949 /**
5950  * dp_print_peer_stats():print peer stats
5951  * @peer: DP_PEER handle
5952  *
5953  * return void
5954  */
5955 static inline void dp_print_peer_stats(struct dp_peer *peer)
5956 {
5957 	uint8_t i;
5958 	uint32_t index;
5959 	char nss[DP_NSS_LENGTH];
5960 	DP_PRINT_STATS("Node Tx Stats:\n");
5961 	DP_PRINT_STATS("Total Packet Completions = %d",
5962 			peer->stats.tx.comp_pkt.num);
5963 	DP_PRINT_STATS("Total Bytes Completions = %llu",
5964 			peer->stats.tx.comp_pkt.bytes);
5965 	DP_PRINT_STATS("Success Packets = %d",
5966 			peer->stats.tx.tx_success.num);
5967 	DP_PRINT_STATS("Success Bytes = %llu",
5968 			peer->stats.tx.tx_success.bytes);
5969 	DP_PRINT_STATS("Unicast Success Packets = %d",
5970 			peer->stats.tx.ucast.num);
5971 	DP_PRINT_STATS("Unicast Success Bytes = %llu",
5972 			peer->stats.tx.ucast.bytes);
5973 	DP_PRINT_STATS("Multicast Success Packets = %d",
5974 			peer->stats.tx.mcast.num);
5975 	DP_PRINT_STATS("Multicast Success Bytes = %llu",
5976 			peer->stats.tx.mcast.bytes);
5977 	DP_PRINT_STATS("Broadcast Success Packets = %d",
5978 			peer->stats.tx.bcast.num);
5979 	DP_PRINT_STATS("Broadcast Success Bytes = %llu",
5980 			peer->stats.tx.bcast.bytes);
5981 	DP_PRINT_STATS("Packets Failed = %d",
5982 			peer->stats.tx.tx_failed);
5983 	DP_PRINT_STATS("Packets In OFDMA = %d",
5984 			peer->stats.tx.ofdma);
5985 	DP_PRINT_STATS("Packets In STBC = %d",
5986 			peer->stats.tx.stbc);
5987 	DP_PRINT_STATS("Packets In LDPC = %d",
5988 			peer->stats.tx.ldpc);
5989 	DP_PRINT_STATS("Packet Retries = %d",
5990 			peer->stats.tx.retries);
5991 	DP_PRINT_STATS("MSDU's Part of AMSDU = %d",
5992 			peer->stats.tx.amsdu_cnt);
5993 	DP_PRINT_STATS("Last Packet RSSI = %d",
5994 			peer->stats.tx.last_ack_rssi);
5995 	DP_PRINT_STATS("Dropped At FW: Removed = %d",
5996 			peer->stats.tx.dropped.fw_rem);
5997 	DP_PRINT_STATS("Dropped At FW: Removed transmitted = %d",
5998 			peer->stats.tx.dropped.fw_rem_tx);
5999 	DP_PRINT_STATS("Dropped At FW: Removed Untransmitted = %d",
6000 			peer->stats.tx.dropped.fw_rem_notx);
6001 	DP_PRINT_STATS("Dropped : Age Out = %d",
6002 			peer->stats.tx.dropped.age_out);
6003 	DP_PRINT_STATS("NAWDS : ");
6004 	DP_PRINT_STATS("	Nawds multicast Drop Tx Packet = %d",
6005 			peer->stats.tx.nawds_mcast_drop);
6006 	DP_PRINT_STATS("	Nawds multicast  Tx Packet Count = %d",
6007 			peer->stats.tx.nawds_mcast.num);
6008 	DP_PRINT_STATS("	Nawds multicast  Tx Packet Bytes = %llu",
6009 			peer->stats.tx.nawds_mcast.bytes);
6010 
6011 	DP_PRINT_STATS("Rate Info:");
6012 	dp_print_common_rates_info(peer->stats.tx.pkt_type);
6013 
6014 
6015 	DP_PRINT_STATS("SGI = "
6016 			" 0.8us %d"
6017 			" 0.4us %d"
6018 			" 1.6us %d"
6019 			" 3.2us %d",
6020 			peer->stats.tx.sgi_count[0],
6021 			peer->stats.tx.sgi_count[1],
6022 			peer->stats.tx.sgi_count[2],
6023 			peer->stats.tx.sgi_count[3]);
6024 	DP_PRINT_STATS("Excess Retries per AC ");
6025 	DP_PRINT_STATS("	 Best effort = %d",
6026 			peer->stats.tx.excess_retries_per_ac[0]);
6027 	DP_PRINT_STATS("	 Background= %d",
6028 			peer->stats.tx.excess_retries_per_ac[1]);
6029 	DP_PRINT_STATS("	 Video = %d",
6030 			peer->stats.tx.excess_retries_per_ac[2]);
6031 	DP_PRINT_STATS("	 Voice = %d",
6032 			peer->stats.tx.excess_retries_per_ac[3]);
6033 	DP_PRINT_STATS("BW Counts = 20MHZ %d 40MHZ %d 80MHZ %d 160MHZ %d\n",
6034 			peer->stats.tx.bw[2], peer->stats.tx.bw[3],
6035 			peer->stats.tx.bw[4], peer->stats.tx.bw[5]);
6036 
6037 	index = 0;
6038 	for (i = 0; i < SS_COUNT; i++) {
6039 		index += qdf_snprint(&nss[index], DP_NSS_LENGTH - index,
6040 				" %d", peer->stats.tx.nss[i]);
6041 	}
6042 	DP_PRINT_STATS("NSS(1-8) = %s",
6043 			nss);
6044 
6045 	DP_PRINT_STATS("Aggregation:");
6046 	DP_PRINT_STATS("	Number of Msdu's Part of Amsdu = %d",
6047 			peer->stats.tx.amsdu_cnt);
6048 	DP_PRINT_STATS("	Number of Msdu's With No Msdu Level Aggregation = %d\n",
6049 			peer->stats.tx.non_amsdu_cnt);
6050 
6051 	DP_PRINT_STATS("Bytes and Packets transmitted  in last one sec:");
6052 	DP_PRINT_STATS("	Bytes transmitted in last sec: %d",
6053 		       peer->stats.tx.tx_byte_rate);
6054 	DP_PRINT_STATS("	Data transmitted in last sec: %d",
6055 		       peer->stats.tx.tx_data_rate);
6056 
6057 	DP_PRINT_STATS("Node Rx Stats:");
6058 	DP_PRINT_STATS("Packets Sent To Stack = %d",
6059 			peer->stats.rx.to_stack.num);
6060 	DP_PRINT_STATS("Bytes Sent To Stack = %llu",
6061 			peer->stats.rx.to_stack.bytes);
6062 	for (i = 0; i <  CDP_MAX_RX_RINGS; i++) {
6063 		DP_PRINT_STATS("Ring Id = %d", i);
6064 		DP_PRINT_STATS("	Packets Received = %d",
6065 				peer->stats.rx.rcvd_reo[i].num);
6066 		DP_PRINT_STATS("	Bytes Received = %llu",
6067 				peer->stats.rx.rcvd_reo[i].bytes);
6068 	}
6069 	DP_PRINT_STATS("Multicast Packets Received = %d",
6070 			peer->stats.rx.multicast.num);
6071 	DP_PRINT_STATS("Multicast Bytes Received = %llu",
6072 			peer->stats.rx.multicast.bytes);
6073 	DP_PRINT_STATS("Broadcast Packets Received = %d",
6074 			peer->stats.rx.bcast.num);
6075 	DP_PRINT_STATS("Broadcast Bytes Received = %llu",
6076 			peer->stats.rx.bcast.bytes);
6077 	DP_PRINT_STATS("Intra BSS Packets Received = %d",
6078 			peer->stats.rx.intra_bss.pkts.num);
6079 	DP_PRINT_STATS("Intra BSS Bytes Received = %llu",
6080 			peer->stats.rx.intra_bss.pkts.bytes);
6081 	DP_PRINT_STATS("Raw Packets Received = %d",
6082 			peer->stats.rx.raw.num);
6083 	DP_PRINT_STATS("Raw Bytes Received = %llu",
6084 			peer->stats.rx.raw.bytes);
6085 	DP_PRINT_STATS("Errors: MIC Errors = %d",
6086 			peer->stats.rx.err.mic_err);
6087 	DP_PRINT_STATS("Erros: Decryption Errors = %d",
6088 			peer->stats.rx.err.decrypt_err);
6089 	DP_PRINT_STATS("Msdu's Received As Part of Ampdu = %d",
6090 			peer->stats.rx.non_ampdu_cnt);
6091 	DP_PRINT_STATS("Msdu's Recived As Ampdu = %d",
6092 			peer->stats.rx.ampdu_cnt);
6093 	DP_PRINT_STATS("Msdu's Received Not Part of Amsdu's = %d",
6094 			peer->stats.rx.non_amsdu_cnt);
6095 	DP_PRINT_STATS("MSDUs Received As Part of Amsdu = %d",
6096 			peer->stats.rx.amsdu_cnt);
6097 	DP_PRINT_STATS("NAWDS : ");
6098 	DP_PRINT_STATS("	Nawds multicast Drop Rx Packet = %d",
6099 			peer->stats.rx.nawds_mcast_drop);
6100 	DP_PRINT_STATS("SGI ="
6101 			" 0.8us %d"
6102 			" 0.4us %d"
6103 			" 1.6us %d"
6104 			" 3.2us %d",
6105 			peer->stats.rx.sgi_count[0],
6106 			peer->stats.rx.sgi_count[1],
6107 			peer->stats.rx.sgi_count[2],
6108 			peer->stats.rx.sgi_count[3]);
6109 	DP_PRINT_STATS("BW Counts = 20MHZ %d 40MHZ %d 80MHZ %d 160MHZ %d",
6110 			peer->stats.rx.bw[0], peer->stats.rx.bw[1],
6111 			peer->stats.rx.bw[2], peer->stats.rx.bw[3]);
6112 	DP_PRINT_STATS("Reception Type ="
6113 			" SU %d,"
6114 			" MU_MIMO %d,"
6115 			" MU_OFDMA %d,"
6116 			" MU_OFDMA_MIMO %d",
6117 			peer->stats.rx.reception_type[0],
6118 			peer->stats.rx.reception_type[1],
6119 			peer->stats.rx.reception_type[2],
6120 			peer->stats.rx.reception_type[3]);
6121 
6122 	dp_print_common_rates_info(peer->stats.rx.pkt_type);
6123 
6124 	index = 0;
6125 	for (i = 0; i < SS_COUNT; i++) {
6126 		index += qdf_snprint(&nss[index], DP_NSS_LENGTH - index,
6127 				" %d", peer->stats.rx.nss[i]);
6128 	}
6129 	DP_PRINT_STATS("NSS(1-8) = %s",
6130 			nss);
6131 
6132 	DP_PRINT_STATS("Aggregation:");
6133 	DP_PRINT_STATS("	Msdu's Part of Ampdu = %d",
6134 			peer->stats.rx.ampdu_cnt);
6135 	DP_PRINT_STATS("	Msdu's With No Mpdu Level Aggregation = %d",
6136 			peer->stats.rx.non_ampdu_cnt);
6137 	DP_PRINT_STATS("	Msdu's Part of Amsdu = %d",
6138 			peer->stats.rx.amsdu_cnt);
6139 	DP_PRINT_STATS("	Msdu's With No Msdu Level Aggregation = %d",
6140 			peer->stats.rx.non_amsdu_cnt);
6141 
6142 	DP_PRINT_STATS("Bytes and Packets received in last one sec:");
6143 	DP_PRINT_STATS("	Bytes received in last sec: %d",
6144 		       peer->stats.rx.rx_byte_rate);
6145 	DP_PRINT_STATS("	Data received in last sec: %d",
6146 		       peer->stats.rx.rx_data_rate);
6147 }
6148 
6149 /*
6150  * dp_get_host_peer_stats()- function to print peer stats
6151  * @pdev_handle: DP_PDEV handle
6152  * @mac_addr: mac address of the peer
6153  *
6154  * Return: void
6155  */
6156 static void
6157 dp_get_host_peer_stats(struct cdp_pdev *pdev_handle, char *mac_addr)
6158 {
6159 	struct dp_peer *peer;
6160 	uint8_t local_id;
6161 
6162 	peer = (struct dp_peer *)dp_find_peer_by_addr(pdev_handle, mac_addr,
6163 			&local_id);
6164 
6165 	if (!peer) {
6166 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
6167 			  "%s: Invalid peer\n", __func__);
6168 		return;
6169 	}
6170 
6171 	dp_print_peer_stats(peer);
6172 	dp_peer_rxtid_stats(peer, dp_rx_tid_stats_cb, NULL);
6173 }
6174 
6175 /**
6176  * dp_txrx_stats_help() - Helper function for Txrx_Stats
6177  *
6178  * Return: None
6179  */
6180 static void dp_txrx_stats_help(void)
6181 {
6182 	dp_info("Command: iwpriv wlan0 txrx_stats <stats_option> <mac_id>");
6183 	dp_info("stats_option:");
6184 	dp_info("  1 -- HTT Tx Statistics");
6185 	dp_info("  2 -- HTT Rx Statistics");
6186 	dp_info("  3 -- HTT Tx HW Queue Statistics");
6187 	dp_info("  4 -- HTT Tx HW Sched Statistics");
6188 	dp_info("  5 -- HTT Error Statistics");
6189 	dp_info("  6 -- HTT TQM Statistics");
6190 	dp_info("  7 -- HTT TQM CMDQ Statistics");
6191 	dp_info("  8 -- HTT TX_DE_CMN Statistics");
6192 	dp_info("  9 -- HTT Tx Rate Statistics");
6193 	dp_info(" 10 -- HTT Rx Rate Statistics");
6194 	dp_info(" 11 -- HTT Peer Statistics");
6195 	dp_info(" 12 -- HTT Tx SelfGen Statistics");
6196 	dp_info(" 13 -- HTT Tx MU HWQ Statistics");
6197 	dp_info(" 14 -- HTT RING_IF_INFO Statistics");
6198 	dp_info(" 15 -- HTT SRNG Statistics");
6199 	dp_info(" 16 -- HTT SFM Info Statistics");
6200 	dp_info(" 17 -- HTT PDEV_TX_MU_MIMO_SCHED INFO Statistics");
6201 	dp_info(" 18 -- HTT Peer List Details");
6202 	dp_info(" 20 -- Clear Host Statistics");
6203 	dp_info(" 21 -- Host Rx Rate Statistics");
6204 	dp_info(" 22 -- Host Tx Rate Statistics");
6205 	dp_info(" 23 -- Host Tx Statistics");
6206 	dp_info(" 24 -- Host Rx Statistics");
6207 	dp_info(" 25 -- Host AST Statistics");
6208 	dp_info(" 26 -- Host SRNG PTR Statistics");
6209 	dp_info(" 27 -- Host Mon Statistics");
6210 	dp_info(" 28 -- Host REO Queue Statistics");
6211 }
6212 
6213 /**
6214  * dp_print_host_stats()- Function to print the stats aggregated at host
6215  * @vdev_handle: DP_VDEV handle
6216  * @type: host stats type
6217  *
6218  * Available Stat types
6219  * TXRX_CLEAR_STATS  : Clear the stats
6220  * TXRX_RX_RATE_STATS: Print Rx Rate Info
6221  * TXRX_TX_RATE_STATS: Print Tx Rate Info
6222  * TXRX_TX_HOST_STATS: Print Tx Stats
6223  * TXRX_RX_HOST_STATS: Print Rx Stats
6224  * TXRX_AST_STATS: Print AST Stats
6225  * TXRX_SRNG_PTR_STATS: Print SRNG ring pointer stats
6226  *
6227  * Return: 0 on success, print error message in case of failure
6228  */
6229 static int
6230 dp_print_host_stats(struct cdp_vdev *vdev_handle,
6231 		    struct cdp_txrx_stats_req *req)
6232 {
6233 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
6234 	struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;
6235 	enum cdp_host_txrx_stats type =
6236 			dp_stats_mapping_table[req->stats][STATS_HOST];
6237 
6238 	dp_aggregate_pdev_stats(pdev);
6239 
6240 	switch (type) {
6241 	case TXRX_CLEAR_STATS:
6242 		dp_txrx_host_stats_clr(vdev);
6243 		break;
6244 	case TXRX_RX_RATE_STATS:
6245 		dp_print_rx_rates(vdev);
6246 		break;
6247 	case TXRX_TX_RATE_STATS:
6248 		dp_print_tx_rates(vdev);
6249 		break;
6250 	case TXRX_TX_HOST_STATS:
6251 		dp_print_pdev_tx_stats(pdev);
6252 		dp_print_soc_tx_stats(pdev->soc);
6253 		break;
6254 	case TXRX_RX_HOST_STATS:
6255 		dp_print_pdev_rx_stats(pdev);
6256 		dp_print_soc_rx_stats(pdev->soc);
6257 		break;
6258 	case TXRX_AST_STATS:
6259 		dp_print_ast_stats(pdev->soc);
6260 		dp_print_peer_table(vdev);
6261 		break;
6262 	case TXRX_SRNG_PTR_STATS:
6263 		dp_print_ring_stats(pdev);
6264 		break;
6265 	case TXRX_RX_MON_STATS:
6266 		dp_print_pdev_rx_mon_stats(pdev);
6267 		break;
6268 	case TXRX_REO_QUEUE_STATS:
6269 		dp_get_host_peer_stats((struct cdp_pdev *)pdev, req->peer_addr);
6270 		break;
6271 	default:
6272 		dp_info("Wrong Input For TxRx Host Stats");
6273 		dp_txrx_stats_help();
6274 		break;
6275 	}
6276 	return 0;
6277 }
6278 
6279 /*
6280  * dp_ppdu_ring_reset()- Reset PPDU Stats ring
6281  * @pdev: DP_PDEV handle
6282  *
6283  * Return: void
6284  */
6285 static void
6286 dp_ppdu_ring_reset(struct dp_pdev *pdev)
6287 {
6288 	struct htt_rx_ring_tlv_filter htt_tlv_filter;
6289 	int mac_id;
6290 
6291 	qdf_mem_set(&(htt_tlv_filter), sizeof(htt_tlv_filter), 0x0);
6292 
6293 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
6294 		int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id,
6295 							pdev->pdev_id);
6296 
6297 		htt_h2t_rx_ring_cfg(pdev->soc->htt_handle, mac_for_pdev,
6298 			pdev->rxdma_mon_status_ring[mac_id].hal_srng,
6299 			RXDMA_MONITOR_STATUS, RX_BUFFER_SIZE, &htt_tlv_filter);
6300 	}
6301 }
6302 
6303 /*
6304  * dp_ppdu_ring_cfg()- Configure PPDU Stats ring
6305  * @pdev: DP_PDEV handle
6306  *
6307  * Return: void
6308  */
6309 static void
6310 dp_ppdu_ring_cfg(struct dp_pdev *pdev)
6311 {
6312 	struct htt_rx_ring_tlv_filter htt_tlv_filter = {0};
6313 	int mac_id;
6314 
6315 	htt_tlv_filter.mpdu_start = 1;
6316 	htt_tlv_filter.msdu_start = 0;
6317 	htt_tlv_filter.packet = 0;
6318 	htt_tlv_filter.msdu_end = 0;
6319 	htt_tlv_filter.mpdu_end = 0;
6320 	htt_tlv_filter.attention = 0;
6321 	htt_tlv_filter.ppdu_start = 1;
6322 	htt_tlv_filter.ppdu_end = 1;
6323 	htt_tlv_filter.ppdu_end_user_stats = 1;
6324 	htt_tlv_filter.ppdu_end_user_stats_ext = 1;
6325 	htt_tlv_filter.ppdu_end_status_done = 1;
6326 	htt_tlv_filter.enable_fp = 1;
6327 	htt_tlv_filter.enable_md = 0;
6328 	if (pdev->neighbour_peers_added &&
6329 	    pdev->soc->hw_nac_monitor_support) {
6330 		htt_tlv_filter.enable_md = 1;
6331 		htt_tlv_filter.packet_header = 1;
6332 	}
6333 	if (pdev->mcopy_mode) {
6334 		htt_tlv_filter.packet_header = 1;
6335 		htt_tlv_filter.enable_mo = 1;
6336 	}
6337 	htt_tlv_filter.fp_mgmt_filter = FILTER_MGMT_ALL;
6338 	htt_tlv_filter.fp_ctrl_filter = FILTER_CTRL_ALL;
6339 	htt_tlv_filter.fp_data_filter = FILTER_DATA_ALL;
6340 	htt_tlv_filter.mo_mgmt_filter = FILTER_MGMT_ALL;
6341 	htt_tlv_filter.mo_ctrl_filter = FILTER_CTRL_ALL;
6342 	htt_tlv_filter.mo_data_filter = FILTER_DATA_ALL;
6343 	if (pdev->neighbour_peers_added &&
6344 	    pdev->soc->hw_nac_monitor_support)
6345 		htt_tlv_filter.md_data_filter = FILTER_DATA_ALL;
6346 
6347 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
6348 		int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id,
6349 						pdev->pdev_id);
6350 
6351 		htt_h2t_rx_ring_cfg(pdev->soc->htt_handle, mac_for_pdev,
6352 			pdev->rxdma_mon_status_ring[mac_id].hal_srng,
6353 			RXDMA_MONITOR_STATUS, RX_BUFFER_SIZE, &htt_tlv_filter);
6354 	}
6355 }
6356 
6357 /*
6358  * is_ppdu_txrx_capture_enabled() - API to check both pktlog and debug_sniffer
6359  *                              modes are enabled or not.
6360  * @dp_pdev: dp pdev handle.
6361  *
6362  * Return: bool
6363  */
6364 static inline bool is_ppdu_txrx_capture_enabled(struct dp_pdev *pdev)
6365 {
6366 	if (!pdev->pktlog_ppdu_stats && !pdev->tx_sniffer_enable &&
6367 	    !pdev->mcopy_mode)
6368 		return true;
6369 	else
6370 		return false;
6371 }
6372 
6373 /*
6374  *dp_set_bpr_enable() - API to enable/disable bpr feature
6375  *@pdev_handle: DP_PDEV handle.
6376  *@val: Provided value.
6377  *
6378  *Return: void
6379  */
6380 static void
6381 dp_set_bpr_enable(struct cdp_pdev *pdev_handle, int val)
6382 {
6383 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
6384 
6385 	switch (val) {
6386 	case CDP_BPR_DISABLE:
6387 		pdev->bpr_enable = CDP_BPR_DISABLE;
6388 		if (!pdev->pktlog_ppdu_stats && !pdev->enhanced_stats_en &&
6389 		    !pdev->tx_sniffer_enable && !pdev->mcopy_mode) {
6390 			dp_h2t_cfg_stats_msg_send(pdev, 0, pdev->pdev_id);
6391 		} else if (pdev->enhanced_stats_en &&
6392 			   !pdev->tx_sniffer_enable && !pdev->mcopy_mode &&
6393 			   !pdev->pktlog_ppdu_stats) {
6394 			dp_h2t_cfg_stats_msg_send(pdev,
6395 						  DP_PPDU_STATS_CFG_ENH_STATS,
6396 						  pdev->pdev_id);
6397 		}
6398 		break;
6399 	case CDP_BPR_ENABLE:
6400 		pdev->bpr_enable = CDP_BPR_ENABLE;
6401 		if (!pdev->enhanced_stats_en && !pdev->tx_sniffer_enable &&
6402 		    !pdev->mcopy_mode && !pdev->pktlog_ppdu_stats) {
6403 			dp_h2t_cfg_stats_msg_send(pdev,
6404 						  DP_PPDU_STATS_CFG_BPR,
6405 						  pdev->pdev_id);
6406 		} else if (pdev->enhanced_stats_en &&
6407 			   !pdev->tx_sniffer_enable && !pdev->mcopy_mode &&
6408 			   !pdev->pktlog_ppdu_stats) {
6409 			dp_h2t_cfg_stats_msg_send(pdev,
6410 						  DP_PPDU_STATS_CFG_BPR_ENH,
6411 						  pdev->pdev_id);
6412 		} else if (pdev->pktlog_ppdu_stats) {
6413 			dp_h2t_cfg_stats_msg_send(pdev,
6414 						  DP_PPDU_STATS_CFG_BPR_PKTLOG,
6415 						  pdev->pdev_id);
6416 		}
6417 		break;
6418 	default:
6419 		break;
6420 	}
6421 }
6422 
6423 /*
6424  * dp_config_debug_sniffer()- API to enable/disable debug sniffer
6425  * @pdev_handle: DP_PDEV handle
6426  * @val: user provided value
6427  *
6428  * Return: void
6429  */
6430 static void
6431 dp_config_debug_sniffer(struct cdp_pdev *pdev_handle, int val)
6432 {
6433 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
6434 
6435 	switch (val) {
6436 	case 0:
6437 		pdev->tx_sniffer_enable = 0;
6438 		pdev->mcopy_mode = 0;
6439 
6440 		if (!pdev->pktlog_ppdu_stats && !pdev->enhanced_stats_en &&
6441 		    !pdev->bpr_enable) {
6442 			dp_h2t_cfg_stats_msg_send(pdev, 0, pdev->pdev_id);
6443 			dp_ppdu_ring_reset(pdev);
6444 		} else if (pdev->enhanced_stats_en && !pdev->bpr_enable) {
6445 			dp_h2t_cfg_stats_msg_send(pdev,
6446 				DP_PPDU_STATS_CFG_ENH_STATS, pdev->pdev_id);
6447 		} else if (!pdev->enhanced_stats_en && pdev->bpr_enable) {
6448 			dp_h2t_cfg_stats_msg_send(pdev,
6449 						  DP_PPDU_STATS_CFG_BPR_ENH,
6450 						  pdev->pdev_id);
6451 		} else {
6452 			dp_h2t_cfg_stats_msg_send(pdev,
6453 						  DP_PPDU_STATS_CFG_BPR,
6454 						  pdev->pdev_id);
6455 		}
6456 		break;
6457 
6458 	case 1:
6459 		pdev->tx_sniffer_enable = 1;
6460 		pdev->mcopy_mode = 0;
6461 
6462 		if (!pdev->pktlog_ppdu_stats)
6463 			dp_h2t_cfg_stats_msg_send(pdev,
6464 				DP_PPDU_STATS_CFG_SNIFFER, pdev->pdev_id);
6465 		break;
6466 	case 2:
6467 		pdev->mcopy_mode = 1;
6468 		pdev->tx_sniffer_enable = 0;
6469 		dp_ppdu_ring_cfg(pdev);
6470 
6471 		if (!pdev->pktlog_ppdu_stats)
6472 			dp_h2t_cfg_stats_msg_send(pdev,
6473 				DP_PPDU_STATS_CFG_SNIFFER, pdev->pdev_id);
6474 		break;
6475 	default:
6476 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
6477 			"Invalid value");
6478 		break;
6479 	}
6480 }
6481 
6482 /*
6483  * dp_enable_enhanced_stats()- API to enable enhanced statistcs
6484  * @pdev_handle: DP_PDEV handle
6485  *
6486  * Return: void
6487  */
6488 static void
6489 dp_enable_enhanced_stats(struct cdp_pdev *pdev_handle)
6490 {
6491 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
6492 
6493 	if (pdev->enhanced_stats_en == 0)
6494 		dp_cal_client_timer_start(pdev->cal_client_ctx);
6495 
6496 	pdev->enhanced_stats_en = 1;
6497 
6498 	if (!pdev->mcopy_mode && !pdev->neighbour_peers_added &&
6499 	    !pdev->monitor_vdev)
6500 		dp_ppdu_ring_cfg(pdev);
6501 
6502 	if (is_ppdu_txrx_capture_enabled(pdev) && !pdev->bpr_enable) {
6503 		dp_h2t_cfg_stats_msg_send(pdev, DP_PPDU_STATS_CFG_ENH_STATS, pdev->pdev_id);
6504 	} else if (is_ppdu_txrx_capture_enabled(pdev) && pdev->bpr_enable) {
6505 		dp_h2t_cfg_stats_msg_send(pdev,
6506 					  DP_PPDU_STATS_CFG_BPR_ENH,
6507 					  pdev->pdev_id);
6508 	}
6509 }
6510 
6511 /*
6512  * dp_disable_enhanced_stats()- API to disable enhanced statistcs
6513  * @pdev_handle: DP_PDEV handle
6514  *
6515  * Return: void
6516  */
6517 static void
6518 dp_disable_enhanced_stats(struct cdp_pdev *pdev_handle)
6519 {
6520 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
6521 
6522 	if (pdev->enhanced_stats_en == 1)
6523 		dp_cal_client_timer_stop(pdev->cal_client_ctx);
6524 
6525 	pdev->enhanced_stats_en = 0;
6526 
6527 	if (is_ppdu_txrx_capture_enabled(pdev) && !pdev->bpr_enable) {
6528 		dp_h2t_cfg_stats_msg_send(pdev, 0, pdev->pdev_id);
6529 	} else if (is_ppdu_txrx_capture_enabled(pdev) && pdev->bpr_enable) {
6530 		dp_h2t_cfg_stats_msg_send(pdev,
6531 					  DP_PPDU_STATS_CFG_BPR,
6532 					  pdev->pdev_id);
6533 	}
6534 
6535 	if (!pdev->mcopy_mode && !pdev->neighbour_peers_added &&
6536 	    !pdev->monitor_vdev)
6537 		dp_ppdu_ring_reset(pdev);
6538 }
6539 
6540 /*
6541  * dp_get_fw_peer_stats()- function to print peer stats
6542  * @pdev_handle: DP_PDEV handle
6543  * @mac_addr: mac address of the peer
6544  * @cap: Type of htt stats requested
6545  *
6546  * Currently Supporting only MAC ID based requests Only
6547  *	1: HTT_PEER_STATS_REQ_MODE_NO_QUERY
6548  *	2: HTT_PEER_STATS_REQ_MODE_QUERY_TQM
6549  *	3: HTT_PEER_STATS_REQ_MODE_FLUSH_TQM
6550  *
6551  * Return: void
6552  */
6553 static void
6554 dp_get_fw_peer_stats(struct cdp_pdev *pdev_handle, uint8_t *mac_addr,
6555 		uint32_t cap)
6556 {
6557 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
6558 	int i;
6559 	uint32_t config_param0 = 0;
6560 	uint32_t config_param1 = 0;
6561 	uint32_t config_param2 = 0;
6562 	uint32_t config_param3 = 0;
6563 
6564 	HTT_DBG_EXT_STATS_PEER_INFO_IS_MAC_ADDR_SET(config_param0, 1);
6565 	config_param0 |= (1 << (cap + 1));
6566 
6567 	for (i = 0; i < HTT_PEER_STATS_MAX_TLV; i++) {
6568 		config_param1 |= (1 << i);
6569 	}
6570 
6571 	config_param2 |= (mac_addr[0] & 0x000000ff);
6572 	config_param2 |= ((mac_addr[1] << 8) & 0x0000ff00);
6573 	config_param2 |= ((mac_addr[2] << 16) & 0x00ff0000);
6574 	config_param2 |= ((mac_addr[3] << 24) & 0xff000000);
6575 
6576 	config_param3 |= (mac_addr[4] & 0x000000ff);
6577 	config_param3 |= ((mac_addr[5] << 8) & 0x0000ff00);
6578 
6579 	dp_h2t_ext_stats_msg_send(pdev, HTT_DBG_EXT_STATS_PEER_INFO,
6580 			config_param0, config_param1, config_param2,
6581 			config_param3, 0, 0, 0);
6582 
6583 }
6584 
6585 /* This struct definition will be removed from here
6586  * once it get added in FW headers*/
6587 struct httstats_cmd_req {
6588     uint32_t    config_param0;
6589     uint32_t    config_param1;
6590     uint32_t    config_param2;
6591     uint32_t    config_param3;
6592     int cookie;
6593     u_int8_t    stats_id;
6594 };
6595 
6596 /*
6597  * dp_get_htt_stats: function to process the httstas request
6598  * @pdev_handle: DP pdev handle
6599  * @data: pointer to request data
6600  * @data_len: length for request data
6601  *
6602  * return: void
6603  */
6604 static void
6605 dp_get_htt_stats(struct cdp_pdev *pdev_handle, void *data, uint32_t data_len)
6606 {
6607 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
6608 	struct httstats_cmd_req *req = (struct httstats_cmd_req *)data;
6609 
6610 	QDF_ASSERT(data_len == sizeof(struct httstats_cmd_req));
6611 	dp_h2t_ext_stats_msg_send(pdev, req->stats_id,
6612 				req->config_param0, req->config_param1,
6613 				req->config_param2, req->config_param3,
6614 				req->cookie, 0, 0);
6615 }
6616 
6617 /*
6618  * dp_set_pdev_param: function to set parameters in pdev
6619  * @pdev_handle: DP pdev handle
6620  * @param: parameter type to be set
6621  * @val: value of parameter to be set
6622  *
6623  * return: void
6624  */
6625 static void dp_set_pdev_param(struct cdp_pdev *pdev_handle,
6626 		enum cdp_pdev_param_type param, uint8_t val)
6627 {
6628 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
6629 	switch (param) {
6630 	case CDP_CONFIG_DEBUG_SNIFFER:
6631 		dp_config_debug_sniffer(pdev_handle, val);
6632 		break;
6633 	case CDP_CONFIG_BPR_ENABLE:
6634 		dp_set_bpr_enable(pdev_handle, val);
6635 		break;
6636 	case CDP_CONFIG_PRIMARY_RADIO:
6637 		pdev->is_primary = val;
6638 		break;
6639 	default:
6640 		break;
6641 	}
6642 }
6643 
6644 /*
6645  * dp_set_vdev_param: function to set parameters in vdev
6646  * @param: parameter type to be set
6647  * @val: value of parameter to be set
6648  *
6649  * return: void
6650  */
6651 static void dp_set_vdev_param(struct cdp_vdev *vdev_handle,
6652 		enum cdp_vdev_param_type param, uint32_t val)
6653 {
6654 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
6655 	switch (param) {
6656 	case CDP_ENABLE_WDS:
6657 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
6658 			  "wds_enable %d for vdev(%p) id(%d)\n",
6659 			  val, vdev, vdev->vdev_id);
6660 		vdev->wds_enabled = val;
6661 		break;
6662 	case CDP_ENABLE_NAWDS:
6663 		vdev->nawds_enabled = val;
6664 		break;
6665 	case CDP_ENABLE_MCAST_EN:
6666 		vdev->mcast_enhancement_en = val;
6667 		break;
6668 	case CDP_ENABLE_PROXYSTA:
6669 		vdev->proxysta_vdev = val;
6670 		break;
6671 	case CDP_UPDATE_TDLS_FLAGS:
6672 		vdev->tdls_link_connected = val;
6673 		break;
6674 	case CDP_CFG_WDS_AGING_TIMER:
6675 		if (val == 0)
6676 			qdf_timer_stop(&vdev->pdev->soc->wds_aging_timer);
6677 		else if (val != vdev->wds_aging_timer_val)
6678 			qdf_timer_mod(&vdev->pdev->soc->wds_aging_timer, val);
6679 
6680 		vdev->wds_aging_timer_val = val;
6681 		break;
6682 	case CDP_ENABLE_AP_BRIDGE:
6683 		if (wlan_op_mode_sta != vdev->opmode)
6684 			vdev->ap_bridge_enabled = val;
6685 		else
6686 			vdev->ap_bridge_enabled = false;
6687 		break;
6688 	case CDP_ENABLE_CIPHER:
6689 		vdev->sec_type = val;
6690 		break;
6691 	case CDP_ENABLE_QWRAP_ISOLATION:
6692 		vdev->isolation_vdev = val;
6693 		break;
6694 	default:
6695 		break;
6696 	}
6697 
6698 	dp_tx_vdev_update_search_flags(vdev);
6699 }
6700 
6701 /**
6702  * dp_peer_set_nawds: set nawds bit in peer
6703  * @peer_handle: pointer to peer
6704  * @value: enable/disable nawds
6705  *
6706  * return: void
6707  */
6708 static void dp_peer_set_nawds(struct cdp_peer *peer_handle, uint8_t value)
6709 {
6710 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
6711 	peer->nawds_enabled = value;
6712 }
6713 
6714 /*
6715  * dp_set_vdev_dscp_tid_map_wifi3(): Update Map ID selected for particular vdev
6716  * @vdev_handle: DP_VDEV handle
6717  * @map_id:ID of map that needs to be updated
6718  *
6719  * Return: void
6720  */
6721 static void dp_set_vdev_dscp_tid_map_wifi3(struct cdp_vdev *vdev_handle,
6722 		uint8_t map_id)
6723 {
6724 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
6725 	vdev->dscp_tid_map_id = map_id;
6726 	return;
6727 }
6728 
6729 /* dp_txrx_get_peer_stats - will return cdp_peer_stats
6730  * @peer_handle: DP_PEER handle
6731  *
6732  * return : cdp_peer_stats pointer
6733  */
6734 static struct cdp_peer_stats*
6735 		dp_txrx_get_peer_stats(struct cdp_peer *peer_handle)
6736 {
6737 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
6738 
6739 	qdf_assert(peer);
6740 
6741 	return &peer->stats;
6742 }
6743 
6744 /* dp_txrx_reset_peer_stats - reset cdp_peer_stats for particular peer
6745  * @peer_handle: DP_PEER handle
6746  *
6747  * return : void
6748  */
6749 static void dp_txrx_reset_peer_stats(struct cdp_peer *peer_handle)
6750 {
6751 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
6752 
6753 	qdf_assert(peer);
6754 
6755 	qdf_mem_set(&peer->stats, sizeof(peer->stats), 0);
6756 }
6757 
6758 /* dp_txrx_get_vdev_stats - Update buffer with cdp_vdev_stats
6759  * @vdev_handle: DP_VDEV handle
6760  * @buf: buffer for vdev stats
6761  *
6762  * return : int
6763  */
6764 static int  dp_txrx_get_vdev_stats(struct cdp_vdev *vdev_handle, void *buf,
6765 				   bool is_aggregate)
6766 {
6767 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
6768 	struct cdp_vdev_stats *vdev_stats = (struct cdp_vdev_stats *)buf;
6769 
6770 	if (is_aggregate)
6771 		dp_aggregate_vdev_stats(vdev, buf);
6772 	else
6773 		qdf_mem_copy(vdev_stats, &vdev->stats, sizeof(vdev->stats));
6774 
6775 	return 0;
6776 }
6777 
6778 /*
6779  * dp_get_total_per(): get total per
6780  * @pdev_handle: DP_PDEV handle
6781  *
6782  * Return: % error rate using retries per packet and success packets
6783  */
6784 static int dp_get_total_per(struct cdp_pdev *pdev_handle)
6785 {
6786 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
6787 
6788 	dp_aggregate_pdev_stats(pdev);
6789 	if ((pdev->stats.tx.tx_success.num + pdev->stats.tx.retries) == 0)
6790 		return 0;
6791 	return ((pdev->stats.tx.retries * 100) /
6792 		((pdev->stats.tx.tx_success.num) + (pdev->stats.tx.retries)));
6793 }
6794 
6795 /*
6796  * dp_txrx_stats_publish(): publish pdev stats into a buffer
6797  * @pdev_handle: DP_PDEV handle
6798  * @buf: to hold pdev_stats
6799  *
6800  * Return: int
6801  */
6802 static int
6803 dp_txrx_stats_publish(struct cdp_pdev *pdev_handle, void *buf)
6804 {
6805 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
6806 	struct cdp_pdev_stats *buffer = (struct cdp_pdev_stats *) buf;
6807 	struct cdp_txrx_stats_req req = {0,};
6808 
6809 	dp_aggregate_pdev_stats(pdev);
6810 	req.stats = (enum cdp_stats)HTT_DBG_EXT_STATS_PDEV_TX;
6811 	req.cookie_val = 1;
6812 	dp_h2t_ext_stats_msg_send(pdev, req.stats, req.param0,
6813 				req.param1, req.param2, req.param3, 0,
6814 				req.cookie_val, 0);
6815 
6816 	msleep(DP_MAX_SLEEP_TIME);
6817 
6818 	req.stats = (enum cdp_stats)HTT_DBG_EXT_STATS_PDEV_RX;
6819 	req.cookie_val = 1;
6820 	dp_h2t_ext_stats_msg_send(pdev, req.stats, req.param0,
6821 				req.param1, req.param2, req.param3, 0,
6822 				req.cookie_val, 0);
6823 
6824 	msleep(DP_MAX_SLEEP_TIME);
6825 	qdf_mem_copy(buffer, &pdev->stats, sizeof(pdev->stats));
6826 
6827 	return TXRX_STATS_LEVEL;
6828 }
6829 
6830 /**
6831  * dp_set_pdev_dscp_tid_map_wifi3(): update dscp tid map in pdev
6832  * @pdev: DP_PDEV handle
6833  * @map_id: ID of map that needs to be updated
6834  * @tos: index value in map
6835  * @tid: tid value passed by the user
6836  *
6837  * Return: void
6838  */
6839 static void dp_set_pdev_dscp_tid_map_wifi3(struct cdp_pdev *pdev_handle,
6840 		uint8_t map_id, uint8_t tos, uint8_t tid)
6841 {
6842 	uint8_t dscp;
6843 	struct dp_pdev *pdev = (struct dp_pdev *) pdev_handle;
6844 	struct dp_soc *soc = pdev->soc;
6845 
6846 	if (!soc)
6847 		return;
6848 
6849 	dscp = (tos >> DP_IP_DSCP_SHIFT) & DP_IP_DSCP_MASK;
6850 	pdev->dscp_tid_map[map_id][dscp] = tid;
6851 
6852 	if (map_id < soc->num_hw_dscp_tid_map)
6853 		hal_tx_update_dscp_tid(soc->hal_soc, tid,
6854 				       map_id, dscp);
6855 	return;
6856 }
6857 
6858 /**
6859  * dp_fw_stats_process(): Process TxRX FW stats request
6860  * @vdev_handle: DP VDEV handle
6861  * @req: stats request
6862  *
6863  * return: int
6864  */
6865 static int dp_fw_stats_process(struct cdp_vdev *vdev_handle,
6866 		struct cdp_txrx_stats_req *req)
6867 {
6868 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
6869 	struct dp_pdev *pdev = NULL;
6870 	uint32_t stats = req->stats;
6871 	uint8_t mac_id = req->mac_id;
6872 
6873 	if (!vdev) {
6874 		DP_TRACE(NONE, "VDEV not found");
6875 		return 1;
6876 	}
6877 	pdev = vdev->pdev;
6878 
6879 	/*
6880 	 * For HTT_DBG_EXT_STATS_RESET command, FW need to config
6881 	 * from param0 to param3 according to below rule:
6882 	 *
6883 	 * PARAM:
6884 	 *   - config_param0 : start_offset (stats type)
6885 	 *   - config_param1 : stats bmask from start offset
6886 	 *   - config_param2 : stats bmask from start offset + 32
6887 	 *   - config_param3 : stats bmask from start offset + 64
6888 	 */
6889 	if (req->stats == CDP_TXRX_STATS_0) {
6890 		req->param0 = HTT_DBG_EXT_STATS_PDEV_TX;
6891 		req->param1 = 0xFFFFFFFF;
6892 		req->param2 = 0xFFFFFFFF;
6893 		req->param3 = 0xFFFFFFFF;
6894 	} else if (req->stats == (uint8_t)HTT_DBG_EXT_STATS_PDEV_TX_MU) {
6895 		req->param0 = HTT_DBG_EXT_STATS_SET_VDEV_MASK(vdev->vdev_id);
6896 	}
6897 
6898 	return dp_h2t_ext_stats_msg_send(pdev, stats, req->param0,
6899 				req->param1, req->param2, req->param3,
6900 				0, 0, mac_id);
6901 }
6902 
6903 /**
6904  * dp_txrx_stats_request - function to map to firmware and host stats
6905  * @vdev: virtual handle
6906  * @req: stats request
6907  *
6908  * Return: QDF_STATUS
6909  */
6910 static
6911 QDF_STATUS dp_txrx_stats_request(struct cdp_vdev *vdev,
6912 				 struct cdp_txrx_stats_req *req)
6913 {
6914 	int host_stats;
6915 	int fw_stats;
6916 	enum cdp_stats stats;
6917 	int num_stats;
6918 
6919 	if (!vdev || !req) {
6920 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
6921 				"Invalid vdev/req instance");
6922 		return QDF_STATUS_E_INVAL;
6923 	}
6924 
6925 	stats = req->stats;
6926 	if (stats >= CDP_TXRX_MAX_STATS)
6927 		return QDF_STATUS_E_INVAL;
6928 
6929 	/*
6930 	 * DP_CURR_FW_STATS_AVAIL: no of FW stats currently available
6931 	 *			has to be updated if new FW HTT stats added
6932 	 */
6933 	if (stats > CDP_TXRX_STATS_HTT_MAX)
6934 		stats = stats + DP_CURR_FW_STATS_AVAIL - DP_HTT_DBG_EXT_STATS_MAX;
6935 
6936 	num_stats  = QDF_ARRAY_SIZE(dp_stats_mapping_table);
6937 
6938 	if (stats >= num_stats) {
6939 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
6940 			  "%s: Invalid stats option: %d", __func__, stats);
6941 		return QDF_STATUS_E_INVAL;
6942 	}
6943 
6944 	req->stats = stats;
6945 	fw_stats = dp_stats_mapping_table[stats][STATS_FW];
6946 	host_stats = dp_stats_mapping_table[stats][STATS_HOST];
6947 
6948 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
6949 		 "stats: %u fw_stats_type: %d host_stats: %d",
6950 		  stats, fw_stats, host_stats);
6951 
6952 	if (fw_stats != TXRX_FW_STATS_INVALID) {
6953 		/* update request with FW stats type */
6954 		req->stats = fw_stats;
6955 		return dp_fw_stats_process(vdev, req);
6956 	}
6957 
6958 	if ((host_stats != TXRX_HOST_STATS_INVALID) &&
6959 			(host_stats <= TXRX_HOST_STATS_MAX))
6960 		return dp_print_host_stats(vdev, req);
6961 	else
6962 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
6963 				"Wrong Input for TxRx Stats");
6964 
6965 	return QDF_STATUS_SUCCESS;
6966 }
6967 
6968 /*
6969  * dp_print_napi_stats(): NAPI stats
6970  * @soc - soc handle
6971  */
6972 static void dp_print_napi_stats(struct dp_soc *soc)
6973 {
6974 	hif_print_napi_stats(soc->hif_handle);
6975 }
6976 
6977 /*
6978  * dp_print_per_ring_stats(): Packet count per ring
6979  * @soc - soc handle
6980  */
6981 static void dp_print_per_ring_stats(struct dp_soc *soc)
6982 {
6983 	uint8_t ring;
6984 	uint16_t core;
6985 	uint64_t total_packets;
6986 
6987 	DP_TRACE_STATS(INFO_HIGH, "Reo packets per ring:");
6988 	for (ring = 0; ring < MAX_REO_DEST_RINGS; ring++) {
6989 		total_packets = 0;
6990 		DP_TRACE_STATS(INFO_HIGH,
6991 			       "Packets on ring %u:", ring);
6992 		for (core = 0; core < NR_CPUS; core++) {
6993 			DP_TRACE_STATS(INFO_HIGH,
6994 				       "Packets arriving on core %u: %llu",
6995 				       core,
6996 				       soc->stats.rx.ring_packets[core][ring]);
6997 			total_packets += soc->stats.rx.ring_packets[core][ring];
6998 		}
6999 		DP_TRACE_STATS(INFO_HIGH,
7000 			       "Total packets on ring %u: %llu",
7001 			       ring, total_packets);
7002 	}
7003 }
7004 
7005 /*
7006  * dp_txrx_path_stats() - Function to display dump stats
7007  * @soc - soc handle
7008  *
7009  * return: none
7010  */
7011 static void dp_txrx_path_stats(struct dp_soc *soc)
7012 {
7013 	uint8_t error_code;
7014 	uint8_t loop_pdev;
7015 	struct dp_pdev *pdev;
7016 	uint8_t i;
7017 
7018 	if (!soc) {
7019 		DP_TRACE(ERROR, "%s: Invalid access",
7020 			 __func__);
7021 		return;
7022 	}
7023 
7024 	for (loop_pdev = 0; loop_pdev < soc->pdev_count; loop_pdev++) {
7025 
7026 		pdev = soc->pdev_list[loop_pdev];
7027 		dp_aggregate_pdev_stats(pdev);
7028 		DP_TRACE_STATS(INFO_HIGH, "Tx path Statistics:");
7029 		DP_TRACE_STATS(INFO_HIGH, "from stack: %u msdus (%llu bytes)",
7030 			       pdev->stats.tx_i.rcvd.num,
7031 			       pdev->stats.tx_i.rcvd.bytes);
7032 		DP_TRACE_STATS(INFO_HIGH,
7033 			       "processed from host: %u msdus (%llu bytes)",
7034 			       pdev->stats.tx_i.processed.num,
7035 			       pdev->stats.tx_i.processed.bytes);
7036 		DP_TRACE_STATS(INFO_HIGH,
7037 			       "successfully transmitted: %u msdus (%llu bytes)",
7038 			       pdev->stats.tx.tx_success.num,
7039 			       pdev->stats.tx.tx_success.bytes);
7040 
7041 		DP_TRACE_STATS(INFO_HIGH, "Dropped in host:");
7042 		DP_TRACE_STATS(INFO_HIGH, "Total packets dropped: %u,",
7043 			       pdev->stats.tx_i.dropped.dropped_pkt.num);
7044 		DP_TRACE_STATS(INFO_HIGH, "Descriptor not available: %u",
7045 			       pdev->stats.tx_i.dropped.desc_na.num);
7046 		DP_TRACE_STATS(INFO_HIGH, "Ring full: %u",
7047 			       pdev->stats.tx_i.dropped.ring_full);
7048 		DP_TRACE_STATS(INFO_HIGH, "Enqueue fail: %u",
7049 			       pdev->stats.tx_i.dropped.enqueue_fail);
7050 		DP_TRACE_STATS(INFO_HIGH, "DMA Error: %u",
7051 			       pdev->stats.tx_i.dropped.dma_error);
7052 
7053 		DP_TRACE_STATS(INFO_HIGH, "Dropped in hardware:");
7054 		DP_TRACE_STATS(INFO_HIGH, "total packets dropped: %u",
7055 			       pdev->stats.tx.tx_failed);
7056 		DP_TRACE_STATS(INFO_HIGH, "mpdu age out: %u",
7057 			       pdev->stats.tx.dropped.age_out);
7058 		DP_TRACE_STATS(INFO_HIGH, "firmware removed: %u",
7059 			       pdev->stats.tx.dropped.fw_rem);
7060 		DP_TRACE_STATS(INFO_HIGH, "firmware removed tx: %u",
7061 			       pdev->stats.tx.dropped.fw_rem_tx);
7062 		DP_TRACE_STATS(INFO_HIGH, "firmware removed notx %u",
7063 			       pdev->stats.tx.dropped.fw_rem_notx);
7064 		DP_TRACE_STATS(INFO_HIGH, "peer_invalid: %u",
7065 			       pdev->soc->stats.tx.tx_invalid_peer.num);
7066 
7067 		DP_TRACE_STATS(INFO_HIGH, "Tx packets sent per interrupt:");
7068 		DP_TRACE_STATS(INFO_HIGH, "Single Packet: %u",
7069 			       pdev->stats.tx_comp_histogram.pkts_1);
7070 		DP_TRACE_STATS(INFO_HIGH, "2-20 Packets:  %u",
7071 			       pdev->stats.tx_comp_histogram.pkts_2_20);
7072 		DP_TRACE_STATS(INFO_HIGH, "21-40 Packets: %u",
7073 			       pdev->stats.tx_comp_histogram.pkts_21_40);
7074 		DP_TRACE_STATS(INFO_HIGH, "41-60 Packets: %u",
7075 			       pdev->stats.tx_comp_histogram.pkts_41_60);
7076 		DP_TRACE_STATS(INFO_HIGH, "61-80 Packets: %u",
7077 			       pdev->stats.tx_comp_histogram.pkts_61_80);
7078 		DP_TRACE_STATS(INFO_HIGH, "81-100 Packets: %u",
7079 			       pdev->stats.tx_comp_histogram.pkts_81_100);
7080 		DP_TRACE_STATS(INFO_HIGH, "101-200 Packets: %u",
7081 			       pdev->stats.tx_comp_histogram.pkts_101_200);
7082 		DP_TRACE_STATS(INFO_HIGH, "   201+ Packets: %u",
7083 			       pdev->stats.tx_comp_histogram.pkts_201_plus);
7084 
7085 		DP_TRACE_STATS(INFO_HIGH, "Rx path statistics");
7086 
7087 		DP_TRACE_STATS(INFO_HIGH,
7088 			       "delivered %u msdus ( %llu bytes),",
7089 			       pdev->stats.rx.to_stack.num,
7090 			       pdev->stats.rx.to_stack.bytes);
7091 		for (i = 0; i <  CDP_MAX_RX_RINGS; i++)
7092 			DP_TRACE_STATS(INFO_HIGH,
7093 				       "received on reo[%d] %u msdus( %llu bytes),",
7094 				       i, pdev->stats.rx.rcvd_reo[i].num,
7095 				       pdev->stats.rx.rcvd_reo[i].bytes);
7096 		DP_TRACE_STATS(INFO_HIGH,
7097 			       "intra-bss packets %u msdus ( %llu bytes),",
7098 			       pdev->stats.rx.intra_bss.pkts.num,
7099 			       pdev->stats.rx.intra_bss.pkts.bytes);
7100 		DP_TRACE_STATS(INFO_HIGH,
7101 			       "intra-bss fails %u msdus ( %llu bytes),",
7102 			       pdev->stats.rx.intra_bss.fail.num,
7103 			       pdev->stats.rx.intra_bss.fail.bytes);
7104 		DP_TRACE_STATS(INFO_HIGH,
7105 			       "raw packets %u msdus ( %llu bytes),",
7106 			       pdev->stats.rx.raw.num,
7107 			       pdev->stats.rx.raw.bytes);
7108 		DP_TRACE_STATS(INFO_HIGH, "dropped: error %u msdus",
7109 			       pdev->stats.rx.err.mic_err);
7110 		DP_TRACE_STATS(INFO_HIGH, "peer invalid %u",
7111 			       pdev->soc->stats.rx.err.rx_invalid_peer.num);
7112 
7113 		DP_TRACE_STATS(INFO_HIGH, "Reo Statistics");
7114 		DP_TRACE_STATS(INFO_HIGH, "rbm error: %u msdus",
7115 			       pdev->soc->stats.rx.err.invalid_rbm);
7116 		DP_TRACE_STATS(INFO_HIGH, "hal ring access fail: %u msdus",
7117 			       pdev->soc->stats.rx.err.hal_ring_access_fail);
7118 
7119 		for (error_code = 0; error_code < HAL_REO_ERR_MAX;
7120 				error_code++) {
7121 			if (!pdev->soc->stats.rx.err.reo_error[error_code])
7122 				continue;
7123 			DP_TRACE_STATS(INFO_HIGH,
7124 				       "Reo error number (%u): %u msdus",
7125 				       error_code,
7126 				       pdev->soc->stats.rx.err
7127 				       .reo_error[error_code]);
7128 		}
7129 
7130 		for (error_code = 0; error_code < HAL_RXDMA_ERR_MAX;
7131 				error_code++) {
7132 			if (!pdev->soc->stats.rx.err.rxdma_error[error_code])
7133 				continue;
7134 			DP_TRACE_STATS(INFO_HIGH,
7135 				       "Rxdma error number (%u): %u msdus",
7136 				       error_code,
7137 				       pdev->soc->stats.rx.err
7138 				       .rxdma_error[error_code]);
7139 		}
7140 
7141 		DP_TRACE_STATS(INFO_HIGH, "Rx packets reaped per interrupt:");
7142 		DP_TRACE_STATS(INFO_HIGH, "Single Packet: %u",
7143 			       pdev->stats.rx_ind_histogram.pkts_1);
7144 		DP_TRACE_STATS(INFO_HIGH, "2-20 Packets:  %u",
7145 			       pdev->stats.rx_ind_histogram.pkts_2_20);
7146 		DP_TRACE_STATS(INFO_HIGH, "21-40 Packets: %u",
7147 			       pdev->stats.rx_ind_histogram.pkts_21_40);
7148 		DP_TRACE_STATS(INFO_HIGH, "41-60 Packets: %u",
7149 			       pdev->stats.rx_ind_histogram.pkts_41_60);
7150 		DP_TRACE_STATS(INFO_HIGH, "61-80 Packets: %u",
7151 			       pdev->stats.rx_ind_histogram.pkts_61_80);
7152 		DP_TRACE_STATS(INFO_HIGH, "81-100 Packets: %u",
7153 			       pdev->stats.rx_ind_histogram.pkts_81_100);
7154 		DP_TRACE_STATS(INFO_HIGH, "101-200 Packets: %u",
7155 			       pdev->stats.rx_ind_histogram.pkts_101_200);
7156 		DP_TRACE_STATS(INFO_HIGH, "   201+ Packets: %u",
7157 			       pdev->stats.rx_ind_histogram.pkts_201_plus);
7158 
7159 		DP_TRACE_STATS(INFO_HIGH, "%s: tso_enable: %u lro_enable: %u rx_hash: %u napi_enable: %u",
7160 			       __func__,
7161 			       pdev->soc->wlan_cfg_ctx
7162 			       ->tso_enabled,
7163 			       pdev->soc->wlan_cfg_ctx
7164 			       ->lro_enabled,
7165 			       pdev->soc->wlan_cfg_ctx
7166 			       ->rx_hash,
7167 			       pdev->soc->wlan_cfg_ctx
7168 			       ->napi_enabled);
7169 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
7170 		DP_TRACE_STATS(INFO_HIGH, "%s: Tx flow stop queue: %u tx flow start queue offset: %u",
7171 			       __func__,
7172 			       pdev->soc->wlan_cfg_ctx
7173 			       ->tx_flow_stop_queue_threshold,
7174 			       pdev->soc->wlan_cfg_ctx
7175 			       ->tx_flow_start_queue_offset);
7176 #endif
7177 	}
7178 }
7179 
7180 /*
7181  * dp_txrx_dump_stats() -  Dump statistics
7182  * @value - Statistics option
7183  */
7184 static QDF_STATUS dp_txrx_dump_stats(void *psoc, uint16_t value,
7185 				     enum qdf_stats_verbosity_level level)
7186 {
7187 	struct dp_soc *soc =
7188 		(struct dp_soc *)psoc;
7189 	QDF_STATUS status = QDF_STATUS_SUCCESS;
7190 
7191 	if (!soc) {
7192 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
7193 			"%s: soc is NULL", __func__);
7194 		return QDF_STATUS_E_INVAL;
7195 	}
7196 
7197 	switch (value) {
7198 	case CDP_TXRX_PATH_STATS:
7199 		dp_txrx_path_stats(soc);
7200 		break;
7201 
7202 	case CDP_RX_RING_STATS:
7203 		dp_print_per_ring_stats(soc);
7204 		break;
7205 
7206 	case CDP_TXRX_TSO_STATS:
7207 		/* TODO: NOT IMPLEMENTED */
7208 		break;
7209 
7210 	case CDP_DUMP_TX_FLOW_POOL_INFO:
7211 		cdp_dump_flow_pool_info((struct cdp_soc_t *)soc);
7212 		break;
7213 
7214 	case CDP_DP_NAPI_STATS:
7215 		dp_print_napi_stats(soc);
7216 		break;
7217 
7218 	case CDP_TXRX_DESC_STATS:
7219 		/* TODO: NOT IMPLEMENTED */
7220 		break;
7221 
7222 	default:
7223 		status = QDF_STATUS_E_INVAL;
7224 		break;
7225 	}
7226 
7227 	return status;
7228 
7229 }
7230 
7231 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
7232 /**
7233  * dp_update_flow_control_parameters() - API to store datapath
7234  *                            config parameters
7235  * @soc: soc handle
7236  * @cfg: ini parameter handle
7237  *
7238  * Return: void
7239  */
7240 static inline
7241 void dp_update_flow_control_parameters(struct dp_soc *soc,
7242 				struct cdp_config_params *params)
7243 {
7244 	soc->wlan_cfg_ctx->tx_flow_stop_queue_threshold =
7245 					params->tx_flow_stop_queue_threshold;
7246 	soc->wlan_cfg_ctx->tx_flow_start_queue_offset =
7247 					params->tx_flow_start_queue_offset;
7248 }
7249 #else
7250 static inline
7251 void dp_update_flow_control_parameters(struct dp_soc *soc,
7252 				struct cdp_config_params *params)
7253 {
7254 }
7255 #endif
7256 
7257 /**
7258  * dp_update_config_parameters() - API to store datapath
7259  *                            config parameters
7260  * @soc: soc handle
7261  * @cfg: ini parameter handle
7262  *
7263  * Return: status
7264  */
7265 static
7266 QDF_STATUS dp_update_config_parameters(struct cdp_soc *psoc,
7267 				struct cdp_config_params *params)
7268 {
7269 	struct dp_soc *soc = (struct dp_soc *)psoc;
7270 
7271 	if (!(soc)) {
7272 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
7273 				"%s: Invalid handle", __func__);
7274 		return QDF_STATUS_E_INVAL;
7275 	}
7276 
7277 	soc->wlan_cfg_ctx->tso_enabled = params->tso_enable;
7278 	soc->wlan_cfg_ctx->lro_enabled = params->lro_enable;
7279 	soc->wlan_cfg_ctx->rx_hash = params->flow_steering_enable;
7280 	soc->wlan_cfg_ctx->tcp_udp_checksumoffload =
7281 				params->tcp_udp_checksumoffload;
7282 	soc->wlan_cfg_ctx->napi_enabled = params->napi_enable;
7283 	dp_update_flow_control_parameters(soc, params);
7284 
7285 	return QDF_STATUS_SUCCESS;
7286 }
7287 
7288 /**
7289  * dp_txrx_set_wds_rx_policy() - API to store datapath
7290  *                            config parameters
7291  * @vdev_handle - datapath vdev handle
7292  * @cfg: ini parameter handle
7293  *
7294  * Return: status
7295  */
7296 #ifdef WDS_VENDOR_EXTENSION
7297 void
7298 dp_txrx_set_wds_rx_policy(
7299 		struct cdp_vdev *vdev_handle,
7300 		u_int32_t val)
7301 {
7302 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
7303 	struct dp_peer *peer;
7304 	if (vdev->opmode == wlan_op_mode_ap) {
7305 		/* for ap, set it on bss_peer */
7306 		TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
7307 			if (peer->bss_peer) {
7308 				peer->wds_ecm.wds_rx_filter = 1;
7309 				peer->wds_ecm.wds_rx_ucast_4addr = (val & WDS_POLICY_RX_UCAST_4ADDR) ? 1:0;
7310 				peer->wds_ecm.wds_rx_mcast_4addr = (val & WDS_POLICY_RX_MCAST_4ADDR) ? 1:0;
7311 				break;
7312 			}
7313 		}
7314 	} else if (vdev->opmode == wlan_op_mode_sta) {
7315 		peer = TAILQ_FIRST(&vdev->peer_list);
7316 		peer->wds_ecm.wds_rx_filter = 1;
7317 		peer->wds_ecm.wds_rx_ucast_4addr = (val & WDS_POLICY_RX_UCAST_4ADDR) ? 1:0;
7318 		peer->wds_ecm.wds_rx_mcast_4addr = (val & WDS_POLICY_RX_MCAST_4ADDR) ? 1:0;
7319 	}
7320 }
7321 
7322 /**
7323  * dp_txrx_peer_wds_tx_policy_update() - API to set tx wds policy
7324  *
7325  * @peer_handle - datapath peer handle
7326  * @wds_tx_ucast: policy for unicast transmission
7327  * @wds_tx_mcast: policy for multicast transmission
7328  *
7329  * Return: void
7330  */
7331 void
7332 dp_txrx_peer_wds_tx_policy_update(struct cdp_peer *peer_handle,
7333 		int wds_tx_ucast, int wds_tx_mcast)
7334 {
7335 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
7336 	if (wds_tx_ucast || wds_tx_mcast) {
7337 		peer->wds_enabled = 1;
7338 		peer->wds_ecm.wds_tx_ucast_4addr = wds_tx_ucast;
7339 		peer->wds_ecm.wds_tx_mcast_4addr = wds_tx_mcast;
7340 	} else {
7341 		peer->wds_enabled = 0;
7342 		peer->wds_ecm.wds_tx_ucast_4addr = 0;
7343 		peer->wds_ecm.wds_tx_mcast_4addr = 0;
7344 	}
7345 
7346 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
7347 			FL("Policy Update set to :\
7348 				peer->wds_enabled %d\
7349 				peer->wds_ecm.wds_tx_ucast_4addr %d\
7350 				peer->wds_ecm.wds_tx_mcast_4addr %d"),
7351 				peer->wds_enabled, peer->wds_ecm.wds_tx_ucast_4addr,
7352 				peer->wds_ecm.wds_tx_mcast_4addr);
7353 	return;
7354 }
7355 #endif
7356 
7357 static struct cdp_wds_ops dp_ops_wds = {
7358 	.vdev_set_wds = dp_vdev_set_wds,
7359 #ifdef WDS_VENDOR_EXTENSION
7360 	.txrx_set_wds_rx_policy = dp_txrx_set_wds_rx_policy,
7361 	.txrx_wds_peer_tx_policy_update = dp_txrx_peer_wds_tx_policy_update,
7362 #endif
7363 };
7364 
7365 /*
7366  * dp_txrx_data_tx_cb_set(): set the callback for non standard tx
7367  * @vdev_handle - datapath vdev handle
7368  * @callback - callback function
7369  * @ctxt: callback context
7370  *
7371  */
7372 static void
7373 dp_txrx_data_tx_cb_set(struct cdp_vdev *vdev_handle,
7374 		       ol_txrx_data_tx_cb callback, void *ctxt)
7375 {
7376 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
7377 
7378 	vdev->tx_non_std_data_callback.func = callback;
7379 	vdev->tx_non_std_data_callback.ctxt = ctxt;
7380 }
7381 
7382 /**
7383  * dp_pdev_get_dp_txrx_handle() - get dp handle from pdev
7384  * @pdev_hdl: datapath pdev handle
7385  *
7386  * Return: opaque pointer to dp txrx handle
7387  */
7388 static void *dp_pdev_get_dp_txrx_handle(struct cdp_pdev *pdev_hdl)
7389 {
7390 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_hdl;
7391 
7392 	return pdev->dp_txrx_handle;
7393 }
7394 
7395 /**
7396  * dp_pdev_set_dp_txrx_handle() - set dp handle in pdev
7397  * @pdev_hdl: datapath pdev handle
7398  * @dp_txrx_hdl: opaque pointer for dp_txrx_handle
7399  *
7400  * Return: void
7401  */
7402 static void
7403 dp_pdev_set_dp_txrx_handle(struct cdp_pdev *pdev_hdl, void *dp_txrx_hdl)
7404 {
7405 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_hdl;
7406 
7407 	pdev->dp_txrx_handle = dp_txrx_hdl;
7408 }
7409 
7410 /**
7411  * dp_soc_get_dp_txrx_handle() - get context for external-dp from dp soc
7412  * @soc_handle: datapath soc handle
7413  *
7414  * Return: opaque pointer to external dp (non-core DP)
7415  */
7416 static void *dp_soc_get_dp_txrx_handle(struct cdp_soc *soc_handle)
7417 {
7418 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
7419 
7420 	return soc->external_txrx_handle;
7421 }
7422 
7423 /**
7424  * dp_soc_set_dp_txrx_handle() - set external dp handle in soc
7425  * @soc_handle: datapath soc handle
7426  * @txrx_handle: opaque pointer to external dp (non-core DP)
7427  *
7428  * Return: void
7429  */
7430 static void
7431 dp_soc_set_dp_txrx_handle(struct cdp_soc *soc_handle, void *txrx_handle)
7432 {
7433 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
7434 
7435 	soc->external_txrx_handle = txrx_handle;
7436 }
7437 
7438 /**
7439  * dp_get_cfg_capabilities() - get dp capabilities
7440  * @soc_handle: datapath soc handle
7441  * @dp_caps: enum for dp capabilities
7442  *
7443  * Return: bool to determine if dp caps is enabled
7444  */
7445 static bool
7446 dp_get_cfg_capabilities(struct cdp_soc_t *soc_handle,
7447 			enum cdp_capabilities dp_caps)
7448 {
7449 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
7450 
7451 	return wlan_cfg_get_dp_caps(soc->wlan_cfg_ctx, dp_caps);
7452 }
7453 
7454 #ifdef FEATURE_AST
7455 static void dp_peer_teardown_wifi3(struct cdp_vdev *vdev_hdl, void *peer_hdl)
7456 {
7457 	struct dp_vdev *vdev = (struct dp_vdev *) vdev_hdl;
7458 	struct dp_peer *peer = (struct dp_peer *) peer_hdl;
7459 	struct dp_soc *soc = (struct dp_soc *) vdev->pdev->soc;
7460 
7461 	/*
7462 	 * For BSS peer, new peer is not created on alloc_node if the
7463 	 * peer with same address already exists , instead refcnt is
7464 	 * increased for existing peer. Correspondingly in delete path,
7465 	 * only refcnt is decreased; and peer is only deleted , when all
7466 	 * references are deleted. So delete_in_progress should not be set
7467 	 * for bss_peer, unless only 2 reference remains (peer map reference
7468 	 * and peer hash table reference).
7469 	 */
7470 	if (peer->bss_peer && (qdf_atomic_read(&peer->ref_cnt) > 2)) {
7471 		return;
7472 	}
7473 
7474 	peer->delete_in_progress = true;
7475 	dp_peer_delete_ast_entries(soc, peer);
7476 }
7477 #endif
7478 
7479 #ifdef ATH_SUPPORT_NAC_RSSI
7480 /**
7481  * dp_vdev_get_neighbour_rssi(): Store RSSI for configured NAC
7482  * @vdev_hdl: DP vdev handle
7483  * @rssi: rssi value
7484  *
7485  * Return: 0 for success. nonzero for failure.
7486  */
7487 QDF_STATUS  dp_vdev_get_neighbour_rssi(struct cdp_vdev *vdev_hdl,
7488 				       char *mac_addr,
7489 				       uint8_t *rssi)
7490 {
7491 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_hdl;
7492 	struct dp_pdev *pdev = vdev->pdev;
7493 	struct dp_neighbour_peer *peer = NULL;
7494 	QDF_STATUS status = QDF_STATUS_E_FAILURE;
7495 
7496 	*rssi = 0;
7497 	qdf_spin_lock_bh(&pdev->neighbour_peer_mutex);
7498 	TAILQ_FOREACH(peer, &pdev->neighbour_peers_list,
7499 		      neighbour_peer_list_elem) {
7500 		if (qdf_mem_cmp(&peer->neighbour_peers_macaddr.raw[0],
7501 				mac_addr, DP_MAC_ADDR_LEN) == 0) {
7502 			*rssi = peer->rssi;
7503 			status = QDF_STATUS_SUCCESS;
7504 			break;
7505 		}
7506 	}
7507 	qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex);
7508 	return status;
7509 }
7510 
7511 static QDF_STATUS dp_config_for_nac_rssi(struct cdp_vdev *vdev_handle,
7512 		enum cdp_nac_param_cmd cmd, char *bssid, char *client_macaddr,
7513 		uint8_t chan_num)
7514 {
7515 
7516 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
7517 	struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;
7518 	struct dp_soc *soc = (struct dp_soc *) vdev->pdev->soc;
7519 
7520 	pdev->nac_rssi_filtering = 1;
7521 	/* Store address of NAC (neighbour peer) which will be checked
7522 	 * against TA of received packets.
7523 	 */
7524 
7525 	if (cmd == CDP_NAC_PARAM_ADD) {
7526 		dp_update_filter_neighbour_peers(vdev_handle, DP_NAC_PARAM_ADD,
7527 						 client_macaddr);
7528 	} else if (cmd == CDP_NAC_PARAM_DEL) {
7529 		dp_update_filter_neighbour_peers(vdev_handle,
7530 						 DP_NAC_PARAM_DEL,
7531 						 client_macaddr);
7532 	}
7533 
7534 	if (soc->cdp_soc.ol_ops->config_bssid_in_fw_for_nac_rssi)
7535 		soc->cdp_soc.ol_ops->config_bssid_in_fw_for_nac_rssi
7536 			((void *)vdev->pdev->ctrl_pdev,
7537 			 vdev->vdev_id, cmd, bssid);
7538 
7539 	return QDF_STATUS_SUCCESS;
7540 }
7541 #endif
7542 
7543 static QDF_STATUS dp_peer_map_attach_wifi3(struct cdp_soc_t  *soc_hdl,
7544 					   uint32_t max_peers,
7545 					   bool peer_map_unmap_v2)
7546 {
7547 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
7548 
7549 	soc->max_peers = max_peers;
7550 
7551 	qdf_print ("%s max_peers %u\n", __func__, max_peers);
7552 
7553 	if (dp_peer_find_attach(soc))
7554 		return QDF_STATUS_E_FAILURE;
7555 
7556 	soc->is_peer_map_unmap_v2 = peer_map_unmap_v2;
7557 
7558 	return QDF_STATUS_SUCCESS;
7559 }
7560 
7561 /**
7562  * dp_pdev_set_ctrl_pdev() - set ctrl pdev handle in dp pdev
7563  * @dp_pdev: dp pdev handle
7564  * @ctrl_pdev: UMAC ctrl pdev handle
7565  *
7566  * Return: void
7567  */
7568 static void dp_pdev_set_ctrl_pdev(struct cdp_pdev *dp_pdev,
7569 				  struct cdp_ctrl_objmgr_pdev *ctrl_pdev)
7570 {
7571 	struct dp_pdev *pdev = (struct dp_pdev *)dp_pdev;
7572 
7573 	pdev->ctrl_pdev = ctrl_pdev;
7574 }
7575 
7576 /*
7577  * dp_get_cfg() - get dp cfg
7578  * @soc: cdp soc handle
7579  * @cfg: cfg enum
7580  *
7581  * Return: cfg value
7582  */
7583 static uint32_t dp_get_cfg(void *soc, enum cdp_dp_cfg cfg)
7584 {
7585 	struct dp_soc *dpsoc = (struct dp_soc *)soc;
7586 	uint32_t value = 0;
7587 
7588 	switch (cfg) {
7589 	case cfg_dp_enable_data_stall:
7590 		value = dpsoc->wlan_cfg_ctx->enable_data_stall_detection;
7591 		break;
7592 	case cfg_dp_enable_ip_tcp_udp_checksum_offload:
7593 		value = dpsoc->wlan_cfg_ctx->tcp_udp_checksumoffload;
7594 		break;
7595 	case cfg_dp_tso_enable:
7596 		value = dpsoc->wlan_cfg_ctx->tso_enabled;
7597 		break;
7598 	case cfg_dp_lro_enable:
7599 		value = dpsoc->wlan_cfg_ctx->lro_enabled;
7600 		break;
7601 	case cfg_dp_gro_enable:
7602 		value = dpsoc->wlan_cfg_ctx->gro_enabled;
7603 		break;
7604 	case cfg_dp_tx_flow_start_queue_offset:
7605 		value = dpsoc->wlan_cfg_ctx->tx_flow_start_queue_offset;
7606 		break;
7607 	case cfg_dp_tx_flow_stop_queue_threshold:
7608 		value = dpsoc->wlan_cfg_ctx->tx_flow_stop_queue_threshold;
7609 		break;
7610 	case cfg_dp_disable_intra_bss_fwd:
7611 		value = dpsoc->wlan_cfg_ctx->disable_intra_bss_fwd;
7612 		break;
7613 	default:
7614 		value =  0;
7615 	}
7616 
7617 	return value;
7618 }
7619 
7620 static struct cdp_cmn_ops dp_ops_cmn = {
7621 	.txrx_soc_attach_target = dp_soc_attach_target_wifi3,
7622 	.txrx_vdev_attach = dp_vdev_attach_wifi3,
7623 	.txrx_vdev_detach = dp_vdev_detach_wifi3,
7624 	.txrx_pdev_attach = dp_pdev_attach_wifi3,
7625 	.txrx_pdev_detach = dp_pdev_detach_wifi3,
7626 	.txrx_peer_create = dp_peer_create_wifi3,
7627 	.txrx_peer_setup = dp_peer_setup_wifi3,
7628 #ifdef FEATURE_AST
7629 	.txrx_peer_teardown = dp_peer_teardown_wifi3,
7630 #else
7631 	.txrx_peer_teardown = NULL,
7632 #endif
7633 	.txrx_peer_add_ast = dp_peer_add_ast_wifi3,
7634 	.txrx_peer_del_ast = dp_peer_del_ast_wifi3,
7635 	.txrx_peer_update_ast = dp_peer_update_ast_wifi3,
7636 	.txrx_peer_ast_hash_find_soc = dp_peer_ast_hash_find_soc_wifi3,
7637 	.txrx_peer_ast_hash_find_by_pdevid =
7638 		dp_peer_ast_hash_find_by_pdevid_wifi3,
7639 	.txrx_peer_ast_get_pdev_id = dp_peer_ast_get_pdev_id_wifi3,
7640 	.txrx_peer_ast_get_next_hop = dp_peer_ast_get_next_hop_wifi3,
7641 	.txrx_peer_ast_set_type = dp_peer_ast_set_type_wifi3,
7642 	.txrx_peer_ast_get_type = dp_peer_ast_get_type_wifi3,
7643 	.txrx_peer_ast_get_peer = dp_peer_ast_get_peer_wifi3,
7644 	.txrx_peer_ast_get_nexthop_peer_id =
7645 		dp_peer_ast_get_nexhop_peer_id_wifi3,
7646 #if defined(FEATURE_AST) && defined(AST_HKV1_WORKAROUND)
7647 	.txrx_peer_ast_set_cp_ctx = dp_peer_ast_set_cp_ctx_wifi3,
7648 	.txrx_peer_ast_get_cp_ctx = dp_peer_ast_get_cp_ctx_wifi3,
7649 	.txrx_peer_ast_get_wmi_sent = dp_peer_ast_get_wmi_sent_wifi3,
7650 	.txrx_peer_ast_free_entry = dp_peer_ast_free_entry_wifi3,
7651 #endif
7652 	.txrx_peer_delete = dp_peer_delete_wifi3,
7653 	.txrx_vdev_register = dp_vdev_register_wifi3,
7654 	.txrx_soc_detach = dp_soc_detach_wifi3,
7655 	.txrx_get_vdev_mac_addr = dp_get_vdev_mac_addr_wifi3,
7656 	.txrx_get_vdev_from_vdev_id = dp_get_vdev_from_vdev_id_wifi3,
7657 	.txrx_get_ctrl_pdev_from_vdev = dp_get_ctrl_pdev_from_vdev_wifi3,
7658 	.txrx_ath_getstats = dp_get_device_stats,
7659 	.addba_requestprocess = dp_addba_requestprocess_wifi3,
7660 	.addba_responsesetup = dp_addba_responsesetup_wifi3,
7661 	.addba_resp_tx_completion = dp_addba_resp_tx_completion_wifi3,
7662 	.delba_process = dp_delba_process_wifi3,
7663 	.set_addba_response = dp_set_addba_response,
7664 	.get_peer_mac_addr_frm_id = dp_get_peer_mac_addr_frm_id,
7665 	.flush_cache_rx_queue = NULL,
7666 	/* TODO: get API's for dscp-tid need to be added*/
7667 	.set_vdev_dscp_tid_map = dp_set_vdev_dscp_tid_map_wifi3,
7668 	.set_pdev_dscp_tid_map = dp_set_pdev_dscp_tid_map_wifi3,
7669 	.txrx_get_total_per = dp_get_total_per,
7670 	.txrx_stats_request = dp_txrx_stats_request,
7671 	.txrx_set_monitor_mode = dp_vdev_set_monitor_mode,
7672 	.txrx_get_pdev_id_frm_pdev = dp_get_pdev_id_frm_pdev,
7673 	.txrx_pdev_set_chan_noise_floor = dp_pdev_set_chan_noise_floor,
7674 	.txrx_set_nac = dp_set_nac,
7675 	.txrx_get_tx_pending = dp_get_tx_pending,
7676 	.txrx_set_pdev_tx_capture = dp_config_debug_sniffer,
7677 	.txrx_get_peer_mac_from_peer_id = dp_get_peer_mac_from_peer_id,
7678 	.display_stats = dp_txrx_dump_stats,
7679 	.txrx_soc_set_nss_cfg = dp_soc_set_nss_cfg_wifi3,
7680 	.txrx_soc_get_nss_cfg = dp_soc_get_nss_cfg_wifi3,
7681 	.txrx_intr_attach = dp_soc_interrupt_attach_wrapper,
7682 	.txrx_intr_detach = dp_soc_interrupt_detach,
7683 	.set_pn_check = dp_set_pn_check_wifi3,
7684 	.update_config_parameters = dp_update_config_parameters,
7685 	/* TODO: Add other functions */
7686 	.txrx_data_tx_cb_set = dp_txrx_data_tx_cb_set,
7687 	.get_dp_txrx_handle = dp_pdev_get_dp_txrx_handle,
7688 	.set_dp_txrx_handle = dp_pdev_set_dp_txrx_handle,
7689 	.get_soc_dp_txrx_handle = dp_soc_get_dp_txrx_handle,
7690 	.set_soc_dp_txrx_handle = dp_soc_set_dp_txrx_handle,
7691 	.txrx_set_ba_aging_timeout = dp_set_ba_aging_timeout,
7692 	.txrx_get_ba_aging_timeout = dp_get_ba_aging_timeout,
7693 	.tx_send = dp_tx_send,
7694 	.txrx_peer_reset_ast = dp_wds_reset_ast_wifi3,
7695 	.txrx_peer_reset_ast_table = dp_wds_reset_ast_table_wifi3,
7696 	.txrx_peer_flush_ast_table = dp_wds_flush_ast_table_wifi3,
7697 	.txrx_peer_map_attach = dp_peer_map_attach_wifi3,
7698 	.txrx_pdev_set_ctrl_pdev = dp_pdev_set_ctrl_pdev,
7699 	.txrx_get_os_rx_handles_from_vdev =
7700 					dp_get_os_rx_handles_from_vdev_wifi3,
7701 	.delba_tx_completion = dp_delba_tx_completion_wifi3,
7702 	.get_dp_capabilities = dp_get_cfg_capabilities,
7703 	.txrx_get_cfg = dp_get_cfg,
7704 };
7705 
7706 static struct cdp_ctrl_ops dp_ops_ctrl = {
7707 	.txrx_peer_authorize = dp_peer_authorize,
7708 	.txrx_set_vdev_rx_decap_type = dp_set_vdev_rx_decap_type,
7709 	.txrx_set_tx_encap_type = dp_set_vdev_tx_encap_type,
7710 #ifdef MESH_MODE_SUPPORT
7711 	.txrx_set_mesh_mode  = dp_peer_set_mesh_mode,
7712 	.txrx_set_mesh_rx_filter = dp_peer_set_mesh_rx_filter,
7713 #endif
7714 	.txrx_set_vdev_param = dp_set_vdev_param,
7715 	.txrx_peer_set_nawds = dp_peer_set_nawds,
7716 	.txrx_set_pdev_reo_dest = dp_set_pdev_reo_dest,
7717 	.txrx_get_pdev_reo_dest = dp_get_pdev_reo_dest,
7718 	.txrx_set_filter_neighbour_peers = dp_set_filter_neighbour_peers,
7719 	.txrx_update_filter_neighbour_peers =
7720 		dp_update_filter_neighbour_peers,
7721 	.txrx_get_sec_type = dp_get_sec_type,
7722 	/* TODO: Add other functions */
7723 	.txrx_wdi_event_sub = dp_wdi_event_sub,
7724 	.txrx_wdi_event_unsub = dp_wdi_event_unsub,
7725 #ifdef WDI_EVENT_ENABLE
7726 	.txrx_get_pldev = dp_get_pldev,
7727 #endif
7728 	.txrx_set_pdev_param = dp_set_pdev_param,
7729 #ifdef ATH_SUPPORT_NAC_RSSI
7730 	.txrx_vdev_config_for_nac_rssi = dp_config_for_nac_rssi,
7731 	.txrx_vdev_get_neighbour_rssi = dp_vdev_get_neighbour_rssi,
7732 #endif
7733 	.set_key = dp_set_michael_key,
7734 };
7735 
7736 static struct cdp_me_ops dp_ops_me = {
7737 #ifdef ATH_SUPPORT_IQUE
7738 	.tx_me_alloc_descriptor = dp_tx_me_alloc_descriptor,
7739 	.tx_me_free_descriptor = dp_tx_me_free_descriptor,
7740 	.tx_me_convert_ucast = dp_tx_me_send_convert_ucast,
7741 #endif
7742 };
7743 
7744 static struct cdp_mon_ops dp_ops_mon = {
7745 	.txrx_monitor_set_filter_ucast_data = NULL,
7746 	.txrx_monitor_set_filter_mcast_data = NULL,
7747 	.txrx_monitor_set_filter_non_data = NULL,
7748 	.txrx_monitor_get_filter_ucast_data = dp_vdev_get_filter_ucast_data,
7749 	.txrx_monitor_get_filter_mcast_data = dp_vdev_get_filter_mcast_data,
7750 	.txrx_monitor_get_filter_non_data = dp_vdev_get_filter_non_data,
7751 	.txrx_reset_monitor_mode = dp_reset_monitor_mode,
7752 	/* Added support for HK advance filter */
7753 	.txrx_set_advance_monitor_filter = dp_pdev_set_advance_monitor_filter,
7754 };
7755 
7756 static struct cdp_host_stats_ops dp_ops_host_stats = {
7757 	.txrx_per_peer_stats = dp_get_host_peer_stats,
7758 	.get_fw_peer_stats = dp_get_fw_peer_stats,
7759 	.get_htt_stats = dp_get_htt_stats,
7760 	.txrx_enable_enhanced_stats = dp_enable_enhanced_stats,
7761 	.txrx_disable_enhanced_stats = dp_disable_enhanced_stats,
7762 	.txrx_stats_publish = dp_txrx_stats_publish,
7763 	.txrx_get_vdev_stats  = dp_txrx_get_vdev_stats,
7764 	.txrx_get_peer_stats = dp_txrx_get_peer_stats,
7765 	.txrx_reset_peer_stats = dp_txrx_reset_peer_stats,
7766 	/* TODO */
7767 };
7768 
7769 static struct cdp_raw_ops dp_ops_raw = {
7770 	/* TODO */
7771 };
7772 
7773 #ifdef CONFIG_WIN
7774 static struct cdp_pflow_ops dp_ops_pflow = {
7775 	/* TODO */
7776 };
7777 #endif /* CONFIG_WIN */
7778 
7779 #ifdef FEATURE_RUNTIME_PM
7780 /**
7781  * dp_runtime_suspend() - ensure DP is ready to runtime suspend
7782  * @opaque_pdev: DP pdev context
7783  *
7784  * DP is ready to runtime suspend if there are no pending TX packets.
7785  *
7786  * Return: QDF_STATUS
7787  */
7788 static QDF_STATUS dp_runtime_suspend(struct cdp_pdev *opaque_pdev)
7789 {
7790 	struct dp_pdev *pdev = (struct dp_pdev *)opaque_pdev;
7791 	struct dp_soc *soc = pdev->soc;
7792 
7793 	/* Abort if there are any pending TX packets */
7794 	if (dp_get_tx_pending(opaque_pdev) > 0) {
7795 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
7796 			  FL("Abort suspend due to pending TX packets"));
7797 		return QDF_STATUS_E_AGAIN;
7798 	}
7799 
7800 	if (soc->intr_mode == DP_INTR_POLL)
7801 		qdf_timer_stop(&soc->int_timer);
7802 
7803 	return QDF_STATUS_SUCCESS;
7804 }
7805 
7806 /**
7807  * dp_runtime_resume() - ensure DP is ready to runtime resume
7808  * @opaque_pdev: DP pdev context
7809  *
7810  * Resume DP for runtime PM.
7811  *
7812  * Return: QDF_STATUS
7813  */
7814 static QDF_STATUS dp_runtime_resume(struct cdp_pdev *opaque_pdev)
7815 {
7816 	struct dp_pdev *pdev = (struct dp_pdev *)opaque_pdev;
7817 	struct dp_soc *soc = pdev->soc;
7818 	void *hal_srng;
7819 	int i;
7820 
7821 	if (soc->intr_mode == DP_INTR_POLL)
7822 		qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
7823 
7824 	for (i = 0; i < MAX_TCL_DATA_RINGS; i++) {
7825 		hal_srng = soc->tcl_data_ring[i].hal_srng;
7826 		if (hal_srng) {
7827 			/* We actually only need to acquire the lock */
7828 			hal_srng_access_start(soc->hal_soc, hal_srng);
7829 			/* Update SRC ring head pointer for HW to send
7830 			   all pending packets */
7831 			hal_srng_access_end(soc->hal_soc, hal_srng);
7832 		}
7833 	}
7834 
7835 	return QDF_STATUS_SUCCESS;
7836 }
7837 #endif /* FEATURE_RUNTIME_PM */
7838 
7839 static QDF_STATUS dp_bus_suspend(struct cdp_pdev *opaque_pdev)
7840 {
7841 	struct dp_pdev *pdev = (struct dp_pdev *)opaque_pdev;
7842 	struct dp_soc *soc = pdev->soc;
7843 
7844 	if (soc->intr_mode == DP_INTR_POLL)
7845 		qdf_timer_stop(&soc->int_timer);
7846 
7847 	return QDF_STATUS_SUCCESS;
7848 }
7849 
7850 static QDF_STATUS dp_bus_resume(struct cdp_pdev *opaque_pdev)
7851 {
7852 	struct dp_pdev *pdev = (struct dp_pdev *)opaque_pdev;
7853 	struct dp_soc *soc = pdev->soc;
7854 
7855 	if (soc->intr_mode == DP_INTR_POLL)
7856 		qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
7857 
7858 	return QDF_STATUS_SUCCESS;
7859 }
7860 
7861 #ifndef CONFIG_WIN
7862 static struct cdp_misc_ops dp_ops_misc = {
7863 	.tx_non_std = dp_tx_non_std,
7864 	.get_opmode = dp_get_opmode,
7865 #ifdef FEATURE_RUNTIME_PM
7866 	.runtime_suspend = dp_runtime_suspend,
7867 	.runtime_resume = dp_runtime_resume,
7868 #endif /* FEATURE_RUNTIME_PM */
7869 	.pkt_log_init = dp_pkt_log_init,
7870 	.pkt_log_con_service = dp_pkt_log_con_service,
7871 };
7872 
7873 static struct cdp_flowctl_ops dp_ops_flowctl = {
7874 	/* WIFI 3.0 DP implement as required. */
7875 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
7876 	.flow_pool_map_handler = dp_tx_flow_pool_map,
7877 	.flow_pool_unmap_handler = dp_tx_flow_pool_unmap,
7878 	.register_pause_cb = dp_txrx_register_pause_cb,
7879 	.dump_flow_pool_info = dp_tx_dump_flow_pool_info,
7880 #endif /* QCA_LL_TX_FLOW_CONTROL_V2 */
7881 };
7882 
7883 static struct cdp_lflowctl_ops dp_ops_l_flowctl = {
7884 	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
7885 };
7886 
7887 #ifdef IPA_OFFLOAD
7888 static struct cdp_ipa_ops dp_ops_ipa = {
7889 	.ipa_get_resource = dp_ipa_get_resource,
7890 	.ipa_set_doorbell_paddr = dp_ipa_set_doorbell_paddr,
7891 	.ipa_op_response = dp_ipa_op_response,
7892 	.ipa_register_op_cb = dp_ipa_register_op_cb,
7893 	.ipa_get_stat = dp_ipa_get_stat,
7894 	.ipa_tx_data_frame = dp_tx_send_ipa_data_frame,
7895 	.ipa_enable_autonomy = dp_ipa_enable_autonomy,
7896 	.ipa_disable_autonomy = dp_ipa_disable_autonomy,
7897 	.ipa_setup = dp_ipa_setup,
7898 	.ipa_cleanup = dp_ipa_cleanup,
7899 	.ipa_setup_iface = dp_ipa_setup_iface,
7900 	.ipa_cleanup_iface = dp_ipa_cleanup_iface,
7901 	.ipa_enable_pipes = dp_ipa_enable_pipes,
7902 	.ipa_disable_pipes = dp_ipa_disable_pipes,
7903 	.ipa_set_perf_level = dp_ipa_set_perf_level
7904 };
7905 #endif
7906 
7907 static struct cdp_bus_ops dp_ops_bus = {
7908 	.bus_suspend = dp_bus_suspend,
7909 	.bus_resume = dp_bus_resume
7910 };
7911 
7912 static struct cdp_ocb_ops dp_ops_ocb = {
7913 	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
7914 };
7915 
7916 
7917 static struct cdp_throttle_ops dp_ops_throttle = {
7918 	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
7919 };
7920 
7921 static struct cdp_mob_stats_ops dp_ops_mob_stats = {
7922 	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
7923 };
7924 
7925 static struct cdp_cfg_ops dp_ops_cfg = {
7926 	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
7927 };
7928 
7929 /*
7930  * dp_peer_get_ref_find_by_addr - get peer with addr by ref count inc
7931  * @dev: physical device instance
7932  * @peer_mac_addr: peer mac address
7933  * @local_id: local id for the peer
7934  * @debug_id: to track enum peer access
7935  *
7936  * Return: peer instance pointer
7937  */
7938 static inline void *
7939 dp_peer_get_ref_find_by_addr(struct cdp_pdev *dev, u8 *peer_mac_addr,
7940 			     u8 *local_id, enum peer_debug_id_type debug_id)
7941 {
7942 	struct dp_pdev *pdev = (struct dp_pdev *)dev;
7943 	struct dp_peer *peer;
7944 
7945 	peer = dp_peer_find_hash_find(pdev->soc, peer_mac_addr, 0, DP_VDEV_ALL);
7946 
7947 	if (!peer)
7948 		return NULL;
7949 
7950 	*local_id = peer->local_id;
7951 	DP_TRACE(INFO, "%s: peer %pK id %d", __func__, peer, *local_id);
7952 
7953 	return peer;
7954 }
7955 
7956 /*
7957  * dp_peer_release_ref - release peer ref count
7958  * @peer: peer handle
7959  * @debug_id: to track enum peer access
7960  *
7961  * Return: None
7962  */
7963 static inline
7964 void dp_peer_release_ref(void *peer, enum peer_debug_id_type debug_id)
7965 {
7966 	dp_peer_unref_delete(peer);
7967 }
7968 
7969 static struct cdp_peer_ops dp_ops_peer = {
7970 	.register_peer = dp_register_peer,
7971 	.clear_peer = dp_clear_peer,
7972 	.find_peer_by_addr = dp_find_peer_by_addr,
7973 	.find_peer_by_addr_and_vdev = dp_find_peer_by_addr_and_vdev,
7974 	.peer_get_ref_by_addr = dp_peer_get_ref_find_by_addr,
7975 	.peer_release_ref = dp_peer_release_ref,
7976 	.local_peer_id = dp_local_peer_id,
7977 	.peer_find_by_local_id = dp_peer_find_by_local_id,
7978 	.peer_state_update = dp_peer_state_update,
7979 	.get_vdevid = dp_get_vdevid,
7980 	.get_vdev_by_sta_id = dp_get_vdev_by_sta_id,
7981 	.peer_get_peer_mac_addr = dp_peer_get_peer_mac_addr,
7982 	.get_vdev_for_peer = dp_get_vdev_for_peer,
7983 	.get_peer_state = dp_get_peer_state,
7984 };
7985 #endif
7986 
7987 static struct cdp_ops dp_txrx_ops = {
7988 	.cmn_drv_ops = &dp_ops_cmn,
7989 	.ctrl_ops = &dp_ops_ctrl,
7990 	.me_ops = &dp_ops_me,
7991 	.mon_ops = &dp_ops_mon,
7992 	.host_stats_ops = &dp_ops_host_stats,
7993 	.wds_ops = &dp_ops_wds,
7994 	.raw_ops = &dp_ops_raw,
7995 #ifdef CONFIG_WIN
7996 	.pflow_ops = &dp_ops_pflow,
7997 #endif /* CONFIG_WIN */
7998 #ifndef CONFIG_WIN
7999 	.misc_ops = &dp_ops_misc,
8000 	.cfg_ops = &dp_ops_cfg,
8001 	.flowctl_ops = &dp_ops_flowctl,
8002 	.l_flowctl_ops = &dp_ops_l_flowctl,
8003 #ifdef IPA_OFFLOAD
8004 	.ipa_ops = &dp_ops_ipa,
8005 #endif
8006 	.bus_ops = &dp_ops_bus,
8007 	.ocb_ops = &dp_ops_ocb,
8008 	.peer_ops = &dp_ops_peer,
8009 	.throttle_ops = &dp_ops_throttle,
8010 	.mob_stats_ops = &dp_ops_mob_stats,
8011 #endif
8012 };
8013 
8014 /*
8015  * dp_soc_set_txrx_ring_map()
8016  * @dp_soc: DP handler for soc
8017  *
8018  * Return: Void
8019  */
8020 static void dp_soc_set_txrx_ring_map(struct dp_soc *soc)
8021 {
8022 	uint32_t i;
8023 	for (i = 0; i < WLAN_CFG_INT_NUM_CONTEXTS; i++) {
8024 		soc->tx_ring_map[i] = dp_cpu_ring_map[DP_DEFAULT_MAP][i];
8025 	}
8026 }
8027 
8028 #ifdef QCA_WIFI_QCA8074
8029 /**
8030  * dp_soc_attach_wifi3() - Attach txrx SOC
8031  * @ctrl_psoc:	Opaque SOC handle from control plane
8032  * @htc_handle:	Opaque HTC handle
8033  * @hif_handle:	Opaque HIF handle
8034  * @qdf_osdev:	QDF device
8035  * @ol_ops:	Offload Operations
8036  * @device_id:	Device ID
8037  *
8038  * Return: DP SOC handle on success, NULL on failure
8039  */
8040 void *dp_soc_attach_wifi3(void *ctrl_psoc, void *hif_handle,
8041 			  HTC_HANDLE htc_handle, qdf_device_t qdf_osdev,
8042 			  struct ol_if_ops *ol_ops, uint16_t device_id)
8043 {
8044 	struct dp_soc *soc = qdf_mem_malloc(sizeof(*soc));
8045 	int target_type;
8046 
8047 	if (!soc) {
8048 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
8049 			FL("DP SOC memory allocation failed"));
8050 		goto fail0;
8051 	}
8052 
8053 	soc->device_id = device_id;
8054 	soc->cdp_soc.ops = &dp_txrx_ops;
8055 	soc->cdp_soc.ol_ops = ol_ops;
8056 	soc->ctrl_psoc = ctrl_psoc;
8057 	soc->osdev = qdf_osdev;
8058 	soc->hif_handle = hif_handle;
8059 
8060 	soc->hal_soc = hif_get_hal_handle(hif_handle);
8061 	soc->htt_handle = htt_soc_attach(soc, ctrl_psoc, htc_handle,
8062 		soc->hal_soc, qdf_osdev);
8063 	soc->num_hw_dscp_tid_map = HAL_MAX_HW_DSCP_TID_MAPS;
8064 
8065 	if (!soc->htt_handle) {
8066 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
8067 			FL("HTT attach failed"));
8068 		goto fail1;
8069 	}
8070 
8071 	soc->wlan_cfg_ctx = wlan_cfg_soc_attach(soc->ctrl_psoc);
8072 	if (!soc->wlan_cfg_ctx) {
8073 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
8074 			FL("wlan_cfg_soc_attach failed"));
8075 		goto fail2;
8076 	}
8077 	target_type = hal_get_target_type(soc->hal_soc);
8078 	switch (target_type) {
8079 	case TARGET_TYPE_QCA6290:
8080 		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
8081 					       REO_DST_RING_SIZE_QCA6290);
8082 		soc->ast_override_support = 1;
8083 		break;
8084 #ifdef QCA_WIFI_QCA6390
8085 	case TARGET_TYPE_QCA6390:
8086 		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
8087 					       REO_DST_RING_SIZE_QCA6290);
8088 		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, true);
8089 		soc->ast_override_support = 1;
8090 		break;
8091 #endif
8092 	case TARGET_TYPE_QCA8074:
8093 		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
8094 					       REO_DST_RING_SIZE_QCA8074);
8095 		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, true);
8096 		soc->hw_nac_monitor_support = 1;
8097 		break;
8098 	case TARGET_TYPE_QCA8074V2:
8099 		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
8100 					       REO_DST_RING_SIZE_QCA8074);
8101 		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, false);
8102 		soc->hw_nac_monitor_support = 1;
8103 		soc->ast_override_support = 1;
8104 		soc->num_hw_dscp_tid_map = HAL_MAX_HW_DSCP_TID_V2_MAPS;
8105 		break;
8106 	default:
8107 		qdf_print("%s: Unknown tgt type %d\n", __func__, target_type);
8108 		qdf_assert_always(0);
8109 		break;
8110 	}
8111 
8112 	wlan_cfg_set_rx_hash(soc->wlan_cfg_ctx,
8113 			     cfg_get(ctrl_psoc, CFG_DP_RX_HASH));
8114 	soc->cce_disable = false;
8115 
8116 	if (soc->cdp_soc.ol_ops->get_dp_cfg_param) {
8117 		int ret = soc->cdp_soc.ol_ops->get_dp_cfg_param(soc->ctrl_psoc,
8118 				CDP_CFG_MAX_PEER_ID);
8119 
8120 		if (ret != -EINVAL) {
8121 			wlan_cfg_set_max_peer_id(soc->wlan_cfg_ctx, ret);
8122 		}
8123 
8124 		ret = soc->cdp_soc.ol_ops->get_dp_cfg_param(soc->ctrl_psoc,
8125 				CDP_CFG_CCE_DISABLE);
8126 		if (ret == 1)
8127 			soc->cce_disable = true;
8128 	}
8129 
8130 	qdf_spinlock_create(&soc->peer_ref_mutex);
8131 
8132 	qdf_spinlock_create(&soc->reo_desc_freelist_lock);
8133 	qdf_list_create(&soc->reo_desc_freelist, REO_DESC_FREELIST_SIZE);
8134 
8135 	/* fill the tx/rx cpu ring map*/
8136 	dp_soc_set_txrx_ring_map(soc);
8137 
8138 	qdf_spinlock_create(&soc->htt_stats.lock);
8139 	/* initialize work queue for stats processing */
8140 	qdf_create_work(0, &soc->htt_stats.work, htt_t2h_stats_handler, soc);
8141 
8142 	return (void *)soc;
8143 
8144 fail2:
8145 	htt_soc_detach(soc->htt_handle);
8146 fail1:
8147 	qdf_mem_free(soc);
8148 fail0:
8149 	return NULL;
8150 }
8151 #endif
8152 
8153 /*
8154  * dp_get_pdev_for_mac_id() -  Return pdev for mac_id
8155  *
8156  * @soc: handle to DP soc
8157  * @mac_id: MAC id
8158  *
8159  * Return: Return pdev corresponding to MAC
8160  */
8161 void *dp_get_pdev_for_mac_id(struct dp_soc *soc, uint32_t mac_id)
8162 {
8163 	if (wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx))
8164 		return soc->pdev_list[mac_id];
8165 
8166 	/* Typically for MCL as there only 1 PDEV*/
8167 	return soc->pdev_list[0];
8168 }
8169 
8170 /*
8171  * dp_is_hw_dbs_enable() - Procedure to check if DBS is supported
8172  * @soc:		DP SoC context
8173  * @max_mac_rings:	No of MAC rings
8174  *
8175  * Return: None
8176  */
8177 static
8178 void dp_is_hw_dbs_enable(struct dp_soc *soc,
8179 				int *max_mac_rings)
8180 {
8181 	bool dbs_enable = false;
8182 	if (soc->cdp_soc.ol_ops->is_hw_dbs_2x2_capable)
8183 		dbs_enable = soc->cdp_soc.ol_ops->
8184 		is_hw_dbs_2x2_capable(soc->ctrl_psoc);
8185 
8186 	*max_mac_rings = (dbs_enable)?(*max_mac_rings):1;
8187 }
8188 
8189 /*
8190 * dp_set_pktlog_wifi3() - attach txrx vdev
8191 * @pdev: Datapath PDEV handle
8192 * @event: which event's notifications are being subscribed to
8193 * @enable: WDI event subscribe or not. (True or False)
8194 *
8195 * Return: Success, NULL on failure
8196 */
8197 #ifdef WDI_EVENT_ENABLE
8198 int dp_set_pktlog_wifi3(struct dp_pdev *pdev, uint32_t event,
8199 	bool enable)
8200 {
8201 	struct dp_soc *soc = pdev->soc;
8202 	struct htt_rx_ring_tlv_filter htt_tlv_filter = {0};
8203 	int max_mac_rings = wlan_cfg_get_num_mac_rings
8204 					(pdev->wlan_cfg_ctx);
8205 	uint8_t mac_id = 0;
8206 
8207 	dp_is_hw_dbs_enable(soc, &max_mac_rings);
8208 
8209 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
8210 			FL("Max_mac_rings %d "),
8211 			max_mac_rings);
8212 
8213 	if (enable) {
8214 		switch (event) {
8215 		case WDI_EVENT_RX_DESC:
8216 			if (pdev->monitor_vdev) {
8217 				/* Nothing needs to be done if monitor mode is
8218 				 * enabled
8219 				 */
8220 				return 0;
8221 			}
8222 			if (pdev->rx_pktlog_mode != DP_RX_PKTLOG_FULL) {
8223 				pdev->rx_pktlog_mode = DP_RX_PKTLOG_FULL;
8224 				htt_tlv_filter.mpdu_start = 1;
8225 				htt_tlv_filter.msdu_start = 1;
8226 				htt_tlv_filter.msdu_end = 1;
8227 				htt_tlv_filter.mpdu_end = 1;
8228 				htt_tlv_filter.packet_header = 1;
8229 				htt_tlv_filter.attention = 1;
8230 				htt_tlv_filter.ppdu_start = 1;
8231 				htt_tlv_filter.ppdu_end = 1;
8232 				htt_tlv_filter.ppdu_end_user_stats = 1;
8233 				htt_tlv_filter.ppdu_end_user_stats_ext = 1;
8234 				htt_tlv_filter.ppdu_end_status_done = 1;
8235 				htt_tlv_filter.enable_fp = 1;
8236 				htt_tlv_filter.fp_mgmt_filter = FILTER_MGMT_ALL;
8237 				htt_tlv_filter.fp_ctrl_filter = FILTER_CTRL_ALL;
8238 				htt_tlv_filter.fp_data_filter = FILTER_DATA_ALL;
8239 				htt_tlv_filter.mo_mgmt_filter = FILTER_MGMT_ALL;
8240 				htt_tlv_filter.mo_ctrl_filter = FILTER_CTRL_ALL;
8241 				htt_tlv_filter.mo_data_filter = FILTER_DATA_ALL;
8242 
8243 				for (mac_id = 0; mac_id < max_mac_rings;
8244 								mac_id++) {
8245 					int mac_for_pdev =
8246 						dp_get_mac_id_for_pdev(mac_id,
8247 								pdev->pdev_id);
8248 
8249 					htt_h2t_rx_ring_cfg(soc->htt_handle,
8250 					 mac_for_pdev,
8251 					 pdev->rxdma_mon_status_ring[mac_id]
8252 					 .hal_srng,
8253 					 RXDMA_MONITOR_STATUS,
8254 					 RX_BUFFER_SIZE,
8255 					 &htt_tlv_filter);
8256 
8257 				}
8258 
8259 				if (soc->reap_timer_init)
8260 					qdf_timer_mod(&soc->mon_reap_timer,
8261 					DP_INTR_POLL_TIMER_MS);
8262 			}
8263 			break;
8264 
8265 		case WDI_EVENT_LITE_RX:
8266 			if (pdev->monitor_vdev) {
8267 				/* Nothing needs to be done if monitor mode is
8268 				 * enabled
8269 				 */
8270 				return 0;
8271 			}
8272 
8273 			if (pdev->rx_pktlog_mode != DP_RX_PKTLOG_LITE) {
8274 				pdev->rx_pktlog_mode = DP_RX_PKTLOG_LITE;
8275 
8276 				htt_tlv_filter.ppdu_start = 1;
8277 				htt_tlv_filter.ppdu_end = 1;
8278 				htt_tlv_filter.ppdu_end_user_stats = 1;
8279 				htt_tlv_filter.ppdu_end_user_stats_ext = 1;
8280 				htt_tlv_filter.ppdu_end_status_done = 1;
8281 				htt_tlv_filter.mpdu_start = 1;
8282 				htt_tlv_filter.enable_fp = 1;
8283 				htt_tlv_filter.fp_mgmt_filter = FILTER_MGMT_ALL;
8284 				htt_tlv_filter.fp_ctrl_filter = FILTER_CTRL_ALL;
8285 				htt_tlv_filter.fp_data_filter = FILTER_DATA_ALL;
8286 				htt_tlv_filter.mo_mgmt_filter = FILTER_MGMT_ALL;
8287 				htt_tlv_filter.mo_ctrl_filter = FILTER_CTRL_ALL;
8288 				htt_tlv_filter.mo_data_filter = FILTER_DATA_ALL;
8289 
8290 				for (mac_id = 0; mac_id < max_mac_rings;
8291 								mac_id++) {
8292 					int mac_for_pdev =
8293 						dp_get_mac_id_for_pdev(mac_id,
8294 								pdev->pdev_id);
8295 
8296 					htt_h2t_rx_ring_cfg(soc->htt_handle,
8297 					mac_for_pdev,
8298 					pdev->rxdma_mon_status_ring[mac_id]
8299 					.hal_srng,
8300 					RXDMA_MONITOR_STATUS,
8301 					RX_BUFFER_SIZE_PKTLOG_LITE,
8302 					&htt_tlv_filter);
8303 				}
8304 
8305 				if (soc->reap_timer_init)
8306 					qdf_timer_mod(&soc->mon_reap_timer,
8307 					DP_INTR_POLL_TIMER_MS);
8308 			}
8309 			break;
8310 
8311 		case WDI_EVENT_LITE_T2H:
8312 			if (pdev->monitor_vdev) {
8313 				/* Nothing needs to be done if monitor mode is
8314 				 * enabled
8315 				 */
8316 				return 0;
8317 			}
8318 
8319 			for (mac_id = 0; mac_id < max_mac_rings; mac_id++) {
8320 				int mac_for_pdev = dp_get_mac_id_for_pdev(
8321 							mac_id,	pdev->pdev_id);
8322 
8323 				pdev->pktlog_ppdu_stats = true;
8324 				dp_h2t_cfg_stats_msg_send(pdev,
8325 					DP_PPDU_TXLITE_STATS_BITMASK_CFG,
8326 					mac_for_pdev);
8327 			}
8328 			break;
8329 
8330 		default:
8331 			/* Nothing needs to be done for other pktlog types */
8332 			break;
8333 		}
8334 	} else {
8335 		switch (event) {
8336 		case WDI_EVENT_RX_DESC:
8337 		case WDI_EVENT_LITE_RX:
8338 			if (pdev->monitor_vdev) {
8339 				/* Nothing needs to be done if monitor mode is
8340 				 * enabled
8341 				 */
8342 				return 0;
8343 			}
8344 			if (pdev->rx_pktlog_mode != DP_RX_PKTLOG_DISABLED) {
8345 				pdev->rx_pktlog_mode = DP_RX_PKTLOG_DISABLED;
8346 
8347 				for (mac_id = 0; mac_id < max_mac_rings;
8348 								mac_id++) {
8349 					int mac_for_pdev =
8350 						dp_get_mac_id_for_pdev(mac_id,
8351 								pdev->pdev_id);
8352 
8353 					htt_h2t_rx_ring_cfg(soc->htt_handle,
8354 					  mac_for_pdev,
8355 					  pdev->rxdma_mon_status_ring[mac_id]
8356 					  .hal_srng,
8357 					  RXDMA_MONITOR_STATUS,
8358 					  RX_BUFFER_SIZE,
8359 					  &htt_tlv_filter);
8360 				}
8361 
8362 				if (soc->reap_timer_init)
8363 					qdf_timer_stop(&soc->mon_reap_timer);
8364 			}
8365 			break;
8366 		case WDI_EVENT_LITE_T2H:
8367 			if (pdev->monitor_vdev) {
8368 				/* Nothing needs to be done if monitor mode is
8369 				 * enabled
8370 				 */
8371 				return 0;
8372 			}
8373 			/* To disable HTT_H2T_MSG_TYPE_PPDU_STATS_CFG in FW
8374 			 * passing value 0. Once these macros will define in htt
8375 			 * header file will use proper macros
8376 			*/
8377 			for (mac_id = 0; mac_id < max_mac_rings; mac_id++) {
8378 				int mac_for_pdev =
8379 						dp_get_mac_id_for_pdev(mac_id,
8380 								pdev->pdev_id);
8381 
8382 				pdev->pktlog_ppdu_stats = false;
8383 				if (!pdev->enhanced_stats_en && !pdev->tx_sniffer_enable && !pdev->mcopy_mode) {
8384 					dp_h2t_cfg_stats_msg_send(pdev, 0,
8385 								mac_for_pdev);
8386 				} else if (pdev->tx_sniffer_enable || pdev->mcopy_mode) {
8387 					dp_h2t_cfg_stats_msg_send(pdev, DP_PPDU_STATS_CFG_SNIFFER,
8388 								mac_for_pdev);
8389 				} else if (pdev->enhanced_stats_en) {
8390 					dp_h2t_cfg_stats_msg_send(pdev, DP_PPDU_STATS_CFG_ENH_STATS,
8391 								mac_for_pdev);
8392 				}
8393 			}
8394 
8395 			break;
8396 		default:
8397 			/* Nothing needs to be done for other pktlog types */
8398 			break;
8399 		}
8400 	}
8401 	return 0;
8402 }
8403 #endif
8404