1 /*
2  * Copyright (c) 2016-2019 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include <qdf_types.h>
20 #include <qdf_lock.h>
21 #include <qdf_net_types.h>
22 #include <qdf_lro.h>
23 #include <qdf_module.h>
24 #include <hal_hw_headers.h>
25 #include <hal_api.h>
26 #include <hif.h>
27 #include <htt.h>
28 #include <wdi_event.h>
29 #include <queue.h>
30 #include "dp_htt.h"
31 #include "dp_types.h"
32 #include "dp_internal.h"
33 #include "dp_tx.h"
34 #include "dp_tx_desc.h"
35 #include "dp_rx.h"
36 #include <cdp_txrx_handle.h>
37 #include <wlan_cfg.h>
38 #include "cdp_txrx_cmn_struct.h"
39 #include "cdp_txrx_stats_struct.h"
40 #include "cdp_txrx_cmn_reg.h"
41 #include <qdf_util.h>
42 #include "dp_peer.h"
43 #include "dp_rx_mon.h"
44 #include "htt_stats.h"
45 #include "qdf_mem.h"   /* qdf_mem_malloc,free */
46 #include "cfg_ucfg_api.h"
47 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
48 #include "cdp_txrx_flow_ctrl_v2.h"
49 #else
50 static inline void
51 cdp_dump_flow_pool_info(struct cdp_soc_t *soc)
52 {
53 	return;
54 }
55 #endif
56 #include "dp_ipa.h"
57 #include "dp_cal_client_api.h"
58 #ifdef CONFIG_MCL
59 extern int con_mode_monitor;
60 #ifndef REMOVE_PKT_LOG
61 #include <pktlog_ac_api.h>
62 #include <pktlog_ac.h>
63 #endif
64 #endif
65 void *dp_soc_init(void *dpsoc, HTC_HANDLE htc_handle, void *hif_handle);
66 static void dp_pdev_detach(struct cdp_pdev *txrx_pdev, int force);
67 static struct dp_soc *
68 dp_soc_attach(void *ctrl_psoc, HTC_HANDLE htc_handle, qdf_device_t qdf_osdev,
69 	      struct ol_if_ops *ol_ops, uint16_t device_id);
70 static void dp_pktlogmod_exit(struct dp_pdev *handle);
71 static void *dp_peer_create_wifi3(struct cdp_vdev *vdev_handle,
72 				uint8_t *peer_mac_addr,
73 				struct cdp_ctrl_objmgr_peer *ctrl_peer);
74 static void dp_peer_delete_wifi3(void *peer_handle, uint32_t bitmap);
75 static void dp_ppdu_ring_reset(struct dp_pdev *pdev);
76 static void dp_ppdu_ring_cfg(struct dp_pdev *pdev);
77 
78 #define DP_INTR_POLL_TIMER_MS	10
79 /* Generic AST entry aging timer value */
80 #define DP_AST_AGING_TIMER_DEFAULT_MS	1000
81 /* WDS AST entry aging timer value */
82 #define DP_WDS_AST_AGING_TIMER_DEFAULT_MS	120000
83 #define DP_WDS_AST_AGING_TIMER_CNT \
84 ((DP_WDS_AST_AGING_TIMER_DEFAULT_MS / DP_AST_AGING_TIMER_DEFAULT_MS) - 1)
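/*
 * Editor's note - worked example: with the defaults above,
 * DP_WDS_AST_AGING_TIMER_CNT = (120000 / 1000) - 1 = 119, i.e. a WDS AST
 * entry must be seen as inactive on 119 consecutive ticks of the 1000 ms
 * base aging timer (~2 minutes) before it is aged out.
 */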
85 #define DP_MCS_LENGTH (6*MAX_MCS)
86 #define DP_NSS_LENGTH (6*SS_COUNT)
87 #define DP_MU_GROUP_SHOW 16
88 #define DP_MU_GROUP_LENGTH (6 * DP_MU_GROUP_SHOW)
89 #define DP_RXDMA_ERR_LENGTH (6*HAL_RXDMA_ERR_MAX)
90 #define DP_MAX_INT_CONTEXTS_STRING_LENGTH (6 * WLAN_CFG_INT_NUM_CONTEXTS)
91 #define DP_REO_ERR_LENGTH (6*HAL_REO_ERR_MAX)
92 #define DP_MAX_MCS_STRING_LEN 30
93 #define DP_CURR_FW_STATS_AVAIL 19
94 #define DP_HTT_DBG_EXT_STATS_MAX 256
95 #define DP_MAX_SLEEP_TIME 100
96 #ifndef QCA_WIFI_3_0_EMU
97 #define SUSPEND_DRAIN_WAIT 500
98 #else
99 #define SUSPEND_DRAIN_WAIT 3000
100 #endif
101 
102 #ifdef IPA_OFFLOAD
103 /* Exclude IPA rings from the interrupt context */
104 #define TX_RING_MASK_VAL	0xb
105 #define RX_RING_MASK_VAL	0x7
106 #else
107 #define TX_RING_MASK_VAL	0xF
108 #define RX_RING_MASK_VAL	0xF
109 #endif
110 
111 #define STR_MAXLEN	64
112 
113 #define DP_PPDU_STATS_CFG_ALL 0xFFFF
114 
115 /* PPDU stats mask sent to FW to enable enhanced stats */
116 #define DP_PPDU_STATS_CFG_ENH_STATS 0xE67
117 /* PPDU stats mask sent to FW to support debug sniffer feature */
118 #define DP_PPDU_STATS_CFG_SNIFFER 0x2FFF
119 /* PPDU stats mask sent to FW to support BPR feature */
120 #define DP_PPDU_STATS_CFG_BPR 0x2000
121 /* PPDU stats mask sent to FW to support BPR and enhanced stats feature */
122 #define DP_PPDU_STATS_CFG_BPR_ENH (DP_PPDU_STATS_CFG_BPR | \
123 				   DP_PPDU_STATS_CFG_ENH_STATS)
124 /* PPDU stats mask sent to FW to support BPR and pktlog stats feature */
125 #define DP_PPDU_STATS_CFG_BPR_PKTLOG (DP_PPDU_STATS_CFG_BPR | \
126 				      DP_PPDU_TXLITE_STATS_BITMASK_CFG)
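/*
 * Editor's note - worked example: the composite masks are plain bitwise ORs
 * of the feature masks above, e.g.
 * DP_PPDU_STATS_CFG_BPR_ENH = 0x2000 | 0xE67 = 0x2E67, so the FW receives a
 * single bitmap enabling both BPR and enhanced stats.
 */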
127 
128 #define RNG_ERR		"SRNG setup failed for"
129 /**
130  * default_dscp_tid_map - Default DSCP-TID mapping
131  *
132  * DSCP        TID
133  * 000000      0
134  * 001000      1
135  * 010000      2
136  * 011000      3
137  * 100000      4
138  * 101000      5
139  * 110000      6
140  * 111000      7
141  */
142 static uint8_t default_dscp_tid_map[DSCP_TID_MAP_MAX] = {
143 	0, 0, 0, 0, 0, 0, 0, 0,
144 	1, 1, 1, 1, 1, 1, 1, 1,
145 	2, 2, 2, 2, 2, 2, 2, 2,
146 	3, 3, 3, 3, 3, 3, 3, 3,
147 	4, 4, 4, 4, 4, 4, 4, 4,
148 	5, 5, 5, 5, 5, 5, 5, 5,
149 	6, 6, 6, 6, 6, 6, 6, 6,
150 	7, 7, 7, 7, 7, 7, 7, 7,
151 };
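/*
 * Editor's note - usage sketch: the table is indexed by the full 6-bit DSCP
 * codepoint, so each block of eight codepoints shares one TID; e.g. DSCP 46
 * (EF, 0b101110) lies in the 101xxx block and maps to TID 5.
 */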
152 
153 /*
154  * struct dp_rate_debug
155  *
156  * @mcs_type: print string for a given mcs
157  * @valid: whether this MCS index is a valid rate
158  */
159 struct dp_rate_debug {
160 	char mcs_type[DP_MAX_MCS_STRING_LEN];
161 	uint8_t valid;
162 };
163 
164 #define MCS_VALID 1
165 #define MCS_INVALID 0
166 
167 static const struct dp_rate_debug dp_rate_string[DOT11_MAX][MAX_MCS] = {
168 
169 	{
170 		{"OFDM 48 Mbps", MCS_VALID},
171 		{"OFDM 24 Mbps", MCS_VALID},
172 		{"OFDM 12 Mbps", MCS_VALID},
173 		{"OFDM 6 Mbps ", MCS_VALID},
174 		{"OFDM 54 Mbps", MCS_VALID},
175 		{"OFDM 36 Mbps", MCS_VALID},
176 		{"OFDM 18 Mbps", MCS_VALID},
177 		{"OFDM 9 Mbps ", MCS_VALID},
178 		{"INVALID ", MCS_INVALID},
179 		{"INVALID ", MCS_INVALID},
180 		{"INVALID ", MCS_INVALID},
181 		{"INVALID ", MCS_INVALID},
182 		{"INVALID ", MCS_VALID},
183 	},
184 	{
185 		{"CCK 11 Mbps Long  ", MCS_VALID},
186 		{"CCK 5.5 Mbps Long ", MCS_VALID},
187 		{"CCK 2 Mbps Long   ", MCS_VALID},
188 		{"CCK 1 Mbps Long   ", MCS_VALID},
189 		{"CCK 11 Mbps Short ", MCS_VALID},
190 		{"CCK 5.5 Mbps Short", MCS_VALID},
191 		{"CCK 2 Mbps Short  ", MCS_VALID},
192 		{"INVALID ", MCS_INVALID},
193 		{"INVALID ", MCS_INVALID},
194 		{"INVALID ", MCS_INVALID},
195 		{"INVALID ", MCS_INVALID},
196 		{"INVALID ", MCS_INVALID},
197 		{"INVALID ", MCS_VALID},
198 	},
199 	{
200 		{"HT MCS 0 (BPSK 1/2)  ", MCS_VALID},
201 		{"HT MCS 1 (QPSK 1/2)  ", MCS_VALID},
202 		{"HT MCS 2 (QPSK 3/4)  ", MCS_VALID},
203 		{"HT MCS 3 (16-QAM 1/2)", MCS_VALID},
204 		{"HT MCS 4 (16-QAM 3/4)", MCS_VALID},
205 		{"HT MCS 5 (64-QAM 2/3)", MCS_VALID},
206 		{"HT MCS 6 (64-QAM 3/4)", MCS_VALID},
207 		{"HT MCS 7 (64-QAM 5/6)", MCS_VALID},
208 		{"INVALID ", MCS_INVALID},
209 		{"INVALID ", MCS_INVALID},
210 		{"INVALID ", MCS_INVALID},
211 		{"INVALID ", MCS_INVALID},
212 		{"INVALID ", MCS_VALID},
213 	},
214 	{
215 		{"VHT MCS 0 (BPSK 1/2)     ", MCS_VALID},
216 		{"VHT MCS 1 (QPSK 1/2)     ", MCS_VALID},
217 		{"VHT MCS 2 (QPSK 3/4)     ", MCS_VALID},
218 		{"VHT MCS 3 (16-QAM 1/2)   ", MCS_VALID},
219 		{"VHT MCS 4 (16-QAM 3/4)   ", MCS_VALID},
220 		{"VHT MCS 5 (64-QAM 2/3)   ", MCS_VALID},
221 		{"VHT MCS 6 (64-QAM 3/4)   ", MCS_VALID},
222 		{"VHT MCS 7 (64-QAM 5/6)   ", MCS_VALID},
223 		{"VHT MCS 8 (256-QAM 3/4)  ", MCS_VALID},
224 		{"VHT MCS 9 (256-QAM 5/6)  ", MCS_VALID},
225 		{"VHT MCS 10 (1024-QAM 3/4)", MCS_VALID},
226 		{"VHT MCS 11 (1024-QAM 5/6)", MCS_VALID},
227 		{"INVALID ", MCS_VALID},
228 	},
229 	{
230 		{"HE MCS 0 (BPSK 1/2)     ", MCS_VALID},
231 		{"HE MCS 1 (QPSK 1/2)     ", MCS_VALID},
232 		{"HE MCS 2 (QPSK 3/4)     ", MCS_VALID},
233 		{"HE MCS 3 (16-QAM 1/2)   ", MCS_VALID},
234 		{"HE MCS 4 (16-QAM 3/4)   ", MCS_VALID},
235 		{"HE MCS 5 (64-QAM 2/3)   ", MCS_VALID},
236 		{"HE MCS 6 (64-QAM 3/4)   ", MCS_VALID},
237 		{"HE MCS 7 (64-QAM 5/6)   ", MCS_VALID},
238 		{"HE MCS 8 (256-QAM 3/4)  ", MCS_VALID},
239 		{"HE MCS 9 (256-QAM 5/6)  ", MCS_VALID},
240 		{"HE MCS 10 (1024-QAM 3/4)", MCS_VALID},
241 		{"HE MCS 11 (1024-QAM 5/6)", MCS_VALID},
242 		{"INVALID ", MCS_VALID},
243 	}
244 };
245 
246 /**
247  * dp_cpu_ring_map_types - dp tx cpu ring map
248  * @DP_NSS_DEFAULT_MAP: Default mode with no NSS offloaded
249  * @DP_NSS_FIRST_RADIO_OFFLOADED_MAP: Only First Radio is offloaded
250  * @DP_NSS_SECOND_RADIO_OFFLOADED_MAP: Only second radio is offloaded
251  * @DP_NSS_DBDC_OFFLOADED_MAP: Both radios are offloaded
252  * @DP_NSS_DBTC_OFFLOADED_MAP: All three radios are offloaded
253  * @DP_NSS_CPU_RING_MAP_MAX: Max cpu ring map val
254  */
255 enum dp_cpu_ring_map_types {
256 	DP_NSS_DEFAULT_MAP,
257 	DP_NSS_FIRST_RADIO_OFFLOADED_MAP,
258 	DP_NSS_SECOND_RADIO_OFFLOADED_MAP,
259 	DP_NSS_DBDC_OFFLOADED_MAP,
260 	DP_NSS_DBTC_OFFLOADED_MAP,
261 	DP_NSS_CPU_RING_MAP_MAX
262 };
263 
264 /**
265  * @brief Cpu to tx ring map
266  */
267 #ifdef CONFIG_WIN
268 static uint8_t
269 dp_cpu_ring_map[DP_NSS_CPU_RING_MAP_MAX][WLAN_CFG_INT_NUM_CONTEXTS] = {
270 	{0x0, 0x1, 0x2, 0x0, 0x0, 0x1, 0x2, 0x0, 0x0, 0x1, 0x2},
271 	{0x1, 0x2, 0x1, 0x2, 0x1, 0x2, 0x1, 0x2, 0x1, 0x2, 0x1},
272 	{0x0, 0x2, 0x0, 0x2, 0x0, 0x2, 0x0, 0x2, 0x0, 0x2, 0x0},
273 	{0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2},
274 	{0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3}
275 };
276 #else
277 static uint8_t
278 dp_cpu_ring_map[DP_NSS_CPU_RING_MAP_MAX][WLAN_CFG_INT_NUM_CONTEXTS] = {
279 	{0x0, 0x1, 0x2, 0x0, 0x0, 0x1, 0x2},
280 	{0x1, 0x2, 0x1, 0x2, 0x1, 0x2, 0x1},
281 	{0x0, 0x2, 0x0, 0x2, 0x0, 0x2, 0x0},
282 	{0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2},
283 	{0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3}
284 };
285 #endif
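/*
 * Editor's note - minimal usage sketch (hypothetical helper, not called
 * anywhere in this file): each CPU/interrupt context indexes one column of
 * the map above to pick the TCL data ring it may transmit on. Note that
 * with DP_NSS_FIRST_RADIO_OFFLOADED_MAP ring 0 never appears in the row,
 * since the first radio's TX rings are owned by the NSS offload engine.
 */
static inline uint8_t
dp_example_tx_ring_for_cpu(enum dp_cpu_ring_map_types cfg, unsigned int cpu)
{
	return dp_cpu_ring_map[cfg][cpu % WLAN_CFG_INT_NUM_CONTEXTS];
}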
286 
287 /**
288  * @brief Select the type of statistics
289  */
290 enum dp_stats_type {
291 	STATS_FW = 0,
292 	STATS_HOST = 1,
293 	STATS_TYPE_MAX = 2,
294 };
295 
296 /**
297  * @brief General Firmware statistics options
298  *
299  */
300 enum dp_fw_stats {
301 	TXRX_FW_STATS_INVALID	= -1,
302 };
303 
304 /**
305  * dp_stats_mapping_table - Firmware and Host statistics
306  * currently supported
307  */
308 const int dp_stats_mapping_table[][STATS_TYPE_MAX] = {
309 	{HTT_DBG_EXT_STATS_RESET, TXRX_HOST_STATS_INVALID},
310 	{HTT_DBG_EXT_STATS_PDEV_TX, TXRX_HOST_STATS_INVALID},
311 	{HTT_DBG_EXT_STATS_PDEV_RX, TXRX_HOST_STATS_INVALID},
312 	{HTT_DBG_EXT_STATS_PDEV_TX_HWQ, TXRX_HOST_STATS_INVALID},
313 	{HTT_DBG_EXT_STATS_PDEV_TX_SCHED, TXRX_HOST_STATS_INVALID},
314 	{HTT_DBG_EXT_STATS_PDEV_ERROR, TXRX_HOST_STATS_INVALID},
315 	{HTT_DBG_EXT_STATS_PDEV_TQM, TXRX_HOST_STATS_INVALID},
316 	{HTT_DBG_EXT_STATS_TQM_CMDQ, TXRX_HOST_STATS_INVALID},
317 	{HTT_DBG_EXT_STATS_TX_DE_INFO, TXRX_HOST_STATS_INVALID},
318 	{HTT_DBG_EXT_STATS_PDEV_TX_RATE, TXRX_HOST_STATS_INVALID},
319 	{HTT_DBG_EXT_STATS_PDEV_RX_RATE, TXRX_HOST_STATS_INVALID},
320 	{TXRX_FW_STATS_INVALID, TXRX_HOST_STATS_INVALID},
321 	{HTT_DBG_EXT_STATS_TX_SELFGEN_INFO, TXRX_HOST_STATS_INVALID},
322 	{HTT_DBG_EXT_STATS_TX_MU_HWQ, TXRX_HOST_STATS_INVALID},
323 	{HTT_DBG_EXT_STATS_RING_IF_INFO, TXRX_HOST_STATS_INVALID},
324 	{HTT_DBG_EXT_STATS_SRNG_INFO, TXRX_HOST_STATS_INVALID},
325 	{HTT_DBG_EXT_STATS_SFM_INFO, TXRX_HOST_STATS_INVALID},
326 	{HTT_DBG_EXT_STATS_PDEV_TX_MU, TXRX_HOST_STATS_INVALID},
327 	{HTT_DBG_EXT_STATS_ACTIVE_PEERS_LIST, TXRX_HOST_STATS_INVALID},
328 	/* Last ENUM for HTT FW STATS */
329 	{DP_HTT_DBG_EXT_STATS_MAX, TXRX_HOST_STATS_INVALID},
330 	{TXRX_FW_STATS_INVALID, TXRX_CLEAR_STATS},
331 	{TXRX_FW_STATS_INVALID, TXRX_RX_RATE_STATS},
332 	{TXRX_FW_STATS_INVALID, TXRX_TX_RATE_STATS},
333 	{TXRX_FW_STATS_INVALID, TXRX_TX_HOST_STATS},
334 	{TXRX_FW_STATS_INVALID, TXRX_RX_HOST_STATS},
335 	{TXRX_FW_STATS_INVALID, TXRX_AST_STATS},
336 	{TXRX_FW_STATS_INVALID, TXRX_SRNG_PTR_STATS},
337 	{TXRX_FW_STATS_INVALID, TXRX_RX_MON_STATS},
338 	{TXRX_FW_STATS_INVALID, TXRX_REO_QUEUE_STATS},
339 	{TXRX_FW_STATS_INVALID, TXRX_SOC_CFG_PARAMS},
340 	{TXRX_FW_STATS_INVALID, TXRX_PDEV_CFG_PARAMS},
341 };
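/*
 * Editor's note - usage sketch: a stats request value indexes a row of the
 * table, and the STATS_FW/STATS_HOST column selects the concrete id; e.g.
 * row 1 resolves to HTT_DBG_EXT_STATS_PDEV_TX on the FW side and to
 * TXRX_HOST_STATS_INVALID (no host counterpart) on the host side.
 */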
342 
343 /* MCL specific functions */
344 #ifdef CONFIG_MCL
345 /**
346  * dp_soc_get_mon_mask_for_interrupt_mode() - get mon mode mask for intr mode
347  * @soc: pointer to dp_soc handle
348  * @intr_ctx_num: interrupt context number for which mon mask is needed
349  *
350  * For MCL, monitor mode rings are processed in a timer context (polled).
351  * This function returns 0 because, in interrupt mode (softirq-based RX),
352  * we do not want to process monitor mode rings in a softirq.
353  *
354  * So, when packet log is enabled for SAP/STA/P2P modes, regular interrupt
355  * processing will not touch the monitor mode rings; they are reaped
356  * in a separate timer context instead.
357  *
358  * Return: 0
359  */
360 static inline
361 uint32_t dp_soc_get_mon_mask_for_interrupt_mode(struct dp_soc *soc, int intr_ctx_num)
362 {
363 	return 0;
364 }
365 
366 /*
367  * dp_service_mon_rings() - timer to reap monitor rings,
368  * required as we are not getting ppdu end interrupts
369  * @arg: SoC Handle
370  *
371  * Return: None
372  *
373  */
374 static void dp_service_mon_rings(void *arg)
375 {
376 	struct dp_soc *soc = (struct dp_soc *)arg;
377 	int ring = 0, work_done, mac_id;
378 	struct dp_pdev *pdev = NULL;
379 
380 	for  (ring = 0 ; ring < MAX_PDEV_CNT; ring++) {
381 		pdev = soc->pdev_list[ring];
382 		if (!pdev)
383 			continue;
384 		for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
385 			int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id,
386 								pdev->pdev_id);
387 			work_done = dp_mon_process(soc, mac_for_pdev,
388 						   QCA_NAPI_BUDGET);
389 
390 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
391 				  FL("Reaped %d descs from Monitor rings"),
392 				  work_done);
393 		}
394 	}
395 
396 	qdf_timer_mod(&soc->mon_reap_timer, DP_INTR_POLL_TIMER_MS);
397 }
398 
399 #ifndef REMOVE_PKT_LOG
400 /**
401  * dp_pkt_log_init() - API to initialize packet log
402  * @ppdev: physical device handle
403  * @scn: HIF context
404  *
405  * Return: none
406  */
407 void dp_pkt_log_init(struct cdp_pdev *ppdev, void *scn)
408 {
409 	struct dp_pdev *handle = (struct dp_pdev *)ppdev;
410 
411 	if (handle->pkt_log_init) {
412 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
413 			  "%s: Packet log already initialized", __func__);
414 		return;
415 	}
416 
417 	pktlog_sethandle(&handle->pl_dev, scn);
418 	pktlog_set_callback_regtype(PKTLOG_LITE_CALLBACK_REGISTRATION);
419 
420 	if (pktlogmod_init(scn)) {
421 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
422 			  "%s: pktlogmod_init failed", __func__);
423 		handle->pkt_log_init = false;
424 	} else {
425 		handle->pkt_log_init = true;
426 	}
427 }
428 
429 /**
430  * dp_pkt_log_con_service() - connect packet log service
431  * @ppdev: physical device handle
432  * @scn: device context
433  *
434  * Return: none
435  */
436 static void dp_pkt_log_con_service(struct cdp_pdev *ppdev, void *scn)
437 {
438 	struct dp_pdev *pdev = (struct dp_pdev *)ppdev;
439 
440 	dp_pkt_log_init((struct cdp_pdev *)pdev, scn);
441 	pktlog_htc_attach();
442 }
443 
444 /**
445  * dp_get_num_rx_contexts() - get number of RX contexts
446  * @soc_hdl: cdp opaque soc handle
447  *
448  * Return: number of RX contexts
449  */
450 static int dp_get_num_rx_contexts(struct cdp_soc_t *soc_hdl)
451 {
452 	int i;
453 	int num_rx_contexts = 0;
454 
455 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
456 
457 	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++)
458 		if (wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, i))
459 			num_rx_contexts++;
460 
461 	return num_rx_contexts;
462 }
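/*
 * Editor's note - worked example: if the per-context rx_ring_mask values
 * are {0x1, 0x2, 0x4, 0x0, ...}, only three interrupt contexts have a REO
 * destination ring bit set, so this returns 3 RX contexts.
 */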
463 
464 /**
465  * dp_pktlogmod_exit() - API to cleanup pktlog info
466  * @handle: Pdev handle
467  *
468  * Return: none
469  */
470 static void dp_pktlogmod_exit(struct dp_pdev *handle)
471 {
472 	void *scn = (void *)handle->soc->hif_handle;
473 
474 	if (!scn) {
475 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
476 			  "%s: Invalid hif(scn) handle", __func__);
477 		return;
478 	}
479 
480 	pktlogmod_exit(scn);
481 	handle->pkt_log_init = false;
482 }
483 #endif
484 #else
485 static void dp_pktlogmod_exit(struct dp_pdev *handle) { }
486 
487 /**
488  * dp_soc_get_mon_mask_for_interrupt_mode() - get mon mode mask for intr mode
489  * @soc: pointer to dp_soc handle
490  * @intr_ctx_num: interrupt context number for which mon mask is needed
491  *
492  * Return: mon mask value
493  */
494 static inline
495 uint32_t dp_soc_get_mon_mask_for_interrupt_mode(struct dp_soc *soc, int intr_ctx_num)
496 {
497 	return wlan_cfg_get_rx_mon_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
498 }
499 #endif
500 
501 /**
502  * dp_get_dp_vdev_from_cdp_vdev() - get dp_vdev from cdp_vdev by type-casting
503  * @cdp_opaque_vdev: pointer to cdp_vdev
504  *
505  * Return: pointer to dp_vdev
506  */
507 static
508 struct dp_vdev *dp_get_dp_vdev_from_cdp_vdev(struct cdp_vdev *cdp_opaque_vdev)
509 {
510 	return (struct dp_vdev *)cdp_opaque_vdev;
511 }
512 
513 
514 static int dp_peer_add_ast_wifi3(struct cdp_soc_t *soc_hdl,
515 					struct cdp_peer *peer_hdl,
516 					uint8_t *mac_addr,
517 					enum cdp_txrx_ast_entry_type type,
518 					uint32_t flags)
519 {
520 
521 	return dp_peer_add_ast((struct dp_soc *)soc_hdl,
522 				(struct dp_peer *)peer_hdl,
523 				mac_addr,
524 				type,
525 				flags);
526 }
527 
528 static int dp_peer_update_ast_wifi3(struct cdp_soc_t *soc_hdl,
529 						struct cdp_peer *peer_hdl,
530 						uint8_t *wds_macaddr,
531 						uint32_t flags)
532 {
533 	int status = -1;
534 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
535 	struct dp_ast_entry  *ast_entry = NULL;
536 	struct dp_peer *peer = (struct dp_peer *)peer_hdl;
537 
538 	qdf_spin_lock_bh(&soc->ast_lock);
539 	ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, wds_macaddr,
540 						    peer->vdev->pdev->pdev_id);
541 
542 	if (ast_entry) {
543 		status = dp_peer_update_ast(soc,
544 					    peer,
545 					    ast_entry, flags);
546 	}
547 
548 	qdf_spin_unlock_bh(&soc->ast_lock);
549 
550 	return status;
551 }
552 
553 /*
554  * dp_wds_reset_ast_wifi3() - Reset the is_active param for an ast entry
555  * @soc_hdl:		Datapath SOC handle
556  * @wds_macaddr:	WDS entry MAC Address
557  * @vdev_handle:	Datapath VDEV handle
558  * Return: None
 */
559 static void dp_wds_reset_ast_wifi3(struct cdp_soc_t *soc_hdl,
560 				   uint8_t *wds_macaddr, void *vdev_handle)
561 {
562 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
563 	struct dp_ast_entry *ast_entry = NULL;
564 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
565 
566 	qdf_spin_lock_bh(&soc->ast_lock);
567 	ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, wds_macaddr,
568 						    vdev->pdev->pdev_id);
569 
570 	if (ast_entry) {
571 		if ((ast_entry->type != CDP_TXRX_AST_TYPE_STATIC) &&
572 			(ast_entry->type != CDP_TXRX_AST_TYPE_SELF) &&
573 			(ast_entry->type != CDP_TXRX_AST_TYPE_STA_BSS)) {
574 			ast_entry->is_active = TRUE;
575 		}
576 	}
577 
578 	qdf_spin_unlock_bh(&soc->ast_lock);
579 }
580 
581 /*
582  * dp_wds_reset_ast_table_wifi3() - Reset the is_active param for all ast entries
583  * @soc_hdl:		Datapath SOC handle
584  * @vdev_hdl:		Datapath VDEV handle
585  * Return: None
586  */
587 static void dp_wds_reset_ast_table_wifi3(struct cdp_soc_t  *soc_hdl,
588 					 void *vdev_hdl)
589 {
590 	struct dp_soc *soc = (struct dp_soc *) soc_hdl;
591 	struct dp_pdev *pdev;
592 	struct dp_vdev *vdev;
593 	struct dp_peer *peer;
594 	struct dp_ast_entry *ase, *temp_ase;
595 	int i;
596 
597 	qdf_spin_lock_bh(&soc->ast_lock);
598 
599 	for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) {
600 		pdev = soc->pdev_list[i];
601 		qdf_spin_lock_bh(&pdev->vdev_list_lock);
602 		DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
603 			DP_VDEV_ITERATE_PEER_LIST(vdev, peer) {
604 				DP_PEER_ITERATE_ASE_LIST(peer, ase, temp_ase) {
605 					if ((ase->type ==
606 						CDP_TXRX_AST_TYPE_STATIC) ||
607 						(ase->type ==
608 						CDP_TXRX_AST_TYPE_SELF) ||
609 						(ase->type ==
610 						CDP_TXRX_AST_TYPE_STA_BSS))
611 						continue;
612 					ase->is_active = TRUE;
613 				}
614 			}
615 		}
616 		qdf_spin_unlock_bh(&pdev->vdev_list_lock);
617 	}
618 
619 	qdf_spin_unlock_bh(&soc->ast_lock);
620 }
621 
622 /*
623  * dp_wds_flush_ast_table_wifi3() - Delete all wds and hmwds ast entries
624  * @soc_hdl:		Datapath SOC handle
625  *
626  * Return: None
627  */
628 static void dp_wds_flush_ast_table_wifi3(struct cdp_soc_t  *soc_hdl)
629 {
630 	struct dp_soc *soc = (struct dp_soc *) soc_hdl;
631 	struct dp_pdev *pdev;
632 	struct dp_vdev *vdev;
633 	struct dp_peer *peer;
634 	struct dp_ast_entry *ase, *temp_ase;
635 	int i;
636 
637 	qdf_spin_lock_bh(&soc->ast_lock);
638 
639 	for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) {
640 		pdev = soc->pdev_list[i];
641 		qdf_spin_lock_bh(&pdev->vdev_list_lock);
642 		DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
643 			DP_VDEV_ITERATE_PEER_LIST(vdev, peer) {
644 				DP_PEER_ITERATE_ASE_LIST(peer, ase, temp_ase) {
645 					if ((ase->type ==
646 						CDP_TXRX_AST_TYPE_STATIC) ||
647 						(ase->type ==
648 						 CDP_TXRX_AST_TYPE_SELF) ||
649 						(ase->type ==
650 						 CDP_TXRX_AST_TYPE_STA_BSS))
651 						continue;
652 					dp_peer_del_ast(soc, ase);
653 				}
654 			}
655 		}
656 		qdf_spin_unlock_bh(&pdev->vdev_list_lock);
657 	}
658 
659 	qdf_spin_unlock_bh(&soc->ast_lock);
660 }
661 
662 /**
663  * dp_peer_get_ast_info_by_soc_wifi3() - search the soc AST hash table
664  *                                       and return ast entry information
665  *                                       of first ast entry found in the
666  *                                       table with given mac address
667  *
668  * @soc_hdl : data path soc handle
669  * @ast_mac_addr : AST entry mac address
670  * @ast_entry_info : ast entry information
671  *
672  * return : true if ast entry found with ast_mac_addr
673  *          false if ast entry not found
674  */
675 static bool dp_peer_get_ast_info_by_soc_wifi3
676 	(struct cdp_soc_t *soc_hdl,
677 	 uint8_t *ast_mac_addr,
678 	 struct cdp_ast_entry_info *ast_entry_info)
679 {
680 	struct dp_ast_entry *ast_entry;
681 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
682 
683 	qdf_spin_lock_bh(&soc->ast_lock);
684 
685 	ast_entry = dp_peer_ast_hash_find_soc(soc, ast_mac_addr);
686 
687 	if (ast_entry && !ast_entry->delete_in_progress) {
688 		ast_entry_info->type = ast_entry->type;
689 		ast_entry_info->pdev_id = ast_entry->pdev_id;
690 		ast_entry_info->vdev_id = ast_entry->vdev_id;
691 		ast_entry_info->peer_id = ast_entry->peer->peer_ids[0];
692 		qdf_mem_copy(&ast_entry_info->peer_mac_addr[0],
693 			     &ast_entry->peer->mac_addr.raw[0],
694 			     DP_MAC_ADDR_LEN);
695 		qdf_spin_unlock_bh(&soc->ast_lock);
696 		return true;
697 	}
698 
699 	qdf_spin_unlock_bh(&soc->ast_lock);
700 	return false;
701 }
702 
703 /**
704  * dp_peer_get_ast_info_by_pdevid_wifi3() - search the soc AST hash table
705  *                                          and return ast entry information
706  *                                          if mac address and pdev_id matches
707  *
708  * @soc_hdl : data path soc handle
709  * @ast_mac_addr : AST entry mac address
710  * @pdev_id : pdev_id
711  * @ast_entry_info : ast entry information
712  *
713  * return : true if ast entry found with ast_mac_addr
714  *          false if ast entry not found
715  */
716 static bool dp_peer_get_ast_info_by_pdevid_wifi3
717 		(struct cdp_soc_t *soc_hdl,
718 		 uint8_t *ast_mac_addr,
719 		 uint8_t pdev_id,
720 		 struct cdp_ast_entry_info *ast_entry_info)
721 {
722 	struct dp_ast_entry *ast_entry;
723 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
724 
725 	qdf_spin_lock_bh(&soc->ast_lock);
726 
727 	ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, ast_mac_addr, pdev_id);
728 
729 	if (ast_entry && !ast_entry->delete_in_progress) {
730 		ast_entry_info->type = ast_entry->type;
731 		ast_entry_info->pdev_id = ast_entry->pdev_id;
732 		ast_entry_info->vdev_id = ast_entry->vdev_id;
733 		ast_entry_info->peer_id = ast_entry->peer->peer_ids[0];
734 		qdf_mem_copy(&ast_entry_info->peer_mac_addr[0],
735 			     &ast_entry->peer->mac_addr.raw[0],
736 			     DP_MAC_ADDR_LEN);
737 		qdf_spin_unlock_bh(&soc->ast_lock);
738 		return true;
739 	}
740 
741 	qdf_spin_unlock_bh(&soc->ast_lock);
742 	return false;
743 }
744 
745 /**
746  * dp_peer_ast_entry_del_by_soc() - delete the ast entry from soc AST hash table
747  *                            with given mac address
748  *
749  * @soc_handle : data path soc handle
750  * @mac_addr : AST entry mac address
751  * @callback : callback function to be called on ast delete response from FW
752  * @cookie : argument to be passed to callback
753  *
754  * return : QDF_STATUS_SUCCESS if ast entry found with ast_mac_addr and delete
755  *          is sent
756  *          QDF_STATUS_E_INVAL if ast entry is not found
757  */
758 static QDF_STATUS dp_peer_ast_entry_del_by_soc(struct cdp_soc_t *soc_handle,
759 					       uint8_t *mac_addr,
760 					       txrx_ast_free_cb callback,
761 					       void *cookie)
762 
763 {
764 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
765 	struct dp_ast_entry *ast_entry;
766 	txrx_ast_free_cb cb = NULL;
767 	void *arg = NULL;
768 
769 	qdf_spin_lock_bh(&soc->ast_lock);
770 	ast_entry = dp_peer_ast_hash_find_soc(soc, mac_addr);
771 	if (!ast_entry) {
772 		qdf_spin_unlock_bh(&soc->ast_lock);
773 		return -QDF_STATUS_E_INVAL;
774 	}
775 
776 	if (ast_entry->callback) {
777 		cb = ast_entry->callback;
778 		arg = ast_entry->cookie;
779 	}
780 
781 	ast_entry->callback = callback;
782 	ast_entry->cookie = cookie;
783 
784 	/*
785 	 * If delete_in_progress is set, an AST delete was already sent to
786 	 * the target and the host is waiting for a response; do not send
787 	 * the delete again.
788 	 */
789 	if (!ast_entry->delete_in_progress)
790 		dp_peer_del_ast(soc, ast_entry);
791 
792 	qdf_spin_unlock_bh(&soc->ast_lock);
793 	if (cb) {
794 		cb(soc->ctrl_psoc,
795 		   soc,
796 		   arg,
797 		   CDP_TXRX_AST_DELETE_IN_PROGRESS);
798 	}
799 	return QDF_STATUS_SUCCESS;
800 }
801 
802 /**
803  * dp_peer_ast_entry_del_by_pdev() - delete the ast entry from soc AST hash
804  *                                   table if mac address and pdev_id matches
805  *
806  * @soc_handle : data path soc handle
807  * @mac_addr : AST entry mac address
808  * @pdev_id : pdev id
809  * @callback : callback function to be called on ast delete response from FW
810  * @cookie : argument to be passed to callback
811  *
812  * return : QDF_STATUS_SUCCESS if ast entry found with ast_mac_addr and delete
813  *          is sent
814  *          QDF_STATUS_E_INVAL if ast entry is not found
815  */
816 
817 static QDF_STATUS dp_peer_ast_entry_del_by_pdev(struct cdp_soc_t *soc_handle,
818 						uint8_t *mac_addr,
819 						uint8_t pdev_id,
820 						txrx_ast_free_cb callback,
821 						void *cookie)
822 
823 {
824 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
825 	struct dp_ast_entry *ast_entry;
826 	txrx_ast_free_cb cb = NULL;
827 	void *arg = NULL;
828 
829 	qdf_spin_lock_bh(&soc->ast_lock);
830 	ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, mac_addr, pdev_id);
831 
832 	if (!ast_entry) {
833 		qdf_spin_unlock_bh(&soc->ast_lock);
834 		return -QDF_STATUS_E_INVAL;
835 	}
836 
837 	if (ast_entry->callback) {
838 		cb = ast_entry->callback;
839 		arg = ast_entry->cookie;
840 	}
841 
842 	ast_entry->callback = callback;
843 	ast_entry->cookie = cookie;
844 
845 	/*
846 	 * If delete_in_progress is set, an AST delete was already sent to
847 	 * the target and the host is waiting for a response; do not send
848 	 * the delete again.
849 	 */
850 	if (!ast_entry->delete_in_progress)
851 		dp_peer_del_ast(soc, ast_entry);
852 
853 	qdf_spin_unlock_bh(&soc->ast_lock);
854 
855 	if (cb) {
856 		cb(soc->ctrl_psoc,
857 		   soc,
858 		   arg,
859 		   CDP_TXRX_AST_DELETE_IN_PROGRESS);
860 	}
861 	return QDF_STATUS_SUCCESS;
862 }
863 
864 /**
865  * dp_srng_find_ring_in_mask() - find which ext_group a ring belongs to
866  * @ring_num: ring num of the ring being queried
867  * @grp_mask: the grp_mask array for the ring type in question.
868  *
869  * The grp_mask array is indexed by group number and the bit fields correspond
870  * to ring numbers.  We are finding which interrupt group a ring belongs to.
871  *
872  * Return: the index in the grp_mask array with the ring number.
873  * -QDF_STATUS_E_NOENT if no entry is found
874  */
875 static int dp_srng_find_ring_in_mask(int ring_num, int *grp_mask)
876 {
877 	int ext_group_num;
878 	int mask = 1 << ring_num;
879 
880 	for (ext_group_num = 0; ext_group_num < WLAN_CFG_INT_NUM_CONTEXTS;
881 	     ext_group_num++) {
882 		if (mask & grp_mask[ext_group_num])
883 			return ext_group_num;
884 	}
885 
886 	return -QDF_STATUS_E_NOENT;
887 }
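/*
 * Editor's note - worked example: for grp_mask = {0x1, 0x6, 0x8} a query
 * for ring_num 2 computes mask = 1 << 2 = 0x4; 0x4 & grp_mask[1] (0x6) is
 * non-zero, so the ring is serviced by interrupt ext_group 1.
 */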
888 
889 static int dp_srng_calculate_msi_group(struct dp_soc *soc,
890 				       enum hal_ring_type ring_type,
891 				       int ring_num)
892 {
893 	int *grp_mask;
894 
895 	switch (ring_type) {
896 	case WBM2SW_RELEASE:
897 		/* dp_tx_comp_handler - soc->tx_comp_ring */
898 		if (ring_num < 3)
899 			grp_mask = &soc->wlan_cfg_ctx->int_tx_ring_mask[0];
900 
901 		/* dp_rx_wbm_err_process - soc->rx_rel_ring */
902 		else if (ring_num == 3) {
903 			/* sw treats this as a separate ring type */
904 			grp_mask = &soc->wlan_cfg_ctx->
905 				int_rx_wbm_rel_ring_mask[0];
906 			ring_num = 0;
907 		} else {
908 			qdf_assert(0);
909 			return -QDF_STATUS_E_NOENT;
910 		}
911 	break;
912 
913 	case REO_EXCEPTION:
914 		/* dp_rx_err_process - &soc->reo_exception_ring */
915 		grp_mask = &soc->wlan_cfg_ctx->int_rx_err_ring_mask[0];
916 	break;
917 
918 	case REO_DST:
919 		/* dp_rx_process - soc->reo_dest_ring */
920 		grp_mask = &soc->wlan_cfg_ctx->int_rx_ring_mask[0];
921 	break;
922 
923 	case REO_STATUS:
924 		/* dp_reo_status_ring_handler - soc->reo_status_ring */
925 		grp_mask = &soc->wlan_cfg_ctx->int_reo_status_ring_mask[0];
926 	break;
927 
928 	/* dp_rx_mon_status_srng_process - pdev->rxdma_mon_status_ring */
929 	case RXDMA_MONITOR_STATUS:
930 	/* dp_rx_mon_dest_process - pdev->rxdma_mon_dst_ring */
931 	case RXDMA_MONITOR_DST:
932 		/* dp_mon_process */
933 		grp_mask = &soc->wlan_cfg_ctx->int_rx_mon_ring_mask[0];
934 	break;
935 	case RXDMA_DST:
936 		/* dp_rxdma_err_process */
937 		grp_mask = &soc->wlan_cfg_ctx->int_rxdma2host_ring_mask[0];
938 	break;
939 
940 	case RXDMA_BUF:
941 		grp_mask = &soc->wlan_cfg_ctx->int_host2rxdma_ring_mask[0];
942 	break;
943 
944 	case RXDMA_MONITOR_BUF:
945 		/* TODO: support low_thresh interrupt */
946 		return -QDF_STATUS_E_NOENT;
947 	break;
948 
949 	case TCL_DATA:
950 	case TCL_CMD:
951 	case REO_CMD:
952 	case SW2WBM_RELEASE:
953 	case WBM_IDLE_LINK:
954 		/* normally empty SW_TO_HW rings */
955 		return -QDF_STATUS_E_NOENT;
956 	break;
957 
958 	case TCL_STATUS:
959 	case REO_REINJECT:
960 		/* misc unused rings */
961 		return -QDF_STATUS_E_NOENT;
962 	break;
963 
964 	case CE_SRC:
965 	case CE_DST:
966 	case CE_DST_STATUS:
967 		/* CE_rings - currently handled by hif */
968 	default:
969 		return -QDF_STATUS_E_NOENT;
970 	break;
971 	}
972 
973 	return dp_srng_find_ring_in_mask(ring_num, grp_mask);
974 }
975 
976 static void dp_srng_msi_setup(struct dp_soc *soc, struct hal_srng_params
977 			      *ring_params, int ring_type, int ring_num)
978 {
979 	int msi_group_number;
980 	int msi_data_count;
981 	int ret;
982 	uint32_t msi_data_start, msi_irq_start, addr_low, addr_high;
983 
984 	ret = pld_get_user_msi_assignment(soc->osdev->dev, "DP",
985 					    &msi_data_count, &msi_data_start,
986 					    &msi_irq_start);
987 
988 	if (ret)
989 		return;
990 
991 	msi_group_number = dp_srng_calculate_msi_group(soc, ring_type,
992 						       ring_num);
993 	if (msi_group_number < 0) {
994 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW,
995 			FL("ring not part of an ext_group; ring_type: %d, ring_num %d"),
996 			ring_type, ring_num);
997 		ring_params->msi_addr = 0;
998 		ring_params->msi_data = 0;
999 		return;
1000 	}
1001 
1002 	if (msi_group_number > msi_data_count) {
1003 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_WARN,
1004 			FL("2 msi_groups will share an msi; msi_group_num %d"),
1005 			msi_group_number);
1006 
1007 		QDF_ASSERT(0);
1008 	}
1009 
1010 	pld_get_msi_address(soc->osdev->dev, &addr_low, &addr_high);
1011 
1012 	ring_params->msi_addr = addr_low;
1013 	ring_params->msi_addr |= (qdf_dma_addr_t)(((uint64_t)addr_high) << 32);
1014 	ring_params->msi_data = (msi_group_number % msi_data_count)
1015 		+ msi_data_start;
1016 	ring_params->flags |= HAL_SRNG_MSI_INTR;
1017 }
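/*
 * Editor's note - worked example: with msi_data_count = 3 and
 * msi_data_start = 1, msi_group_numbers 0..5 yield msi_data values
 * 1, 2, 3, 1, 2, 3 - groups beyond the vector count wrap around and share
 * an MSI, which is what the QDF_ASSERT path in dp_srng_msi_setup() flags.
 */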
1018 
1019 /**
1020  * dp_print_ast_stats() - Dump AST table contents
1021  * @soc: Datapath soc handle
1022  *
1023  * return void
1024  */
1025 #ifdef FEATURE_AST
1026 void dp_print_ast_stats(struct dp_soc *soc)
1027 {
1028 	uint8_t i;
1029 	uint8_t num_entries = 0;
1030 	struct dp_vdev *vdev;
1031 	struct dp_pdev *pdev;
1032 	struct dp_peer *peer;
1033 	struct dp_ast_entry *ase, *tmp_ase;
1034 	char type[CDP_TXRX_AST_TYPE_MAX][10] = {
1035 			"NONE", "STATIC", "SELF", "WDS", "MEC", "HMWDS", "BSS",
1036 			"DA", "HMWDS_SEC"};
1037 
1038 	DP_PRINT_STATS("AST Stats:");
1039 	DP_PRINT_STATS("	Entries Added   = %d", soc->stats.ast.added);
1040 	DP_PRINT_STATS("	Entries Deleted = %d", soc->stats.ast.deleted);
1041 	DP_PRINT_STATS("	Entries Agedout = %d", soc->stats.ast.aged_out);
1042 	DP_PRINT_STATS("AST Table:");
1043 
1044 	qdf_spin_lock_bh(&soc->ast_lock);
1045 	for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) {
1046 		pdev = soc->pdev_list[i];
1047 		qdf_spin_lock_bh(&pdev->vdev_list_lock);
1048 		DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
1049 			DP_VDEV_ITERATE_PEER_LIST(vdev, peer) {
1050 				DP_PEER_ITERATE_ASE_LIST(peer, ase, tmp_ase) {
1051 					DP_PRINT_STATS("%6d mac_addr = %pM"
1052 							" peer_mac_addr = %pM"
1053 							" peer_id = %u"
1054 							" type = %s"
1055 							" next_hop = %d"
1056 							" is_active = %d"
1057 							" is_bss = %d"
1058 							" ast_idx = %d"
1059 							" ast_hash = %d"
1060 							" delete_in_progress = %d"
1061 							" pdev_id = %d"
1062 							" vdev_id = %d",
1063 							++num_entries,
1064 							ase->mac_addr.raw,
1065 							ase->peer->mac_addr.raw,
1066 							ase->peer->peer_ids[0],
1067 							type[ase->type],
1068 							ase->next_hop,
1069 							ase->is_active,
1070 							ase->is_bss,
1071 							ase->ast_idx,
1072 							ase->ast_hash_value,
1073 							ase->delete_in_progress,
1074 							ase->pdev_id,
1075 							ase->vdev_id);
1076 				}
1077 			}
1078 		}
1079 		qdf_spin_unlock_bh(&pdev->vdev_list_lock);
1080 	}
1081 	qdf_spin_unlock_bh(&soc->ast_lock);
1082 }
1083 #else
1084 void dp_print_ast_stats(struct dp_soc *soc)
1085 {
1086 	DP_PRINT_STATS("AST Stats not available. Enable FEATURE_AST");
1087 	return;
1088 }
1089 #endif
1090 
1091 /**
1092  * dp_print_peer_table() - Dump all Peer stats
1093  * @vdev: Datapath Vdev handle
1094  *
1095  * Return: None
1096  */
1097 static void dp_print_peer_table(struct dp_vdev *vdev)
1098 {
1099 	struct dp_peer *peer = NULL;
1100 
1101 	DP_PRINT_STATS("Dumping Peer Table  Stats:");
1102 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
1103 		if (!peer) {
1104 			DP_PRINT_STATS("Invalid Peer");
1105 			return;
1106 		}
1107 		DP_PRINT_STATS("    peer_mac_addr = %pM"
1108 			       " nawds_enabled = %d"
1109 			       " bss_peer = %d"
1110 			       " wapi = %d"
1111 			       " wds_enabled = %d"
1112 			       " delete in progress = %d"
1113 			       " peer id = %d",
1114 			       peer->mac_addr.raw,
1115 			       peer->nawds_enabled,
1116 			       peer->bss_peer,
1117 			       peer->wapi,
1118 			       peer->wds_enabled,
1119 			       peer->delete_in_progress,
1120 			       peer->peer_ids[0]);
1121 	}
1122 }
1123 
1124 /*
1125  * dp_srng_setup() - Internal function to setup SRNG rings used by data path
1126  */
1127 static int dp_srng_setup(struct dp_soc *soc, struct dp_srng *srng,
1128 	int ring_type, int ring_num, int mac_id, uint32_t num_entries)
1129 {
1130 	void *hal_soc = soc->hal_soc;
1131 	uint32_t entry_size = hal_srng_get_entrysize(hal_soc, ring_type);
1132 	/* TODO: See if we should get align size from hal */
1133 	uint32_t ring_base_align = 8;
1134 	struct hal_srng_params ring_params;
1135 	uint32_t max_entries = hal_srng_max_entries(hal_soc, ring_type);
1136 
1137 	/* TODO: Currently hal layer takes care of endianness related settings.
1138 	 * See if these settings need to passed from DP layer
1139 	 */
1140 	ring_params.flags = 0;
1141 
1142 	num_entries = (num_entries > max_entries) ? max_entries : num_entries;
1143 	srng->hal_srng = NULL;
1144 	srng->alloc_size = (num_entries * entry_size) + ring_base_align - 1;
1145 	srng->num_entries = num_entries;
1146 
1147 	if (!soc->dp_soc_reinit) {
1148 		srng->base_vaddr_unaligned =
1149 			qdf_mem_alloc_consistent(soc->osdev,
1150 						 soc->osdev->dev,
1151 						 srng->alloc_size,
1152 						 &srng->base_paddr_unaligned);
1153 	}
1154 
1155 	if (!srng->base_vaddr_unaligned) {
1156 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1157 			FL("alloc failed - ring_type: %d, ring_num %d"),
1158 			ring_type, ring_num);
1159 		return QDF_STATUS_E_NOMEM;
1160 	}
1161 	/* round the base address up to the next ring_base_align (power-of-2) boundary */
1162 	ring_params.ring_base_vaddr = srng->base_vaddr_unaligned +
1163 		((0UL - (unsigned long)srng->base_vaddr_unaligned) & (ring_base_align - 1));
1164 	ring_params.ring_base_paddr = srng->base_paddr_unaligned +
1165 		((unsigned long)(ring_params.ring_base_vaddr) -
1166 		(unsigned long)srng->base_vaddr_unaligned);
1167 	ring_params.num_entries = num_entries;
1168 
1169 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW,
1170 		  FL("Ring type: %d, num:%d vaddr %pK paddr %pK entries %u"),
1171 		  ring_type, ring_num, (void *)ring_params.ring_base_vaddr,
1172 		  (void *)ring_params.ring_base_paddr, ring_params.num_entries);
1173 
1174 	if (soc->intr_mode == DP_INTR_MSI) {
1175 		dp_srng_msi_setup(soc, &ring_params, ring_type, ring_num);
1176 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
1177 			  FL("Using MSI for ring_type: %d, ring_num %d"),
1178 			  ring_type, ring_num);
1179 
1180 	} else {
1181 		ring_params.msi_data = 0;
1182 		ring_params.msi_addr = 0;
1183 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
1184 			  FL("Skipping MSI for ring_type: %d, ring_num %d"),
1185 			  ring_type, ring_num);
1186 	}
1187 
1188 	/*
1189 	 * Setup interrupt timer and batch counter thresholds for
1190 	 * interrupt mitigation based on ring type
1191 	 */
1192 	if (ring_type == REO_DST) {
1193 		ring_params.intr_timer_thres_us =
1194 			wlan_cfg_get_int_timer_threshold_rx(soc->wlan_cfg_ctx);
1195 		ring_params.intr_batch_cntr_thres_entries =
1196 			wlan_cfg_get_int_batch_threshold_rx(soc->wlan_cfg_ctx);
1197 	} else if (ring_type == WBM2SW_RELEASE && (ring_num < 3)) {
1198 		ring_params.intr_timer_thres_us =
1199 			wlan_cfg_get_int_timer_threshold_tx(soc->wlan_cfg_ctx);
1200 		ring_params.intr_batch_cntr_thres_entries =
1201 			wlan_cfg_get_int_batch_threshold_tx(soc->wlan_cfg_ctx);
1202 	} else {
1203 		ring_params.intr_timer_thres_us =
1204 			wlan_cfg_get_int_timer_threshold_other(soc->wlan_cfg_ctx);
1205 		ring_params.intr_batch_cntr_thres_entries =
1206 			wlan_cfg_get_int_batch_threshold_other(soc->wlan_cfg_ctx);
1207 	}
1208 
1209 	/* Enable low threshold interrupts for rx buffer rings (regular and
1210 	 * monitor buffer rings).
1211 	 * TODO: See if this is required for any other ring
1212 	 */
1213 	if ((ring_type == RXDMA_BUF) || (ring_type == RXDMA_MONITOR_BUF) ||
1214 		(ring_type == RXDMA_MONITOR_STATUS)) {
1215 		/* TODO: Setting low threshold to 1/8th of ring size
1216 		 * see if this needs to be configurable
1217 		 */
1218 		ring_params.low_threshold = num_entries >> 3;
1219 		ring_params.flags |= HAL_SRNG_LOW_THRES_INTR_ENABLE;
1220 		ring_params.intr_timer_thres_us =
1221 			wlan_cfg_get_int_timer_threshold_rx(soc->wlan_cfg_ctx);
1222 		ring_params.intr_batch_cntr_thres_entries = 0;
1223 	}
1224 
1225 	srng->hal_srng = hal_srng_setup(hal_soc, ring_type, ring_num,
1226 		mac_id, &ring_params);
1227 
1228 	if (!srng->hal_srng) {
1229 		qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
1230 				srng->alloc_size,
1231 				srng->base_vaddr_unaligned,
1232 				srng->base_paddr_unaligned, 0);
1233 		return QDF_STATUS_E_FAILURE;
1234 	}
1235 	return 0;
1236 }
1237 
1238 /*
1239  * dp_srng_deinit() - Internal function to deinit SRNG rings used by data path
1240  * @soc: DP SOC handle
1241  * @srng: source ring structure
1242  * @ring_type: type of ring
1243  * @ring_num: ring number
1244  *
1245  * Return: None
1246  */
1247 static void dp_srng_deinit(struct dp_soc *soc, struct dp_srng *srng,
1248 			   int ring_type, int ring_num)
1249 {
1250 	if (!srng->hal_srng) {
1251 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1252 			  FL("Ring type: %d, num:%d not setup"),
1253 			  ring_type, ring_num);
1254 		return;
1255 	}
1256 
1257 	hal_srng_cleanup(soc->hal_soc, srng->hal_srng);
1258 	srng->hal_srng = NULL;
1259 }
1260 
1261 /**
1262  * dp_srng_cleanup - Internal function to cleanup SRNG rings used by data path
1263  * Any buffers allocated and attached to ring entries are expected to be freed
1264  * before calling this function.
1265  */
1266 static void dp_srng_cleanup(struct dp_soc *soc, struct dp_srng *srng,
1267 	int ring_type, int ring_num)
1268 {
1269 	if (!soc->dp_soc_reinit) {
1270 		if (!srng->hal_srng && (srng->alloc_size == 0)) {
1271 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1272 				  FL("Ring type: %d, num:%d not setup"),
1273 				  ring_type, ring_num);
1274 			return;
1275 		}
1276 
1277 		if (srng->hal_srng) {
1278 			hal_srng_cleanup(soc->hal_soc, srng->hal_srng);
1279 			srng->hal_srng = NULL;
1280 		}
1281 	}
1282 
1283 	if (srng->alloc_size) {
1284 		qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
1285 					srng->alloc_size,
1286 					srng->base_vaddr_unaligned,
1287 					srng->base_paddr_unaligned, 0);
1288 		srng->alloc_size = 0;
1289 	}
1290 }
1291 
1292 /* TODO: Need this interface from HIF */
1293 void *hif_get_hal_handle(void *hif_handle);
1294 
1295 /*
1296  * dp_service_srngs() - Top level interrupt handler for DP Ring interrupts
1297  * @dp_ctx: DP interrupt context handle (struct dp_intr)
1298  * @budget: Number of frames/descriptors that can be processed in one shot
1299  *
1300  * Return: amount of work done, i.e. dp_budget minus the unconsumed quota
1301  */
1302 static uint32_t dp_service_srngs(void *dp_ctx, uint32_t dp_budget)
1303 {
1304 	struct dp_intr *int_ctx = (struct dp_intr *)dp_ctx;
1305 	struct dp_soc *soc = int_ctx->soc;
1306 	int ring = 0;
1307 	uint32_t work_done  = 0;
1308 	int budget = dp_budget;
1309 	uint8_t tx_mask = int_ctx->tx_ring_mask;
1310 	uint8_t rx_mask = int_ctx->rx_ring_mask;
1311 	uint8_t rx_err_mask = int_ctx->rx_err_ring_mask;
1312 	uint8_t rx_wbm_rel_mask = int_ctx->rx_wbm_rel_ring_mask;
1313 	uint8_t reo_status_mask = int_ctx->reo_status_ring_mask;
1314 	uint32_t remaining_quota = dp_budget;
1315 	struct dp_pdev *pdev = NULL;
1316 	int mac_id;
1317 
1318 	/* Process Tx completion interrupts first to return back buffers */
1319 	while (tx_mask) {
1320 		if (tx_mask & 0x1) {
1321 			work_done = dp_tx_comp_handler(soc,
1322 					soc->tx_comp_ring[ring].hal_srng,
1323 					remaining_quota);
1324 
1325 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
1326 				  "tx mask 0x%x ring %d, budget %d, work_done %d",
1327 				  tx_mask, ring, budget, work_done);
1328 
1329 			budget -= work_done;
1330 			if (budget <= 0)
1331 				goto budget_done;
1332 
1333 			remaining_quota = budget;
1334 		}
1335 		tx_mask = tx_mask >> 1;
1336 		ring++;
1337 	}
1338 
1339 
1340 	/* Process REO Exception ring interrupt */
1341 	if (rx_err_mask) {
1342 		work_done = dp_rx_err_process(soc,
1343 				soc->reo_exception_ring.hal_srng,
1344 				remaining_quota);
1345 
1346 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
1347 			"REO Exception Ring: work_done %d budget %d",
1348 			work_done, budget);
1349 
1350 		budget -=  work_done;
1351 		if (budget <= 0) {
1352 			goto budget_done;
1353 		}
1354 		remaining_quota = budget;
1355 	}
1356 
1357 	/* Process Rx WBM release ring interrupt */
1358 	if (rx_wbm_rel_mask) {
1359 		work_done = dp_rx_wbm_err_process(soc,
1360 				soc->rx_rel_ring.hal_srng, remaining_quota);
1361 
1362 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
1363 			"WBM Release Ring: work_done %d budget %d",
1364 			work_done, budget);
1365 
1366 		budget -=  work_done;
1367 		if (budget <= 0) {
1368 			goto budget_done;
1369 		}
1370 		remaining_quota = budget;
1371 	}
1372 
1373 	/* Process Rx interrupts */
1374 	if (rx_mask) {
1375 		for (ring = 0; ring < soc->num_reo_dest_rings; ring++) {
1376 			if (rx_mask & (1 << ring)) {
1377 				work_done = dp_rx_process(int_ctx,
1378 					    soc->reo_dest_ring[ring].hal_srng,
1379 					    ring,
1380 					    remaining_quota);
1381 
1382 				QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
1383 					"rx mask 0x%x ring %d, work_done %d budget %d",
1384 					rx_mask, ring, work_done, budget);
1385 
1386 				budget -=  work_done;
1387 				if (budget <= 0)
1388 					goto budget_done;
1389 				remaining_quota = budget;
1390 			}
1391 		}
1392 	}
1393 
1394 	if (reo_status_mask)
1395 		dp_reo_status_ring_handler(soc);
1396 
1397 	/* Process LMAC interrupts */
1398 	for  (ring = 0 ; ring < MAX_PDEV_CNT; ring++) {
1399 		pdev = soc->pdev_list[ring];
1400 		if (pdev == NULL)
1401 			continue;
1402 		for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
1403 			int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id,
1404 								pdev->pdev_id);
1405 
1406 			if (int_ctx->rx_mon_ring_mask & (1 << mac_for_pdev)) {
1407 				work_done = dp_mon_process(soc, mac_for_pdev,
1408 						remaining_quota);
1409 				budget -= work_done;
1410 				if (budget <= 0)
1411 					goto budget_done;
1412 				remaining_quota = budget;
1413 			}
1414 
1415 			if (int_ctx->rxdma2host_ring_mask &
1416 					(1 << mac_for_pdev)) {
1417 				work_done = dp_rxdma_err_process(soc,
1418 							mac_for_pdev,
1419 							remaining_quota);
1420 				budget -=  work_done;
1421 				if (budget <= 0)
1422 					goto budget_done;
1423 				remaining_quota = budget;
1424 			}
1425 
1426 			if (int_ctx->host2rxdma_ring_mask &
1427 						(1 << mac_for_pdev)) {
1428 				union dp_rx_desc_list_elem_t *desc_list = NULL;
1429 				union dp_rx_desc_list_elem_t *tail = NULL;
1430 				struct dp_srng *rx_refill_buf_ring =
1431 					&pdev->rx_refill_buf_ring;
1432 
1433 				DP_STATS_INC(pdev, replenish.low_thresh_intrs,
1434 						1);
1435 				dp_rx_buffers_replenish(soc, mac_for_pdev,
1436 					rx_refill_buf_ring,
1437 					&soc->rx_desc_buf[mac_for_pdev], 0,
1438 					&desc_list, &tail);
1439 			}
1440 		}
1441 	}
1442 
1443 	qdf_lro_flush(int_ctx->lro_ctx);
1444 
1445 budget_done:
1446 	return dp_budget - budget;
1447 }
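/*
 * Editor's note - worked example of the budget accounting above: with
 * dp_budget = 64, if TX completion reaps 16 descriptors and REO RX then
 * reaps 48, budget hits 0, control jumps to budget_done, and the handler
 * returns 64 - 0 = 64, letting the ext-group/NAPI layer decide whether to
 * schedule another poll.
 */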
1448 
1449 /* dp_interrupt_timer() - timer poll for interrupts
1450  *
1451  * @arg: SoC Handle
1452  *
1453  * Return: None
1454  *
1455  */
1456 static void dp_interrupt_timer(void *arg)
1457 {
1458 	struct dp_soc *soc = (struct dp_soc *) arg;
1459 	int i;
1460 
1461 	if (qdf_atomic_read(&soc->cmn_init_done)) {
1462 		for (i = 0;
1463 			i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++)
1464 			dp_service_srngs(&soc->intr_ctx[i], 0xffff);
1465 
1466 		qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
1467 	}
1468 }
1469 
1470 /*
1471  * dp_soc_attach_poll() - Register handlers for DP interrupts
1472  * @txrx_soc: DP SOC handle
1473  *
1474  * Host driver will register for "DP_NUM_INTERRUPT_CONTEXTS" number of NAPI
1475  * contexts. Each NAPI context will have a tx_ring_mask, rx_ring_mask, and
1476  * rx_monitor_ring mask to indicate the rings that are processed by the handler.
1477  *
1478  * Return: 0 for success, nonzero for failure.
1479  */
1480 static QDF_STATUS dp_soc_attach_poll(void *txrx_soc)
1481 {
1482 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
1483 	int i;
1484 
1485 	soc->intr_mode = DP_INTR_POLL;
1486 
1487 	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
1488 		soc->intr_ctx[i].dp_intr_id = i;
1489 		soc->intr_ctx[i].tx_ring_mask =
1490 			wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, i);
1491 		soc->intr_ctx[i].rx_ring_mask =
1492 			wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, i);
1493 		soc->intr_ctx[i].rx_mon_ring_mask =
1494 			wlan_cfg_get_rx_mon_ring_mask(soc->wlan_cfg_ctx, i);
1495 		soc->intr_ctx[i].rx_err_ring_mask =
1496 			wlan_cfg_get_rx_err_ring_mask(soc->wlan_cfg_ctx, i);
1497 		soc->intr_ctx[i].rx_wbm_rel_ring_mask =
1498 			wlan_cfg_get_rx_wbm_rel_ring_mask(soc->wlan_cfg_ctx, i);
1499 		soc->intr_ctx[i].reo_status_ring_mask =
1500 			wlan_cfg_get_reo_status_ring_mask(soc->wlan_cfg_ctx, i);
1501 		soc->intr_ctx[i].rxdma2host_ring_mask =
1502 			wlan_cfg_get_rxdma2host_ring_mask(soc->wlan_cfg_ctx, i);
1503 		soc->intr_ctx[i].soc = soc;
1504 		soc->intr_ctx[i].lro_ctx = qdf_lro_init();
1505 	}
1506 
1507 	qdf_timer_init(soc->osdev, &soc->int_timer,
1508 			dp_interrupt_timer, (void *)soc,
1509 			QDF_TIMER_TYPE_WAKE_APPS);
1510 
1511 	return QDF_STATUS_SUCCESS;
1512 }
1513 
1514 static QDF_STATUS dp_soc_interrupt_attach(void *txrx_soc);
1515 #if defined(CONFIG_MCL)
1516 /*
1517  * dp_soc_interrupt_attach_wrapper() - Register handlers for DP interrupts
1518  * @txrx_soc: DP SOC handle
1519  *
1520  * Call the appropriate attach function based on the mode of operation.
1521  * This is a WAR for enabling monitor mode.
1522  *
1523  * Return: 0 for success. nonzero for failure.
1524  */
1525 static QDF_STATUS dp_soc_interrupt_attach_wrapper(void *txrx_soc)
1526 {
1527 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
1528 
1529 	if (!(soc->wlan_cfg_ctx->napi_enabled) ||
1530 	     con_mode_monitor == QDF_GLOBAL_MONITOR_MODE) {
1531 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
1532 				  "%s: Poll mode", __func__);
1533 		return dp_soc_attach_poll(txrx_soc);
1534 	} else {
1535 
1536 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
1537 				  "%s: Interrupt mode", __func__);
1538 		return dp_soc_interrupt_attach(txrx_soc);
1539 	}
1540 }
1541 #else
1542 #if defined(DP_INTR_POLL_BASED) && DP_INTR_POLL_BASED
1543 static QDF_STATUS dp_soc_interrupt_attach_wrapper(void *txrx_soc)
1544 {
1545 	return dp_soc_attach_poll(txrx_soc);
1546 }
1547 #else
1548 static QDF_STATUS dp_soc_interrupt_attach_wrapper(void *txrx_soc)
1549 {
1550 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
1551 
1552 	if (hif_is_polled_mode_enabled(soc->hif_handle))
1553 		return dp_soc_attach_poll(txrx_soc);
1554 	else
1555 		return dp_soc_interrupt_attach(txrx_soc);
1556 }
1557 #endif
1558 #endif
1559 
1560 static void dp_soc_interrupt_map_calculate_integrated(struct dp_soc *soc,
1561 		int intr_ctx_num, int *irq_id_map, int *num_irq_r)
1562 {
1563 	int j;
1564 	int num_irq = 0;
1565 
1566 	int tx_mask =
1567 		wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
1568 	int rx_mask =
1569 		wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
1570 	int rx_mon_mask =
1571 		wlan_cfg_get_rx_mon_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
1572 	int rx_err_ring_mask = wlan_cfg_get_rx_err_ring_mask(
1573 					soc->wlan_cfg_ctx, intr_ctx_num);
1574 	int rx_wbm_rel_ring_mask = wlan_cfg_get_rx_wbm_rel_ring_mask(
1575 					soc->wlan_cfg_ctx, intr_ctx_num);
1576 	int reo_status_ring_mask = wlan_cfg_get_reo_status_ring_mask(
1577 					soc->wlan_cfg_ctx, intr_ctx_num);
1578 	int rxdma2host_ring_mask = wlan_cfg_get_rxdma2host_ring_mask(
1579 					soc->wlan_cfg_ctx, intr_ctx_num);
1580 	int host2rxdma_ring_mask = wlan_cfg_get_host2rxdma_ring_mask(
1581 					soc->wlan_cfg_ctx, intr_ctx_num);
1582 	int host2rxdma_mon_ring_mask = wlan_cfg_get_host2rxdma_mon_ring_mask(
1583 					soc->wlan_cfg_ctx, intr_ctx_num);
1584 
1585 	for (j = 0; j < HIF_MAX_GRP_IRQ; j++) {
1586 
1587 		if (tx_mask & (1 << j)) {
1588 			irq_id_map[num_irq++] =
1589 				(wbm2host_tx_completions_ring1 - j);
1590 		}
1591 
1592 		if (rx_mask & (1 << j)) {
1593 			irq_id_map[num_irq++] =
1594 				(reo2host_destination_ring1 - j);
1595 		}
1596 
1597 		if (rxdma2host_ring_mask & (1 << j)) {
1598 			irq_id_map[num_irq++] =
1599 				rxdma2host_destination_ring_mac1 -
1600 				wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, j);
1601 		}
1602 
1603 		if (host2rxdma_ring_mask & (1 << j)) {
1604 			irq_id_map[num_irq++] =
1605 				host2rxdma_host_buf_ring_mac1 -
1606 				wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, j);
1607 		}
1608 
1609 		if (host2rxdma_mon_ring_mask & (1 << j)) {
1610 			irq_id_map[num_irq++] =
1611 				host2rxdma_monitor_ring1 -
1612 				wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, j);
1613 		}
1614 
1615 		if (rx_mon_mask & (1 << j)) {
1616 			irq_id_map[num_irq++] =
1617 				ppdu_end_interrupts_mac1 -
1618 				wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, j);
1619 			irq_id_map[num_irq++] =
1620 				rxdma2host_monitor_status_ring_mac1 -
1621 				wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, j);
1622 		}
1623 
1624 		if (rx_wbm_rel_ring_mask & (1 << j))
1625 			irq_id_map[num_irq++] = wbm2host_rx_release;
1626 
1627 		if (rx_err_ring_mask & (1 << j))
1628 			irq_id_map[num_irq++] = reo2host_exception;
1629 
1630 		if (reo_status_ring_mask & (1 << j))
1631 			irq_id_map[num_irq++] = reo2host_status;
1632 
1633 	}
1634 	*num_irq_r = num_irq;
1635 }
1636 
1637 static void dp_soc_interrupt_map_calculate_msi(struct dp_soc *soc,
1638 		int intr_ctx_num, int *irq_id_map, int *num_irq_r,
1639 		int msi_vector_count, int msi_vector_start)
1640 {
1641 	int tx_mask = wlan_cfg_get_tx_ring_mask(
1642 					soc->wlan_cfg_ctx, intr_ctx_num);
1643 	int rx_mask = wlan_cfg_get_rx_ring_mask(
1644 					soc->wlan_cfg_ctx, intr_ctx_num);
1645 	int rx_mon_mask = wlan_cfg_get_rx_mon_ring_mask(
1646 					soc->wlan_cfg_ctx, intr_ctx_num);
1647 	int rx_err_ring_mask = wlan_cfg_get_rx_err_ring_mask(
1648 					soc->wlan_cfg_ctx, intr_ctx_num);
1649 	int rx_wbm_rel_ring_mask = wlan_cfg_get_rx_wbm_rel_ring_mask(
1650 					soc->wlan_cfg_ctx, intr_ctx_num);
1651 	int reo_status_ring_mask = wlan_cfg_get_reo_status_ring_mask(
1652 					soc->wlan_cfg_ctx, intr_ctx_num);
1653 	int rxdma2host_ring_mask = wlan_cfg_get_rxdma2host_ring_mask(
1654 					soc->wlan_cfg_ctx, intr_ctx_num);
1655 
1656 	unsigned int vector =
1657 		(intr_ctx_num % msi_vector_count) + msi_vector_start;
1658 	int num_irq = 0;
1659 
1660 	soc->intr_mode = DP_INTR_MSI;
1661 
1662 	if (tx_mask | rx_mask | rx_mon_mask | rx_err_ring_mask |
1663 	    rx_wbm_rel_ring_mask | reo_status_ring_mask | rxdma2host_ring_mask)
1664 		irq_id_map[num_irq++] =
1665 			pld_get_msi_irq(soc->osdev->dev, vector);
1666 
1667 	*num_irq_r = num_irq;
1668 }
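/*
 * Editor's note - worked example: with msi_vector_count = 2 and
 * msi_vector_start = 1, interrupt contexts 0..3 are pinned to MSI vectors
 * (ctx % 2) + 1 = 1, 2, 1, 2; each context therefore registers exactly one
 * IRQ, shared round-robin across the DP MSI vectors.
 */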
1669 
1670 static void dp_soc_interrupt_map_calculate(struct dp_soc *soc, int intr_ctx_num,
1671 				    int *irq_id_map, int *num_irq)
1672 {
1673 	int msi_vector_count, ret;
1674 	uint32_t msi_base_data, msi_vector_start;
1675 
1676 	ret = pld_get_user_msi_assignment(soc->osdev->dev, "DP",
1677 					    &msi_vector_count,
1678 					    &msi_base_data,
1679 					    &msi_vector_start);
1680 	if (ret)
1681 		return dp_soc_interrupt_map_calculate_integrated(soc,
1682 				intr_ctx_num, irq_id_map, num_irq);
1683 
1684 	else
1685 		dp_soc_interrupt_map_calculate_msi(soc,
1686 				intr_ctx_num, irq_id_map, num_irq,
1687 				msi_vector_count, msi_vector_start);
1688 }
1689 
1690 /*
1691  * dp_soc_interrupt_attach() - Register handlers for DP interrupts
1692  * @txrx_soc: DP SOC handle
1693  *
1694  * Host driver will register for "DP_NUM_INTERRUPT_CONTEXTS" number of NAPI
1695  * contexts. Each NAPI context will have a tx_ring_mask, rx_ring_mask, and
1696  * rx_monitor_ring mask to indicate the rings that are processed by the handler.
1697  *
1698  * Return: 0 for success. nonzero for failure.
1699  */
1700 static QDF_STATUS dp_soc_interrupt_attach(void *txrx_soc)
1701 {
1702 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
1703 
1704 	int i = 0;
1705 	int num_irq = 0;
1706 
1707 	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
1708 		int ret = 0;
1709 
1710 		/* Map of IRQ ids registered with one interrupt context */
1711 		int irq_id_map[HIF_MAX_GRP_IRQ];
1712 
1713 		int tx_mask =
1714 			wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, i);
1715 		int rx_mask =
1716 			wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, i);
1717 		int rx_mon_mask =
1718 			dp_soc_get_mon_mask_for_interrupt_mode(soc, i);
1719 		int rx_err_ring_mask =
1720 			wlan_cfg_get_rx_err_ring_mask(soc->wlan_cfg_ctx, i);
1721 		int rx_wbm_rel_ring_mask =
1722 			wlan_cfg_get_rx_wbm_rel_ring_mask(soc->wlan_cfg_ctx, i);
1723 		int reo_status_ring_mask =
1724 			wlan_cfg_get_reo_status_ring_mask(soc->wlan_cfg_ctx, i);
1725 		int rxdma2host_ring_mask =
1726 			wlan_cfg_get_rxdma2host_ring_mask(soc->wlan_cfg_ctx, i);
1727 		int host2rxdma_ring_mask =
1728 			wlan_cfg_get_host2rxdma_ring_mask(soc->wlan_cfg_ctx, i);
1729 		int host2rxdma_mon_ring_mask =
1730 			wlan_cfg_get_host2rxdma_mon_ring_mask(
1731 				soc->wlan_cfg_ctx, i);
1732 
1733 		soc->intr_ctx[i].dp_intr_id = i;
1734 		soc->intr_ctx[i].tx_ring_mask = tx_mask;
1735 		soc->intr_ctx[i].rx_ring_mask = rx_mask;
1736 		soc->intr_ctx[i].rx_mon_ring_mask = rx_mon_mask;
1737 		soc->intr_ctx[i].rx_err_ring_mask = rx_err_ring_mask;
1738 		soc->intr_ctx[i].rxdma2host_ring_mask = rxdma2host_ring_mask;
1739 		soc->intr_ctx[i].host2rxdma_ring_mask = host2rxdma_ring_mask;
1740 		soc->intr_ctx[i].rx_wbm_rel_ring_mask = rx_wbm_rel_ring_mask;
1741 		soc->intr_ctx[i].reo_status_ring_mask = reo_status_ring_mask;
1742 		soc->intr_ctx[i].host2rxdma_mon_ring_mask =
1743 			 host2rxdma_mon_ring_mask;
1744 
1745 		soc->intr_ctx[i].soc = soc;
1746 
1747 		num_irq = 0;
1748 
1749 		dp_soc_interrupt_map_calculate(soc, i, &irq_id_map[0],
1750 					       &num_irq);
1751 
1752 		ret = hif_register_ext_group(soc->hif_handle,
1753 				num_irq, irq_id_map, dp_service_srngs,
1754 				&soc->intr_ctx[i], "dp_intr",
1755 				HIF_EXEC_NAPI_TYPE, QCA_NAPI_DEF_SCALE_BIN_SHIFT);
1756 
1757 		if (ret) {
1758 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  FL("failed, ret = %d"), ret);
1760 
1761 			return QDF_STATUS_E_FAILURE;
1762 		}
1763 		soc->intr_ctx[i].lro_ctx = qdf_lro_init();
1764 	}
1765 
1766 	hif_configure_ext_group_interrupts(soc->hif_handle);
1767 
1768 	return QDF_STATUS_SUCCESS;
1769 }
1770 
1771 /*
1772  * dp_soc_interrupt_detach() - Deregister any allocations done for interrupts
1773  * @txrx_soc: DP SOC handle
1774  *
1775  * Return: void
1776  */
1777 static void dp_soc_interrupt_detach(void *txrx_soc)
1778 {
1779 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
1780 	int i;
1781 
1782 	if (soc->intr_mode == DP_INTR_POLL) {
1783 		qdf_timer_stop(&soc->int_timer);
1784 		qdf_timer_free(&soc->int_timer);
1785 	} else {
1786 		hif_deregister_exec_group(soc->hif_handle, "dp_intr");
1787 	}
1788 
1789 	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
1790 		soc->intr_ctx[i].tx_ring_mask = 0;
1791 		soc->intr_ctx[i].rx_ring_mask = 0;
1792 		soc->intr_ctx[i].rx_mon_ring_mask = 0;
1793 		soc->intr_ctx[i].rx_err_ring_mask = 0;
1794 		soc->intr_ctx[i].rx_wbm_rel_ring_mask = 0;
1795 		soc->intr_ctx[i].reo_status_ring_mask = 0;
1796 		soc->intr_ctx[i].rxdma2host_ring_mask = 0;
1797 		soc->intr_ctx[i].host2rxdma_ring_mask = 0;
1798 		soc->intr_ctx[i].host2rxdma_mon_ring_mask = 0;
1799 
1800 		qdf_lro_deinit(soc->intr_ctx[i].lro_ctx);
1801 	}
1802 }
1803 
1804 #define AVG_MAX_MPDUS_PER_TID 128
1805 #define AVG_TIDS_PER_CLIENT 2
1806 #define AVG_FLOWS_PER_TID 2
1807 #define AVG_MSDUS_PER_FLOW 128
1808 #define AVG_MSDUS_PER_MPDU 4
1809 
1810 /*
1811  * Allocate and setup link descriptor pool that will be used by HW for
1812  * various link and queue descriptors and managed by WBM
1813  */
1814 static int dp_hw_link_desc_pool_setup(struct dp_soc *soc)
1815 {
1816 	int link_desc_size = hal_get_link_desc_size(soc->hal_soc);
1817 	int link_desc_align = hal_get_link_desc_align(soc->hal_soc);
1818 	uint32_t max_clients = wlan_cfg_get_max_clients(soc->wlan_cfg_ctx);
1819 	uint32_t num_mpdus_per_link_desc =
1820 		hal_num_mpdus_per_link_desc(soc->hal_soc);
1821 	uint32_t num_msdus_per_link_desc =
1822 		hal_num_msdus_per_link_desc(soc->hal_soc);
1823 	uint32_t num_mpdu_links_per_queue_desc =
1824 		hal_num_mpdu_links_per_queue_desc(soc->hal_soc);
1825 	uint32_t max_alloc_size = wlan_cfg_max_alloc_size(soc->wlan_cfg_ctx);
1826 	uint32_t total_link_descs, total_mem_size;
1827 	uint32_t num_mpdu_link_descs, num_mpdu_queue_descs;
1828 	uint32_t num_tx_msdu_link_descs, num_rx_msdu_link_descs;
1829 	uint32_t num_link_desc_banks;
1830 	uint32_t last_bank_size = 0;
1831 	uint32_t entry_size, num_entries;
1832 	int i;
1833 	uint32_t desc_id = 0;
1834 	qdf_dma_addr_t *baseaddr = NULL;
1835 
	/* Only Tx queue descriptors are allocated from the common link
	 * descriptor pool. Rx queue descriptors (REO queue extension
	 * descriptors) are not included here because they are expected to be
	 * allocated contiguously with the REO queue descriptors.
	 */
1841 	num_mpdu_link_descs = (max_clients * AVG_TIDS_PER_CLIENT *
1842 		AVG_MAX_MPDUS_PER_TID) / num_mpdus_per_link_desc;
1843 
1844 	num_mpdu_queue_descs = num_mpdu_link_descs /
1845 		num_mpdu_links_per_queue_desc;
1846 
1847 	num_tx_msdu_link_descs = (max_clients * AVG_TIDS_PER_CLIENT *
1848 		AVG_FLOWS_PER_TID * AVG_MSDUS_PER_FLOW) /
1849 		num_msdus_per_link_desc;
1850 
1851 	num_rx_msdu_link_descs = (max_clients * AVG_TIDS_PER_CLIENT *
1852 		AVG_MAX_MPDUS_PER_TID * AVG_MSDUS_PER_MPDU) / 6;
1853 
1854 	num_entries = num_mpdu_link_descs + num_mpdu_queue_descs +
1855 		num_tx_msdu_link_descs + num_rx_msdu_link_descs;
1856 
1857 	/* Round up to power of 2 */
1858 	total_link_descs = 1;
1859 	while (total_link_descs < num_entries)
1860 		total_link_descs <<= 1;
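	/*
	 * Example (illustrative numbers only): if the sizing above yields
	 * num_entries = 49000, the loop rounds total_link_descs up to the
	 * next power of 2, i.e. 65536.
	 */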
1861 
1862 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
1863 		FL("total_link_descs: %u, link_desc_size: %d"),
1864 		total_link_descs, link_desc_size);
1865 	total_mem_size =  total_link_descs * link_desc_size;
1866 
1867 	total_mem_size += link_desc_align;
1868 
1869 	if (total_mem_size <= max_alloc_size) {
1870 		num_link_desc_banks = 0;
1871 		last_bank_size = total_mem_size;
1872 	} else {
1873 		num_link_desc_banks = (total_mem_size) /
1874 			(max_alloc_size - link_desc_align);
1875 		last_bank_size = total_mem_size %
1876 			(max_alloc_size - link_desc_align);
1877 	}
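	/*
	 * Worked example with assumed values: for total_mem_size = 5 MB,
	 * max_alloc_size = 2 MB and a negligible link_desc_align, the split
	 * above gives num_link_desc_banks = 2 full-size banks plus a
	 * last_bank_size = 1 MB remainder bank, allocated further below.
	 */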
1878 
1879 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
1880 		FL("total_mem_size: %d, num_link_desc_banks: %u"),
1881 		total_mem_size, num_link_desc_banks);
1882 
1883 	for (i = 0; i < num_link_desc_banks; i++) {
1884 		if (!soc->dp_soc_reinit) {
1885 			baseaddr = &soc->link_desc_banks[i].
1886 					base_paddr_unaligned;
1887 			soc->link_desc_banks[i].base_vaddr_unaligned =
1888 				qdf_mem_alloc_consistent(soc->osdev,
1889 							 soc->osdev->dev,
1890 							 max_alloc_size,
1891 							 baseaddr);
1892 		}
1893 		soc->link_desc_banks[i].size = max_alloc_size;
1894 
1895 		soc->link_desc_banks[i].base_vaddr = (void *)((unsigned long)(
1896 			soc->link_desc_banks[i].base_vaddr_unaligned) +
1897 			((unsigned long)(
1898 			soc->link_desc_banks[i].base_vaddr_unaligned) %
1899 			link_desc_align));
1900 
1901 		soc->link_desc_banks[i].base_paddr = (unsigned long)(
1902 			soc->link_desc_banks[i].base_paddr_unaligned) +
1903 			((unsigned long)(soc->link_desc_banks[i].base_vaddr) -
1904 			(unsigned long)(
1905 			soc->link_desc_banks[i].base_vaddr_unaligned));
1906 
1907 		if (!soc->link_desc_banks[i].base_vaddr_unaligned) {
1908 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1909 				FL("Link descriptor memory alloc failed"));
1910 			goto fail;
1911 		}
1912 	}
1913 
1914 	if (last_bank_size) {
1915 		/* Allocate last bank in case total memory required is not exact
1916 		 * multiple of max_alloc_size
1917 		 */
1918 		if (!soc->dp_soc_reinit) {
1919 			baseaddr = &soc->link_desc_banks[i].
1920 					base_paddr_unaligned;
1921 			soc->link_desc_banks[i].base_vaddr_unaligned =
1922 				qdf_mem_alloc_consistent(soc->osdev,
1923 							 soc->osdev->dev,
1924 							 last_bank_size,
1925 							 baseaddr);
1926 		}
1927 		soc->link_desc_banks[i].size = last_bank_size;
1928 
1929 		soc->link_desc_banks[i].base_vaddr = (void *)((unsigned long)
1930 			(soc->link_desc_banks[i].base_vaddr_unaligned) +
1931 			((unsigned long)(
1932 			soc->link_desc_banks[i].base_vaddr_unaligned) %
1933 			link_desc_align));
1934 
1935 		soc->link_desc_banks[i].base_paddr =
1936 			(unsigned long)(
1937 			soc->link_desc_banks[i].base_paddr_unaligned) +
1938 			((unsigned long)(soc->link_desc_banks[i].base_vaddr) -
1939 			(unsigned long)(
1940 			soc->link_desc_banks[i].base_vaddr_unaligned));
1941 	}
1942 
1943 
1944 	/* Allocate and setup link descriptor idle list for HW internal use */
1945 	entry_size = hal_srng_get_entrysize(soc->hal_soc, WBM_IDLE_LINK);
1946 	total_mem_size = entry_size * total_link_descs;
1947 
1948 	if (total_mem_size <= max_alloc_size) {
1949 		void *desc;
1950 
1951 		if (dp_srng_setup(soc, &soc->wbm_idle_link_ring,
1952 			WBM_IDLE_LINK, 0, 0, total_link_descs)) {
1953 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1954 				FL("Link desc idle ring setup failed"));
1955 			goto fail;
1956 		}
1957 
1958 		hal_srng_access_start_unlocked(soc->hal_soc,
1959 			soc->wbm_idle_link_ring.hal_srng);
1960 
1961 		for (i = 0; i < MAX_LINK_DESC_BANKS &&
1962 			soc->link_desc_banks[i].base_paddr; i++) {
1963 			uint32_t num_entries = (soc->link_desc_banks[i].size -
1964 				((unsigned long)(
1965 				soc->link_desc_banks[i].base_vaddr) -
1966 				(unsigned long)(
1967 				soc->link_desc_banks[i].base_vaddr_unaligned)))
1968 				/ link_desc_size;
1969 			unsigned long paddr = (unsigned long)(
1970 				soc->link_desc_banks[i].base_paddr);
1971 
1972 			while (num_entries && (desc = hal_srng_src_get_next(
1973 				soc->hal_soc,
1974 				soc->wbm_idle_link_ring.hal_srng))) {
1975 				hal_set_link_desc_addr(desc,
1976 					LINK_DESC_COOKIE(desc_id, i), paddr);
1977 				num_entries--;
1978 				desc_id++;
1979 				paddr += link_desc_size;
1980 			}
1981 		}
1982 		hal_srng_access_end_unlocked(soc->hal_soc,
1983 			soc->wbm_idle_link_ring.hal_srng);
1984 	} else {
1985 		uint32_t num_scatter_bufs;
1986 		uint32_t num_entries_per_buf;
1987 		uint32_t rem_entries;
1988 		uint8_t *scatter_buf_ptr;
1989 		uint16_t scatter_buf_num;
1990 		uint32_t buf_size = 0;
1991 
1992 		soc->wbm_idle_scatter_buf_size =
1993 			hal_idle_list_scatter_buf_size(soc->hal_soc);
1994 		num_entries_per_buf = hal_idle_scatter_buf_num_entries(
1995 			soc->hal_soc, soc->wbm_idle_scatter_buf_size);
1996 		num_scatter_bufs = hal_idle_list_num_scatter_bufs(
1997 					soc->hal_soc, total_mem_size,
1998 					soc->wbm_idle_scatter_buf_size);
1999 
2000 		if (num_scatter_bufs > MAX_IDLE_SCATTER_BUFS) {
2001 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2002 					FL("scatter bufs size out of bounds"));
2003 			goto fail;
2004 		}
2005 
2006 		for (i = 0; i < num_scatter_bufs; i++) {
2007 			baseaddr = &soc->wbm_idle_scatter_buf_base_paddr[i];
2008 			if (!soc->dp_soc_reinit) {
2009 				buf_size = soc->wbm_idle_scatter_buf_size;
2010 				soc->wbm_idle_scatter_buf_base_vaddr[i] =
					qdf_mem_alloc_consistent(soc->osdev,
								 soc->osdev->dev,
								 buf_size,
								 baseaddr);
2016 			}
2017 			if (soc->wbm_idle_scatter_buf_base_vaddr[i] == NULL) {
2018 				QDF_TRACE(QDF_MODULE_ID_DP,
2019 					  QDF_TRACE_LEVEL_ERROR,
2020 					  FL("Scatter lst memory alloc fail"));
2021 				goto fail;
2022 			}
2023 		}
2024 
2025 		/* Populate idle list scatter buffers with link descriptor
2026 		 * pointers
2027 		 */
2028 		scatter_buf_num = 0;
2029 		scatter_buf_ptr = (uint8_t *)(
2030 			soc->wbm_idle_scatter_buf_base_vaddr[scatter_buf_num]);
2031 		rem_entries = num_entries_per_buf;
2032 
2033 		for (i = 0; i < MAX_LINK_DESC_BANKS &&
2034 			soc->link_desc_banks[i].base_paddr; i++) {
2035 			uint32_t num_link_descs =
2036 				(soc->link_desc_banks[i].size -
2037 				((unsigned long)(
2038 				soc->link_desc_banks[i].base_vaddr) -
2039 				(unsigned long)(
2040 				soc->link_desc_banks[i].base_vaddr_unaligned)))
2041 				/ link_desc_size;
2042 			unsigned long paddr = (unsigned long)(
2043 				soc->link_desc_banks[i].base_paddr);
2044 
2045 			while (num_link_descs) {
2046 				hal_set_link_desc_addr((void *)scatter_buf_ptr,
2047 					LINK_DESC_COOKIE(desc_id, i), paddr);
2048 				num_link_descs--;
2049 				desc_id++;
2050 				paddr += link_desc_size;
2051 				rem_entries--;
2052 				if (rem_entries) {
2053 					scatter_buf_ptr += entry_size;
2054 				} else {
2055 					rem_entries = num_entries_per_buf;
2056 					scatter_buf_num++;
2057 
2058 					if (scatter_buf_num >= num_scatter_bufs)
2059 						break;
2060 
2061 					scatter_buf_ptr = (uint8_t *)(
2062 						soc->wbm_idle_scatter_buf_base_vaddr[
2063 						scatter_buf_num]);
2064 				}
2065 			}
2066 		}
2067 		/* Setup link descriptor idle list in HW */
2068 		hal_setup_link_idle_list(soc->hal_soc,
2069 			soc->wbm_idle_scatter_buf_base_paddr,
2070 			soc->wbm_idle_scatter_buf_base_vaddr,
2071 			num_scatter_bufs, soc->wbm_idle_scatter_buf_size,
2072 			(uint32_t)(scatter_buf_ptr -
2073 			(uint8_t *)(soc->wbm_idle_scatter_buf_base_vaddr[
2074 			scatter_buf_num-1])), total_link_descs);
2075 	}
2076 	return 0;
2077 
2078 fail:
2079 	if (soc->wbm_idle_link_ring.hal_srng) {
2080 		dp_srng_cleanup(soc, &soc->wbm_idle_link_ring,
2081 				WBM_IDLE_LINK, 0);
2082 	}
2083 
2084 	for (i = 0; i < MAX_IDLE_SCATTER_BUFS; i++) {
2085 		if (soc->wbm_idle_scatter_buf_base_vaddr[i]) {
2086 			qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
2087 				soc->wbm_idle_scatter_buf_size,
2088 				soc->wbm_idle_scatter_buf_base_vaddr[i],
2089 				soc->wbm_idle_scatter_buf_base_paddr[i], 0);
2090 			soc->wbm_idle_scatter_buf_base_vaddr[i] = NULL;
2091 		}
2092 	}
2093 
2094 	for (i = 0; i < MAX_LINK_DESC_BANKS; i++) {
2095 		if (soc->link_desc_banks[i].base_vaddr_unaligned) {
2096 			qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
2097 				soc->link_desc_banks[i].size,
2098 				soc->link_desc_banks[i].base_vaddr_unaligned,
2099 				soc->link_desc_banks[i].base_paddr_unaligned,
2100 				0);
2101 			soc->link_desc_banks[i].base_vaddr_unaligned = NULL;
2102 		}
2103 	}
2104 	return QDF_STATUS_E_FAILURE;
2105 }
2106 
2107 /*
 * Free the link descriptor pool that was set up for HW use
2109  */
2110 static void dp_hw_link_desc_pool_cleanup(struct dp_soc *soc)
2111 {
2112 	int i;
2113 
2114 	if (soc->wbm_idle_link_ring.hal_srng) {
2115 		dp_srng_cleanup(soc, &soc->wbm_idle_link_ring,
2116 			WBM_IDLE_LINK, 0);
2117 	}
2118 
2119 	for (i = 0; i < MAX_IDLE_SCATTER_BUFS; i++) {
2120 		if (soc->wbm_idle_scatter_buf_base_vaddr[i]) {
2121 			qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
2122 				soc->wbm_idle_scatter_buf_size,
2123 				soc->wbm_idle_scatter_buf_base_vaddr[i],
2124 				soc->wbm_idle_scatter_buf_base_paddr[i], 0);
2125 			soc->wbm_idle_scatter_buf_base_vaddr[i] = NULL;
2126 		}
2127 	}
2128 
2129 	for (i = 0; i < MAX_LINK_DESC_BANKS; i++) {
2130 		if (soc->link_desc_banks[i].base_vaddr_unaligned) {
2131 			qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
2132 				soc->link_desc_banks[i].size,
2133 				soc->link_desc_banks[i].base_vaddr_unaligned,
2134 				soc->link_desc_banks[i].base_paddr_unaligned,
2135 				0);
2136 			soc->link_desc_banks[i].base_vaddr_unaligned = NULL;
2137 		}
2138 	}
2139 }
2140 
2141 #ifdef IPA_OFFLOAD
2142 #define REO_DST_RING_SIZE_QCA6290 1023
2143 #ifndef QCA_WIFI_QCA8074_VP
2144 #define REO_DST_RING_SIZE_QCA8074 1023
2145 #else
2146 #define REO_DST_RING_SIZE_QCA8074 8
2147 #endif /* QCA_WIFI_QCA8074_VP */
2148 
2149 #else
2150 
2151 #define REO_DST_RING_SIZE_QCA6290 1024
2152 #ifndef QCA_WIFI_QCA8074_VP
2153 #define REO_DST_RING_SIZE_QCA8074 2048
2154 #else
2155 #define REO_DST_RING_SIZE_QCA8074 8
2156 #endif /* QCA_WIFI_QCA8074_VP */
2157 #endif /* IPA_OFFLOAD */
2158 
2159 /*
2160  * dp_ast_aging_timer_fn() - Timer callback function for WDS aging
2161  * @soc: Datapath SOC handle
2162  *
2163  * This is a timer function used to age out stale AST nodes from
 * the AST table
2165  */
2166 #ifdef FEATURE_WDS
2167 static void dp_ast_aging_timer_fn(void *soc_hdl)
2168 {
2169 	struct dp_soc *soc = (struct dp_soc *) soc_hdl;
2170 	struct dp_pdev *pdev;
2171 	struct dp_vdev *vdev;
2172 	struct dp_peer *peer;
2173 	struct dp_ast_entry *ase, *temp_ase;
2174 	int i;
2175 	bool check_wds_ase = false;
2176 
2177 	if (soc->wds_ast_aging_timer_cnt++ >= DP_WDS_AST_AGING_TIMER_CNT) {
2178 		soc->wds_ast_aging_timer_cnt = 0;
2179 		check_wds_ase = true;
2180 	}
2181 
2182 	 /* Peer list access lock */
2183 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
2184 
2185 	/* AST list access lock */
2186 	qdf_spin_lock_bh(&soc->ast_lock);
2187 
2188 	for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) {
2189 		pdev = soc->pdev_list[i];
2190 		qdf_spin_lock_bh(&pdev->vdev_list_lock);
2191 		DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
2192 			DP_VDEV_ITERATE_PEER_LIST(vdev, peer) {
2193 				DP_PEER_ITERATE_ASE_LIST(peer, ase, temp_ase) {
2194 					/*
2195 					 * Do not expire static ast entries
2196 					 * and HM WDS entries
2197 					 */
2198 					if (ase->type !=
2199 					    CDP_TXRX_AST_TYPE_WDS &&
2200 					    ase->type !=
2201 					    CDP_TXRX_AST_TYPE_MEC &&
2202 					    ase->type !=
2203 					    CDP_TXRX_AST_TYPE_DA)
2204 						continue;
2205 
					/* Expire MEC entries every n sec.
					 * An entry needs to expire when an
					 * STA backbone is changed to an AP
					 * backbone; in that case it must be
					 * re-added as a WDS entry.
					 */
2212 					if (ase->is_active && ase->type ==
2213 					    CDP_TXRX_AST_TYPE_MEC) {
2214 						ase->is_active = FALSE;
2215 						continue;
2216 					} else if (ase->is_active &&
2217 						   check_wds_ase) {
2218 						ase->is_active = FALSE;
2219 						continue;
2220 					}
2221 
2222 					if (ase->type ==
2223 					    CDP_TXRX_AST_TYPE_MEC) {
2224 						DP_STATS_INC(soc,
2225 							     ast.aged_out, 1);
2226 						dp_peer_del_ast(soc, ase);
2227 					} else if (check_wds_ase) {
2228 						DP_STATS_INC(soc,
2229 							     ast.aged_out, 1);
2230 						dp_peer_del_ast(soc, ase);
2231 					}
2232 				}
2233 			}
2234 		}
2235 		qdf_spin_unlock_bh(&pdev->vdev_list_lock);
2236 	}
2237 
2238 	qdf_spin_unlock_bh(&soc->ast_lock);
2239 	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
2240 
2241 	if (qdf_atomic_read(&soc->cmn_init_done))
2242 		qdf_timer_mod(&soc->ast_aging_timer,
2243 			      DP_AST_AGING_TIMER_DEFAULT_MS);
2244 }
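/*
 * Aging sketch (derived from the logic above): entries age in two phases.
 * An active entry is first marked inactive, and an entry still inactive on
 * a later pass is deleted. MEC entries go through this on every timer tick
 * (DP_AST_AGING_TIMER_DEFAULT_MS apart), while WDS and DA entries are only
 * examined when check_wds_ase is set, i.e. once every
 * DP_WDS_AST_AGING_TIMER_CNT + 1 ticks.
 */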
2245 
2246 
2247 /*
2248  * dp_soc_wds_attach() - Setup WDS timer and AST table
2249  * @soc:		Datapath SOC handle
2250  *
2251  * Return: None
2252  */
2253 static void dp_soc_wds_attach(struct dp_soc *soc)
2254 {
2255 	soc->wds_ast_aging_timer_cnt = 0;
2256 	qdf_timer_init(soc->osdev, &soc->ast_aging_timer,
2257 		       dp_ast_aging_timer_fn, (void *)soc,
2258 		       QDF_TIMER_TYPE_WAKE_APPS);
2259 
2260 	qdf_timer_mod(&soc->ast_aging_timer, DP_AST_AGING_TIMER_DEFAULT_MS);
2261 }
2262 
2263 /*
2264  * dp_soc_wds_detach() - Detach WDS data structures and timers
 * @soc: DP SOC handle
2266  *
2267  * Return: None
2268  */
2269 static void dp_soc_wds_detach(struct dp_soc *soc)
2270 {
2271 	qdf_timer_stop(&soc->ast_aging_timer);
2272 	qdf_timer_free(&soc->ast_aging_timer);
2273 }
2274 #else
2275 static void dp_soc_wds_attach(struct dp_soc *soc)
2276 {
2277 }
2278 
2279 static void dp_soc_wds_detach(struct dp_soc *soc)
2280 {
2281 }
2282 #endif
2283 
2284 /*
 * dp_soc_reset_cpu_ring_map() - Reset cpu ring map
 * @soc: Datapath soc handle
 *
 * This API resets the default cpu ring map
 */
2291 static void dp_soc_reset_cpu_ring_map(struct dp_soc *soc)
2292 {
2293 	uint8_t i;
2294 	int nss_config = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
2295 
2296 	for (i = 0; i < WLAN_CFG_INT_NUM_CONTEXTS; i++) {
2297 		switch (nss_config) {
2298 		case dp_nss_cfg_first_radio:
2299 			/*
2300 			 * Setting Tx ring map for one nss offloaded radio
2301 			 */
2302 			soc->tx_ring_map[i] = dp_cpu_ring_map[DP_NSS_FIRST_RADIO_OFFLOADED_MAP][i];
2303 			break;
2304 
2305 		case dp_nss_cfg_second_radio:
2306 			/*
			 * Setting Tx ring map for the second nss offloaded
			 * radio
2308 			 */
2309 			soc->tx_ring_map[i] = dp_cpu_ring_map[DP_NSS_SECOND_RADIO_OFFLOADED_MAP][i];
2310 			break;
2311 
2312 		case dp_nss_cfg_dbdc:
2313 			/*
2314 			 * Setting Tx ring map for 2 nss offloaded radios
2315 			 */
2316 			soc->tx_ring_map[i] =
2317 				dp_cpu_ring_map[DP_NSS_DBDC_OFFLOADED_MAP][i];
2318 			break;
2319 
2320 		case dp_nss_cfg_dbtc:
2321 			/*
2322 			 * Setting Tx ring map for 3 nss offloaded radios
2323 			 */
2324 			soc->tx_ring_map[i] =
2325 				dp_cpu_ring_map[DP_NSS_DBTC_OFFLOADED_MAP][i];
2326 			break;
2327 
2328 		default:
2329 			dp_err("tx_ring_map failed due to invalid nss cfg");
2330 			break;
2331 		}
2332 	}
2333 }
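/*
 * Example (illustrative): for a DBDC NSS offload configuration, every CPU
 * slot i in tx_ring_map is overwritten with
 * dp_cpu_ring_map[DP_NSS_DBDC_OFFLOADED_MAP][i], so host Tx is steered to
 * the TCL rings that are presumably left to the host by the offload.
 */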
2334 
2335 /*
2336  * dp_soc_ring_if_nss_offloaded() - find if ring is offloaded to NSS
 * @soc: DP soc handle
 * @ring_type: ring type
 * @ring_num: ring number
 *
 * Return: 1 if the ring is offloaded to NSS, 0 otherwise
 */
static uint8_t dp_soc_ring_if_nss_offloaded(struct dp_soc *soc,
					    enum hal_ring_type ring_type,
					    int ring_num)
2344 {
2345 	uint8_t nss_config = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
2346 	uint8_t status = 0;
2347 
2348 	switch (ring_type) {
2349 	case WBM2SW_RELEASE:
2350 	case REO_DST:
2351 	case RXDMA_BUF:
2352 		status = ((nss_config) & (1 << ring_num));
2353 		break;
2354 	default:
2355 		break;
2356 	}
2357 
2358 	return status;
2359 }
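/*
 * Example (illustrative): with nss_config = dp_nss_cfg_dbdc (bits 0 and 1
 * set), REO_DST rings 0 and 1 report as offloaded while rings 2 and 3 do
 * not, since the check above is simply (nss_config & (1 << ring_num)).
 */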
2360 
2361 /*
2362  * dp_soc_reset_intr_mask() - reset interrupt mask
 * @soc: DP soc handle
 *
 * Return: void
2366  */
2367 static void dp_soc_reset_intr_mask(struct dp_soc *soc)
2368 {
2369 	uint8_t j;
2370 	int *grp_mask = NULL;
2371 	int group_number, mask, num_ring;
2372 
2373 	/* number of tx ring */
2374 	num_ring = wlan_cfg_num_tcl_data_rings(soc->wlan_cfg_ctx);
2375 
2376 	/*
	 * group mask for tx completion ring.
2378 	 */
2379 	grp_mask =  &soc->wlan_cfg_ctx->int_tx_ring_mask[0];
2380 
2381 	/* loop and reset the mask for only offloaded ring */
2382 	for (j = 0; j < num_ring; j++) {
2383 		if (!dp_soc_ring_if_nss_offloaded(soc, WBM2SW_RELEASE, j)) {
2384 			continue;
2385 		}
2386 
2387 		/*
2388 		 * Group number corresponding to tx offloaded ring.
2389 		 */
2390 		group_number = dp_srng_find_ring_in_mask(j, grp_mask);
2391 		if (group_number < 0) {
2392 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
					FL("ring not part of any group; ring_type: %d, ring_num %d"),
2394 					WBM2SW_RELEASE, j);
2395 			return;
2396 		}
2397 
2398 		/* reset the tx mask for offloaded ring */
2399 		mask = wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, group_number);
2400 		mask &= (~(1 << j));
2401 
2402 		/*
2403 		 * reset the interrupt mask for offloaded ring.
2404 		 */
2405 		wlan_cfg_set_tx_ring_mask(soc->wlan_cfg_ctx, group_number, mask);
2406 	}
2407 
2408 	/* number of rx rings */
2409 	num_ring = wlan_cfg_num_reo_dest_rings(soc->wlan_cfg_ctx);
2410 
2411 	/*
2412 	 * group mask for reo destination ring.
2413 	 */
2414 	grp_mask = &soc->wlan_cfg_ctx->int_rx_ring_mask[0];
2415 
2416 	/* loop and reset the mask for only offloaded ring */
2417 	for (j = 0; j < num_ring; j++) {
2418 		if (!dp_soc_ring_if_nss_offloaded(soc, REO_DST, j)) {
2419 			continue;
2420 		}
2421 
2422 		/*
2423 		 * Group number corresponding to rx offloaded ring.
2424 		 */
2425 		group_number = dp_srng_find_ring_in_mask(j, grp_mask);
2426 		if (group_number < 0) {
2427 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
					FL("ring not part of any group; ring_type: %d, ring_num %d"),
2429 					REO_DST, j);
2430 			return;
2431 		}
2432 
		/* reset the rx interrupt mask for offloaded ring */
2434 		mask =  wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, group_number);
2435 		mask &= (~(1 << j));
2436 
2437 		/*
2438 		 * set the interrupt mask to zero for rx offloaded radio.
2439 		 */
2440 		wlan_cfg_set_rx_ring_mask(soc->wlan_cfg_ctx, group_number, mask);
2441 	}
2442 
2443 	/*
2444 	 * group mask for Rx buffer refill ring
2445 	 */
2446 	grp_mask = &soc->wlan_cfg_ctx->int_host2rxdma_ring_mask[0];
2447 
2448 	/* loop and reset the mask for only offloaded ring */
2449 	for (j = 0; j < MAX_PDEV_CNT; j++) {
2450 		if (!dp_soc_ring_if_nss_offloaded(soc, RXDMA_BUF, j)) {
2451 			continue;
2452 		}
2453 
2454 		/*
2455 		 * Group number corresponding to rx offloaded ring.
2456 		 */
2457 		group_number = dp_srng_find_ring_in_mask(j, grp_mask);
2458 		if (group_number < 0) {
2459 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
					FL("ring not part of any group; ring_type: %d, ring_num %d"),
					RXDMA_BUF, j);
2462 			return;
2463 		}
2464 
		/* reset the host2rxdma mask for offloaded ring */
2466 		mask =  wlan_cfg_get_host2rxdma_ring_mask(soc->wlan_cfg_ctx,
2467 				group_number);
2468 		mask &= (~(1 << j));
2469 
2470 		/*
2471 		 * set the interrupt mask to zero for rx offloaded radio.
2472 		 */
2473 		wlan_cfg_set_host2rxdma_ring_mask(soc->wlan_cfg_ctx,
2474 			group_number, mask);
2475 	}
2476 }
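/*
 * Bitmask example (illustrative values): if group 0 has
 * tx_ring_mask = 0x7 (rings 0-2) and ring 1 is NSS offloaded, the update
 * above computes 0x7 & ~(1 << 1) = 0x5, leaving the host to service only
 * rings 0 and 2 in that group.
 */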
2477 
2478 #ifdef IPA_OFFLOAD
2479 /**
 * dp_reo_remap_config() - configure reo remap register value based
 *                         on nss configuration.
 * @soc: DP soc handle
 * @remap1: output parameter, reo remap 1 register value
 * @remap2: output parameter, reo remap 2 register value
 *
 * Based on the offload_radio value below, the remap configuration
 * gets applied:
 *	0 - both radios handled by host (remap rings 1, 2, 3 & 4)
 *	1 - 1st radio handled by NSS (remap rings 2, 3 & 4)
 *	2 - 2nd radio handled by NSS (remap rings 1, 3 & 4)
 *	3 - both radios handled by NSS (remap not required)
 *	4 - IPA OFFLOAD enabled (remap rings 1, 2 & 3)
 *
 * Return: bool type, true if remap is configured else false.
2493  */
2494 static bool dp_reo_remap_config(struct dp_soc *soc,
2495 				uint32_t *remap1,
2496 				uint32_t *remap2)
2497 {
2498 	*remap1 = ((0x1 << 0) | (0x2 << 3) | (0x3 << 6) | (0x1 << 9) |
2499 		(0x2 << 12) | (0x3 << 15) | (0x1 << 18) | (0x2 << 21)) << 8;
2500 
2501 	*remap2 = ((0x3 << 0) | (0x1 << 3) | (0x2 << 6) | (0x3 << 9) |
2502 		(0x1 << 12) | (0x2 << 15) | (0x3 << 18) | (0x1 << 21)) << 8;
2503 
2504 	dp_debug("remap1 %x remap2 %x", *remap1, *remap2);
2505 
2506 	return true;
2507 }
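/*
 * Encoding sketch (inferred from the shifts above, not from a register
 * spec): each destination slot is a 3-bit field, eight slots per remap
 * register, with the whole pattern shifted left by 8. This IPA variant
 * cycles Rx across REO2SW rings 1-3 only, presumably leaving ring 4 to
 * the IPA offload path.
 */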
2508 #else
2509 static bool dp_reo_remap_config(struct dp_soc *soc,
2510 				uint32_t *remap1,
2511 				uint32_t *remap2)
2512 {
2513 	uint8_t offload_radio = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
2514 
2515 	switch (offload_radio) {
2516 	case dp_nss_cfg_default:
2517 		*remap1 = ((0x1 << 0) | (0x2 << 3) | (0x3 << 6) |
2518 			(0x4 << 9) | (0x1 << 12) | (0x2 << 15) |
2519 			(0x3 << 18) | (0x4 << 21)) << 8;
2520 
2521 		*remap2 = ((0x1 << 0) | (0x2 << 3) | (0x3 << 6) |
2522 			(0x4 << 9) | (0x1 << 12) | (0x2 << 15) |
2523 			(0x3 << 18) | (0x4 << 21)) << 8;
2524 		break;
2525 	case dp_nss_cfg_first_radio:
2526 		*remap1 = ((0x2 << 0) | (0x3 << 3) | (0x4 << 6) |
2527 			(0x2 << 9) | (0x3 << 12) | (0x4 << 15) |
2528 			(0x2 << 18) | (0x3 << 21)) << 8;
2529 
2530 		*remap2 = ((0x4 << 0) | (0x2 << 3) | (0x3 << 6) |
2531 			(0x4 << 9) | (0x2 << 12) | (0x3 << 15) |
2532 			(0x4 << 18) | (0x2 << 21)) << 8;
2533 		break;
2534 
2535 	case dp_nss_cfg_second_radio:
2536 		*remap1 = ((0x1 << 0) | (0x3 << 3) | (0x4 << 6) |
2537 			(0x1 << 9) | (0x3 << 12) | (0x4 << 15) |
2538 			(0x1 << 18) | (0x3 << 21)) << 8;
2539 
2540 		*remap2 = ((0x4 << 0) | (0x1 << 3) | (0x3 << 6) |
2541 			(0x4 << 9) | (0x1 << 12) | (0x3 << 15) |
2542 			(0x4 << 18) | (0x1 << 21)) << 8;
2543 		break;
2544 
2545 	case dp_nss_cfg_dbdc:
2546 	case dp_nss_cfg_dbtc:
2547 		/* return false if both or all are offloaded to NSS */
2548 		return false;
2549 	}
2550 
2551 	dp_debug("remap1 %x remap2 %x offload_radio %u",
2552 		 *remap1, *remap2, offload_radio);
2553 	return true;
2554 }
2555 #endif
2556 
2557 /*
2558  * dp_reo_frag_dst_set() - configure reo register to set the
2559  *                        fragment destination ring
 * @soc: Datapath soc
 * @frag_dst_ring: output parameter to set fragment destination ring
 *
 * Based on offload_radio below, the fragment destination ring is selected
2564  * 0 - TCL
2565  * 1 - SW1
2566  * 2 - SW2
2567  * 3 - SW3
2568  * 4 - SW4
2569  * 5 - Release
2570  * 6 - FW
2571  * 7 - alternate select
2572  *
2573  * return: void
2574  */
2575 static void dp_reo_frag_dst_set(struct dp_soc *soc, uint8_t *frag_dst_ring)
2576 {
2577 	uint8_t offload_radio = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
2578 
2579 	switch (offload_radio) {
2580 	case dp_nss_cfg_default:
2581 		*frag_dst_ring = HAL_SRNG_REO_EXCEPTION;
2582 		break;
2583 	case dp_nss_cfg_dbdc:
2584 	case dp_nss_cfg_dbtc:
2585 		*frag_dst_ring = HAL_SRNG_REO_ALTERNATE_SELECT;
2586 		break;
2587 	default:
2588 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2589 				FL("dp_reo_frag_dst_set invalid offload radio config"));
2590 		break;
2591 	}
2592 }
2593 
2594 /*
 * dp_soc_cmn_setup() - Common SoC level initialization
2596  * @soc:		Datapath SOC handle
2597  *
2598  * This is an internal function used to setup common SOC data structures,
2599  * to be called from PDEV attach after receiving HW mode capabilities from FW
2600  */
2601 static int dp_soc_cmn_setup(struct dp_soc *soc)
2602 {
2603 	int i;
2604 	struct hal_reo_params reo_params;
2605 	int tx_ring_size;
2606 	int tx_comp_ring_size;
2607 	int reo_dst_ring_size;
2608 	uint32_t entries;
2609 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
2610 
2611 	if (qdf_atomic_read(&soc->cmn_init_done))
2612 		return 0;
2613 
2614 	if (dp_hw_link_desc_pool_setup(soc))
2615 		goto fail1;
2616 
2617 	soc_cfg_ctx = soc->wlan_cfg_ctx;
2618 	/* Setup SRNG rings */
2619 	/* Common rings */
2620 	if (dp_srng_setup(soc, &soc->wbm_desc_rel_ring, SW2WBM_RELEASE, 0, 0,
2621 		wlan_cfg_get_dp_soc_wbm_release_ring_size(soc_cfg_ctx))) {
2622 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2623 			FL("dp_srng_setup failed for wbm_desc_rel_ring"));
2624 		goto fail1;
2625 	}
2626 
2627 	soc->num_tcl_data_rings = 0;
2628 	/* Tx data rings */
2629 	if (!wlan_cfg_per_pdev_tx_ring(soc_cfg_ctx)) {
2630 		soc->num_tcl_data_rings =
2631 			wlan_cfg_num_tcl_data_rings(soc_cfg_ctx);
2632 		tx_comp_ring_size =
2633 			wlan_cfg_tx_comp_ring_size(soc_cfg_ctx);
2634 		tx_ring_size =
2635 			wlan_cfg_tx_ring_size(soc_cfg_ctx);
2636 		for (i = 0; i < soc->num_tcl_data_rings; i++) {
2637 			if (dp_srng_setup(soc, &soc->tcl_data_ring[i],
2638 				TCL_DATA, i, 0, tx_ring_size)) {
2639 				QDF_TRACE(QDF_MODULE_ID_DP,
2640 					QDF_TRACE_LEVEL_ERROR,
2641 					FL("dp_srng_setup failed for tcl_data_ring[%d]"), i);
2642 				goto fail1;
2643 			}
2644 			/*
2645 			 * TBD: Set IPA WBM ring size with ini IPA UC tx buffer
2646 			 * count
2647 			 */
2648 			if (dp_srng_setup(soc, &soc->tx_comp_ring[i],
2649 				WBM2SW_RELEASE, i, 0, tx_comp_ring_size)) {
2650 				QDF_TRACE(QDF_MODULE_ID_DP,
2651 					QDF_TRACE_LEVEL_ERROR,
2652 					FL("dp_srng_setup failed for tx_comp_ring[%d]"), i);
2653 				goto fail1;
2654 			}
2655 		}
2656 	} else {
2657 		/* This will be incremented during per pdev ring setup */
2658 		soc->num_tcl_data_rings = 0;
2659 	}
2660 
2661 	if (dp_tx_soc_attach(soc)) {
2662 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2663 				FL("dp_tx_soc_attach failed"));
2664 		goto fail1;
2665 	}
2666 
2667 	entries = wlan_cfg_get_dp_soc_tcl_cmd_ring_size(soc_cfg_ctx);
2668 	/* TCL command and status rings */
2669 	if (dp_srng_setup(soc, &soc->tcl_cmd_ring, TCL_CMD, 0, 0,
2670 			  entries)) {
2671 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2672 			FL("dp_srng_setup failed for tcl_cmd_ring"));
2673 		goto fail1;
2674 	}
2675 
2676 	entries = wlan_cfg_get_dp_soc_tcl_status_ring_size(soc_cfg_ctx);
2677 	if (dp_srng_setup(soc, &soc->tcl_status_ring, TCL_STATUS, 0, 0,
2678 			  entries)) {
2679 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2680 			FL("dp_srng_setup failed for tcl_status_ring"));
2681 		goto fail1;
2682 	}
2683 
2684 	reo_dst_ring_size = wlan_cfg_get_reo_dst_ring_size(soc->wlan_cfg_ctx);
2685 
2686 	/* TBD: call dp_tx_init to setup Tx SW descriptors and MSDU extension
2687 	 * descriptors
2688 	 */
2689 
2690 	/* Rx data rings */
2691 	if (!wlan_cfg_per_pdev_rx_ring(soc_cfg_ctx)) {
2692 		soc->num_reo_dest_rings =
2693 			wlan_cfg_num_reo_dest_rings(soc_cfg_ctx);
2694 		QDF_TRACE(QDF_MODULE_ID_DP,
2695 			QDF_TRACE_LEVEL_INFO,
2696 			FL("num_reo_dest_rings %d"), soc->num_reo_dest_rings);
2697 		for (i = 0; i < soc->num_reo_dest_rings; i++) {
2698 			if (dp_srng_setup(soc, &soc->reo_dest_ring[i], REO_DST,
2699 				i, 0, reo_dst_ring_size)) {
2700 				QDF_TRACE(QDF_MODULE_ID_DP,
2701 					  QDF_TRACE_LEVEL_ERROR,
2702 					  FL(RNG_ERR "reo_dest_ring [%d]"), i);
2703 				goto fail1;
2704 			}
2705 		}
2706 	} else {
2707 		/* This will be incremented during per pdev ring setup */
2708 		soc->num_reo_dest_rings = 0;
2709 	}
2710 
2711 	entries = wlan_cfg_get_dp_soc_rxdma_err_dst_ring_size(soc_cfg_ctx);
2712 	/* LMAC RxDMA to SW Rings configuration */
2713 	if (!wlan_cfg_per_pdev_lmac_ring(soc_cfg_ctx)) {
2714 		/* Only valid for MCL */
2715 		struct dp_pdev *pdev = soc->pdev_list[0];
2716 
2717 		for (i = 0; i < MAX_RX_MAC_RINGS; i++) {
2718 			if (dp_srng_setup(soc, &pdev->rxdma_err_dst_ring[i],
2719 					  RXDMA_DST, 0, i,
2720 					  entries)) {
2721 				QDF_TRACE(QDF_MODULE_ID_DP,
2722 					  QDF_TRACE_LEVEL_ERROR,
2723 					  FL(RNG_ERR "rxdma_err_dst_ring"));
2724 				goto fail1;
2725 			}
2726 		}
2727 	}
2728 	/* TBD: call dp_rx_init to setup Rx SW descriptors */
2729 
2730 	/* REO reinjection ring */
2731 	entries = wlan_cfg_get_dp_soc_reo_reinject_ring_size(soc_cfg_ctx);
2732 	if (dp_srng_setup(soc, &soc->reo_reinject_ring, REO_REINJECT, 0, 0,
2733 			  entries)) {
2734 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2735 			  FL("dp_srng_setup failed for reo_reinject_ring"));
2736 		goto fail1;
2737 	}
2738 
2739 
2740 	/* Rx release ring */
2741 	if (dp_srng_setup(soc, &soc->rx_rel_ring, WBM2SW_RELEASE, 3, 0,
2742 		wlan_cfg_get_dp_soc_rx_release_ring_size(soc_cfg_ctx))) {
2743 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2744 			  FL("dp_srng_setup failed for rx_rel_ring"));
2745 		goto fail1;
2746 	}
2747 
2748 
2749 	/* Rx exception ring */
2750 	entries = wlan_cfg_get_dp_soc_reo_exception_ring_size(soc_cfg_ctx);
2751 	if (dp_srng_setup(soc, &soc->reo_exception_ring,
2752 			  REO_EXCEPTION, 0, MAX_REO_DEST_RINGS, entries)) {
2753 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2754 			  FL("dp_srng_setup failed for reo_exception_ring"));
2755 		goto fail1;
2756 	}
2757 
2758 
2759 	/* REO command and status rings */
2760 	if (dp_srng_setup(soc, &soc->reo_cmd_ring, REO_CMD, 0, 0,
2761 		wlan_cfg_get_dp_soc_reo_cmd_ring_size(soc_cfg_ctx))) {
2762 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2763 			FL("dp_srng_setup failed for reo_cmd_ring"));
2764 		goto fail1;
2765 	}
2766 
2767 	hal_reo_init_cmd_ring(soc->hal_soc, soc->reo_cmd_ring.hal_srng);
2768 	TAILQ_INIT(&soc->rx.reo_cmd_list);
2769 	qdf_spinlock_create(&soc->rx.reo_cmd_lock);
2770 
2771 	if (dp_srng_setup(soc, &soc->reo_status_ring, REO_STATUS, 0, 0,
2772 		wlan_cfg_get_dp_soc_reo_status_ring_size(soc_cfg_ctx))) {
2773 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2774 			FL("dp_srng_setup failed for reo_status_ring"));
2775 		goto fail1;
2776 	}
2777 
2778 
2779 	/* Reset the cpu ring map if radio is NSS offloaded */
2780 	if (wlan_cfg_get_dp_soc_nss_cfg(soc_cfg_ctx)) {
2781 		dp_soc_reset_cpu_ring_map(soc);
2782 		dp_soc_reset_intr_mask(soc);
2783 	}
2784 
2785 	/* Setup HW REO */
2786 	qdf_mem_zero(&reo_params, sizeof(reo_params));
2787 
2788 	if (wlan_cfg_is_rx_hash_enabled(soc_cfg_ctx)) {
2789 
2790 		/*
2791 		 * Reo ring remap is not required if both radios
2792 		 * are offloaded to NSS
2793 		 */
2794 		if (!dp_reo_remap_config(soc,
2795 					&reo_params.remap1,
2796 					&reo_params.remap2))
2797 			goto out;
2798 
2799 		reo_params.rx_hash_enabled = true;
2800 	}
2801 
2802 	/* setup the global rx defrag waitlist */
2803 	TAILQ_INIT(&soc->rx.defrag.waitlist);
2804 	soc->rx.defrag.timeout_ms =
2805 		wlan_cfg_get_rx_defrag_min_timeout(soc_cfg_ctx);
2806 	soc->rx.flags.defrag_timeout_check =
2807 		wlan_cfg_get_defrag_timeout_check(soc_cfg_ctx);
2808 	qdf_spinlock_create(&soc->rx.defrag.defrag_lock);
2809 
2810 out:
2811 	/*
2812 	 * set the fragment destination ring
2813 	 */
2814 	dp_reo_frag_dst_set(soc, &reo_params.frag_dst_ring);
2815 
2816 	hal_reo_setup(soc->hal_soc, &reo_params);
2817 
2818 	qdf_atomic_set(&soc->cmn_init_done, 1);
2819 	dp_soc_wds_attach(soc);
2820 
2821 	qdf_nbuf_queue_init(&soc->htt_stats.msg);
2822 	return 0;
2823 fail1:
2824 	/*
2825 	 * Cleanup will be done as part of soc_detach, which will
2826 	 * be called on pdev attach failure
2827 	 */
2828 	return QDF_STATUS_E_FAILURE;
2829 }
2830 
2831 static void dp_pdev_detach_wifi3(struct cdp_pdev *txrx_pdev, int force);
2832 
2833 static QDF_STATUS dp_lro_hash_setup(struct dp_soc *soc, struct dp_pdev *pdev)
2834 {
2835 	struct cdp_lro_hash_config lro_hash;
2836 	QDF_STATUS status;
2837 
2838 	if (!wlan_cfg_is_lro_enabled(soc->wlan_cfg_ctx) &&
2839 	    !wlan_cfg_is_gro_enabled(soc->wlan_cfg_ctx) &&
2840 	    !wlan_cfg_is_rx_hash_enabled(soc->wlan_cfg_ctx)) {
2841 		dp_err("LRO, GRO and RX hash disabled");
2842 		return QDF_STATUS_E_FAILURE;
2843 	}
2844 
2845 	qdf_mem_zero(&lro_hash, sizeof(lro_hash));
2846 
2847 	if (wlan_cfg_is_lro_enabled(soc->wlan_cfg_ctx) ||
2848 	    wlan_cfg_is_gro_enabled(soc->wlan_cfg_ctx)) {
2849 		lro_hash.lro_enable = 1;
2850 		lro_hash.tcp_flag = QDF_TCPHDR_ACK;
2851 		lro_hash.tcp_flag_mask = QDF_TCPHDR_FIN | QDF_TCPHDR_SYN |
2852 			 QDF_TCPHDR_RST | QDF_TCPHDR_ACK | QDF_TCPHDR_URG |
2853 			 QDF_TCPHDR_ECE | QDF_TCPHDR_CWR;
2854 	}
2855 
2856 	qdf_get_random_bytes(lro_hash.toeplitz_hash_ipv4,
2857 		 (sizeof(lro_hash.toeplitz_hash_ipv4[0]) *
2858 		 LRO_IPV4_SEED_ARR_SZ));
2859 	qdf_get_random_bytes(lro_hash.toeplitz_hash_ipv6,
2860 		 (sizeof(lro_hash.toeplitz_hash_ipv6[0]) *
2861 		 LRO_IPV6_SEED_ARR_SZ));
2862 
2863 	qdf_assert(soc->cdp_soc.ol_ops->lro_hash_config);
2864 
2865 	if (!soc->cdp_soc.ol_ops->lro_hash_config) {
2866 		QDF_BUG(0);
2867 		dp_err("lro_hash_config not configured");
2868 		return QDF_STATUS_E_FAILURE;
2869 	}
2870 
2871 	status = soc->cdp_soc.ol_ops->lro_hash_config(pdev->ctrl_pdev,
2872 						      &lro_hash);
2873 	if (!QDF_IS_STATUS_SUCCESS(status)) {
2874 		dp_err("failed to send lro_hash_config to FW %u", status);
2875 		return status;
2876 	}
2877 
2878 	dp_info("LRO CMD config: lro_enable: 0x%x tcp_flag 0x%x tcp_flag_mask 0x%x",
2879 		lro_hash.lro_enable, lro_hash.tcp_flag,
2880 		lro_hash.tcp_flag_mask);
2881 
2882 	dp_info("toeplitz_hash_ipv4:");
2883 	qdf_trace_hex_dump(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
2884 			   (void *)lro_hash.toeplitz_hash_ipv4,
2885 			   (sizeof(lro_hash.toeplitz_hash_ipv4[0]) *
2886 			   LRO_IPV4_SEED_ARR_SZ));
2887 
2888 	dp_info("toeplitz_hash_ipv6:");
2889 	qdf_trace_hex_dump(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
2890 			   (void *)lro_hash.toeplitz_hash_ipv6,
2891 			   (sizeof(lro_hash.toeplitz_hash_ipv6[0]) *
2892 			   LRO_IPV6_SEED_ARR_SZ));
2893 
2894 	return status;
2895 }
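/*
 * Flag semantics sketch (an interpretation of the values set above, not
 * taken from FW documentation): with tcp_flag = ACK and tcp_flag_mask
 * covering FIN/SYN/RST/ACK/URG/ECE/CWR, a segment would be LRO-eligible
 * only when (tcp_flags & tcp_flag_mask) == tcp_flag, i.e. a pure ACK with
 * none of the other flags set.
 */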
2896 
2897 /*
 * dp_rxdma_ring_setup() - configure the RX DMA rings
 * @soc: data path SoC handle
 * @pdev: Physical device handle
 *
 * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_FAILURE on failure
 */
2904 #ifdef QCA_HOST2FW_RXBUF_RING
2905 static int dp_rxdma_ring_setup(struct dp_soc *soc,
2906 	 struct dp_pdev *pdev)
2907 {
2908 	struct wlan_cfg_dp_pdev_ctxt *pdev_cfg_ctx;
2909 	int max_mac_rings;
2910 	int i;
2911 
2912 	pdev_cfg_ctx = pdev->wlan_cfg_ctx;
2913 	max_mac_rings = wlan_cfg_get_num_mac_rings(pdev_cfg_ctx);
2914 
2915 	for (i = 0; i < max_mac_rings; i++) {
2916 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
2917 			 "%s: pdev_id %d mac_id %d",
2918 			 __func__, pdev->pdev_id, i);
2919 		if (dp_srng_setup(soc, &pdev->rx_mac_buf_ring[i],
2920 			RXDMA_BUF, 1, i,
2921 			wlan_cfg_get_rx_dma_buf_ring_size(pdev_cfg_ctx))) {
2922 			QDF_TRACE(QDF_MODULE_ID_DP,
2923 				 QDF_TRACE_LEVEL_ERROR,
2924 				 FL("failed rx mac ring setup"));
2925 			return QDF_STATUS_E_FAILURE;
2926 		}
2927 	}
2928 	return QDF_STATUS_SUCCESS;
2929 }
2930 #else
2931 static int dp_rxdma_ring_setup(struct dp_soc *soc,
2932 	 struct dp_pdev *pdev)
2933 {
2934 	return QDF_STATUS_SUCCESS;
2935 }
2936 #endif
2937 
2938 /**
2939  * dp_dscp_tid_map_setup(): Initialize the dscp-tid maps
 * @pdev: DP_PDEV handle
2941  *
2942  * Return: void
2943  */
2944 static inline void
2945 dp_dscp_tid_map_setup(struct dp_pdev *pdev)
2946 {
2947 	uint8_t map_id;
2948 	struct dp_soc *soc = pdev->soc;
2949 
2950 	if (!soc)
2951 		return;
2952 
2953 	for (map_id = 0; map_id < DP_MAX_TID_MAPS; map_id++) {
2954 		qdf_mem_copy(pdev->dscp_tid_map[map_id],
2955 			     default_dscp_tid_map,
2956 			     sizeof(default_dscp_tid_map));
2957 	}
2958 
2959 	for (map_id = 0; map_id < soc->num_hw_dscp_tid_map; map_id++) {
2960 		hal_tx_set_dscp_tid_map(soc->hal_soc,
2961 					default_dscp_tid_map,
2962 					map_id);
2963 	}
2964 }
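/*
 * Lookup sketch (illustrative; the actual map contents come from
 * default_dscp_tid_map): on Tx, a frame's DSCP value indexes the selected
 * map to pick its TID, roughly
 *
 *	tid = pdev->dscp_tid_map[map_id][dscp];
 *
 * and the same table is programmed into HW via hal_tx_set_dscp_tid_map()
 * so TCL can classify without host involvement.
 */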
2965 
2966 #ifdef IPA_OFFLOAD
2967 /**
2968  * dp_setup_ipa_rx_refill_buf_ring - Setup second Rx refill buffer ring
2969  * @soc: data path instance
2970  * @pdev: core txrx pdev context
2971  *
2972  * Return: QDF_STATUS_SUCCESS: success
2973  *         QDF_STATUS_E_RESOURCES: Error return
2974  */
2975 static int dp_setup_ipa_rx_refill_buf_ring(struct dp_soc *soc,
2976 					   struct dp_pdev *pdev)
2977 {
2978 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
2979 	int entries;
2980 
2981 	soc_cfg_ctx = soc->wlan_cfg_ctx;
2982 	entries = wlan_cfg_get_dp_soc_rxdma_refill_ring_size(soc_cfg_ctx);
2983 
2984 	/* Setup second Rx refill buffer ring */
2985 	if (dp_srng_setup(soc, &pdev->rx_refill_buf_ring2, RXDMA_BUF,
2986 			  IPA_RX_REFILL_BUF_RING_IDX,
2987 			  pdev->pdev_id,
2988 			  entries)) {
2989 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2990 			FL("dp_srng_setup failed second rx refill ring"));
2991 		return QDF_STATUS_E_FAILURE;
2992 	}
2993 	return QDF_STATUS_SUCCESS;
2994 }
2995 
2996 /**
2997  * dp_cleanup_ipa_rx_refill_buf_ring - Cleanup second Rx refill buffer ring
2998  * @soc: data path instance
2999  * @pdev: core txrx pdev context
3000  *
3001  * Return: void
3002  */
3003 static void dp_cleanup_ipa_rx_refill_buf_ring(struct dp_soc *soc,
3004 					      struct dp_pdev *pdev)
3005 {
3006 	dp_srng_cleanup(soc, &pdev->rx_refill_buf_ring2, RXDMA_BUF,
3007 			IPA_RX_REFILL_BUF_RING_IDX);
3008 }
3009 
3010 #else
3011 static int dp_setup_ipa_rx_refill_buf_ring(struct dp_soc *soc,
3012 					   struct dp_pdev *pdev)
3013 {
3014 	return QDF_STATUS_SUCCESS;
3015 }
3016 
3017 static void dp_cleanup_ipa_rx_refill_buf_ring(struct dp_soc *soc,
3018 					      struct dp_pdev *pdev)
3019 {
3020 }
3021 #endif
3022 
3023 #if !defined(DISABLE_MON_CONFIG)
3024 /**
3025  * dp_mon_rings_setup() - Initialize Monitor rings based on target
3026  * @soc: soc handle
3027  * @pdev: physical device handle
3028  *
3029  * Return: nonzero on failure and zero on success
3030  */
3031 static
3032 QDF_STATUS dp_mon_rings_setup(struct dp_soc *soc, struct dp_pdev *pdev)
3033 {
3034 	int mac_id = 0;
3035 	int pdev_id = pdev->pdev_id;
3036 	int entries;
3037 	struct wlan_cfg_dp_pdev_ctxt *pdev_cfg_ctx;
3038 
3039 	pdev_cfg_ctx = pdev->wlan_cfg_ctx;
3040 
3041 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
3042 		int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, pdev_id);
3043 
3044 		if (soc->wlan_cfg_ctx->rxdma1_enable) {
3045 			entries =
3046 			   wlan_cfg_get_dma_mon_buf_ring_size(pdev_cfg_ctx);
3047 			if (dp_srng_setup(soc,
3048 					  &pdev->rxdma_mon_buf_ring[mac_id],
3049 					  RXDMA_MONITOR_BUF, 0, mac_for_pdev,
3050 					  entries)) {
3051 				QDF_TRACE(QDF_MODULE_ID_DP,
3052 					  QDF_TRACE_LEVEL_ERROR,
					  FL(RNG_ERR "rxdma_mon_buf_ring"));
3054 				return QDF_STATUS_E_NOMEM;
3055 			}
3056 
3057 			entries =
3058 			   wlan_cfg_get_dma_mon_dest_ring_size(pdev_cfg_ctx);
3059 			if (dp_srng_setup(soc,
3060 					  &pdev->rxdma_mon_dst_ring[mac_id],
3061 					  RXDMA_MONITOR_DST, 0, mac_for_pdev,
3062 					  entries)) {
3063 				QDF_TRACE(QDF_MODULE_ID_DP,
3064 					  QDF_TRACE_LEVEL_ERROR,
3065 					  FL(RNG_ERR "rxdma_mon_dst_ring"));
3066 				return QDF_STATUS_E_NOMEM;
3067 			}
3068 
3069 			entries =
3070 			    wlan_cfg_get_dma_mon_stat_ring_size(pdev_cfg_ctx);
3071 			if (dp_srng_setup(soc,
3072 					  &pdev->rxdma_mon_status_ring[mac_id],
3073 					  RXDMA_MONITOR_STATUS, 0, mac_for_pdev,
3074 					  entries)) {
3075 				QDF_TRACE(QDF_MODULE_ID_DP,
3076 					  QDF_TRACE_LEVEL_ERROR,
3077 					  FL(RNG_ERR "rxdma_mon_status_ring"));
3078 				return QDF_STATUS_E_NOMEM;
3079 			}
3080 
3081 			entries =
3082 			   wlan_cfg_get_dma_mon_desc_ring_size(pdev_cfg_ctx);
3083 			if (dp_srng_setup(soc,
3084 					  &pdev->rxdma_mon_desc_ring[mac_id],
3085 					  RXDMA_MONITOR_DESC, 0, mac_for_pdev,
3086 					  entries)) {
3087 				QDF_TRACE(QDF_MODULE_ID_DP,
3088 					  QDF_TRACE_LEVEL_ERROR,
3089 					  FL(RNG_ERR "rxdma_mon_desc_ring"));
3090 				return QDF_STATUS_E_NOMEM;
3091 			}
3092 		} else {
3093 			entries =
3094 			   wlan_cfg_get_dma_mon_stat_ring_size(pdev_cfg_ctx);
3095 			if (dp_srng_setup(soc,
3096 					  &pdev->rxdma_mon_status_ring[mac_id],
3097 					  RXDMA_MONITOR_STATUS, 0, mac_for_pdev,
3098 					  entries)) {
3099 				QDF_TRACE(QDF_MODULE_ID_DP,
3100 					  QDF_TRACE_LEVEL_ERROR,
3101 					  FL(RNG_ERR "rxdma_mon_status_ring"));
3102 				return QDF_STATUS_E_NOMEM;
3103 			}
3104 		}
3105 	}
3106 
3107 	return QDF_STATUS_SUCCESS;
3108 }
3109 #else
3110 static
3111 QDF_STATUS dp_mon_rings_setup(struct dp_soc *soc, struct dp_pdev *pdev)
3112 {
3113 	return QDF_STATUS_SUCCESS;
3114 }
3115 #endif
3116 
/*
 * dp_iterate_update_peer_list() - update peer stats on cal client timer
 * @pdev_hdl: pdev handle
 */
3120 #ifdef ATH_SUPPORT_EXT_STAT
3121 void  dp_iterate_update_peer_list(void *pdev_hdl)
3122 {
3123 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_hdl;
3124 	struct dp_soc *soc = pdev->soc;
3125 	struct dp_vdev *vdev = NULL;
3126 	struct dp_peer *peer = NULL;
3127 
3128 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
3129 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
3130 	DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
3131 		DP_VDEV_ITERATE_PEER_LIST(vdev, peer) {
3132 			dp_cal_client_update_peer_stats(&peer->stats);
3133 		}
3134 	}
3135 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
3136 	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
3137 }
3138 #else
3139 void  dp_iterate_update_peer_list(void *pdev_hdl)
3140 {
3141 }
3142 #endif
3143 
3144 /*
 * dp_pdev_attach_wifi3() - attach txrx pdev
 * @txrx_soc: Datapath SOC handle
 * @ctrl_pdev: Opaque PDEV object
 * @htc_handle: HTC handle for host-target interface
 * @qdf_osdev: QDF OS device
 * @pdev_id: PDEV ID
 *
 * Return: DP PDEV handle on success, NULL on failure
 */
3154 static struct cdp_pdev *dp_pdev_attach_wifi3(struct cdp_soc_t *txrx_soc,
3155 	struct cdp_ctrl_objmgr_pdev *ctrl_pdev,
3156 	HTC_HANDLE htc_handle, qdf_device_t qdf_osdev, uint8_t pdev_id)
3157 {
3158 	int tx_ring_size;
3159 	int tx_comp_ring_size;
3160 	int reo_dst_ring_size;
3161 	int entries;
3162 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
3163 	int nss_cfg;
3164 
3165 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
3166 	struct dp_pdev *pdev = NULL;
3167 
3168 	if (soc->dp_soc_reinit)
3169 		pdev = soc->pdev_list[pdev_id];
3170 	else
3171 		pdev = qdf_mem_malloc(sizeof(*pdev));
3172 
3173 	if (!pdev) {
3174 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3175 			FL("DP PDEV memory allocation failed"));
3176 		goto fail0;
3177 	}
3178 
3179 	/*
3180 	 * Variable to prevent double pdev deinitialization during
	 * radio detach execution, i.e. in the absence of any vdev.
3182 	 */
3183 	pdev->pdev_deinit = 0;
3184 	pdev->invalid_peer = qdf_mem_malloc(sizeof(struct dp_peer));
3185 
3186 	if (!pdev->invalid_peer) {
3187 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3188 			  FL("Invalid peer memory allocation failed"));
3189 		qdf_mem_free(pdev);
3190 		goto fail0;
3191 	}
3192 
3193 	soc_cfg_ctx = soc->wlan_cfg_ctx;
3194 	pdev->wlan_cfg_ctx = wlan_cfg_pdev_attach(soc->ctrl_psoc);
3195 
3196 	if (!pdev->wlan_cfg_ctx) {
3197 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3198 			FL("pdev cfg_attach failed"));
3199 
3200 		qdf_mem_free(pdev->invalid_peer);
3201 		qdf_mem_free(pdev);
3202 		goto fail0;
3203 	}
3204 
3205 	/*
3206 	 * set nss pdev config based on soc config
3207 	 */
3208 	nss_cfg = wlan_cfg_get_dp_soc_nss_cfg(soc_cfg_ctx);
3209 	wlan_cfg_set_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx,
3210 			(nss_cfg & (1 << pdev_id)));
3211 
3212 	pdev->soc = soc;
3213 	pdev->ctrl_pdev = ctrl_pdev;
3214 	pdev->pdev_id = pdev_id;
3215 	soc->pdev_list[pdev_id] = pdev;
3216 
3217 	pdev->lmac_id = wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, pdev_id);
3218 	soc->pdev_count++;
3219 
3220 	TAILQ_INIT(&pdev->vdev_list);
3221 	qdf_spinlock_create(&pdev->vdev_list_lock);
3222 	pdev->vdev_count = 0;
3223 
3224 	qdf_spinlock_create(&pdev->tx_mutex);
3225 	qdf_spinlock_create(&pdev->neighbour_peer_mutex);
3226 	TAILQ_INIT(&pdev->neighbour_peers_list);
3227 	pdev->neighbour_peers_added = false;
3228 	pdev->monitor_configured = false;
3229 
3230 	if (dp_soc_cmn_setup(soc)) {
3231 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3232 			FL("dp_soc_cmn_setup failed"));
3233 		goto fail1;
3234 	}
3235 
3236 	/* Setup per PDEV TCL rings if configured */
3237 	if (wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
3238 		tx_ring_size =
3239 			wlan_cfg_tx_ring_size(soc_cfg_ctx);
3240 		tx_comp_ring_size =
3241 			wlan_cfg_tx_comp_ring_size(soc_cfg_ctx);
3242 
3243 		if (dp_srng_setup(soc, &soc->tcl_data_ring[pdev_id], TCL_DATA,
3244 			pdev_id, pdev_id, tx_ring_size)) {
3245 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3246 				FL("dp_srng_setup failed for tcl_data_ring"));
3247 			goto fail1;
3248 		}
3249 		if (dp_srng_setup(soc, &soc->tx_comp_ring[pdev_id],
3250 			WBM2SW_RELEASE, pdev_id, pdev_id, tx_comp_ring_size)) {
3251 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3252 				FL("dp_srng_setup failed for tx_comp_ring"));
3253 			goto fail1;
3254 		}
3255 		soc->num_tcl_data_rings++;
3256 	}
3257 
3258 	/* Tx specific init */
3259 	if (dp_tx_pdev_attach(pdev)) {
3260 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3261 			FL("dp_tx_pdev_attach failed"));
3262 		goto fail1;
3263 	}
3264 
3265 	reo_dst_ring_size = wlan_cfg_get_reo_dst_ring_size(soc->wlan_cfg_ctx);
3266 	/* Setup per PDEV REO rings if configured */
3267 	if (wlan_cfg_per_pdev_rx_ring(soc_cfg_ctx)) {
3268 		if (dp_srng_setup(soc, &soc->reo_dest_ring[pdev_id], REO_DST,
3269 			pdev_id, pdev_id, reo_dst_ring_size)) {
3270 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				FL("dp_srng_setup failed for reo_dest_ring"));
3272 			goto fail1;
3273 		}
3274 		soc->num_reo_dest_rings++;
3275 
3276 	}
3277 	if (dp_srng_setup(soc, &pdev->rx_refill_buf_ring, RXDMA_BUF, 0, pdev_id,
3278 		wlan_cfg_get_dp_soc_rxdma_refill_ring_size(soc_cfg_ctx))) {
3279 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3280 			 FL("dp_srng_setup failed rx refill ring"));
3281 		goto fail1;
3282 	}
3283 
3284 	if (dp_rxdma_ring_setup(soc, pdev)) {
3285 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3286 			 FL("RXDMA ring config failed"));
3287 		goto fail1;
3288 	}
3289 
3290 	if (dp_mon_rings_setup(soc, pdev)) {
3291 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3292 			  FL("MONITOR rings setup failed"));
3293 		goto fail1;
3294 	}
3295 
3296 	entries = wlan_cfg_get_dp_soc_rxdma_err_dst_ring_size(soc_cfg_ctx);
3297 	if (wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx)) {
3298 		if (dp_srng_setup(soc, &pdev->rxdma_err_dst_ring[0], RXDMA_DST,
3299 				  0, pdev_id,
3300 				  entries)) {
3301 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3302 				  FL(RNG_ERR "rxdma_err_dst_ring"));
3303 			goto fail1;
3304 		}
3305 	}
3306 
3307 	if (dp_setup_ipa_rx_refill_buf_ring(soc, pdev))
3308 		goto fail1;
3309 
3310 	if (dp_ipa_ring_resource_setup(soc, pdev))
3311 		goto fail1;
3312 
3313 	if (dp_ipa_uc_attach(soc, pdev) != QDF_STATUS_SUCCESS) {
3314 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3315 			FL("dp_ipa_uc_attach failed"));
3316 		goto fail1;
3317 	}
3318 
3319 	/* Rx specific init */
3320 	if (dp_rx_pdev_attach(pdev)) {
3321 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3322 			  FL("dp_rx_pdev_attach failed"));
3323 		goto fail1;
3324 	}
3325 
3326 	DP_STATS_INIT(pdev);
3327 
3328 	/* Monitor filter init */
3329 	pdev->mon_filter_mode = MON_FILTER_ALL;
3330 	pdev->fp_mgmt_filter = FILTER_MGMT_ALL;
3331 	pdev->fp_ctrl_filter = FILTER_CTRL_ALL;
3332 	pdev->fp_data_filter = FILTER_DATA_ALL;
3333 	pdev->mo_mgmt_filter = FILTER_MGMT_ALL;
3334 	pdev->mo_ctrl_filter = FILTER_CTRL_ALL;
3335 	pdev->mo_data_filter = FILTER_DATA_ALL;
3336 
3337 	dp_local_peer_id_pool_init(pdev);
3338 
3339 	dp_dscp_tid_map_setup(pdev);
3340 
3341 	/* Rx monitor mode specific init */
3342 	if (dp_rx_pdev_mon_attach(pdev)) {
3343 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
3344 				"dp_rx_pdev_mon_attach failed");
3345 		goto fail1;
3346 	}
3347 
3348 	if (dp_wdi_event_attach(pdev)) {
3349 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
				"dp_wdi_event_attach failed");
3351 		goto fail1;
3352 	}
3353 
3354 	/* set the reo destination during initialization */
3355 	pdev->reo_dest = pdev->pdev_id + 1;
3356 
3357 	/*
3358 	 * initialize ppdu tlv list
3359 	 */
3360 	TAILQ_INIT(&pdev->ppdu_info_list);
3361 	pdev->tlv_count = 0;
3362 	pdev->list_depth = 0;
3363 
3364 	qdf_mem_zero(&pdev->sojourn_stats, sizeof(struct cdp_tx_sojourn_stats));
3365 
3366 	pdev->sojourn_buf = qdf_nbuf_alloc(pdev->soc->osdev,
3367 			      sizeof(struct cdp_tx_sojourn_stats), 0, 4,
3368 			      TRUE);
3369 
	/* initialize cal client timer */
3371 	dp_cal_client_attach(&pdev->cal_client_ctx, pdev, pdev->soc->osdev,
3372 			     &dp_iterate_update_peer_list);
3373 
3374 	return (struct cdp_pdev *)pdev;
3375 
3376 fail1:
3377 	dp_pdev_detach((struct cdp_pdev *)pdev, 0);
3378 
3379 fail0:
3380 	return NULL;
3381 }
3382 
3383 /*
 * dp_rxdma_ring_cleanup() - clean up the RX DMA rings
 * @soc: data path SoC handle
 * @pdev: Physical device handle
 *
 * Return: void
 */
3390 #ifdef QCA_HOST2FW_RXBUF_RING
3391 static void dp_rxdma_ring_cleanup(struct dp_soc *soc,
3392 	 struct dp_pdev *pdev)
3393 {
3394 	int max_mac_rings =
3395 		 wlan_cfg_get_num_mac_rings(pdev->wlan_cfg_ctx);
3396 	int i;
3397 
	max_mac_rings = max_mac_rings < MAX_RX_MAC_RINGS ?
				max_mac_rings : MAX_RX_MAC_RINGS;
	for (i = 0; i < max_mac_rings; i++)
3401 		dp_srng_cleanup(soc, &pdev->rx_mac_buf_ring[i],
3402 			 RXDMA_BUF, 1);
3403 
3404 	qdf_timer_free(&soc->mon_reap_timer);
3405 }
3406 #else
3407 static void dp_rxdma_ring_cleanup(struct dp_soc *soc,
3408 	 struct dp_pdev *pdev)
3409 {
3410 }
3411 #endif
3412 
3413 /*
3414  * dp_neighbour_peers_detach() - Detach neighbour peers(nac clients)
3415  * @pdev: device object
3416  *
3417  * Return: void
3418  */
3419 static void dp_neighbour_peers_detach(struct dp_pdev *pdev)
3420 {
3421 	struct dp_neighbour_peer *peer = NULL;
3422 	struct dp_neighbour_peer *temp_peer = NULL;
3423 
3424 	TAILQ_FOREACH_SAFE(peer, &pdev->neighbour_peers_list,
3425 			neighbour_peer_list_elem, temp_peer) {
3426 		/* delete this peer from the list */
3427 		TAILQ_REMOVE(&pdev->neighbour_peers_list,
3428 				peer, neighbour_peer_list_elem);
3429 		qdf_mem_free(peer);
3430 	}
3431 
3432 	qdf_spinlock_destroy(&pdev->neighbour_peer_mutex);
3433 }
3434 
3435 /**
 * dp_htt_ppdu_stats_detach() - detach stats resources
 * @pdev: Datapath PDEV handle
 *
 * Return: void
 */
3441 static void dp_htt_ppdu_stats_detach(struct dp_pdev *pdev)
3442 {
3443 	struct ppdu_info *ppdu_info, *ppdu_info_next;
3444 
3445 	TAILQ_FOREACH_SAFE(ppdu_info, &pdev->ppdu_info_list,
3446 			ppdu_info_list_elem, ppdu_info_next) {
3447 		if (!ppdu_info)
3448 			break;
3449 		qdf_assert_always(ppdu_info->nbuf);
3450 		qdf_nbuf_free(ppdu_info->nbuf);
3451 		qdf_mem_free(ppdu_info);
3452 	}
3453 }
3454 
3455 #if !defined(DISABLE_MON_CONFIG)
3456 
3457 static
3458 void dp_mon_ring_cleanup(struct dp_soc *soc, struct dp_pdev *pdev,
3459 			 int mac_id)
3460 {
	if (soc->wlan_cfg_ctx->rxdma1_enable) {
		dp_srng_cleanup(soc,
				&pdev->rxdma_mon_buf_ring[mac_id],
				RXDMA_MONITOR_BUF, 0);

		dp_srng_cleanup(soc,
				&pdev->rxdma_mon_dst_ring[mac_id],
				RXDMA_MONITOR_DST, 0);

		dp_srng_cleanup(soc,
				&pdev->rxdma_mon_status_ring[mac_id],
				RXDMA_MONITOR_STATUS, 0);

		dp_srng_cleanup(soc,
				&pdev->rxdma_mon_desc_ring[mac_id],
				RXDMA_MONITOR_DESC, 0);

		dp_srng_cleanup(soc,
				&pdev->rxdma_err_dst_ring[mac_id],
				RXDMA_DST, 0);
	} else {
		dp_srng_cleanup(soc,
				&pdev->rxdma_mon_status_ring[mac_id],
				RXDMA_MONITOR_STATUS, 0);

		dp_srng_cleanup(soc,
				&pdev->rxdma_err_dst_ring[mac_id],
				RXDMA_DST, 0);
	}
3491 }
3492 #else
3493 static void dp_mon_ring_cleanup(struct dp_soc *soc, struct dp_pdev *pdev,
3494 				int mac_id)
3495 {
3496 }
3497 #endif
3498 
3499 /**
3500  * dp_mon_ring_deinit() - Placeholder to deinitialize Monitor rings
3501  *
3502  * @soc: soc handle
3503  * @pdev: datapath physical dev handle
3504  * @mac_id: mac number
3505  *
3506  * Return: None
3507  */
3508 static void dp_mon_ring_deinit(struct dp_soc *soc, struct dp_pdev *pdev,
3509 			       int mac_id)
3510 {
3511 }
3512 
3513 /**
3514  * dp_pdev_mem_reset() - Reset txrx pdev memory
3515  * @pdev: dp pdev handle
3516  *
3517  * Return: None
3518  */
3519 static void dp_pdev_mem_reset(struct dp_pdev *pdev)
3520 {
3521 	uint16_t len = 0;
3522 	uint8_t *dp_pdev_offset = (uint8_t *)pdev;
3523 
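	/*
	 * Zero out every dp_pdev field that follows the pdev_deinit flag.
	 * Fields up to and including the flag survive so the pdev can be
	 * deinitialized and later re-initialized without a full
	 * detach/attach cycle
	 */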
3524 	len = sizeof(struct dp_pdev) -
3525 		offsetof(struct dp_pdev, pdev_deinit) -
3526 		sizeof(pdev->pdev_deinit);
3527 	dp_pdev_offset = dp_pdev_offset +
3528 			 offsetof(struct dp_pdev, pdev_deinit) +
3529 			 sizeof(pdev->pdev_deinit);
3530 
3531 	qdf_mem_zero(dp_pdev_offset, len);
3532 }
3533 
3534 /**
3535  * dp_pdev_deinit() - Deinit txrx pdev
3536  * @txrx_pdev: Datapath PDEV handle
3537  * @force: Force deinit
3538  *
3539  * Return: None
3540  */
3541 static void dp_pdev_deinit(struct cdp_pdev *txrx_pdev, int force)
3542 {
3543 	struct dp_pdev *pdev = (struct dp_pdev *)txrx_pdev;
3544 	struct dp_soc *soc = pdev->soc;
3545 	qdf_nbuf_t curr_nbuf, next_nbuf;
3546 	int mac_id;
3547 
3548 	/*
3549 	 * Prevent double pdev deinitialization during radio detach
	 * execution, i.e. in the absence of any vdev.
3551 	 */
3552 	if (pdev->pdev_deinit)
3553 		return;
3554 
3555 	pdev->pdev_deinit = 1;
3556 
3557 	dp_wdi_event_detach(pdev);
3558 
3559 	dp_tx_pdev_detach(pdev);
3560 
3561 	if (wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
3562 		dp_srng_deinit(soc, &soc->tcl_data_ring[pdev->pdev_id],
3563 			       TCL_DATA, pdev->pdev_id);
3564 		dp_srng_deinit(soc, &soc->tx_comp_ring[pdev->pdev_id],
3565 			       WBM2SW_RELEASE, pdev->pdev_id);
3566 	}
3567 
3568 	dp_pktlogmod_exit(pdev);
3569 
3570 	dp_rx_pdev_detach(pdev);
3571 	dp_rx_pdev_mon_detach(pdev);
3572 	dp_neighbour_peers_detach(pdev);
3573 	qdf_spinlock_destroy(&pdev->tx_mutex);
3574 	qdf_spinlock_destroy(&pdev->vdev_list_lock);
3575 
3576 	dp_ipa_uc_detach(soc, pdev);
3577 
3578 	dp_cleanup_ipa_rx_refill_buf_ring(soc, pdev);
3579 
3580 	/* Cleanup per PDEV REO rings if configured */
3581 	if (wlan_cfg_per_pdev_rx_ring(soc->wlan_cfg_ctx)) {
3582 		dp_srng_deinit(soc, &soc->reo_dest_ring[pdev->pdev_id],
3583 			       REO_DST, pdev->pdev_id);
3584 	}
3585 
3586 	dp_srng_deinit(soc, &pdev->rx_refill_buf_ring, RXDMA_BUF, 0);
3587 
3588 	dp_rxdma_ring_cleanup(soc, pdev);
3589 
3590 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
3591 		dp_mon_ring_deinit(soc, pdev, mac_id);
3592 		dp_srng_deinit(soc, &pdev->rxdma_err_dst_ring[mac_id],
3593 			       RXDMA_DST, 0);
3594 	}
3595 
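	/* free any MSDUs still chained on the invalid-peer list */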
3596 	curr_nbuf = pdev->invalid_peer_head_msdu;
3597 	while (curr_nbuf) {
3598 		next_nbuf = qdf_nbuf_next(curr_nbuf);
3599 		qdf_nbuf_free(curr_nbuf);
3600 		curr_nbuf = next_nbuf;
3601 	}
3602 	pdev->invalid_peer_head_msdu = NULL;
3603 	pdev->invalid_peer_tail_msdu = NULL;
3604 
3605 	dp_htt_ppdu_stats_detach(pdev);
3606 
3607 	qdf_nbuf_free(pdev->sojourn_buf);
3608 
3609 	dp_cal_client_detach(&pdev->cal_client_ctx);
3610 
3611 	soc->pdev_count--;
3612 	wlan_cfg_pdev_detach(pdev->wlan_cfg_ctx);
3613 	qdf_mem_free(pdev->invalid_peer);
3614 	qdf_mem_free(pdev->dp_txrx_handle);
3615 	dp_pdev_mem_reset(pdev);
3616 }
3617 
3618 /**
3619  * dp_pdev_deinit_wifi3() - Deinit txrx pdev
3620  * @txrx_pdev: Datapath PDEV handle
3621  * @force: Force deinit
3622  *
3623  * Return: None
3624  */
3625 static void dp_pdev_deinit_wifi3(struct cdp_pdev *txrx_pdev, int force)
3626 {
3627 	dp_pdev_deinit(txrx_pdev, force);
3628 }
3629 
3630 /*
3631  * dp_pdev_detach() - Complete rest of pdev detach
3632  * @txrx_pdev: Datapath PDEV handle
3633  * @force: Force deinit
3634  *
3635  * Return: None
3636  */
3637 static void dp_pdev_detach(struct cdp_pdev *txrx_pdev, int force)
3638 {
3639 	struct dp_pdev *pdev = (struct dp_pdev *)txrx_pdev;
3640 	struct dp_soc *soc = pdev->soc;
3641 	int mac_id;
3642 
3643 	if (wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
3644 		dp_srng_cleanup(soc, &soc->tcl_data_ring[pdev->pdev_id],
3645 				TCL_DATA, pdev->pdev_id);
3646 		dp_srng_cleanup(soc, &soc->tx_comp_ring[pdev->pdev_id],
3647 				WBM2SW_RELEASE, pdev->pdev_id);
3648 	}
3649 
3650 	dp_mon_link_free(pdev);
3651 
3652 	/* Cleanup per PDEV REO rings if configured */
3653 	if (wlan_cfg_per_pdev_rx_ring(soc->wlan_cfg_ctx)) {
3654 		dp_srng_cleanup(soc, &soc->reo_dest_ring[pdev->pdev_id],
3655 				REO_DST, pdev->pdev_id);
3656 	}
3657 
3658 	dp_srng_cleanup(soc, &pdev->rx_refill_buf_ring, RXDMA_BUF, 0);
3659 
3660 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
3661 		dp_mon_ring_cleanup(soc, pdev, mac_id);
3662 		dp_srng_cleanup(soc, &pdev->rxdma_err_dst_ring[mac_id],
3663 				RXDMA_DST, 0);
3664 	}
3665 
3666 	soc->pdev_list[pdev->pdev_id] = NULL;
3667 	qdf_mem_free(pdev);
3668 }
3669 
3670 /*
3671  * dp_pdev_detach_wifi3() - detach txrx pdev
3672  * @txrx_pdev: Datapath PDEV handle
3673  * @force: Force detach
3674  *
3675  * Return: None
3676  */
3677 static void dp_pdev_detach_wifi3(struct cdp_pdev *txrx_pdev, int force)
3678 {
3679 	struct dp_pdev *pdev = (struct dp_pdev *)txrx_pdev;
3680 	struct dp_soc *soc = pdev->soc;
3681 
3682 	if (soc->dp_soc_reinit) {
3683 		dp_pdev_detach(txrx_pdev, force);
3684 	} else {
3685 		dp_pdev_deinit(txrx_pdev, force);
3686 		dp_pdev_detach(txrx_pdev, force);
3687 	}
3688 }
3689 
3690 /*
3691  * dp_reo_desc_freelist_destroy() - Flush REO descriptors from deferred freelist
3692  * @soc: DP SOC handle
3693  */
3694 static inline void dp_reo_desc_freelist_destroy(struct dp_soc *soc)
3695 {
3696 	struct reo_desc_list_node *desc;
3697 	struct dp_rx_tid *rx_tid;
3698 
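	/*
	 * Drain the deferred-free list: unmap each cached REO HW queue
	 * descriptor and release its host memory
	 */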
3699 	qdf_spin_lock_bh(&soc->reo_desc_freelist_lock);
3700 	while (qdf_list_remove_front(&soc->reo_desc_freelist,
3701 		(qdf_list_node_t **)&desc) == QDF_STATUS_SUCCESS) {
3702 		rx_tid = &desc->rx_tid;
3703 		qdf_mem_unmap_nbytes_single(soc->osdev,
3704 			rx_tid->hw_qdesc_paddr,
3705 			QDF_DMA_BIDIRECTIONAL,
3706 			rx_tid->hw_qdesc_alloc_size);
3707 		qdf_mem_free(rx_tid->hw_qdesc_vaddr_unaligned);
3708 		qdf_mem_free(desc);
3709 	}
3710 	qdf_spin_unlock_bh(&soc->reo_desc_freelist_lock);
3711 	qdf_list_destroy(&soc->reo_desc_freelist);
3712 	qdf_spinlock_destroy(&soc->reo_desc_freelist_lock);
3713 }
3714 
3715 /**
3716  * dp_soc_mem_reset() - Reset Dp Soc memory
3717  * @soc: DP handle
3718  *
3719  * Return: None
3720  */
3721 static void dp_soc_mem_reset(struct dp_soc *soc)
3722 {
3723 	uint16_t len = 0;
3724 	uint8_t *dp_soc_offset = (uint8_t *)soc;
3725 
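	/*
	 * Zero out every dp_soc field that follows the dp_soc_reinit flag,
	 * preserving the attach-time state (including the flag itself) so
	 * the soc can be re-initialized on the next attach
	 */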
3726 	len = sizeof(struct dp_soc) -
3727 		offsetof(struct dp_soc, dp_soc_reinit) -
3728 		sizeof(soc->dp_soc_reinit);
3729 	dp_soc_offset = dp_soc_offset +
3730 			offsetof(struct dp_soc, dp_soc_reinit) +
3731 			sizeof(soc->dp_soc_reinit);
3732 
3733 	qdf_mem_zero(dp_soc_offset, len);
3734 }
3735 
3736 /**
3737  * dp_soc_deinit() - Deinitialize txrx SOC
3738  * @txrx_soc: Opaque DP SOC handle
3739  *
3740  * Return: None
3741  */
3742 static void dp_soc_deinit(void *txrx_soc)
3743 {
3744 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
3745 	int i;
3746 
3747 	qdf_atomic_set(&soc->cmn_init_done, 0);
3748 
3749 	for (i = 0; i < MAX_PDEV_CNT; i++) {
3750 		if (soc->pdev_list[i])
3751 			dp_pdev_deinit((struct cdp_pdev *)
3752 					soc->pdev_list[i], 1);
3753 	}
3754 
3755 	qdf_flush_work(&soc->htt_stats.work);
3756 	qdf_disable_work(&soc->htt_stats.work);
3757 
3758 	/* Free pending htt stats messages */
3759 	qdf_nbuf_queue_free(&soc->htt_stats.msg);
3760 
3761 	dp_reo_cmdlist_destroy(soc);
3762 
3763 	dp_peer_find_detach(soc);
3764 
3765 	/* Free the ring memories */
3766 	/* Common rings */
3767 	dp_srng_deinit(soc, &soc->wbm_desc_rel_ring, SW2WBM_RELEASE, 0);
3768 
3769 	/* Tx data rings */
3770 	if (!wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
3771 		for (i = 0; i < soc->num_tcl_data_rings; i++) {
3772 			dp_srng_deinit(soc, &soc->tcl_data_ring[i],
3773 				       TCL_DATA, i);
3774 			dp_srng_deinit(soc, &soc->tx_comp_ring[i],
3775 				       WBM2SW_RELEASE, i);
3776 		}
3777 	}
3778 
3779 	/* TCL command and status rings */
3780 	dp_srng_deinit(soc, &soc->tcl_cmd_ring, TCL_CMD, 0);
3781 	dp_srng_deinit(soc, &soc->tcl_status_ring, TCL_STATUS, 0);
3782 
3783 	/* Rx data rings */
3784 	if (!wlan_cfg_per_pdev_rx_ring(soc->wlan_cfg_ctx)) {
3785 		soc->num_reo_dest_rings =
3786 			wlan_cfg_num_reo_dest_rings(soc->wlan_cfg_ctx);
3787 		for (i = 0; i < soc->num_reo_dest_rings; i++) {
3788 			/* TODO: Get number of rings and ring sizes
3789 			 * from wlan_cfg
3790 			 */
3791 			dp_srng_deinit(soc, &soc->reo_dest_ring[i],
3792 				       REO_DST, i);
3793 		}
3794 	}
3795 	/* REO reinjection ring */
3796 	dp_srng_deinit(soc, &soc->reo_reinject_ring, REO_REINJECT, 0);
3797 
3798 	/* Rx release ring */
3799 	dp_srng_deinit(soc, &soc->rx_rel_ring, WBM2SW_RELEASE, 0);
3800 
3801 	/* Rx exception ring */
3802 	/* TODO: Better to store ring_type and ring_num in
3803 	 * dp_srng during setup
3804 	 */
3805 	dp_srng_deinit(soc, &soc->reo_exception_ring, REO_EXCEPTION, 0);
3806 
3807 	/* REO command and status rings */
3808 	dp_srng_deinit(soc, &soc->reo_cmd_ring, REO_CMD, 0);
3809 	dp_srng_deinit(soc, &soc->reo_status_ring, REO_STATUS, 0);
3810 
3811 	dp_soc_wds_detach(soc);
3812 
3813 	qdf_spinlock_destroy(&soc->peer_ref_mutex);
3814 	qdf_spinlock_destroy(&soc->htt_stats.lock);
3815 
3816 	htt_soc_htc_dealloc(soc->htt_handle);
3817 
3818 	qdf_spinlock_destroy(&soc->rx.defrag.defrag_lock);
3819 
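	/* re-flush any REO commands queued while the rings were torn down */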
3820 	dp_reo_cmdlist_destroy(soc);
3821 	qdf_spinlock_destroy(&soc->rx.reo_cmd_lock);
3822 	dp_reo_desc_freelist_destroy(soc);
3823 
3824 	qdf_spinlock_destroy(&soc->ast_lock);
3825 
3826 	dp_soc_mem_reset(soc);
3827 }
3828 
3829 /**
3830  * dp_soc_deinit_wifi3() - Deinitialize txrx SOC
3831  * @txrx_soc: Opaque DP SOC handle
3832  *
3833  * Return: None
3834  */
3835 static void dp_soc_deinit_wifi3(void *txrx_soc)
3836 {
3837 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
3838 
3839 	soc->dp_soc_reinit = 1;
3840 	dp_soc_deinit(txrx_soc);
3841 }
3842 
3843 /*
3844  * dp_soc_detach() - Detach rest of txrx SOC
3845  * @txrx_soc: DP SOC handle, struct cdp_soc_t is first element of struct dp_soc.
3846  *
3847  * Return: None
3848  */
3849 static void dp_soc_detach(void *txrx_soc)
3850 {
3851 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
3852 	int i;
3853 
3854 	qdf_atomic_set(&soc->cmn_init_done, 0);
3855 
3856 	/* TBD: Call Tx and Rx cleanup functions to free buffers and
3857 	 * SW descriptors
3858 	 */
3859 
3860 	for (i = 0; i < MAX_PDEV_CNT; i++) {
3861 		if (soc->pdev_list[i])
3862 			dp_pdev_detach((struct cdp_pdev *)
3863 					     soc->pdev_list[i], 1);
3864 	}
3865 
3866 	/* Free the ring memories */
3867 	/* Common rings */
3868 	dp_srng_cleanup(soc, &soc->wbm_desc_rel_ring, SW2WBM_RELEASE, 0);
3869 
3870 	dp_tx_soc_detach(soc);
3871 
3872 	/* Tx data rings */
3873 	if (!wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
3874 		for (i = 0; i < soc->num_tcl_data_rings; i++) {
3875 			dp_srng_cleanup(soc, &soc->tcl_data_ring[i],
3876 				TCL_DATA, i);
3877 			dp_srng_cleanup(soc, &soc->tx_comp_ring[i],
3878 				WBM2SW_RELEASE, i);
3879 		}
3880 	}
3881 
3882 	/* TCL command and status rings */
3883 	dp_srng_cleanup(soc, &soc->tcl_cmd_ring, TCL_CMD, 0);
3884 	dp_srng_cleanup(soc, &soc->tcl_status_ring, TCL_STATUS, 0);
3885 
3886 	/* Rx data rings */
3887 	if (!wlan_cfg_per_pdev_rx_ring(soc->wlan_cfg_ctx)) {
3888 		soc->num_reo_dest_rings =
3889 			wlan_cfg_num_reo_dest_rings(soc->wlan_cfg_ctx);
3890 		for (i = 0; i < soc->num_reo_dest_rings; i++) {
3891 			/* TODO: Get number of rings and ring sizes
3892 			 * from wlan_cfg
3893 			 */
3894 			dp_srng_cleanup(soc, &soc->reo_dest_ring[i],
3895 				REO_DST, i);
3896 		}
3897 	}
3898 	/* REO reinjection ring */
3899 	dp_srng_cleanup(soc, &soc->reo_reinject_ring, REO_REINJECT, 0);
3900 
3901 	/* Rx release ring */
3902 	dp_srng_cleanup(soc, &soc->rx_rel_ring, WBM2SW_RELEASE, 0);
3903 
3904 	/* Rx exception ring */
3905 	/* TODO: Better to store ring_type and ring_num in
3906 	 * dp_srng during setup
3907 	 */
3908 	dp_srng_cleanup(soc, &soc->reo_exception_ring, REO_EXCEPTION, 0);
3909 
3910 	/* REO command and status rings */
3911 	dp_srng_cleanup(soc, &soc->reo_cmd_ring, REO_CMD, 0);
3912 	dp_srng_cleanup(soc, &soc->reo_status_ring, REO_STATUS, 0);
3913 	dp_hw_link_desc_pool_cleanup(soc);
3914 
3915 	htt_soc_detach(soc->htt_handle);
3916 	soc->dp_soc_reinit = 0;
3917 
3918 	wlan_cfg_soc_detach(soc->wlan_cfg_ctx);
3919 
3920 	qdf_mem_free(soc);
3921 }
3922 
3923 /*
3924  * dp_soc_detach_wifi3() - Detach txrx SOC
3925  * @txrx_soc: DP SOC handle, struct cdp_soc_t is first element of struct dp_soc.
3926  *
3927  * Return: None
3928  */
3929 static void dp_soc_detach_wifi3(void *txrx_soc)
3930 {
3931 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
3932 
3933 	if (soc->dp_soc_reinit) {
3934 		dp_soc_detach(txrx_soc);
3935 	} else {
3936 		dp_soc_deinit(txrx_soc);
3937 		dp_soc_detach(txrx_soc);
3938 	}
}
3941 
3942 #if !defined(DISABLE_MON_CONFIG)
3943 /**
3944  * dp_mon_htt_srng_setup() - Prepare HTT messages for Monitor rings
3945  * @soc: soc handle
3946  * @pdev: physical device handle
 * @mac_id: mac id
 * @mac_for_pdev: mac id mapped to this pdev
3949  *
3950  * Return: non-zero for failure, zero for success
3951  */
3952 static QDF_STATUS dp_mon_htt_srng_setup(struct dp_soc *soc,
3953 					struct dp_pdev *pdev,
3954 					int mac_id,
3955 					int mac_for_pdev)
3956 {
3957 	QDF_STATUS status = QDF_STATUS_SUCCESS;
3958 
3959 	if (soc->wlan_cfg_ctx->rxdma1_enable) {
3960 		status = htt_srng_setup(soc->htt_handle, mac_for_pdev,
3961 					pdev->rxdma_mon_buf_ring[mac_id]
3962 					.hal_srng,
3963 					RXDMA_MONITOR_BUF);
3964 
3965 		if (status != QDF_STATUS_SUCCESS) {
3966 			dp_err("Failed to send htt srng setup message for Rxdma mon buf ring");
3967 			return status;
3968 		}
3969 
3970 		status = htt_srng_setup(soc->htt_handle, mac_for_pdev,
3971 					pdev->rxdma_mon_dst_ring[mac_id]
3972 					.hal_srng,
3973 					RXDMA_MONITOR_DST);
3974 
3975 		if (status != QDF_STATUS_SUCCESS) {
3976 			dp_err("Failed to send htt srng setup message for Rxdma mon dst ring");
3977 			return status;
3978 		}
3979 
3980 		status = htt_srng_setup(soc->htt_handle, mac_for_pdev,
3981 					pdev->rxdma_mon_status_ring[mac_id]
3982 					.hal_srng,
3983 					RXDMA_MONITOR_STATUS);
3984 
3985 		if (status != QDF_STATUS_SUCCESS) {
3986 			dp_err("Failed to send htt srng setup message for Rxdma mon status ring");
3987 			return status;
3988 		}
3989 
3990 		status = htt_srng_setup(soc->htt_handle, mac_for_pdev,
3991 					pdev->rxdma_mon_desc_ring[mac_id]
3992 					.hal_srng,
3993 					RXDMA_MONITOR_DESC);
3994 
3995 		if (status != QDF_STATUS_SUCCESS) {
3996 			dp_err("Failed to send htt srng message for Rxdma mon desc ring");
3997 			return status;
3998 		}
3999 	} else {
4000 		status = htt_srng_setup(soc->htt_handle, mac_for_pdev,
4001 					pdev->rxdma_mon_status_ring[mac_id]
4002 					.hal_srng,
4003 					RXDMA_MONITOR_STATUS);
4004 
4005 		if (status != QDF_STATUS_SUCCESS) {
4006 			dp_err("Failed to send htt srng setup message for Rxdma mon status ring");
4007 			return status;
4008 		}
4009 	}
4010 
	return status;
}
4014 #else
4015 static QDF_STATUS dp_mon_htt_srng_setup(struct dp_soc *soc,
4016 					struct dp_pdev *pdev,
4017 					int mac_id,
4018 					int mac_for_pdev)
4019 {
4020 	return QDF_STATUS_SUCCESS;
4021 }
4022 #endif
4023 
4024 /*
 * dp_rxdma_ring_config() - configure the RX DMA rings
 * @soc: data path SoC handle
 *
 * This function is used to configure the MAC rings.
 * On MCL, the host provides buffers in the Host2FW ring;
 * the FW refills (copies) buffers into the ring and updates
 * ring_idx in a register.
 *
 * Return: zero on success, non-zero on failure
 */
4036 #ifdef QCA_HOST2FW_RXBUF_RING
4037 static QDF_STATUS dp_rxdma_ring_config(struct dp_soc *soc)
4038 {
4039 	int i;
	QDF_STATUS status = QDF_STATUS_SUCCESS;

	for (i = 0; i < MAX_PDEV_CNT; i++) {
4042 		struct dp_pdev *pdev = soc->pdev_list[i];
4043 
4044 		if (pdev) {
4045 			int mac_id;
4046 			bool dbs_enable = 0;
4047 			int max_mac_rings =
4048 				 wlan_cfg_get_num_mac_rings
4049 				(pdev->wlan_cfg_ctx);
4050 
4051 			htt_srng_setup(soc->htt_handle, 0,
4052 				 pdev->rx_refill_buf_ring.hal_srng,
4053 				 RXDMA_BUF);
4054 
4055 			if (pdev->rx_refill_buf_ring2.hal_srng)
4056 				htt_srng_setup(soc->htt_handle, 0,
4057 					pdev->rx_refill_buf_ring2.hal_srng,
4058 					RXDMA_BUF);
4059 
			if (soc->cdp_soc.ol_ops->is_hw_dbs_2x2_capable) {
				dbs_enable = soc->cdp_soc.ol_ops->
					is_hw_dbs_2x2_capable(soc->ctrl_psoc);
			}
4065 
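			/*
			 * With DBS (dual band simultaneous) each MAC keeps
			 * its own Host2FW buffer ring; on single-band
			 * targets a single ring is used
			 */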
4066 			if (dbs_enable) {
4067 				QDF_TRACE(QDF_MODULE_ID_TXRX,
4068 				QDF_TRACE_LEVEL_ERROR,
4069 				FL("DBS enabled max_mac_rings %d"),
4070 					 max_mac_rings);
4071 			} else {
4072 				max_mac_rings = 1;
4073 				QDF_TRACE(QDF_MODULE_ID_TXRX,
4074 					 QDF_TRACE_LEVEL_ERROR,
4075 					 FL("DBS disabled, max_mac_rings %d"),
4076 					 max_mac_rings);
4077 			}
4078 
4079 			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
4080 					 FL("pdev_id %d max_mac_rings %d"),
4081 					 pdev->pdev_id, max_mac_rings);
4082 
4083 			for (mac_id = 0; mac_id < max_mac_rings; mac_id++) {
4084 				int mac_for_pdev = dp_get_mac_id_for_pdev(
4085 							mac_id, pdev->pdev_id);
4086 
4087 				QDF_TRACE(QDF_MODULE_ID_TXRX,
4088 					 QDF_TRACE_LEVEL_ERROR,
4089 					 FL("mac_id %d"), mac_for_pdev);
4090 
4091 				htt_srng_setup(soc->htt_handle, mac_for_pdev,
4092 					 pdev->rx_mac_buf_ring[mac_id]
4093 						.hal_srng,
4094 					 RXDMA_BUF);
4095 				htt_srng_setup(soc->htt_handle, mac_for_pdev,
4096 					pdev->rxdma_err_dst_ring[mac_id]
4097 						.hal_srng,
4098 					RXDMA_DST);
4099 
4100 				/* Configure monitor mode rings */
4101 				status = dp_mon_htt_srng_setup(soc, pdev,
4102 							       mac_id,
4103 							       mac_for_pdev);
4104 				if (status != QDF_STATUS_SUCCESS) {
4105 					dp_err("Failed to send htt monitor messages to target");
4106 					return status;
4107 				}
4108 
4109 			}
4110 		}
4111 	}
4112 
4113 	/*
4114 	 * Timer to reap rxdma status rings.
4115 	 * Needed until we enable ppdu end interrupts
4116 	 */
4117 	qdf_timer_init(soc->osdev, &soc->mon_reap_timer,
4118 			dp_service_mon_rings, (void *)soc,
4119 			QDF_TIMER_TYPE_WAKE_APPS);
4120 	soc->reap_timer_init = 1;
4121 	return status;
4122 }
4123 #else
4124 /* This is only for WIN */
4125 static QDF_STATUS dp_rxdma_ring_config(struct dp_soc *soc)
4126 {
4127 	int i;
4128 	int mac_id;
4129 	QDF_STATUS status = QDF_STATUS_SUCCESS;
4130 
4131 	for (i = 0; i < MAX_PDEV_CNT; i++) {
4132 		struct dp_pdev *pdev = soc->pdev_list[i];
4133 
4134 		if (pdev == NULL)
4135 			continue;
4136 
4137 		for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
4138 			int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, i);
4139 
4140 			htt_srng_setup(soc->htt_handle, mac_for_pdev,
4141 				pdev->rx_refill_buf_ring.hal_srng, RXDMA_BUF);
4142 #ifndef DISABLE_MON_CONFIG
4143 			htt_srng_setup(soc->htt_handle, mac_for_pdev,
4144 				pdev->rxdma_mon_buf_ring[mac_id].hal_srng,
4145 				RXDMA_MONITOR_BUF);
4146 			htt_srng_setup(soc->htt_handle, mac_for_pdev,
4147 				pdev->rxdma_mon_dst_ring[mac_id].hal_srng,
4148 				RXDMA_MONITOR_DST);
4149 			htt_srng_setup(soc->htt_handle, mac_for_pdev,
4150 				pdev->rxdma_mon_status_ring[mac_id].hal_srng,
4151 				RXDMA_MONITOR_STATUS);
4152 			htt_srng_setup(soc->htt_handle, mac_for_pdev,
4153 				pdev->rxdma_mon_desc_ring[mac_id].hal_srng,
4154 				RXDMA_MONITOR_DESC);
4155 #endif
4156 			htt_srng_setup(soc->htt_handle, mac_for_pdev,
4157 				pdev->rxdma_err_dst_ring[mac_id].hal_srng,
4158 				RXDMA_DST);
4159 		}
4160 	}
4161 	return status;
4162 }
4163 #endif
4164 
4165 /*
4166  * dp_soc_attach_target_wifi3() - SOC initialization in the target
4167  * @cdp_soc: Opaque Datapath SOC handle
4168  *
4169  * Return: zero on success, non-zero on failure
4170  */
4171 static QDF_STATUS
4172 dp_soc_attach_target_wifi3(struct cdp_soc_t *cdp_soc)
4173 {
4174 	struct dp_soc *soc = (struct dp_soc *)cdp_soc;
4175 	QDF_STATUS status = QDF_STATUS_SUCCESS;
4176 
4177 	htt_soc_attach_target(soc->htt_handle);
4178 
4179 	status = dp_rxdma_ring_config(soc);
4180 	if (status != QDF_STATUS_SUCCESS) {
4181 		dp_err("Failed to send htt srng setup messages to target");
4182 		return status;
4183 	}
4184 
4185 	DP_STATS_INIT(soc);
4186 
4187 	/* initialize work queue for stats processing */
4188 	qdf_create_work(0, &soc->htt_stats.work, htt_t2h_stats_handler, soc);
4189 
4190 	return QDF_STATUS_SUCCESS;
4191 }
4192 
4193 /*
4194  * dp_soc_get_nss_cfg_wifi3() - SOC get nss config
 * @cdp_soc: Datapath SOC handle
 *
 * Return: nss config
 */
4197 static int dp_soc_get_nss_cfg_wifi3(struct cdp_soc_t *cdp_soc)
4198 {
4199 	struct dp_soc *dsoc = (struct dp_soc *)cdp_soc;
4200 	return wlan_cfg_get_dp_soc_nss_cfg(dsoc->wlan_cfg_ctx);
4201 }

/*
 * dp_soc_set_nss_cfg_wifi3() - SOC set nss config
 * @cdp_soc: Datapath SOC handle
 * @config: nss config
 *
 * Return: void
 */
4207 static void dp_soc_set_nss_cfg_wifi3(struct cdp_soc_t *cdp_soc, int config)
4208 {
4209 	struct dp_soc *dsoc = (struct dp_soc *)cdp_soc;
4210 	struct wlan_cfg_dp_soc_ctxt *wlan_cfg_ctx = dsoc->wlan_cfg_ctx;
4211 
4212 	wlan_cfg_set_dp_soc_nss_cfg(wlan_cfg_ctx, config);
4213 
4214 	/*
4215 	 * TODO: masked out based on the per offloaded radio
4216 	 */
4217 	switch (config) {
4218 	case dp_nss_cfg_default:
4219 		break;
4220 	case dp_nss_cfg_dbdc:
4221 	case dp_nss_cfg_dbtc:
4222 		wlan_cfg_set_num_tx_desc_pool(wlan_cfg_ctx, 0);
4223 		wlan_cfg_set_num_tx_ext_desc_pool(wlan_cfg_ctx, 0);
4224 		wlan_cfg_set_num_tx_desc(wlan_cfg_ctx, 0);
4225 		wlan_cfg_set_num_tx_ext_desc(wlan_cfg_ctx, 0);
4226 		break;
4227 	default:
4228 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
4229 			  "Invalid offload config %d", config);
4230 	}
4231 
4232 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
4233 		  FL("nss-wifi<0> nss config is enabled"));
4234 }

/*
 * dp_vdev_attach_wifi3() - attach txrx vdev
 * @txrx_pdev: Datapath PDEV handle
 * @vdev_mac_addr: MAC address of the virtual interface
 * @vdev_id: VDEV Id
 * @op_mode: VDEV operating mode
 *
 * Return: DP VDEV handle on success, NULL on failure
 */
4244 static struct cdp_vdev *dp_vdev_attach_wifi3(struct cdp_pdev *txrx_pdev,
4245 	uint8_t *vdev_mac_addr, uint8_t vdev_id, enum wlan_op_mode op_mode)
4246 {
4247 	struct dp_pdev *pdev = (struct dp_pdev *)txrx_pdev;
4248 	struct dp_soc *soc = pdev->soc;
4249 	struct dp_vdev *vdev = qdf_mem_malloc(sizeof(*vdev));
4250 
4251 	if (!vdev) {
4252 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
4253 			FL("DP VDEV memory allocation failed"));
4254 		goto fail0;
4255 	}
4256 
4257 	vdev->pdev = pdev;
4258 	vdev->vdev_id = vdev_id;
4259 	vdev->opmode = op_mode;
4260 	vdev->osdev = soc->osdev;
4261 
4262 	vdev->osif_rx = NULL;
4263 	vdev->osif_rsim_rx_decap = NULL;
4264 	vdev->osif_get_key = NULL;
4265 	vdev->osif_rx_mon = NULL;
4266 	vdev->osif_tx_free_ext = NULL;
4267 	vdev->osif_vdev = NULL;
4268 
4269 	vdev->delete.pending = 0;
4270 	vdev->safemode = 0;
4271 	vdev->drop_unenc = 1;
4272 	vdev->sec_type = cdp_sec_type_none;
4273 #ifdef notyet
4274 	vdev->filters_num = 0;
4275 #endif
4276 
4277 	qdf_mem_copy(
4278 		&vdev->mac_addr.raw[0], vdev_mac_addr, OL_TXRX_MAC_ADDR_LEN);
4279 
4280 	/* TODO: Initialize default HTT meta data that will be used in
4281 	 * TCL descriptors for packets transmitted from this VDEV
4282 	 */
4283 
4284 	TAILQ_INIT(&vdev->peer_list);
4285 
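	/*
	 * In polled interrupt mode, kick the interrupt poll timer when
	 * the first vdev (or a monitor vdev) comes up on this pdev
	 */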
4286 	if ((soc->intr_mode == DP_INTR_POLL) &&
4287 	    wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx) != 0) {
4288 		if ((pdev->vdev_count == 0) ||
4289 		    (wlan_op_mode_monitor == vdev->opmode))
4290 			qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
4291 	}
4292 
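	/*
	 * A monitor vdev only needs the pdev back-reference; skip the
	 * remaining data-path setup
	 */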
4293 	if (wlan_op_mode_monitor == vdev->opmode) {
4294 		pdev->monitor_vdev = vdev;
4295 		return (struct cdp_vdev *)vdev;
4296 	}
4297 
4298 	vdev->tx_encap_type = wlan_cfg_pkt_type(soc->wlan_cfg_ctx);
4299 	vdev->rx_decap_type = wlan_cfg_pkt_type(soc->wlan_cfg_ctx);
4300 	vdev->dscp_tid_map_id = 0;
4301 	vdev->mcast_enhancement_en = 0;
4302 	vdev->raw_mode_war = wlan_cfg_get_raw_mode_war(soc->wlan_cfg_ctx);
4303 
4304 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
4305 	/* add this vdev into the pdev's list */
4306 	TAILQ_INSERT_TAIL(&pdev->vdev_list, vdev, vdev_list_elem);
4307 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
4308 	pdev->vdev_count++;
4309 
4310 	dp_tx_vdev_attach(vdev);
4311 
4312 	if (pdev->vdev_count == 1)
4313 		dp_lro_hash_setup(soc, pdev);
4314 
4315 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
4316 		"Created vdev %pK (%pM)", vdev, vdev->mac_addr.raw);
4317 	DP_STATS_INIT(vdev);
4318 
4319 	if (wlan_op_mode_sta == vdev->opmode)
4320 		dp_peer_create_wifi3((struct cdp_vdev *)vdev,
4321 							vdev->mac_addr.raw,
4322 							NULL);
4323 
4324 	return (struct cdp_vdev *)vdev;
4325 
4326 fail0:
4327 	return NULL;
4328 }
4329 
4330 /**
4331  * dp_vdev_register_wifi3() - Register VDEV operations from osif layer
 * @vdev_handle: Datapath VDEV handle
 * @osif_vdev: OSIF vdev handle
 * @ctrl_vdev: UMAC vdev handle
 * @txrx_ops: Tx and Rx operations
 *
 * Return: None
4338  */
4339 static void dp_vdev_register_wifi3(struct cdp_vdev *vdev_handle,
4340 	void *osif_vdev, struct cdp_ctrl_objmgr_vdev *ctrl_vdev,
4341 	struct ol_txrx_ops *txrx_ops)
4342 {
4343 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
4344 	vdev->osif_vdev = osif_vdev;
4345 	vdev->ctrl_vdev = ctrl_vdev;
4346 	vdev->osif_rx = txrx_ops->rx.rx;
4347 	vdev->osif_rx_stack = txrx_ops->rx.rx_stack;
4348 	vdev->osif_rsim_rx_decap = txrx_ops->rx.rsim_rx_decap;
4349 	vdev->osif_get_key = txrx_ops->get_key;
4350 	vdev->osif_rx_mon = txrx_ops->rx.mon;
4351 	vdev->osif_tx_free_ext = txrx_ops->tx.tx_free_ext;
4352 #ifdef notyet
4353 #if ATH_SUPPORT_WAPI
4354 	vdev->osif_check_wai = txrx_ops->rx.wai_check;
4355 #endif
4356 #endif
4357 #ifdef UMAC_SUPPORT_PROXY_ARP
4358 	vdev->osif_proxy_arp = txrx_ops->proxy_arp;
4359 #endif
4360 	vdev->me_convert = txrx_ops->me_convert;
4361 
4362 	/* TODO: Enable the following once Tx code is integrated */
4363 	if (vdev->mesh_vdev)
4364 		txrx_ops->tx.tx = dp_tx_send_mesh;
4365 	else
4366 		txrx_ops->tx.tx = dp_tx_send;
4367 
4368 	txrx_ops->tx.tx_exception = dp_tx_send_exception;
4369 
4370 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW,
4371 		"DP Vdev Register success");
4372 }
4373 
4374 /**
 * dp_vdev_flush_peers() - Forcibly flush peers of vdev
 * @vdev_handle: Datapath VDEV handle
4377  * @unmap_only: Flag to indicate "only unmap"
4378  *
4379  * Return: void
4380  */
4381 static void dp_vdev_flush_peers(struct cdp_vdev *vdev_handle, bool unmap_only)
4382 {
4383 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
4384 	struct dp_pdev *pdev = vdev->pdev;
4385 	struct dp_soc *soc = pdev->soc;
4386 	struct dp_peer *peer;
4387 	uint16_t *peer_ids;
4388 	uint8_t i = 0, j = 0;
4389 
4390 	peer_ids = qdf_mem_malloc(soc->max_peers * sizeof(peer_ids[0]));
4391 	if (!peer_ids) {
4392 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
4393 			"DP alloc failure - unable to flush peers");
4394 		return;
4395 	}
4396 
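	/*
	 * Snapshot the peer ids under peer_ref_mutex first; the actual
	 * delete/unmap calls below re-acquire the same mutex, so they
	 * must run outside this critical section
	 */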
4397 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
4398 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
4399 		for (i = 0; i < MAX_NUM_PEER_ID_PER_PEER; i++)
4400 			if (peer->peer_ids[i] != HTT_INVALID_PEER)
4401 				if (j < soc->max_peers)
4402 					peer_ids[j++] = peer->peer_ids[i];
4403 	}
4404 	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
4405 
4406 	for (i = 0; i < j ; i++) {
4407 		peer = dp_peer_find_by_id(soc, peer_ids[i]);
4408 		if (peer) {
4409 			dp_info("peer: %pM is getting flush",
4410 				peer->mac_addr.raw);
4411 
4412 			if (!unmap_only)
4413 				dp_peer_delete_wifi3(peer, 0);
4414 			/*
4415 			 * we need to call dp_peer_unref_del_find_by_id()
4416 			 * to remove additional ref count incremented
4417 			 * by dp_peer_find_by_id() call.
4418 			 *
4419 			 * Hold the ref count while executing
4420 			 * dp_peer_delete_wifi3() call.
4421 			 *
4422 			 */
4423 			dp_peer_unref_del_find_by_id(peer);
4424 			dp_rx_peer_unmap_handler(soc, peer_ids[i],
4425 						 vdev->vdev_id,
4426 						 peer->mac_addr.raw, 0);
4427 		}
4428 	}
4429 
4430 	qdf_mem_free(peer_ids);
4431 
4432 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
4433 		FL("Flushed peers for vdev object %pK "), vdev);
4434 }
4435 
4436 /*
4437  * dp_vdev_detach_wifi3() - Detach txrx vdev
 * @vdev_handle: Datapath VDEV handle
 * @callback: Callback OL_IF on completion of detach
 * @cb_context: Callback context
 *
 * Return: None
 */
4443 static void dp_vdev_detach_wifi3(struct cdp_vdev *vdev_handle,
4444 	ol_txrx_vdev_delete_cb callback, void *cb_context)
4445 {
4446 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
4447 	struct dp_pdev *pdev = vdev->pdev;
4448 	struct dp_soc *soc = pdev->soc;
4449 	struct dp_neighbour_peer *peer = NULL;
4450 	struct dp_neighbour_peer *temp_peer = NULL;
4451 
4452 	/* preconditions */
4453 	qdf_assert(vdev);
4454 
4455 	if (wlan_op_mode_monitor == vdev->opmode)
4456 		goto free_vdev;
4457 
4458 	if (wlan_op_mode_sta == vdev->opmode)
4459 		dp_peer_delete_wifi3(vdev->vap_bss_peer, 0);
4460 
4461 	/*
4462 	 * If Target is hung, flush all peers before detaching vdev
4463 	 * this will free all references held due to missing
4464 	 * unmap commands from Target
4465 	 */
4466 	if (!hif_is_target_ready(HIF_GET_SOFTC(soc->hif_handle)))
4467 		dp_vdev_flush_peers((struct cdp_vdev *)vdev, false);
4468 
4469 	/*
4470 	 * Use peer_ref_mutex while accessing peer_list, in case
4471 	 * a peer is in the process of being removed from the list.
4472 	 */
4473 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
4474 	/* check that the vdev has no peers allocated */
4475 	if (!TAILQ_EMPTY(&vdev->peer_list)) {
4476 		/* debug print - will be removed later */
4477 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_WARN,
4478 			FL("not deleting vdev object %pK (%pM)"
4479 			"until deletion finishes for all its peers"),
4480 			vdev, vdev->mac_addr.raw);
4481 		/* indicate that the vdev needs to be deleted */
4482 		vdev->delete.pending = 1;
4483 		vdev->delete.callback = callback;
4484 		vdev->delete.context = cb_context;
4485 		qdf_spin_unlock_bh(&soc->peer_ref_mutex);
4486 		return;
4487 	}
4488 	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
4489 
4490 	qdf_spin_lock_bh(&pdev->neighbour_peer_mutex);
4491 	if (!soc->hw_nac_monitor_support) {
4492 		TAILQ_FOREACH(peer, &pdev->neighbour_peers_list,
4493 			      neighbour_peer_list_elem) {
4494 			QDF_ASSERT(peer->vdev != vdev);
4495 		}
4496 	} else {
4497 		TAILQ_FOREACH_SAFE(peer, &pdev->neighbour_peers_list,
4498 				   neighbour_peer_list_elem, temp_peer) {
4499 			if (peer->vdev == vdev) {
4500 				TAILQ_REMOVE(&pdev->neighbour_peers_list, peer,
4501 					     neighbour_peer_list_elem);
4502 				qdf_mem_free(peer);
4503 			}
4504 		}
4505 	}
4506 	qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex);
4507 
4508 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
4509 	dp_tx_vdev_detach(vdev);
4510 	/* remove the vdev from its parent pdev's list */
4511 	TAILQ_REMOVE(&pdev->vdev_list, vdev, vdev_list_elem);
4512 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
4513 		FL("deleting vdev object %pK (%pM)"), vdev, vdev->mac_addr.raw);
4514 
4515 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
4516 free_vdev:
4517 	qdf_mem_free(vdev);
4518 
4519 	if (callback)
4520 		callback(cb_context);
4521 }
4522 
4523 /*
 * dp_peer_delete_ast_entries() - Delete all AST entries for a peer
 * @soc: datapath soc handle
 * @peer: datapath peer handle
 *
 * Delete the AST entries belonging to a peer
 *
 * Return: None
 */
4530 #ifdef FEATURE_AST
4531 static inline void dp_peer_delete_ast_entries(struct dp_soc *soc,
4532 					      struct dp_peer *peer)
4533 {
4534 	struct dp_ast_entry *ast_entry, *temp_ast_entry;
4535 
4536 	qdf_spin_lock_bh(&soc->ast_lock);
4537 	DP_PEER_ITERATE_ASE_LIST(peer, ast_entry, temp_ast_entry)
4538 		dp_peer_del_ast(soc, ast_entry);
4539 
4540 	peer->self_ast_entry = NULL;
4541 	qdf_spin_unlock_bh(&soc->ast_lock);
4542 }
4543 #else
4544 static inline void dp_peer_delete_ast_entries(struct dp_soc *soc,
4545 					      struct dp_peer *peer)
4546 {
4547 }
4548 #endif
4549 
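/*
 * With WRAP (proxy STA) support a bss peer may be shared across vdevs,
 * so any bss peer found by MAC address can be reused; without WRAP the
 * peer must also belong to the requesting vdev
 */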
4550 #if ATH_SUPPORT_WRAP
4551 static inline struct dp_peer *dp_peer_can_reuse(struct dp_vdev *vdev,
4552 						uint8_t *peer_mac_addr)
4553 {
4554 	struct dp_peer *peer;
4555 
4556 	peer = dp_peer_find_hash_find(vdev->pdev->soc, peer_mac_addr,
4557 				      0, vdev->vdev_id);
4558 	if (!peer)
4559 		return NULL;
4560 
4561 	if (peer->bss_peer)
4562 		return peer;
4563 
4564 	dp_peer_unref_delete(peer);
4565 	return NULL;
4566 }
4567 #else
4568 static inline struct dp_peer *dp_peer_can_reuse(struct dp_vdev *vdev,
4569 						uint8_t *peer_mac_addr)
4570 {
4571 	struct dp_peer *peer;
4572 
4573 	peer = dp_peer_find_hash_find(vdev->pdev->soc, peer_mac_addr,
4574 				      0, vdev->vdev_id);
4575 	if (!peer)
4576 		return NULL;
4577 
4578 	if (peer->bss_peer && (peer->vdev->vdev_id == vdev->vdev_id))
4579 		return peer;
4580 
4581 	dp_peer_unref_delete(peer);
4582 	return NULL;
4583 }
4584 #endif
4585 
4586 #ifdef FEATURE_AST
4587 static inline void dp_peer_ast_handle_roam_del(struct dp_soc *soc,
4588 					       uint8_t *peer_mac_addr)
4589 {
4590 	struct dp_ast_entry *ast_entry;
4591 
4592 	qdf_spin_lock_bh(&soc->ast_lock);
4593 	ast_entry = dp_peer_ast_hash_find_soc(soc, peer_mac_addr);
4594 
4595 	if (ast_entry && ast_entry->next_hop &&
4596 	    !ast_entry->delete_in_progress)
4597 		dp_peer_del_ast(soc, ast_entry);
4598 
4599 	qdf_spin_unlock_bh(&soc->ast_lock);
}
#else
/*
 * Stub for builds without FEATURE_AST; assumed necessary since
 * dp_peer_create_wifi3() calls this unconditionally.
 */
static inline void dp_peer_ast_handle_roam_del(struct dp_soc *soc,
					       uint8_t *peer_mac_addr)
{
}
#endif
4602 
4603 /*
4604  * dp_peer_create_wifi3() - attach txrx peer
 * @vdev_handle: Datapath VDEV handle
 * @peer_mac_addr: Peer MAC address
 * @ctrl_peer: Control path peer handle
 *
 * Return: DP peer handle on success, NULL on failure
4609  */
4610 static void *dp_peer_create_wifi3(struct cdp_vdev *vdev_handle,
4611 		uint8_t *peer_mac_addr, struct cdp_ctrl_objmgr_peer *ctrl_peer)
4612 {
4613 	struct dp_peer *peer;
4614 	int i;
4615 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
4616 	struct dp_pdev *pdev;
4617 	struct dp_soc *soc;
4618 	enum cdp_txrx_ast_entry_type ast_type = CDP_TXRX_AST_TYPE_STATIC;
4619 
4620 	/* preconditions */
4621 	qdf_assert(vdev);
4622 	qdf_assert(peer_mac_addr);
4623 
4624 	pdev = vdev->pdev;
4625 	soc = pdev->soc;
4626 
4627 	/*
4628 	 * If a peer entry with given MAC address already exists,
4629 	 * reuse the peer and reset the state of peer.
4630 	 */
4631 	peer = dp_peer_can_reuse(vdev, peer_mac_addr);
4632 
4633 	if (peer) {
4634 		qdf_atomic_init(&peer->is_default_route_set);
4635 		dp_peer_cleanup(vdev, peer);
4636 
4637 		peer->delete_in_progress = false;
4638 
4639 		dp_peer_delete_ast_entries(soc, peer);
4640 
4641 		if ((vdev->opmode == wlan_op_mode_sta) &&
4642 		    !qdf_mem_cmp(peer_mac_addr, &vdev->mac_addr.raw[0],
4643 		     DP_MAC_ADDR_LEN)) {
4644 			ast_type = CDP_TXRX_AST_TYPE_SELF;
4645 		}
4646 
4647 		dp_peer_add_ast(soc, peer, peer_mac_addr, ast_type, 0);
4648 
4649 		/*
4650 		* Control path maintains a node count which is incremented
4651 		* for every new peer create command. Since new peer is not being
4652 		* created and earlier reference is reused here,
4653 		* peer_unref_delete event is sent to control path to
4654 		* increment the count back.
4655 		*/
4656 		if (soc->cdp_soc.ol_ops->peer_unref_delete) {
4657 			soc->cdp_soc.ol_ops->peer_unref_delete(pdev->ctrl_pdev,
4658 				peer->mac_addr.raw, vdev->mac_addr.raw,
4659 				vdev->opmode, peer->ctrl_peer, ctrl_peer);
4660 		}
4661 		peer->ctrl_peer = ctrl_peer;
4662 
4663 		dp_local_peer_id_alloc(pdev, peer);
4664 		DP_STATS_INIT(peer);
4665 
4666 		return (void *)peer;
4667 	} else {
4668 		/*
4669 		 * When a STA roams from RPTR AP to ROOT AP and vice versa, we
4670 		 * need to remove the AST entry which was earlier added as a WDS
4671 		 * entry.
4672 		 * If an AST entry exists, but no peer entry exists with a given
4673 		 * MAC addresses, we could deduce it as a WDS entry
4674 		 */
4675 		dp_peer_ast_handle_roam_del(soc, peer_mac_addr);
4676 	}
4677 
4678 #ifdef notyet
4679 	peer = (struct dp_peer *)qdf_mempool_alloc(soc->osdev,
4680 		soc->mempool_ol_ath_peer);
4681 #else
4682 	peer = (struct dp_peer *)qdf_mem_malloc(sizeof(*peer));
4683 #endif
4684 
4685 	if (!peer)
4686 		return NULL; /* failure */
4687 
4688 	qdf_mem_zero(peer, sizeof(struct dp_peer));
4689 
4690 	TAILQ_INIT(&peer->ast_entry_list);
4691 
4692 	/* store provided params */
4693 	peer->vdev = vdev;
4694 	peer->ctrl_peer = ctrl_peer;
4695 
4696 	if ((vdev->opmode == wlan_op_mode_sta) &&
4697 	    !qdf_mem_cmp(peer_mac_addr, &vdev->mac_addr.raw[0],
4698 			 DP_MAC_ADDR_LEN)) {
4699 		ast_type = CDP_TXRX_AST_TYPE_SELF;
4700 	}
4701 
4702 	dp_peer_add_ast(soc, peer, peer_mac_addr, ast_type, 0);
4703 
4704 	qdf_spinlock_create(&peer->peer_info_lock);
4705 
4706 	qdf_mem_copy(
4707 		&peer->mac_addr.raw[0], peer_mac_addr, OL_TXRX_MAC_ADDR_LEN);
4708 
4709 	/* TODO: See of rx_opt_proc is really required */
4710 	peer->rx_opt_proc = soc->rx_opt_proc;
4711 
4712 	/* initialize the peer_id */
4713 	for (i = 0; i < MAX_NUM_PEER_ID_PER_PEER; i++)
4714 		peer->peer_ids[i] = HTT_INVALID_PEER;
4715 
4716 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
4717 
4718 	qdf_atomic_init(&peer->ref_cnt);
4719 
4720 	/* keep one reference for attach */
4721 	qdf_atomic_inc(&peer->ref_cnt);
4722 
4723 	/* add this peer into the vdev's list */
4724 	if (wlan_op_mode_sta == vdev->opmode)
4725 		TAILQ_INSERT_HEAD(&vdev->peer_list, peer, peer_list_elem);
4726 	else
4727 		TAILQ_INSERT_TAIL(&vdev->peer_list, peer, peer_list_elem);
4728 
4729 	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
4730 
4731 	/* TODO: See if hash based search is required */
4732 	dp_peer_find_hash_add(soc, peer);
4733 
4734 	/* Initialize the peer state */
4735 	peer->state = OL_TXRX_PEER_STATE_DISC;
4736 
4737 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
4738 		"vdev %pK created peer %pK (%pM) ref_cnt: %d",
4739 		vdev, peer, peer->mac_addr.raw,
4740 		qdf_atomic_read(&peer->ref_cnt));
4741 	/*
	 * For every peer MAP message, search and set if bss_peer
	 */
	if (!qdf_mem_cmp(peer->mac_addr.raw, vdev->mac_addr.raw,
			 DP_MAC_ADDR_LEN)) {
4745 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
4746 			"vdev bss_peer!!!!");
4747 		peer->bss_peer = 1;
4748 		vdev->vap_bss_peer = peer;
4749 	}
4750 	for (i = 0; i < DP_MAX_TIDS; i++)
4751 		qdf_spinlock_create(&peer->rx_tid[i].tid_lock);
4752 
4753 	dp_local_peer_id_alloc(pdev, peer);
4754 	DP_STATS_INIT(peer);
4755 	return (void *)peer;
4756 }
4757 
4758 /*
4759  * dp_vdev_get_default_reo_hash() - get reo dest ring and hash values for a vdev
4760  * @vdev: Datapath VDEV handle
4761  * @reo_dest: pointer to default reo_dest ring for vdev to be populated
4762  * @hash_based: pointer to hash value (enabled/disabled) to be populated
4763  *
4764  * Return: None
4765  */
4766 static
4767 void dp_vdev_get_default_reo_hash(struct dp_vdev *vdev,
4768 				  enum cdp_host_reo_dest_ring *reo_dest,
4769 				  bool *hash_based)
4770 {
4771 	struct dp_soc *soc;
4772 	struct dp_pdev *pdev;
4773 
4774 	pdev = vdev->pdev;
4775 	soc = pdev->soc;
4776 	/*
4777 	 * hash based steering is disabled for Radios which are offloaded
4778 	 * to NSS
4779 	 */
4780 	if (!wlan_cfg_get_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx))
4781 		*hash_based = wlan_cfg_is_rx_hash_enabled(soc->wlan_cfg_ctx);
4782 
4783 	/*
4784 	 * Below line of code will ensure the proper reo_dest ring is chosen
4785 	 * for cases where toeplitz hash cannot be generated (ex: non TCP/UDP)
4786 	 */
4787 	*reo_dest = pdev->reo_dest;
4788 }
4789 
4790 #ifdef IPA_OFFLOAD
4791 /*
4792  * dp_peer_setup_get_reo_hash() - get reo dest ring and hash values for a peer
4793  * @vdev: Datapath VDEV handle
4794  * @reo_dest: pointer to default reo_dest ring for vdev to be populated
4795  * @hash_based: pointer to hash value (enabled/disabled) to be populated
4796  *
4797  * If IPA is enabled in ini, for SAP mode, disable hash based
4798  * steering, use default reo_dst ring for RX. Use config values for other modes.
4799  * Return: None
4800  */
4801 static void dp_peer_setup_get_reo_hash(struct dp_vdev *vdev,
4802 				       enum cdp_host_reo_dest_ring *reo_dest,
4803 				       bool *hash_based)
4804 {
4805 	struct dp_soc *soc;
4806 	struct dp_pdev *pdev;
4807 
4808 	pdev = vdev->pdev;
4809 	soc = pdev->soc;
4810 
4811 	dp_vdev_get_default_reo_hash(vdev, reo_dest, hash_based);
4812 
4813 	/*
4814 	 * If IPA is enabled, disable hash-based flow steering and set
4815 	 * reo_dest_ring_4 as the REO ring to receive packets on.
4816 	 * IPA is configured to reap reo_dest_ring_4.
4817 	 *
4818 	 * Note - REO DST indexes are from 0 - 3, while cdp_host_reo_dest_ring
4819 	 * value enum value is from 1 - 4.
4820 	 * Hence, *reo_dest = IPA_REO_DEST_RING_IDX + 1
4821 	 */
4822 	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) {
4823 		if (vdev->opmode == wlan_op_mode_ap) {
4824 			*reo_dest = IPA_REO_DEST_RING_IDX + 1;
4825 			*hash_based = 0;
4826 		}
4827 	}
4828 }
4829 
4830 #else
4831 
4832 /*
4833  * dp_peer_setup_get_reo_hash() - get reo dest ring and hash values for a peer
4834  * @vdev: Datapath VDEV handle
4835  * @reo_dest: pointer to default reo_dest ring for vdev to be populated
4836  * @hash_based: pointer to hash value (enabled/disabled) to be populated
4837  *
4838  * Use system config values for hash based steering.
4839  * Return: None
4840  */
4842 static void dp_peer_setup_get_reo_hash(struct dp_vdev *vdev,
4843 				       enum cdp_host_reo_dest_ring *reo_dest,
4844 				       bool *hash_based)
4845 {
4846 	dp_vdev_get_default_reo_hash(vdev, reo_dest, hash_based);
4847 }
4848 #endif /* IPA_OFFLOAD */
4849 
4850 /*
4851  * dp_peer_setup_wifi3() - initialize the peer
4852  * @vdev_hdl: virtual device object
 * @peer_hdl: peer object
4854  *
4855  * Return: void
4856  */
4857 static void dp_peer_setup_wifi3(struct cdp_vdev *vdev_hdl, void *peer_hdl)
4858 {
4859 	struct dp_peer *peer = (struct dp_peer *)peer_hdl;
4860 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_hdl;
4861 	struct dp_pdev *pdev;
4862 	struct dp_soc *soc;
4863 	bool hash_based = 0;
4864 	enum cdp_host_reo_dest_ring reo_dest;
4865 
4866 	/* preconditions */
4867 	qdf_assert(vdev);
4868 	qdf_assert(peer);
4869 
4870 	pdev = vdev->pdev;
4871 	soc = pdev->soc;
4872 
4873 	peer->last_assoc_rcvd = 0;
4874 	peer->last_disassoc_rcvd = 0;
4875 	peer->last_deauth_rcvd = 0;
4876 
4877 	dp_peer_setup_get_reo_hash(vdev, &reo_dest, &hash_based);
4878 
4879 	dp_info("pdev: %d vdev :%d opmode:%u hash-based-steering:%d default-reo_dest:%u",
4880 		pdev->pdev_id, vdev->vdev_id,
4881 		vdev->opmode, hash_based, reo_dest);
4882 
4884 	/*
4885 	 * There are corner cases where the AD1 = AD2 = "VAPs address"
4886 	 * i.e both the devices have same MAC address. In these
4887 	 * cases we want such pkts to be processed in NULL Q handler
4888 	 * which is REO2TCL ring. for this reason we should
4889 	 * not setup reo_queues and default route for bss_peer.
4890 	 */
4891 	if (peer->bss_peer && vdev->opmode == wlan_op_mode_ap)
4892 		return;
4893 
4894 	if (soc->cdp_soc.ol_ops->peer_set_default_routing) {
4895 		/* TODO: Check the destination ring number to be passed to FW */
4896 		soc->cdp_soc.ol_ops->peer_set_default_routing(
4897 				pdev->ctrl_pdev, peer->mac_addr.raw,
4898 				peer->vdev->vdev_id, hash_based, reo_dest);
4899 	}
4900 
4901 	qdf_atomic_set(&peer->is_default_route_set, 1);
4902 
4903 	dp_peer_rx_init(pdev, peer);
4905 }
4906 
4907 /*
4908  * dp_set_vdev_tx_encap_type() - set the encap type of the vdev
4909  * @vdev_handle: virtual device object
4910  * @htt_pkt_type: type of pkt
4911  *
4912  * Return: void
4913  */
4914 static void dp_set_vdev_tx_encap_type(struct cdp_vdev *vdev_handle,
4915 	 enum htt_cmn_pkt_type val)
4916 {
4917 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
4918 	vdev->tx_encap_type = val;
4919 }
4920 
4921 /*
4922  * dp_set_vdev_rx_decap_type() - set the decap type of the vdev
4923  * @vdev_handle: virtual device object
4924  * @htt_pkt_type: type of pkt
4925  *
4926  * Return: void
4927  */
4928 static void dp_set_vdev_rx_decap_type(struct cdp_vdev *vdev_handle,
4929 	 enum htt_cmn_pkt_type val)
4930 {
4931 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
4932 	vdev->rx_decap_type = val;
4933 }
4934 
4935 /*
4936  * dp_set_ba_aging_timeout() - set ba aging timeout per AC
4937  * @txrx_soc: cdp soc handle
4938  * @ac: Access category
4939  * @value: timeout value in millisec
4940  *
4941  * Return: void
4942  */
4943 static void dp_set_ba_aging_timeout(struct cdp_soc_t *txrx_soc,
4944 				    uint8_t ac, uint32_t value)
4945 {
4946 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
4947 
4948 	hal_set_ba_aging_timeout(soc->hal_soc, ac, value);
4949 }
4950 
4951 /*
4952  * dp_get_ba_aging_timeout() - get ba aging timeout per AC
4953  * @txrx_soc: cdp soc handle
4954  * @ac: access category
4955  * @value: timeout value in millisec
4956  *
4957  * Return: void
4958  */
4959 static void dp_get_ba_aging_timeout(struct cdp_soc_t *txrx_soc,
4960 				    uint8_t ac, uint32_t *value)
4961 {
4962 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
4963 
4964 	hal_get_ba_aging_timeout(soc->hal_soc, ac, value);
4965 }
4966 
4967 /*
4968  * dp_set_pdev_reo_dest() - set the reo destination ring for this pdev
4969  * @pdev_handle: physical device object
4970  * @val: reo destination ring index (1 - 4)
4971  *
4972  * Return: void
4973  */
4974 static void dp_set_pdev_reo_dest(struct cdp_pdev *pdev_handle,
4975 	 enum cdp_host_reo_dest_ring val)
4976 {
4977 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
4978 
4979 	if (pdev)
4980 		pdev->reo_dest = val;
4981 }
4982 
4983 /*
4984  * dp_get_pdev_reo_dest() - get the reo destination for this pdev
4985  * @pdev_handle: physical device object
4986  *
4987  * Return: reo destination ring index
4988  */
4989 static enum cdp_host_reo_dest_ring
4990 dp_get_pdev_reo_dest(struct cdp_pdev *pdev_handle)
4991 {
4992 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
4993 
4994 	if (pdev)
4995 		return pdev->reo_dest;
4996 	else
4997 		return cdp_host_reo_dest_ring_unknown;
4998 }
4999 
5000 /*
5001  * dp_set_filter_neighbour_peers() - set filter neighbour peers for smart mesh
5002  * @pdev_handle: device object
5003  * @val: value to be set
5004  *
 * Return: 0 on success
5006  */
5007 static int dp_set_filter_neighbour_peers(struct cdp_pdev *pdev_handle,
5008 	 uint32_t val)
5009 {
5010 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
5011 
5012 	/* Enable/Disable smart mesh filtering. This flag will be checked
5013 	 * during rx processing to check if packets are from NAC clients.
5014 	 */
5015 	pdev->filter_neighbour_peers = val;
5016 	return 0;
5017 }
5018 
5019 /*
 * dp_update_filter_neighbour_peers() - set neighbour peers (NAC clients)
 * address for smart mesh filtering
 * @vdev_handle: virtual device object
 * @cmd: Add/Del command
 * @macaddr: NAC client mac address
 *
 * Return: 1 on success, 0 on failure
5027  */
5028 static int dp_update_filter_neighbour_peers(struct cdp_vdev *vdev_handle,
5029 					    uint32_t cmd, uint8_t *macaddr)
5030 {
5031 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
5032 	struct dp_pdev *pdev = vdev->pdev;
5033 	struct dp_neighbour_peer *peer = NULL;
5034 
5035 	if (!macaddr)
5036 		goto fail0;
5037 
5038 	/* Store address of NAC (neighbour peer) which will be checked
5039 	 * against TA of received packets.
5040 	 */
5041 	if (cmd == DP_NAC_PARAM_ADD) {
5042 		peer = (struct dp_neighbour_peer *) qdf_mem_malloc(
5043 				sizeof(*peer));
5044 
5045 		if (!peer) {
5046 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
5047 				FL("DP neighbour peer node memory allocation failed"));
5048 			goto fail0;
5049 		}
5050 
5051 		qdf_mem_copy(&peer->neighbour_peers_macaddr.raw[0],
5052 			macaddr, DP_MAC_ADDR_LEN);
5053 		peer->vdev = vdev;
5054 
5055 		qdf_spin_lock_bh(&pdev->neighbour_peer_mutex);
5056 
5057 		/* add this neighbour peer into the list */
5058 		TAILQ_INSERT_TAIL(&pdev->neighbour_peers_list, peer,
5059 				neighbour_peer_list_elem);
5060 		qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex);
5061 
5062 		/* first neighbour */
5063 		if (!pdev->neighbour_peers_added) {
5064 			pdev->neighbour_peers_added = true;
5065 			dp_ppdu_ring_cfg(pdev);
5066 		}
5067 		return 1;
5068 
5069 	} else if (cmd == DP_NAC_PARAM_DEL) {
5070 		qdf_spin_lock_bh(&pdev->neighbour_peer_mutex);
5071 		TAILQ_FOREACH(peer, &pdev->neighbour_peers_list,
5072 				neighbour_peer_list_elem) {
5073 			if (!qdf_mem_cmp(&peer->neighbour_peers_macaddr.raw[0],
5074 				macaddr, DP_MAC_ADDR_LEN)) {
5075 				/* delete this peer from the list */
5076 				TAILQ_REMOVE(&pdev->neighbour_peers_list,
5077 					peer, neighbour_peer_list_elem);
5078 				qdf_mem_free(peer);
5079 				break;
5080 			}
5081 		}
5082 		/* last neighbour deleted */
5083 		if (TAILQ_EMPTY(&pdev->neighbour_peers_list)) {
5084 			pdev->neighbour_peers_added = false;
5085 			dp_ppdu_ring_cfg(pdev);
5086 		}
5087 
5088 		qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex);
5089 
5090 		if (!pdev->mcopy_mode && !pdev->neighbour_peers_added &&
5091 		    !pdev->enhanced_stats_en)
5092 			dp_ppdu_ring_reset(pdev);
5093 		return 1;
5094 
5095 	}
5096 
5097 fail0:
5098 	return 0;
5099 }
5100 
5101 /*
5102  * dp_get_sec_type() - Get the security type
 * @peer: Datapath peer handle
 * @sec_idx: Security id (mcast, ucast)
 *
 * Return: Security type
5107  */
5108 static int dp_get_sec_type(struct cdp_peer *peer, uint8_t sec_idx)
5109 {
5110 	struct dp_peer *dpeer = (struct dp_peer *)peer;
5111 
5112 	return dpeer->security[sec_idx].sec_type;
5113 }
5114 
5115 /*
5116  * dp_peer_authorize() - authorize txrx peer
 * @peer_handle: Datapath peer handle
 * @authorize: set to 1 to authorize the peer, 0 otherwise
 *
 * Return: void
5120  */
5121 static void dp_peer_authorize(struct cdp_peer *peer_handle, uint32_t authorize)
5122 {
5123 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
5124 	struct dp_soc *soc;
5125 
5126 	if (peer != NULL) {
5127 		soc = peer->vdev->pdev->soc;
5128 		qdf_spin_lock_bh(&soc->peer_ref_mutex);
5129 		peer->authorize = authorize ? 1 : 0;
5130 		qdf_spin_unlock_bh(&soc->peer_ref_mutex);
5131 	}
5132 }
5133 
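/**
 * dp_reset_and_release_peer_mem() - detach peer from its vdev and free
 *				     the dp peer memory
 * @soc: DP SOC handle
 * @pdev: DP PDEV handle
 * @peer: DP peer handle
 * @vdev_id: vdev id of the peer's vdev
 *
 * Return: None
 */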
5134 static void dp_reset_and_release_peer_mem(struct dp_soc *soc,
5135 					  struct dp_pdev *pdev,
5136 					  struct dp_peer *peer,
5137 					  uint32_t vdev_id)
5138 {
5139 	struct dp_vdev *vdev = NULL;
5140 	struct dp_peer *bss_peer = NULL;
5141 	uint8_t *m_addr = NULL;
5142 
5143 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
5144 	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
5145 		if (vdev->vdev_id == vdev_id)
5146 			break;
5147 	}
5148 	if (!vdev) {
5149 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
5150 			  "vdev is NULL");
5151 	} else {
		if (vdev->vap_bss_peer == peer)
			vdev->vap_bss_peer = NULL;
		m_addr = peer->mac_addr.raw;
		if (soc->cdp_soc.ol_ops->peer_unref_delete)
			soc->cdp_soc.ol_ops->peer_unref_delete(pdev->ctrl_pdev,
				m_addr, vdev->mac_addr.raw, vdev->opmode,
				peer->ctrl_peer, NULL);

		if (vdev->vap_bss_peer) {
			bss_peer = vdev->vap_bss_peer;
			DP_UPDATE_STATS(vdev, peer);
		}
5164 	}
5165 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
5166 
5167 	/*
5168 	 * Peer AST list hast to be empty here
5169 	 */
5170 	DP_AST_ASSERT(TAILQ_EMPTY(&peer->ast_entry_list));
5171 
5172 	qdf_mem_free(peer);
5173 }
5174 
5175 /**
5176  * dp_delete_pending_vdev() - check and process vdev delete
5177  * @pdev: DP specific pdev pointer
5178  * @vdev: DP specific vdev pointer
5179  * @vdev_id: vdev id corresponding to vdev
5180  *
5181  * This API does following:
5182  * 1) It releases tx flow pools buffers as vdev is
5183  *    going down and no peers are associated.
5184  * 2) It also detaches vdev before cleaning vdev (struct dp_vdev) memory
5185  */
5186 static void dp_delete_pending_vdev(struct dp_pdev *pdev, struct dp_vdev *vdev,
5187 				   uint8_t vdev_id)
5188 {
5189 	ol_txrx_vdev_delete_cb vdev_delete_cb = NULL;
5190 	void *vdev_delete_context = NULL;
5191 
5192 	vdev_delete_cb = vdev->delete.callback;
5193 	vdev_delete_context = vdev->delete.context;
5194 
5195 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
5196 		  FL("deleting vdev object %pK (%pM)- its last peer is done"),
5197 		  vdev, vdev->mac_addr.raw);
5198 	/* all peers are gone, go ahead and delete it */
5199 	dp_tx_flow_pool_unmap_handler(pdev, vdev_id,
5200 			FLOW_TYPE_VDEV, vdev_id);
5201 	dp_tx_vdev_detach(vdev);
5202 
5203 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
5204 	TAILQ_REMOVE(&pdev->vdev_list, vdev, vdev_list_elem);
5205 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
5206 
5207 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
5208 		  FL("deleting vdev object %pK (%pM)"),
5209 		  vdev, vdev->mac_addr.raw);
5210 	qdf_mem_free(vdev);
5211 	vdev = NULL;
5212 
5213 	if (vdev_delete_cb)
5214 		vdev_delete_cb(vdev_delete_context);
5215 }
5216 
5217 /*
5218  * dp_peer_unref_delete() - unref and delete peer
5219  * @peer_handle:		Datapath peer handle
5220  *
5221  */
5222 void dp_peer_unref_delete(void *peer_handle)
5223 {
5224 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
5225 	struct dp_vdev *vdev = peer->vdev;
5226 	struct dp_pdev *pdev = vdev->pdev;
5227 	struct dp_soc *soc = pdev->soc;
5228 	struct dp_peer *tmppeer;
5229 	int found = 0;
5230 	uint16_t peer_id;
5231 	uint16_t vdev_id;
5232 	bool delete_vdev;
5233 
5234 	/*
5235 	 * Hold the lock all the way from checking if the peer ref count
5236 	 * is zero until the peer references are removed from the hash
5237 	 * table and vdev list (if the peer ref count is zero).
5238 	 * This protects against a new HL tx operation starting to use the
5239 	 * peer object just after this function concludes it's done being used.
5240 	 * Furthermore, the lock needs to be held while checking whether the
5241 	 * vdev's list of peers is empty, to make sure that list is not modified
5242 	 * concurrently with the empty check.
5243 	 */
5244 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
5245 	if (qdf_atomic_dec_and_test(&peer->ref_cnt)) {
5246 		peer_id = peer->peer_ids[0];
5247 		vdev_id = vdev->vdev_id;
5248 
5249 		/*
5250 		 * Make sure that the reference to the peer in
5251 		 * peer object map is removed
5252 		 */
5253 		if (peer_id != HTT_INVALID_PEER)
5254 			soc->peer_id_to_obj_map[peer_id] = NULL;
5255 
5256 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
5257 			"Deleting peer %pK (%pM)", peer, peer->mac_addr.raw);
5258 
5259 		/* remove the reference to the peer from the hash table */
5260 		dp_peer_find_hash_remove(soc, peer);
5261 
5262 		qdf_spin_lock_bh(&soc->ast_lock);
5263 		if (peer->self_ast_entry) {
5264 			dp_peer_del_ast(soc, peer->self_ast_entry);
5265 			peer->self_ast_entry = NULL;
5266 		}
5267 		qdf_spin_unlock_bh(&soc->ast_lock);
5268 
5269 		TAILQ_FOREACH(tmppeer, &peer->vdev->peer_list, peer_list_elem) {
5270 			if (tmppeer == peer) {
5271 				found = 1;
5272 				break;
5273 			}
5274 		}
5275 
5276 		if (found) {
5277 			TAILQ_REMOVE(&peer->vdev->peer_list, peer,
5278 				peer_list_elem);
5279 		} else {
5280 			/*Ignoring the remove operation as peer not found*/
5281 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
5282 				  "peer:%pK not found in vdev:%pK peerlist:%pK",
5283 				  peer, vdev, &peer->vdev->peer_list);
5284 		}
5285 
5286 		/* cleanup the peer data */
5287 		dp_peer_cleanup(vdev, peer);
5288 
5289 		/* check whether the parent vdev has no peers left */
5290 		if (TAILQ_EMPTY(&vdev->peer_list)) {
5291 			/*
5292 			 * capture vdev delete pending flag's status
5293 			 * while holding peer_ref_mutex lock
5294 			 */
5295 			delete_vdev = vdev->delete.pending;
5296 			/*
5297 			 * Now that there are no references to the peer, we can
5298 			 * release the peer reference lock.
5299 			 */
5300 			qdf_spin_unlock_bh(&soc->peer_ref_mutex);
5301 			/*
5302 			 * Check if the parent vdev was waiting for its peers
5303 			 * to be deleted, in order for it to be deleted too.
5304 			 */
5305 			if (delete_vdev)
5306 				dp_delete_pending_vdev(pdev, vdev, vdev_id);
5307 		} else {
5308 			qdf_spin_unlock_bh(&soc->peer_ref_mutex);
5309 		}
5310 		dp_reset_and_release_peer_mem(soc, pdev, peer, vdev_id);
5311 
5312 	} else {
5313 		qdf_spin_unlock_bh(&soc->peer_ref_mutex);
5314 	}
5315 }
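
/*
 * Illustrative sketch (not part of the driver, never called): the expected
 * pairing for the release above. A caller that needs a peer beyond its own
 * critical section takes an explicit reference first and returns it through
 * dp_peer_unref_delete(), which frees the peer only when the last reference
 * drops. This mirrors the usage in dp_aggregate_pdev_ctrl_frames_stats()
 * later in this file.
 */
static inline void dp_peer_ref_usage_sketch(struct dp_peer *peer)
{
	/* take a reference before using the peer outside any lock */
	qdf_atomic_inc(&peer->ref_cnt);

	/* ... use the peer ... */

	/* release; may free the peer and, transitively, a pending vdev */
	dp_peer_unref_delete(peer);
}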
5316 
5317 /*
5318  * dp_peer_delete_wifi3() - Delete txrx peer
5319  * @peer_handle: Datapath peer handle
5320  * @bitmap: bitmap indicating special handling of request.
5321  *
5322  */
5323 static void dp_peer_delete_wifi3(void *peer_handle, uint32_t bitmap)
5324 {
5325 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
5326 
5327 	/* redirect the peer's rx delivery function to point to a
5328 	 * discard func
5329 	 */
5330 
5331 	peer->rx_opt_proc = dp_rx_discard;
5332 
5333 	/* Do not set ctrl_peer to NULL for connected sta peers.
5334 	 * We need ctrl_peer to release the reference during dp
5335 	 * peer free. This reference was held for
5336 	 * obj_mgr peer during the creation of dp peer.
5337 	 */
5338 	if (!(peer->vdev && (peer->vdev->opmode != wlan_op_mode_sta) &&
5339 	      !peer->bss_peer))
5340 		peer->ctrl_peer = NULL;
5341 
5342 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
5343 		FL("peer %pK (%pM)"),  peer, peer->mac_addr.raw);
5344 
5345 	dp_local_peer_id_free(peer->vdev->pdev, peer);
5346 	qdf_spinlock_destroy(&peer->peer_info_lock);
5347 
5348 	/*
5349 	 * Remove the reference added during peer_attach.
5350 	 * The peer will still be left allocated until the
5351 	 * PEER_UNMAP message arrives to remove the other
5352 	 * reference, added by the PEER_MAP message.
5353 	 */
5354 	dp_peer_unref_delete(peer_handle);
5355 }
5356 
5357 /*
5358  * dp_get_vdev_mac_addr_wifi3() - Get vdev mac address
5359  * @pvdev:		Datapath VDEV handle
5360  *
5361  */
5362 static uint8 *dp_get_vdev_mac_addr_wifi3(struct cdp_vdev *pvdev)
5363 {
5364 	struct dp_vdev *vdev = (struct dp_vdev *)pvdev;
5365 	return vdev->mac_addr.raw;
5366 }
5367 
5368 /*
5369  * dp_vdev_set_wds() - Enable/disable WDS on the vdev
5370  * @vdev_handle: DP VDEV handle
5371  * @val: WDS enable/disable value
5372  *
5373  * Return: none
5374  */
5375 static int dp_vdev_set_wds(void *vdev_handle, uint32_t val)
5376 {
5377 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
5378 
5379 	vdev->wds_enabled = val;
5380 	return 0;
5381 }
5382 
5383 /*
5384  * dp_get_vdev_from_vdev_id_wifi3() - Get vdev handle from vdev_id
5385  * @dev:		Datapath PDEV handle
5386  * @vdev_id:		vdev id
5387  */
5388 static struct cdp_vdev *dp_get_vdev_from_vdev_id_wifi3(struct cdp_pdev *dev,
5389 						uint8_t vdev_id)
5390 {
5391 	struct dp_pdev *pdev = (struct dp_pdev *)dev;
5392 	struct dp_vdev *vdev = NULL;
5393 
5394 	if (qdf_unlikely(!pdev))
5395 		return NULL;
5396 
5397 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
5398 	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
5399 		if (vdev->vdev_id == vdev_id)
5400 			break;
5401 	}
5402 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
5403 
5404 	return (struct cdp_vdev *)vdev;
5405 }
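
/*
 * Illustrative sketch (not part of the driver, never called): typical use
 * of the lookup above together with dp_get_vdev_mac_addr_wifi3(). The
 * lookup only holds vdev_list_lock for the walk itself, so the caller must
 * ensure the vdev cannot be deleted while the returned handle is in use.
 */
static inline uint8 *dp_vdev_mac_lookup_sketch(struct cdp_pdev *dev,
					       uint8_t vdev_id)
{
	struct cdp_vdev *vdev = dp_get_vdev_from_vdev_id_wifi3(dev, vdev_id);

	if (!vdev)
		return NULL;

	return dp_get_vdev_mac_addr_wifi3(vdev);
}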
5406 
5407 /*
5408  * dp_get_mon_vdev_from_pdev_wifi3() - Get vdev handle of monitor mode
5409  * @dev: PDEV handle
5410  *
5411  * Return: VDEV handle of monitor mode
5412  */
5413 
5414 static struct cdp_vdev *dp_get_mon_vdev_from_pdev_wifi3(struct cdp_pdev *dev)
5415 {
5416 	struct dp_pdev *pdev = (struct dp_pdev *)dev;
5417 
5418 	if (qdf_unlikely(!pdev))
5419 		return NULL;
5420 
5421 	return (struct cdp_vdev *)pdev->monitor_vdev;
5422 }
5423 
5424 static int dp_get_opmode(struct cdp_vdev *vdev_handle)
5425 {
5426 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
5427 
5428 	return vdev->opmode;
5429 }
5430 
5431 static
5432 void dp_get_os_rx_handles_from_vdev_wifi3(struct cdp_vdev *pvdev,
5433 					  ol_txrx_rx_fp *stack_fn_p,
5434 					  ol_osif_vdev_handle *osif_vdev_p)
5435 {
5436 	struct dp_vdev *vdev = dp_get_dp_vdev_from_cdp_vdev(pvdev);
5437 
5438 	qdf_assert(vdev);
5439 	*stack_fn_p = vdev->osif_rx_stack;
5440 	*osif_vdev_p = vdev->osif_vdev;
5441 }
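
/*
 * Illustrative sketch (not part of the driver, never called): handing a
 * frame list to the OS shim through the handles retrieved above. This
 * assumes the usual (osif handle, nbuf list) calling convention for
 * ol_txrx_rx_fp.
 */
static inline void dp_os_rx_deliver_sketch(struct cdp_vdev *pvdev,
					   qdf_nbuf_t nbuf_list)
{
	ol_txrx_rx_fp stack_fn = NULL;
	ol_osif_vdev_handle osif_vdev = NULL;

	dp_get_os_rx_handles_from_vdev_wifi3(pvdev, &stack_fn, &osif_vdev);
	if (stack_fn)
		stack_fn(osif_vdev, nbuf_list);
}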
5442 
5443 static struct cdp_cfg *dp_get_ctrl_pdev_from_vdev_wifi3(struct cdp_vdev *pvdev)
5444 {
5445 	struct dp_vdev *vdev = (struct dp_vdev *)pvdev;
5446 	struct dp_pdev *pdev = vdev->pdev;
5447 
5448 	return (struct cdp_cfg *)pdev->wlan_cfg_ctx;
5449 }
5450 
5451 /**
5452  * dp_monitor_mode_ring_config() - Send the tlv config to fw for monitor buffer
5453  *                                 ring based on target
5454  * @soc: soc handle
5455  * @mac_for_pdev: pdev_id
5456  * @pdev: physical device handle
5457  * @ring_num: mac id
5458  * @htt_tlv_filter: tlv filter
5459  *
5460  * Return: zero on success, non-zero on failure
5461  */
5462 static inline
5463 QDF_STATUS dp_monitor_mode_ring_config(struct dp_soc *soc, uint8_t mac_for_pdev,
5464 				       struct dp_pdev *pdev, uint8_t ring_num,
5465 				       struct htt_rx_ring_tlv_filter htt_tlv_filter)
5466 {
5467 	QDF_STATUS status;
5468 
5469 	if (soc->wlan_cfg_ctx->rxdma1_enable)
5470 		status = htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
5471 					     pdev->rxdma_mon_buf_ring[ring_num]
5472 					     .hal_srng,
5473 					     RXDMA_MONITOR_BUF, RX_BUFFER_SIZE,
5474 					     &htt_tlv_filter);
5475 	else
5476 		status = htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
5477 					     pdev->rx_mac_buf_ring[ring_num]
5478 					     .hal_srng,
5479 					     RXDMA_BUF, RX_BUFFER_SIZE,
5480 					     &htt_tlv_filter);
5481 
5482 	return status;
5483 }
5484 
5485 /**
5486  * dp_reset_monitor_mode() - Disable monitor mode
5487  * @pdev_handle: Datapath PDEV handle
5488  *
5489  * Return: 0 on success, not 0 on failure
5490  */
5491 static QDF_STATUS dp_reset_monitor_mode(struct cdp_pdev *pdev_handle)
5492 {
5493 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
5494 	struct htt_rx_ring_tlv_filter htt_tlv_filter;
5495 	struct dp_soc *soc = pdev->soc;
5496 	uint8_t pdev_id;
5497 	int mac_id;
5498 	QDF_STATUS status = QDF_STATUS_SUCCESS;
5499 
5500 	pdev_id = pdev->pdev_id;
5502 
5503 	qdf_spin_lock_bh(&pdev->mon_lock);
5504 
5505 	qdf_mem_set(&(htt_tlv_filter), sizeof(htt_tlv_filter), 0x0);
5506 
5507 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
5508 		int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, pdev_id);
5509 
5510 		status = dp_monitor_mode_ring_config(soc, mac_for_pdev,
5511 						     pdev, mac_id,
5512 						     htt_tlv_filter);
5513 
5514 		if (status != QDF_STATUS_SUCCESS) {
5515 			dp_err("Failed to send tlv filter for monitor mode rings");
			/* release mon_lock taken above before returning */
			qdf_spin_unlock_bh(&pdev->mon_lock);
5516 			return status;
5517 		}
5518 
5519 		htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
5520 			    pdev->rxdma_mon_status_ring[mac_id].hal_srng,
5521 			    RXDMA_MONITOR_STATUS, RX_BUFFER_SIZE,
5522 			    &htt_tlv_filter);
5523 	}
5524 
5525 	pdev->monitor_vdev = NULL;
5526 	pdev->mcopy_mode = 0;
5527 	pdev->monitor_configured = false;
5528 
5529 	qdf_spin_unlock_bh(&pdev->mon_lock);
5530 
5531 	return QDF_STATUS_SUCCESS;
5532 }
5533 
5534 /**
5535  * dp_set_nac() - set peer_nac
5536  * @peer_handle: Datapath PEER handle
5537  *
5538  * Return: void
5539  */
5540 static void dp_set_nac(struct cdp_peer *peer_handle)
5541 {
5542 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
5543 
5544 	peer->nac = 1;
5545 }
5546 
5547 /**
5548  * dp_get_tx_pending() - read pending tx
5549  * @pdev_handle: Datapath PDEV handle
5550  *
5551  * Return: outstanding tx
5552  */
5553 static int dp_get_tx_pending(struct cdp_pdev *pdev_handle)
5554 {
5555 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
5556 
5557 	return qdf_atomic_read(&pdev->num_tx_outstanding);
5558 }
5559 
5560 /**
5561  * dp_get_peer_mac_from_peer_id() - get peer mac
5562  * @pdev_handle: Datapath PDEV handle
5563  * @peer_id: Peer ID
5564  * @peer_mac: MAC addr of PEER
5565  *
5566  * Return: void
5567  */
5568 static void dp_get_peer_mac_from_peer_id(struct cdp_pdev *pdev_handle,
5569 	uint32_t peer_id, uint8_t *peer_mac)
5570 {
5571 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
5572 	struct dp_peer *peer;
5573 
5574 	if (pdev && peer_mac) {
5575 		peer = dp_peer_find_by_id(pdev->soc, (uint16_t)peer_id);
5576 		if (peer) {
5577 			qdf_mem_copy(peer_mac, peer->mac_addr.raw,
5578 				     DP_MAC_ADDR_LEN);
5579 			dp_peer_unref_del_find_by_id(peer);
5580 		}
5581 	}
5582 }
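
/*
 * Illustrative sketch (not part of the driver, never called): the caller
 * supplies a buffer of at least DP_MAC_ADDR_LEN bytes; for an unknown
 * peer_id the buffer is left untouched, so it should be pre-initialized.
 */
static inline void dp_peer_mac_lookup_sketch(struct cdp_pdev *pdev_handle,
					     uint32_t peer_id)
{
	uint8_t mac[DP_MAC_ADDR_LEN] = {0};

	dp_get_peer_mac_from_peer_id(pdev_handle, peer_id, mac);
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
		  FL("peer_id %u -> %pM"), peer_id, mac);
}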
5583 
5584 /**
5585  * dp_pdev_configure_monitor_rings() - configure monitor rings
5586  * @pdev: Datapath PDEV handle
5587  *
5588  * Return: QDF_STATUS_SUCCESS on success, error status otherwise
5589  */
5590 static QDF_STATUS dp_pdev_configure_monitor_rings(struct dp_pdev *pdev)
5591 {
5592 	struct htt_rx_ring_tlv_filter htt_tlv_filter;
5593 	struct dp_soc *soc;
5594 	uint8_t pdev_id;
5595 	int mac_id;
5596 	QDF_STATUS status = QDF_STATUS_SUCCESS;
5597 
5598 	pdev_id = pdev->pdev_id;
5599 	soc = pdev->soc;
5600 
5601 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
5602 		"MODE[%x] FP[%02x|%02x|%02x] MO[%02x|%02x|%02x]",
5603 		pdev->mon_filter_mode, pdev->fp_mgmt_filter,
5604 		pdev->fp_ctrl_filter, pdev->fp_data_filter,
5605 		pdev->mo_mgmt_filter, pdev->mo_ctrl_filter,
5606 		pdev->mo_data_filter);
5607 
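	/*
	 * First pass: enable the per-packet TLVs (MPDU/MSDU start and end,
	 * packet header and payload, attention) and keep the PPDU-level
	 * TLVs off, then push the filter to the monitor buffer ring (or the
	 * mac buffer ring, depending on target) for each mac.
	 */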
5608 	qdf_mem_set(&(htt_tlv_filter), sizeof(htt_tlv_filter), 0x0);
5609 
5610 	htt_tlv_filter.mpdu_start = 1;
5611 	htt_tlv_filter.msdu_start = 1;
5612 	htt_tlv_filter.packet = 1;
5613 	htt_tlv_filter.msdu_end = 1;
5614 	htt_tlv_filter.mpdu_end = 1;
5615 	htt_tlv_filter.packet_header = 1;
5616 	htt_tlv_filter.attention = 1;
5617 	htt_tlv_filter.ppdu_start = 0;
5618 	htt_tlv_filter.ppdu_end = 0;
5619 	htt_tlv_filter.ppdu_end_user_stats = 0;
5620 	htt_tlv_filter.ppdu_end_user_stats_ext = 0;
5621 	htt_tlv_filter.ppdu_end_status_done = 0;
5622 	htt_tlv_filter.header_per_msdu = 1;
5623 	htt_tlv_filter.enable_fp =
5624 		(pdev->mon_filter_mode & MON_FILTER_PASS) ? 1 : 0;
5625 	htt_tlv_filter.enable_md = 0;
5626 	htt_tlv_filter.enable_mo =
5627 		(pdev->mon_filter_mode & MON_FILTER_OTHER) ? 1 : 0;
5628 	htt_tlv_filter.fp_mgmt_filter = pdev->fp_mgmt_filter;
5629 	htt_tlv_filter.fp_ctrl_filter = pdev->fp_ctrl_filter;
5630 	if (pdev->mcopy_mode)
5631 		htt_tlv_filter.fp_data_filter = 0;
5632 	else
5633 		htt_tlv_filter.fp_data_filter = pdev->fp_data_filter;
5634 	htt_tlv_filter.mo_mgmt_filter = pdev->mo_mgmt_filter;
5635 	htt_tlv_filter.mo_ctrl_filter = pdev->mo_ctrl_filter;
5636 	htt_tlv_filter.mo_data_filter = pdev->mo_data_filter;
5637 
5638 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
5639 		int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, pdev_id);
5640 
5641 		status = dp_monitor_mode_ring_config(soc, mac_for_pdev,
5642 						     pdev, mac_id,
5643 						     htt_tlv_filter);
5644 
5645 		if (status != QDF_STATUS_SUCCESS) {
5646 			dp_err("Failed to send tlv filter for monitor mode rings");
5647 			return status;
5648 		}
5649 	}
5650 
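	/*
	 * Second pass: drop the per-packet TLVs and enable the PPDU start,
	 * end and user-stats TLVs for the monitor status ring, which carries
	 * PPDU-level metadata rather than packet payloads.
	 */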
5651 	qdf_mem_set(&(htt_tlv_filter), sizeof(htt_tlv_filter), 0x0);
5652 
5653 	htt_tlv_filter.mpdu_start = 1;
5654 	htt_tlv_filter.msdu_start = 0;
5655 	htt_tlv_filter.packet = 0;
5656 	htt_tlv_filter.msdu_end = 0;
5657 	htt_tlv_filter.mpdu_end = 0;
5658 	htt_tlv_filter.attention = 0;
5659 	htt_tlv_filter.ppdu_start = 1;
5660 	htt_tlv_filter.ppdu_end = 1;
5661 	htt_tlv_filter.ppdu_end_user_stats = 1;
5662 	htt_tlv_filter.ppdu_end_user_stats_ext = 1;
5663 	htt_tlv_filter.ppdu_end_status_done = 1;
5664 	htt_tlv_filter.enable_fp = 1;
5665 	htt_tlv_filter.enable_md = 0;
5666 	htt_tlv_filter.enable_mo = 1;
5667 	if (pdev->mcopy_mode) {
5668 		htt_tlv_filter.packet_header = 1;
5669 	}
5670 	htt_tlv_filter.fp_mgmt_filter = FILTER_MGMT_ALL;
5671 	htt_tlv_filter.fp_ctrl_filter = FILTER_CTRL_ALL;
5672 	htt_tlv_filter.fp_data_filter = FILTER_DATA_ALL;
5673 	htt_tlv_filter.mo_mgmt_filter = FILTER_MGMT_ALL;
5674 	htt_tlv_filter.mo_ctrl_filter = FILTER_CTRL_ALL;
5675 	htt_tlv_filter.mo_data_filter = FILTER_DATA_ALL;
5676 
5677 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
5678 		int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id,
5679 						pdev->pdev_id);
5680 
5681 		htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
5682 			pdev->rxdma_mon_status_ring[mac_id].hal_srng,
5683 			RXDMA_MONITOR_STATUS, RX_BUFFER_SIZE, &htt_tlv_filter);
5684 	}
5685 
5686 	return status;
5687 }
5688 
5689 /**
5690  * dp_vdev_set_monitor_mode() - Set DP VDEV to monitor mode
5691  * @vdev_handle: Datapath VDEV handle
5692  * @smart_monitor: Flag to denote if it is smart monitor mode
5693  *
5694  * Return: 0 on success, not 0 on failure
5695  */
5696 static QDF_STATUS dp_vdev_set_monitor_mode(struct cdp_vdev *vdev_handle,
5697 					   uint8_t smart_monitor)
5698 {
5699 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
5700 	struct dp_pdev *pdev;
5701 
5702 	qdf_assert(vdev);
5703 
5704 	pdev = vdev->pdev;
5705 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_WARN,
5706 		  "pdev=%pK, pdev_id=%d, soc=%pK vdev=%pK\n",
5707 		  pdev, pdev->pdev_id, pdev->soc, vdev);
5708 
5709 	/*Check if current pdev's monitor_vdev exists */
5710 	if (pdev->monitor_configured) {
5711 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
5712 			  "monitor vap already created vdev=%pK\n", vdev);
5713 		qdf_assert(vdev);
5714 		return QDF_STATUS_E_RESOURCES;
5715 	}
5716 
5717 	pdev->monitor_vdev = vdev;
5718 	pdev->monitor_configured = true;
5719 
5720 	/* If smart monitor mode, do not configure monitor ring */
5721 	if (smart_monitor)
5722 		return QDF_STATUS_SUCCESS;
5723 
5724 	return dp_pdev_configure_monitor_rings(pdev);
5725 }
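
/*
 * Illustrative sketch (not part of the driver, never called): the
 * enable/disable round trip for monitor mode. Enabling attaches the vdev
 * as the pdev's monitor vdev and programs the ring filters; resetting
 * clears the filters and detaches it again.
 */
static inline QDF_STATUS
dp_monitor_toggle_sketch(struct cdp_pdev *pdev_handle,
			 struct cdp_vdev *vdev_handle)
{
	QDF_STATUS status;

	/* plain (non-smart) monitor mode, so the rings are configured */
	status = dp_vdev_set_monitor_mode(vdev_handle, 0);
	if (status != QDF_STATUS_SUCCESS)
		return status;

	/* ... capture traffic ... */

	return dp_reset_monitor_mode(pdev_handle);
}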
5726 
5727 /**
5728  * dp_pdev_set_advance_monitor_filter() - Set DP PDEV monitor filter
5729  * @pdev_handle: Datapath PDEV handle
5730  * @filter_val: Flag to select Filter for monitor mode
5731  * Return: 0 on success, not 0 on failure
5732  */
5733 static QDF_STATUS
5734 dp_pdev_set_advance_monitor_filter(struct cdp_pdev *pdev_handle,
5735 				   struct cdp_monitor_filter *filter_val)
5736 {
5737 	/* Many monitor VAPs can exist in a system but only one can be up at
5738 	 * any time
5739 	 */
5740 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
5741 	struct dp_vdev *vdev = pdev->monitor_vdev;
5742 	struct htt_rx_ring_tlv_filter htt_tlv_filter;
5743 	struct dp_soc *soc;
5744 	uint8_t pdev_id;
5745 	int mac_id;
5746 	QDF_STATUS status = QDF_STATUS_SUCCESS;
5747 
5748 	pdev_id = pdev->pdev_id;
5749 	soc = pdev->soc;
5750 
5751 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_WARN,
5752 		"pdev=%pK, pdev_id=%d, soc=%pK vdev=%pK",
5753 		pdev, pdev_id, soc, vdev);
5754 
5755 	/*Check if current pdev's monitor_vdev exists */
5756 	if (!pdev->monitor_vdev) {
5757 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
5758 			"vdev=%pK", vdev);
5759 			"monitor vdev does not exist, vdev=%pK", vdev);
5760 	}
5761 
5762 	/* update filter mode, type in pdev structure */
5763 	pdev->mon_filter_mode = filter_val->mode;
5764 	pdev->fp_mgmt_filter = filter_val->fp_mgmt;
5765 	pdev->fp_ctrl_filter = filter_val->fp_ctrl;
5766 	pdev->fp_data_filter = filter_val->fp_data;
5767 	pdev->mo_mgmt_filter = filter_val->mo_mgmt;
5768 	pdev->mo_ctrl_filter = filter_val->mo_ctrl;
5769 	pdev->mo_data_filter = filter_val->mo_data;
5770 
5771 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
5772 		"MODE[%x] FP[%02x|%02x|%02x] MO[%02x|%02x|%02x]",
5773 		pdev->mon_filter_mode, pdev->fp_mgmt_filter,
5774 		pdev->fp_ctrl_filter, pdev->fp_data_filter,
5775 		pdev->mo_mgmt_filter, pdev->mo_ctrl_filter,
5776 		pdev->mo_data_filter);
5777 
5778 	qdf_mem_set(&(htt_tlv_filter), sizeof(htt_tlv_filter), 0x0);
5779 
5780 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
5781 		int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, pdev_id);
5782 
5783 		status = dp_monitor_mode_ring_config(soc, mac_for_pdev,
5784 						     pdev, mac_id,
5785 						     htt_tlv_filter);
5786 
5787 		if (status != QDF_STATUS_SUCCESS) {
5788 			dp_err("Failed to send tlv filter for monitor mode rings");
5789 			return status;
5790 		}
5791 
5792 		htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
5793 			pdev->rxdma_mon_status_ring[mac_id].hal_srng,
5794 			RXDMA_MONITOR_STATUS, RX_BUFFER_SIZE, &htt_tlv_filter);
5795 	}
5796 
5797 	htt_tlv_filter.mpdu_start = 1;
5798 	htt_tlv_filter.msdu_start = 1;
5799 	htt_tlv_filter.packet = 1;
5800 	htt_tlv_filter.msdu_end = 1;
5801 	htt_tlv_filter.mpdu_end = 1;
5802 	htt_tlv_filter.packet_header = 1;
5803 	htt_tlv_filter.attention = 1;
5804 	htt_tlv_filter.ppdu_start = 0;
5805 	htt_tlv_filter.ppdu_end = 0;
5806 	htt_tlv_filter.ppdu_end_user_stats = 0;
5807 	htt_tlv_filter.ppdu_end_user_stats_ext = 0;
5808 	htt_tlv_filter.ppdu_end_status_done = 0;
5809 	htt_tlv_filter.header_per_msdu = 1;
5810 	htt_tlv_filter.enable_fp =
5811 		(pdev->mon_filter_mode & MON_FILTER_PASS) ? 1 : 0;
5812 	htt_tlv_filter.enable_md = 0;
5813 	htt_tlv_filter.enable_mo =
5814 		(pdev->mon_filter_mode & MON_FILTER_OTHER) ? 1 : 0;
5815 	htt_tlv_filter.fp_mgmt_filter = pdev->fp_mgmt_filter;
5816 	htt_tlv_filter.fp_ctrl_filter = pdev->fp_ctrl_filter;
5817 	if (pdev->mcopy_mode)
5818 		htt_tlv_filter.fp_data_filter = 0;
5819 	else
5820 		htt_tlv_filter.fp_data_filter = pdev->fp_data_filter;
5821 	htt_tlv_filter.mo_mgmt_filter = pdev->mo_mgmt_filter;
5822 	htt_tlv_filter.mo_ctrl_filter = pdev->mo_ctrl_filter;
5823 	htt_tlv_filter.mo_data_filter = pdev->mo_data_filter;
5824 
5825 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
5826 		int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, pdev_id);
5827 
5828 		status = dp_monitor_mode_ring_config(soc, mac_for_pdev,
5829 						     pdev, mac_id,
5830 						     htt_tlv_filter);
5831 
5832 		if (status != QDF_STATUS_SUCCESS) {
5833 			dp_err("Failed to send tlv filter for monitor mode rings");
5834 			return status;
5835 		}
5836 	}
5837 
5838 	qdf_mem_set(&(htt_tlv_filter), sizeof(htt_tlv_filter), 0x0);
5839 
5840 	htt_tlv_filter.mpdu_start = 1;
5841 	htt_tlv_filter.msdu_start = 0;
5842 	htt_tlv_filter.packet = 0;
5843 	htt_tlv_filter.msdu_end = 0;
5844 	htt_tlv_filter.mpdu_end = 0;
5845 	htt_tlv_filter.attention = 0;
5846 	htt_tlv_filter.ppdu_start = 1;
5847 	htt_tlv_filter.ppdu_end = 1;
5848 	htt_tlv_filter.ppdu_end_user_stats = 1;
5849 	htt_tlv_filter.ppdu_end_user_stats_ext = 1;
5850 	htt_tlv_filter.ppdu_end_status_done = 1;
5851 	htt_tlv_filter.enable_fp = 1;
5852 	htt_tlv_filter.enable_md = 0;
5853 	htt_tlv_filter.enable_mo = 1;
5854 	if (pdev->mcopy_mode) {
5855 		htt_tlv_filter.packet_header = 1;
5856 	}
5857 	htt_tlv_filter.fp_mgmt_filter = FILTER_MGMT_ALL;
5858 	htt_tlv_filter.fp_ctrl_filter = FILTER_CTRL_ALL;
5859 	htt_tlv_filter.fp_data_filter = FILTER_DATA_ALL;
5860 	htt_tlv_filter.mo_mgmt_filter = FILTER_MGMT_ALL;
5861 	htt_tlv_filter.mo_ctrl_filter = FILTER_CTRL_ALL;
5862 	htt_tlv_filter.mo_data_filter = FILTER_DATA_ALL;
5863 
5864 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
5865 		int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id,
5866 						pdev->pdev_id);
5867 
5868 		htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
5869 			pdev->rxdma_mon_status_ring[mac_id].hal_srng,
5870 			RXDMA_MONITOR_STATUS, RX_BUFFER_SIZE, &htt_tlv_filter);
5871 	}
5872 
5873 	return QDF_STATUS_SUCCESS;
5874 }
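
/*
 * Illustrative sketch (not part of the driver, never called): programming
 * the advance monitor filter to pass all management and control frames
 * plus unicast data on the filter-pass path only.
 */
static inline QDF_STATUS
dp_advance_filter_sketch(struct cdp_pdev *pdev_handle)
{
	struct cdp_monitor_filter filter_val = {0};

	filter_val.mode = MON_FILTER_PASS;
	filter_val.fp_mgmt = FILTER_MGMT_ALL;
	filter_val.fp_ctrl = FILTER_CTRL_ALL;
	filter_val.fp_data = FILTER_DATA_UCAST;

	return dp_pdev_set_advance_monitor_filter(pdev_handle, &filter_val);
}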
5875 
5876 /**
5877  * dp_get_pdev_id_frm_pdev() - get pdev_id
5878  * @pdev_handle: Datapath PDEV handle
5879  *
5880  * Return: pdev_id
5881  */
5882 static
5883 uint8_t dp_get_pdev_id_frm_pdev(struct cdp_pdev *pdev_handle)
5884 {
5885 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
5886 
5887 	return pdev->pdev_id;
5888 }
5889 
5890 /**
5891  * dp_pdev_set_chan_noise_floor() - set channel noise floor
5892  * @pdev_handle: Datapath PDEV handle
5893  * @chan_noise_floor: Channel Noise Floor
5894  *
5895  * Return: void
5896  */
5897 static
5898 void dp_pdev_set_chan_noise_floor(struct cdp_pdev *pdev_handle,
5899 				  int16_t chan_noise_floor)
5900 {
5901 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
5902 
5903 	pdev->chan_noise_floor = chan_noise_floor;
5904 }
5905 
5906 /**
5907  * dp_vdev_get_filter_ucast_data() - get DP VDEV monitor ucast filter
5908  * @vdev_handle: Datapath VDEV handle
5909  * Return: true on ucast filter flag set
5910  */
5911 static bool dp_vdev_get_filter_ucast_data(struct cdp_vdev *vdev_handle)
5912 {
5913 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
5914 	struct dp_pdev *pdev;
5915 
5916 	pdev = vdev->pdev;
5917 
5918 	if ((pdev->fp_data_filter & FILTER_DATA_UCAST) ||
5919 	    (pdev->mo_data_filter & FILTER_DATA_UCAST))
5920 		return true;
5921 
5922 	return false;
5923 }
5924 
5925 /**
5926  * dp_vdev_get_filter_mcast_data() - get DP VDEV monitor mcast filter
5927  * @vdev_handle: Datapath VDEV handle
5928  * Return: true on mcast filter flag set
5929  */
5930 static bool dp_vdev_get_filter_mcast_data(struct cdp_vdev *vdev_handle)
5931 {
5932 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
5933 	struct dp_pdev *pdev;
5934 
5935 	pdev = vdev->pdev;
5936 
5937 	if ((pdev->fp_data_filter & FILTER_DATA_MCAST) ||
5938 	    (pdev->mo_data_filter & FILTER_DATA_MCAST))
5939 		return true;
5940 
5941 	return false;
5942 }
5943 
5944 /**
5945  * dp_vdev_get_filter_non_data() - get DP VDEV monitor non_data filter
5946  * @vdev_handle: Datapath VDEV handle
5947  * Return: true on non data filter flag set
5948  */
5949 static bool dp_vdev_get_filter_non_data(struct cdp_vdev *vdev_handle)
5950 {
5951 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
5952 	struct dp_pdev *pdev;
5953 
5954 	pdev = vdev->pdev;
5955 
5956 	if ((pdev->fp_mgmt_filter & FILTER_MGMT_ALL) ||
5957 	    (pdev->mo_mgmt_filter & FILTER_MGMT_ALL)) {
5958 		if ((pdev->fp_ctrl_filter & FILTER_CTRL_ALL) ||
5959 		    (pdev->mo_ctrl_filter & FILTER_CTRL_ALL)) {
5960 			return true;
5961 		}
5962 	}
5963 
5964 	return false;
5965 }
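
/*
 * Illustrative sketch (not part of the driver, never called): the three
 * predicates above let the OS shim query which frame classes the current
 * monitor filter will deliver.
 */
static inline void dp_filter_query_sketch(struct cdp_vdev *vdev_handle)
{
	bool ucast = dp_vdev_get_filter_ucast_data(vdev_handle);
	bool mcast = dp_vdev_get_filter_mcast_data(vdev_handle);
	bool non_data = dp_vdev_get_filter_non_data(vdev_handle);

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
		  FL("ucast %d mcast %d non_data %d"),
		  ucast, mcast, non_data);
}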
5966 
5967 #ifdef MESH_MODE_SUPPORT
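/*
 * dp_peer_set_mesh_mode() - flag the vdev as a mesh vdev
 * @vdev_hdl: virtual device object
 * @val: value to be set
 *
 * Return: void
 */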
5968 void dp_peer_set_mesh_mode(struct cdp_vdev *vdev_hdl, uint32_t val)
5969 {
5970 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_hdl;
5971 
5972 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
5973 		FL("val %d"), val);
5974 	vdev->mesh_vdev = val;
5975 }
5976 
5977 /*
5978  * dp_peer_set_mesh_rx_filter() - to set the mesh rx filter
5979  * @vdev_hdl: virtual device object
5980  * @val: value to be set
5981  *
5982  * Return: void
5983  */
5984 void dp_peer_set_mesh_rx_filter(struct cdp_vdev *vdev_hdl, uint32_t val)
5985 {
5986 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_hdl;
5987 
5988 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
5989 		FL("val %d"), val);
5990 	vdev->mesh_rx_filter = val;
5991 }
5992 #endif
5993 
5994 /*
5995  * dp_aggregate_pdev_ctrl_frames_stats()- function to aggregate peer stats
5996  * Current scope is bar received count
5997  *
5998  * @pdev: DP_PDEV handle
5999  *
6000  * Return: void
6001  */
6002 #define STATS_PROC_TIMEOUT        (HZ/1000)
6003 
6004 static void
6005 dp_aggregate_pdev_ctrl_frames_stats(struct dp_pdev *pdev)
6006 {
6007 	struct dp_vdev *vdev;
6008 	struct dp_peer *peer;
6009 	uint32_t waitcnt;
6010 
6011 	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
6012 		TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
6013 			if (!peer) {
6014 				QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
6015 					FL("DP Invalid Peer reference"));
6016 				return;
6017 			}
6018 
6019 			if (peer->delete_in_progress) {
6020 				QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
6021 					FL("DP Peer deletion in progress"));
6022 				continue;
6023 			}
6024 			qdf_atomic_inc(&peer->ref_cnt);
6025 			waitcnt = 0;
6026 			dp_peer_rxtid_stats(peer, dp_rx_bar_stats_cb, pdev);
6027 			while (!(qdf_atomic_read(&(pdev->stats_cmd_complete)))
6028 				&& waitcnt < 10) {
6029 				schedule_timeout_interruptible(
6030 						STATS_PROC_TIMEOUT);
6031 				waitcnt++;
6032 			}
6033 			qdf_atomic_set(&(pdev->stats_cmd_complete), 0);
6034 			dp_peer_unref_delete(peer);
6035 		}
6036 	}
6037 }
6038 
6039 /**
6040  * dp_rx_bar_stats_cb(): BAR received stats callback
6041  * @soc: SOC handle
6042  * @cb_ctxt: Call back context
6043  * @reo_status: Reo status
6044  *
6045  * return: void
6046  */
6047 void dp_rx_bar_stats_cb(struct dp_soc *soc, void *cb_ctxt,
6048 	union hal_reo_status *reo_status)
6049 {
6050 	struct dp_pdev *pdev = (struct dp_pdev *)cb_ctxt;
6051 	struct hal_reo_queue_status *queue_status = &(reo_status->queue_status);
6052 
6053 	if (!qdf_atomic_read(&soc->cmn_init_done))
6054 		return;
6055 
6056 	if (queue_status->header.status != HAL_REO_CMD_SUCCESS) {
6057 		DP_TRACE_STATS(FATAL, "REO stats failure %d\n",
6058 			queue_status->header.status);
6059 		qdf_atomic_set(&(pdev->stats_cmd_complete), 1);
6060 		return;
6061 	}
6062 
6063 	pdev->stats.rx.bar_recv_cnt += queue_status->bar_rcvd_cnt;
6064 	qdf_atomic_set(&(pdev->stats_cmd_complete), 1);
6065 
6066 }
6067 
6068 /**
6069  * dp_aggregate_vdev_stats(): Consolidate stats at VDEV level
6070  * @vdev: DP VDEV handle
6071  * @vdev_stats: buffer to hold the consolidated vdev stats
6072  * return: void
6073  */
6074 void dp_aggregate_vdev_stats(struct dp_vdev *vdev,
6075 			     struct cdp_vdev_stats *vdev_stats)
6076 {
6077 	struct dp_peer *peer = NULL;
6078 	struct dp_soc *soc = NULL;
6079 
6080 	if (!vdev || !vdev->pdev)
6081 		return;
6082 
6083 	soc = vdev->pdev->soc;
6084 
6085 	qdf_mem_copy(vdev_stats, &vdev->stats, sizeof(vdev->stats));
6086 
6087 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem)
6088 		dp_update_vdev_stats(vdev_stats, peer);
6089 
6090 #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
6091 	dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, vdev->pdev->soc,
6092 			     vdev_stats, vdev->vdev_id,
6093 			     UPDATE_VDEV_STATS, vdev->pdev->pdev_id);
6094 #endif
6095 }
6096 
6097 /**
6098  * dp_aggregate_pdev_stats(): Consolidate stats at PDEV level
6099  * @pdev: DP PDEV handle
6100  *
6101  * return: void
6102  */
6103 static inline void dp_aggregate_pdev_stats(struct dp_pdev *pdev)
6104 {
6105 	struct dp_vdev *vdev = NULL;
6106 	struct dp_soc *soc;
6107 	struct cdp_vdev_stats *vdev_stats =
6108 			qdf_mem_malloc(sizeof(struct cdp_vdev_stats));
6109 
6110 	if (!vdev_stats) {
6111 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
6112 			  "DP alloc failure - unable to allocate vdev stats");
6113 		return;
6114 	}
6115 
6116 	qdf_mem_set(&(pdev->stats.tx), sizeof(pdev->stats.tx), 0x0);
6117 	qdf_mem_set(&(pdev->stats.rx), sizeof(pdev->stats.rx), 0x0);
6118 	qdf_mem_set(&(pdev->stats.tx_i), sizeof(pdev->stats.tx_i), 0x0);
6119 
6120 	if (pdev->mcopy_mode)
6121 		DP_UPDATE_STATS(pdev, pdev->invalid_peer);
6122 
6123 	soc = pdev->soc;
6124 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
6125 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
6126 	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
6127 
6128 		dp_aggregate_vdev_stats(vdev, vdev_stats);
6129 		dp_update_pdev_stats(pdev, vdev_stats);
6130 		dp_update_pdev_ingress_stats(pdev, vdev);
6131 	}
6132 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
6133 	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
6134 	qdf_mem_free(vdev_stats);
6135 
6136 #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
6137 	dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, pdev->soc, &pdev->stats,
6138 			     pdev->pdev_id, UPDATE_PDEV_STATS, pdev->pdev_id);
6139 #endif
6140 }
6141 
6142 /**
6143  * dp_vdev_getstats() - get vdev packet level stats
6144  * @vdev_handle: Datapath VDEV handle
6145  * @stats: cdp network device stats structure
6146  *
6147  * Return: void
6148  */
6149 static void dp_vdev_getstats(void *vdev_handle,
6150 		struct cdp_dev_stats *stats)
6151 {
6152 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
6153 	struct dp_pdev *pdev;
6154 	struct dp_soc *soc;
6155 	struct cdp_vdev_stats *vdev_stats;
6156 
6157 	if (!vdev)
6158 		return;
6159 
6160 	pdev = vdev->pdev;
6161 	if (!pdev)
6162 		return;
6163 
6164 	soc = pdev->soc;
6165 
6166 	vdev_stats = qdf_mem_malloc(sizeof(struct cdp_vdev_stats));
6167 
6168 	if (!vdev_stats) {
6169 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
6170 			  "DP alloc failure - unable to allocate vdev stats");
6171 		return;
6172 	}
6173 
6174 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
6175 	dp_aggregate_vdev_stats(vdev, vdev_stats);
6176 	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
6177 
6178 	stats->tx_packets = vdev_stats->tx_i.rcvd.num;
6179 	stats->tx_bytes = vdev_stats->tx_i.rcvd.bytes;
6180 
6181 	stats->tx_errors = vdev_stats->tx.tx_failed +
6182 		vdev_stats->tx_i.dropped.dropped_pkt.num;
6183 	stats->tx_dropped = stats->tx_errors;
6184 
6185 	stats->rx_packets = vdev_stats->rx.unicast.num +
6186 		vdev_stats->rx.multicast.num +
6187 		vdev_stats->rx.bcast.num;
6188 	stats->rx_bytes = vdev_stats->rx.unicast.bytes +
6189 		vdev_stats->rx.multicast.bytes +
6190 		vdev_stats->rx.bcast.bytes;
6191 
6192 }
6193 
6194 
6195 /**
6196  * dp_pdev_getstats() - get pdev packet level stats
6197  * @pdev_handle: Datapath PDEV handle
6198  * @stats: cdp network device stats structure
6199  *
6200  * Return: void
6201  */
6202 static void dp_pdev_getstats(void *pdev_handle,
6203 		struct cdp_dev_stats *stats)
6204 {
6205 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
6206 
6207 	dp_aggregate_pdev_stats(pdev);
6208 
6209 	stats->tx_packets = pdev->stats.tx_i.rcvd.num;
6210 	stats->tx_bytes = pdev->stats.tx_i.rcvd.bytes;
6211 
6212 	stats->tx_errors = pdev->stats.tx.tx_failed +
6213 		pdev->stats.tx_i.dropped.dropped_pkt.num;
6214 	stats->tx_dropped = stats->tx_errors;
6215 
6216 	stats->rx_packets = pdev->stats.rx.unicast.num +
6217 		pdev->stats.rx.multicast.num +
6218 		pdev->stats.rx.bcast.num;
6219 	stats->rx_bytes = pdev->stats.rx.unicast.bytes +
6220 		pdev->stats.rx.multicast.bytes +
6221 		pdev->stats.rx.bcast.bytes;
6222 }
6223 
6224 /**
6225  * dp_get_device_stats() - get interface level packet stats
6226  * @handle: device handle
6227  * @stats: cdp network device stats structure
6228  * @type: device type pdev/vdev
6229  *
6230  * Return: void
6231  */
6232 static void dp_get_device_stats(void *handle,
6233 		struct cdp_dev_stats *stats, uint8_t type)
6234 {
6235 	switch (type) {
6236 	case UPDATE_VDEV_STATS:
6237 		dp_vdev_getstats(handle, stats);
6238 		break;
6239 	case UPDATE_PDEV_STATS:
6240 		dp_pdev_getstats(handle, stats);
6241 		break;
6242 	default:
6243 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
6244 			"apstats cannot be updated for this input "
6245 			"type %d", type);
6246 		break;
6247 	}
6248 
6249 }
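
/*
 * Illustrative sketch (not part of the driver, never called): fetching
 * interface-level counters at pdev granularity, e.g. for a net_device
 * stats callback. Counters are cast down for logging in case the cdp
 * fields are wider than 32 bits.
 */
static inline void dp_dev_stats_sketch(struct dp_pdev *pdev)
{
	struct cdp_dev_stats stats = {0};

	dp_get_device_stats(pdev, &stats, UPDATE_PDEV_STATS);
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
		  FL("tx %u rx %u"),
		  (uint32_t)stats.tx_packets, (uint32_t)stats.rx_packets);
}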
6250 
6251 
6252 /**
6253  * dp_print_pdev_tx_stats(): Print Pdev level TX stats
6254  * @pdev: DP_PDEV Handle
6255  *
6256  * Return:void
6257  */
6258 static inline void
6259 dp_print_pdev_tx_stats(struct dp_pdev *pdev)
6260 {
6261 	uint8_t index = 0;
6262 
6263 	DP_PRINT_STATS("PDEV Tx Stats:\n");
6264 	DP_PRINT_STATS("Received From Stack:");
6265 	DP_PRINT_STATS("	Packets = %d",
6266 			pdev->stats.tx_i.rcvd.num);
6267 	DP_PRINT_STATS("	Bytes = %llu",
6268 			pdev->stats.tx_i.rcvd.bytes);
6269 	DP_PRINT_STATS("Processed:");
6270 	DP_PRINT_STATS("	Packets = %d",
6271 			pdev->stats.tx_i.processed.num);
6272 	DP_PRINT_STATS("	Bytes = %llu",
6273 			pdev->stats.tx_i.processed.bytes);
6274 	DP_PRINT_STATS("Total Completions:");
6275 	DP_PRINT_STATS("	Packets = %u",
6276 			pdev->stats.tx.comp_pkt.num);
6277 	DP_PRINT_STATS("	Bytes = %llu",
6278 			pdev->stats.tx.comp_pkt.bytes);
6279 	DP_PRINT_STATS("Successful Completions:");
6280 	DP_PRINT_STATS("	Packets = %u",
6281 			pdev->stats.tx.tx_success.num);
6282 	DP_PRINT_STATS("	Bytes = %llu",
6283 			pdev->stats.tx.tx_success.bytes);
6284 	DP_PRINT_STATS("Dropped:");
6285 	DP_PRINT_STATS("	Total = %d",
6286 			pdev->stats.tx_i.dropped.dropped_pkt.num);
6287 	DP_PRINT_STATS("	Dma_map_error = %d",
6288 			pdev->stats.tx_i.dropped.dma_error);
6289 	DP_PRINT_STATS("	Ring Full = %d",
6290 			pdev->stats.tx_i.dropped.ring_full);
6291 	DP_PRINT_STATS("	Descriptor Not available = %d",
6292 			pdev->stats.tx_i.dropped.desc_na.num);
6293 	DP_PRINT_STATS("	HW enqueue failed= %d",
6294 			pdev->stats.tx_i.dropped.enqueue_fail);
6295 	DP_PRINT_STATS("	Resources Full = %d",
6296 			pdev->stats.tx_i.dropped.res_full);
6297 	DP_PRINT_STATS("	FW removed Pkts = %u",
6298 		       pdev->stats.tx.dropped.fw_rem.num);
6299 	DP_PRINT_STATS("	FW removed bytes= %llu",
6300 		       pdev->stats.tx.dropped.fw_rem.bytes);
6301 	DP_PRINT_STATS("	FW removed transmitted = %d",
6302 			pdev->stats.tx.dropped.fw_rem_tx);
6303 	DP_PRINT_STATS("	FW removed untransmitted = %d",
6304 			pdev->stats.tx.dropped.fw_rem_notx);
6305 	DP_PRINT_STATS("	FW removed untransmitted fw_reason1 = %d",
6306 			pdev->stats.tx.dropped.fw_reason1);
6307 	DP_PRINT_STATS("	FW removed untransmitted fw_reason2 = %d",
6308 			pdev->stats.tx.dropped.fw_reason2);
6309 	DP_PRINT_STATS("	FW removed untransmitted fw_reason3 = %d",
6310 			pdev->stats.tx.dropped.fw_reason3);
6311 	DP_PRINT_STATS("	Aged Out from msdu/mpdu queues = %d",
6312 			pdev->stats.tx.dropped.age_out);
6313 	DP_PRINT_STATS("	headroom insufficient = %d",
6314 			pdev->stats.tx_i.dropped.headroom_insufficient);
6315 	DP_PRINT_STATS("	Multicast:");
6316 	DP_PRINT_STATS("	Packets: %u",
6317 		       pdev->stats.tx.mcast.num);
6318 	DP_PRINT_STATS("	Bytes: %llu",
6319 		       pdev->stats.tx.mcast.bytes);
6320 	DP_PRINT_STATS("Scatter Gather:");
6321 	DP_PRINT_STATS("	Packets = %d",
6322 			pdev->stats.tx_i.sg.sg_pkt.num);
6323 	DP_PRINT_STATS("	Bytes = %llu",
6324 			pdev->stats.tx_i.sg.sg_pkt.bytes);
6325 	DP_PRINT_STATS("	Dropped By Host = %d",
6326 			pdev->stats.tx_i.sg.dropped_host.num);
6327 	DP_PRINT_STATS("	Dropped By Target = %d",
6328 			pdev->stats.tx_i.sg.dropped_target);
6329 	DP_PRINT_STATS("TSO:");
6330 	DP_PRINT_STATS("	Number of Segments = %d",
6331 			pdev->stats.tx_i.tso.num_seg);
6332 	DP_PRINT_STATS("	Packets = %d",
6333 			pdev->stats.tx_i.tso.tso_pkt.num);
6334 	DP_PRINT_STATS("	Bytes = %llu",
6335 			pdev->stats.tx_i.tso.tso_pkt.bytes);
6336 	DP_PRINT_STATS("	Dropped By Host = %d",
6337 			pdev->stats.tx_i.tso.dropped_host.num);
6338 	DP_PRINT_STATS("Mcast Enhancement:");
6339 	DP_PRINT_STATS("	Packets = %d",
6340 			pdev->stats.tx_i.mcast_en.mcast_pkt.num);
6341 	DP_PRINT_STATS("	Bytes = %llu",
6342 			pdev->stats.tx_i.mcast_en.mcast_pkt.bytes);
6343 	DP_PRINT_STATS("	Dropped: Map Errors = %d",
6344 			pdev->stats.tx_i.mcast_en.dropped_map_error);
6345 	DP_PRINT_STATS("	Dropped: Self Mac = %d",
6346 			pdev->stats.tx_i.mcast_en.dropped_self_mac);
6347 	DP_PRINT_STATS("	Dropped: Send Fail = %d",
6348 			pdev->stats.tx_i.mcast_en.dropped_send_fail);
6349 	DP_PRINT_STATS("	Unicast sent = %d",
6350 			pdev->stats.tx_i.mcast_en.ucast);
6351 	DP_PRINT_STATS("Raw:");
6352 	DP_PRINT_STATS("	Packets = %d",
6353 			pdev->stats.tx_i.raw.raw_pkt.num);
6354 	DP_PRINT_STATS("	Bytes = %llu",
6355 			pdev->stats.tx_i.raw.raw_pkt.bytes);
6356 	DP_PRINT_STATS("	DMA map error = %d",
6357 			pdev->stats.tx_i.raw.dma_map_error);
6358 	DP_PRINT_STATS("Reinjected:");
6359 	DP_PRINT_STATS("	Packets = %d",
6360 			pdev->stats.tx_i.reinject_pkts.num);
6361 	DP_PRINT_STATS("	Bytes = %llu\n",
6362 			pdev->stats.tx_i.reinject_pkts.bytes);
6363 	DP_PRINT_STATS("Inspected:");
6364 	DP_PRINT_STATS("	Packets = %d",
6365 			pdev->stats.tx_i.inspect_pkts.num);
6366 	DP_PRINT_STATS("	Bytes = %llu",
6367 			pdev->stats.tx_i.inspect_pkts.bytes);
6368 	DP_PRINT_STATS("Nawds Multicast:");
6369 	DP_PRINT_STATS("	Packets = %d",
6370 			pdev->stats.tx_i.nawds_mcast.num);
6371 	DP_PRINT_STATS("	Bytes = %llu",
6372 			pdev->stats.tx_i.nawds_mcast.bytes);
6373 	DP_PRINT_STATS("CCE Classified:");
6374 	DP_PRINT_STATS("	CCE Classified Packets: %u",
6375 			pdev->stats.tx_i.cce_classified);
6376 	DP_PRINT_STATS("	RAW CCE Classified Packets: %u",
6377 			pdev->stats.tx_i.cce_classified_raw);
6378 	DP_PRINT_STATS("Mesh stats:");
6379 	DP_PRINT_STATS("	frames to firmware: %u",
6380 			pdev->stats.tx_i.mesh.exception_fw);
6381 	DP_PRINT_STATS("	completions from fw: %u",
6382 			pdev->stats.tx_i.mesh.completion_fw);
6383 	DP_PRINT_STATS("PPDU stats counter");
6384 	for (index = 0; index < CDP_PPDU_STATS_MAX_TAG; index++) {
6385 		DP_PRINT_STATS("	Tag[%d] = %llu", index,
6386 				pdev->stats.ppdu_stats_counter[index]);
6387 	}
6388 
6389 }
6390 
6391 /**
6392  * dp_print_pdev_rx_stats(): Print Pdev level RX stats
6393  * @pdev: DP_PDEV Handle
6394  *
6395  * Return: void
6396  */
6397 static inline void
6398 dp_print_pdev_rx_stats(struct dp_pdev *pdev)
6399 {
6400 	DP_PRINT_STATS("PDEV Rx Stats:\n");
6401 	DP_PRINT_STATS("Received From HW (Per Rx Ring):");
6402 	DP_PRINT_STATS("	Packets = %d %d %d %d",
6403 			pdev->stats.rx.rcvd_reo[0].num,
6404 			pdev->stats.rx.rcvd_reo[1].num,
6405 			pdev->stats.rx.rcvd_reo[2].num,
6406 			pdev->stats.rx.rcvd_reo[3].num);
6407 	DP_PRINT_STATS("	Bytes = %llu %llu %llu %llu",
6408 			pdev->stats.rx.rcvd_reo[0].bytes,
6409 			pdev->stats.rx.rcvd_reo[1].bytes,
6410 			pdev->stats.rx.rcvd_reo[2].bytes,
6411 			pdev->stats.rx.rcvd_reo[3].bytes);
6412 	DP_PRINT_STATS("Replenished:");
6413 	DP_PRINT_STATS("	Packets = %d",
6414 			pdev->stats.replenish.pkts.num);
6415 	DP_PRINT_STATS("	Bytes = %llu",
6416 			pdev->stats.replenish.pkts.bytes);
6417 	DP_PRINT_STATS("	Buffers Added To Freelist = %d",
6418 			pdev->stats.buf_freelist);
6419 	DP_PRINT_STATS("	Low threshold intr = %d",
6420 			pdev->stats.replenish.low_thresh_intrs);
6421 	DP_PRINT_STATS("Dropped:");
6422 	DP_PRINT_STATS("	msdu_not_done = %d",
6423 			pdev->stats.dropped.msdu_not_done);
6424 	DP_PRINT_STATS("        mon_rx_drop = %d",
6425 			pdev->stats.dropped.mon_rx_drop);
6426 	DP_PRINT_STATS("        mec_drop = %d",
6427 		       pdev->stats.rx.mec_drop.num);
6428 	DP_PRINT_STATS("	Bytes = %llu",
6429 		       pdev->stats.rx.mec_drop.bytes);
6430 	DP_PRINT_STATS("Sent To Stack:");
6431 	DP_PRINT_STATS("	Packets = %d",
6432 			pdev->stats.rx.to_stack.num);
6433 	DP_PRINT_STATS("	Bytes = %llu",
6434 			pdev->stats.rx.to_stack.bytes);
6435 	DP_PRINT_STATS("Multicast/Broadcast:");
6436 	DP_PRINT_STATS("	Packets = %d",
6437 			pdev->stats.rx.multicast.num);
6438 	DP_PRINT_STATS("	Bytes = %llu",
6439 			pdev->stats.rx.multicast.bytes);
6440 	DP_PRINT_STATS("Errors:");
6441 	DP_PRINT_STATS("	Rxdma Ring Un-initialized = %d",
6442 			pdev->stats.replenish.rxdma_err);
6443 	DP_PRINT_STATS("	Desc Alloc Failed = %d",
6444 			pdev->stats.err.desc_alloc_fail);
6445 	DP_PRINT_STATS("	IP checksum error = %d",
6446 		       pdev->stats.err.ip_csum_err);
6447 	DP_PRINT_STATS("	TCP/UDP checksum error = %d",
6448 		       pdev->stats.err.tcp_udp_csum_err);
6449 
6450 	/* Get bar_recv_cnt */
6451 	dp_aggregate_pdev_ctrl_frames_stats(pdev);
6452 	DP_PRINT_STATS("BAR Received Count = %d",
6453 			pdev->stats.rx.bar_recv_cnt);
6454 
6455 }
6456 
6457 /**
6458  * dp_print_pdev_rx_mon_stats(): Print Pdev level RX monitor stats
6459  * @pdev: DP_PDEV Handle
6460  *
6461  * Return: void
6462  */
6463 static inline void
6464 dp_print_pdev_rx_mon_stats(struct dp_pdev *pdev)
6465 {
6466 	struct cdp_pdev_mon_stats *rx_mon_stats;
6467 
6468 	rx_mon_stats = &pdev->rx_mon_stats;
6469 
6470 	DP_PRINT_STATS("PDEV Rx Monitor Stats:\n");
6471 
6472 	dp_rx_mon_print_dbg_ppdu_stats(rx_mon_stats);
6473 
6474 	DP_PRINT_STATS("status_ppdu_done_cnt = %d",
6475 		       rx_mon_stats->status_ppdu_done);
6476 	DP_PRINT_STATS("dest_ppdu_done_cnt = %d",
6477 		       rx_mon_stats->dest_ppdu_done);
6478 	DP_PRINT_STATS("dest_mpdu_done_cnt = %d",
6479 		       rx_mon_stats->dest_mpdu_done);
6480 	DP_PRINT_STATS("dest_mpdu_drop_cnt = %d",
6481 		       rx_mon_stats->dest_mpdu_drop);
6482 	DP_PRINT_STATS("dup_mon_linkdesc_cnt = %d",
6483 		       rx_mon_stats->dup_mon_linkdesc_cnt);
6484 	DP_PRINT_STATS("dup_mon_buf_cnt = %d",
6485 		       rx_mon_stats->dup_mon_buf_cnt);
6486 }
6487 
6488 /**
6489  * dp_print_soc_tx_stats(): Print SOC level Tx stats
6490  * @soc: DP_SOC Handle
6491  *
6492  * Return: void
6493  */
6494 static inline void
6495 dp_print_soc_tx_stats(struct dp_soc *soc)
6496 {
6497 	uint8_t desc_pool_id;
6498 	soc->stats.tx.desc_in_use = 0;
6499 
6500 	DP_PRINT_STATS("SOC Tx Stats:\n");
6501 
6502 	for (desc_pool_id = 0;
6503 	     desc_pool_id < wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
6504 	     desc_pool_id++)
6505 		soc->stats.tx.desc_in_use +=
6506 			soc->tx_desc[desc_pool_id].num_allocated;
6507 
6508 	DP_PRINT_STATS("Tx Descriptors In Use = %d",
6509 			soc->stats.tx.desc_in_use);
6510 	DP_PRINT_STATS("Tx Invalid peer:");
6511 	DP_PRINT_STATS("	Packets = %d",
6512 			soc->stats.tx.tx_invalid_peer.num);
6513 	DP_PRINT_STATS("	Bytes = %llu",
6514 			soc->stats.tx.tx_invalid_peer.bytes);
6515 	DP_PRINT_STATS("Packets dropped due to TCL ring full = %d %d %d",
6516 			soc->stats.tx.tcl_ring_full[0],
6517 			soc->stats.tx.tcl_ring_full[1],
6518 			soc->stats.tx.tcl_ring_full[2]);
6519 
6520 }
6521 /**
6522  * dp_print_soc_rx_stats(): Print SOC level Rx stats
6523  * @soc: DP_SOC Handle
6524  *
6525  * Return:void
6526  */
6527 static inline void
6528 dp_print_soc_rx_stats(struct dp_soc *soc)
6529 {
6530 	uint32_t i;
6531 	char reo_error[DP_REO_ERR_LENGTH];
6532 	char rxdma_error[DP_RXDMA_ERR_LENGTH];
6533 	uint8_t index = 0;
6534 
6535 	DP_PRINT_STATS("SOC Rx Stats:\n");
6536 	DP_PRINT_STATS("Fragmented packets: %u",
6537 		       soc->stats.rx.rx_frags);
6538 	DP_PRINT_STATS("Reo reinjected packets: %u",
6539 		       soc->stats.rx.reo_reinject);
6540 	DP_PRINT_STATS("Errors:\n");
6541 	DP_PRINT_STATS("Rx Decrypt Errors = %d",
6542 			(soc->stats.rx.err.rxdma_error[HAL_RXDMA_ERR_DECRYPT] +
6543 			soc->stats.rx.err.rxdma_error[HAL_RXDMA_ERR_TKIP_MIC]));
6544 	DP_PRINT_STATS("Invalid RBM = %d",
6545 			soc->stats.rx.err.invalid_rbm);
6546 	DP_PRINT_STATS("Invalid Vdev = %d",
6547 			soc->stats.rx.err.invalid_vdev);
6548 	DP_PRINT_STATS("Invalid Pdev = %d",
6549 			soc->stats.rx.err.invalid_pdev);
6550 	DP_PRINT_STATS("Invalid Peer = %d",
6551 			soc->stats.rx.err.rx_invalid_peer.num);
6552 	DP_PRINT_STATS("HAL Ring Access Fail = %d",
6553 			soc->stats.rx.err.hal_ring_access_fail);
6554 	DP_PRINT_STATS("RX frags: %d", soc->stats.rx.rx_frags);
6555 	DP_PRINT_STATS("RX frag wait: %d", soc->stats.rx.rx_frag_wait);
6556 	DP_PRINT_STATS("RX frag err: %d", soc->stats.rx.rx_frag_err);
6557 	DP_PRINT_STATS("RX HP out_of_sync: %d", soc->stats.rx.hp_oos);
6558 	DP_PRINT_STATS("RX DUP DESC: %d",
6559 		       soc->stats.rx.err.hal_reo_dest_dup);
6560 	DP_PRINT_STATS("RX REL DUP DESC: %d",
6561 		       soc->stats.rx.err.hal_wbm_rel_dup);
6562 
6563 	for (i = 0; i < HAL_RXDMA_ERR_MAX; i++) {
6564 		index += qdf_snprint(&rxdma_error[index],
6565 				DP_RXDMA_ERR_LENGTH - index,
6566 				" %d", soc->stats.rx.err.rxdma_error[i]);
6567 	}
6568 	DP_PRINT_STATS("RXDMA Error (0-31):%s",
6569 			rxdma_error);
6570 
6571 	index = 0;
6572 	for (i = 0; i < HAL_REO_ERR_MAX; i++) {
6573 		index += qdf_snprint(&reo_error[index],
6574 				DP_REO_ERR_LENGTH - index,
6575 				" %d", soc->stats.rx.err.reo_error[i]);
6576 	}
6577 	DP_PRINT_STATS("REO Error(0-14):%s",
6578 			reo_error);
6579 }
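
/*
 * Illustrative sketch (not part of the driver, never called): the
 * bounded-append idiom used above, and in the rate/NSS printers below,
 * for packing an array of counters into a single log line.
 */
static inline void dp_snprint_array_sketch(uint32_t *vals, uint32_t cnt,
					   char *buf, uint32_t buf_len)
{
	uint32_t i;
	uint32_t index = 0;

	for (i = 0; i < cnt && index < buf_len; i++)
		index += qdf_snprint(&buf[index], buf_len - index,
				     " %d", vals[i]);
}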
6580 
6581 /**
6582  * dp_srng_get_str_from_hal_ring_type() - Return string name for a ring
6583  * @ring_type: Ring
6584  *
6585  * Return: char const pointer
6586  */
6587 static inline const
6588 char *dp_srng_get_str_from_hal_ring_type(enum hal_ring_type ring_type)
6589 {
6590 	switch (ring_type) {
6591 	case REO_DST:
6592 		return "Reo_dst";
6593 	case REO_EXCEPTION:
6594 		return "Reo_exception";
6595 	case REO_CMD:
6596 		return "Reo_cmd";
6597 	case REO_REINJECT:
6598 		return "Reo_reinject";
6599 	case REO_STATUS:
6600 		return "Reo_status";
6601 	case WBM2SW_RELEASE:
6602 		return "wbm2sw_release";
6603 	case TCL_DATA:
6604 		return "tcl_data";
6605 	case TCL_CMD:
6606 		return "tcl_cmd";
6607 	case TCL_STATUS:
6608 		return "tcl_status";
6609 	case SW2WBM_RELEASE:
6610 		return "sw2wbm_release";
6611 	case RXDMA_BUF:
6612 		return "Rxdma_buf";
6613 	case RXDMA_DST:
6614 		return "Rxdma_dst";
6615 	case RXDMA_MONITOR_BUF:
6616 		return "Rxdma_monitor_buf";
6617 	case RXDMA_MONITOR_DESC:
6618 		return "Rxdma_monitor_desc";
6619 	case RXDMA_MONITOR_STATUS:
6620 		return "Rxdma_monitor_status";
6621 	default:
6622 		dp_err("Invalid ring type");
6623 		break;
6624 	}
6625 	return "Invalid";
6626 }
6627 
6628 /**
6629  * dp_print_ring_stat_from_hal(): Print hal level ring stats
6630  * @soc: DP_SOC handle
6631  * @srng: DP_SRNG handle
6632  * @ring_type: srng src/dst ring, also used to derive the printable ring name
6634  *
6635  * Return: void
6636  */
6637 static void
6638 dp_print_ring_stat_from_hal(struct dp_soc *soc,  struct dp_srng *srng,
6639 			    enum hal_ring_type ring_type)
6640 {
6641 	uint32_t tailp;
6642 	uint32_t headp;
6643 	int32_t hw_headp = -1;
6644 	int32_t hw_tailp = -1;
6645 	const char *ring_name;
6646 	struct hal_soc *hal_soc;
6647 
6648 	if (soc && srng && srng->hal_srng) {
6649 		hal_soc = (struct hal_soc *)soc->hal_soc;
6650 		ring_name = dp_srng_get_str_from_hal_ring_type(ring_type);
6651 
6652 		hal_get_sw_hptp(soc->hal_soc, srng->hal_srng, &tailp, &headp);
6653 
6654 		DP_PRINT_STATS("%s:SW:Head pointer = %d Tail Pointer = %d\n",
6655 			       ring_name, headp, tailp);
6656 
6657 		hal_get_hw_hptp(hal_soc, srng->hal_srng, &hw_headp,
6658 				&hw_tailp, ring_type);
6659 
6660 		DP_PRINT_STATS("%s:HW:Head pointer = %d Tail Pointer = %d\n",
6661 			       ring_name, hw_headp, hw_tailp);
6662 	}
6663 
6664 }
6665 
6666 /**
6667  * dp_print_mon_ring_stats_from_hal() - Print stat for monitor rings based
6668  *					on target
6669  * @pdev: physical device handle
6670  * @mac_id: mac id
6671  *
6672  * Return: void
6673  */
6674 static inline
6675 void dp_print_mon_ring_stat_from_hal(struct dp_pdev *pdev, uint8_t mac_id)
6676 {
6677 	if (pdev->soc->wlan_cfg_ctx->rxdma1_enable) {
6678 		dp_print_ring_stat_from_hal(pdev->soc,
6679 					    &pdev->rxdma_mon_buf_ring[mac_id],
6680 					    RXDMA_MONITOR_BUF);
6681 		dp_print_ring_stat_from_hal(pdev->soc,
6682 					    &pdev->rxdma_mon_dst_ring[mac_id],
6683 					    RXDMA_MONITOR_DST);
6684 		dp_print_ring_stat_from_hal(pdev->soc,
6685 					    &pdev->rxdma_mon_desc_ring[mac_id],
6686 					    RXDMA_MONITOR_DESC);
6687 	}
6688 
6689 	dp_print_ring_stat_from_hal(pdev->soc,
6690 				    &pdev->rxdma_mon_status_ring[mac_id],
6691 				    RXDMA_MONITOR_STATUS);
6692 }
6693 
6694 /**
6695  * dp_print_ring_stats(): Print tail and head pointer
6696  * @pdev: DP_PDEV handle
6697  *
6698  * Return:void
6699  */
6700 static inline void
6701 dp_print_ring_stats(struct dp_pdev *pdev)
6702 {
6703 	uint32_t i;
6704 	int mac_id;
6705 
6706 	dp_print_ring_stat_from_hal(pdev->soc,
6707 				    &pdev->soc->reo_exception_ring,
6708 				    REO_EXCEPTION);
6709 	dp_print_ring_stat_from_hal(pdev->soc,
6710 				    &pdev->soc->reo_reinject_ring,
6711 				    REO_REINJECT);
6712 	dp_print_ring_stat_from_hal(pdev->soc,
6713 				    &pdev->soc->reo_cmd_ring,
6714 				    REO_CMD);
6715 	dp_print_ring_stat_from_hal(pdev->soc,
6716 				    &pdev->soc->reo_status_ring,
6717 				    REO_STATUS);
6718 	dp_print_ring_stat_from_hal(pdev->soc,
6719 				    &pdev->soc->rx_rel_ring,
6720 				    WBM2SW_RELEASE);
6721 	dp_print_ring_stat_from_hal(pdev->soc,
6722 				    &pdev->soc->tcl_cmd_ring,
6723 				    TCL_CMD);
6724 	dp_print_ring_stat_from_hal(pdev->soc,
6725 				    &pdev->soc->tcl_status_ring,
6726 				    TCL_STATUS);
6727 	dp_print_ring_stat_from_hal(pdev->soc,
6728 				    &pdev->soc->wbm_desc_rel_ring,
6729 				    SW2WBM_RELEASE);
6730 	for (i = 0; i < MAX_REO_DEST_RINGS; i++)
6731 		dp_print_ring_stat_from_hal(pdev->soc,
6732 					    &pdev->soc->reo_dest_ring[i],
6733 					    REO_DST);
6734 
6735 	for (i = 0; i < pdev->soc->num_tcl_data_rings; i++)
6736 		dp_print_ring_stat_from_hal(pdev->soc,
6737 					    &pdev->soc->tcl_data_ring[i],
6738 					    TCL_DATA);
6739 	for (i = 0; i < MAX_TCL_DATA_RINGS; i++)
6740 		dp_print_ring_stat_from_hal(pdev->soc,
6741 					    &pdev->soc->tx_comp_ring[i],
6742 					    WBM2SW_RELEASE);
6743 
6744 	dp_print_ring_stat_from_hal(pdev->soc,
6745 				    &pdev->rx_refill_buf_ring,
6746 				    RXDMA_BUF);
6747 
6748 	dp_print_ring_stat_from_hal(pdev->soc,
6749 				    &pdev->rx_refill_buf_ring2,
6750 				    RXDMA_BUF);
6751 
6752 	for (i = 0; i < MAX_RX_MAC_RINGS; i++)
6753 		dp_print_ring_stat_from_hal(pdev->soc,
6754 					    &pdev->rx_mac_buf_ring[i],
6755 					    RXDMA_BUF);
6756 
6757 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++)
6758 		dp_print_mon_ring_stat_from_hal(pdev, mac_id);
6759 
6760 	for (i = 0; i < NUM_RXDMA_RINGS_PER_PDEV; i++)
6761 		dp_print_ring_stat_from_hal(pdev->soc,
6762 					    &pdev->rxdma_err_dst_ring[i],
6763 					    RXDMA_DST);
6764 
6765 }
6766 
6767 /**
6768  * dp_txrx_host_stats_clr(): Reinitialize the txrx stats
6769  * @vdev: DP_VDEV handle
6770  *
6771  * Return:void
6772  */
6773 static inline void
6774 dp_txrx_host_stats_clr(struct dp_vdev *vdev)
6775 {
6776 	struct dp_peer *peer = NULL;
6777 
6778 	if (!vdev || !vdev->pdev)
6779 		return;
6780 
6781 	DP_STATS_CLR(vdev->pdev);
6782 	DP_STATS_CLR(vdev->pdev->soc);
6783 	DP_STATS_CLR(vdev);
6784 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
6785 		if (!peer)
6786 			return;
6787 		DP_STATS_CLR(peer);
6788 
6789 #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
6790 		dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, vdev->pdev->soc,
6791 				     &peer->stats,  peer->peer_ids[0],
6792 				     UPDATE_PEER_STATS, vdev->pdev->pdev_id);
6793 #endif
6794 	}
6795 
6796 #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
6797 	dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, vdev->pdev->soc,
6798 			     &vdev->stats,  vdev->vdev_id,
6799 			     UPDATE_VDEV_STATS, vdev->pdev->pdev_id);
6800 #endif
6801 }
6802 
6803 /**
6804  * dp_print_common_rates_info(): Print common rates for tx or rx
6805  * @pkt_type_array: rate type array containing rate info
6806  *
6807  * Return:void
6808  */
6809 static inline void
6810 dp_print_common_rates_info(struct cdp_pkt_type *pkt_type_array)
6811 {
6812 	uint8_t mcs, pkt_type;
6813 
6814 	for (pkt_type = 0; pkt_type < DOT11_MAX; pkt_type++) {
6815 		for (mcs = 0; mcs < MAX_MCS; mcs++) {
6816 			if (!dp_rate_string[pkt_type][mcs].valid)
6817 				continue;
6818 
6819 			DP_PRINT_STATS("	%s = %d",
6820 				       dp_rate_string[pkt_type][mcs].mcs_type,
6821 				       pkt_type_array[pkt_type].mcs_count[mcs]);
6822 		}
6823 
6824 		DP_PRINT_STATS("\n");
6825 	}
6826 }
6827 
6828 /**
6829  * dp_print_rx_rates(): Print Rx rate stats
6830  * @vdev: DP_VDEV handle
6831  *
6832  * Return:void
6833  */
6834 static inline void
6835 dp_print_rx_rates(struct dp_vdev *vdev)
6836 {
6837 	struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;
6838 	uint8_t i;
6839 	uint8_t index = 0;
6840 	char nss[DP_NSS_LENGTH];
6841 
6842 	DP_PRINT_STATS("Rx Rate Info:\n");
6843 	dp_print_common_rates_info(pdev->stats.rx.pkt_type);
6844 
6845 
6846 	index = 0;
6847 	for (i = 0; i < SS_COUNT; i++) {
6848 		index += qdf_snprint(&nss[index], DP_NSS_LENGTH - index,
6849 				" %d", pdev->stats.rx.nss[i]);
6850 	}
6851 	DP_PRINT_STATS("NSS(1-8) = %s",
6852 			nss);
6853 
6854 	DP_PRINT_STATS("SGI ="
6855 			" 0.8us %d,"
6856 			" 0.4us %d,"
6857 			" 1.6us %d,"
6858 			" 3.2us %d,",
6859 			pdev->stats.rx.sgi_count[0],
6860 			pdev->stats.rx.sgi_count[1],
6861 			pdev->stats.rx.sgi_count[2],
6862 			pdev->stats.rx.sgi_count[3]);
6863 	DP_PRINT_STATS("BW Counts = 20MHZ %d, 40MHZ %d, 80MHZ %d, 160MHZ %d",
6864 			pdev->stats.rx.bw[0], pdev->stats.rx.bw[1],
6865 			pdev->stats.rx.bw[2], pdev->stats.rx.bw[3]);
6866 	DP_PRINT_STATS("Reception Type ="
6867 			" SU: %d,"
6868 			" MU_MIMO:%d,"
6869 			" MU_OFDMA:%d,"
6870 			" MU_OFDMA_MIMO:%d\n",
6871 			pdev->stats.rx.reception_type[0],
6872 			pdev->stats.rx.reception_type[1],
6873 			pdev->stats.rx.reception_type[2],
6874 			pdev->stats.rx.reception_type[3]);
6875 	DP_PRINT_STATS("Aggregation:\n");
6876 	DP_PRINT_STATS("Number of Msdu's Part of Ampdus = %d",
6877 			pdev->stats.rx.ampdu_cnt);
6878 	DP_PRINT_STATS("Number of Msdu's With No Mpdu Level Aggregation : %d",
6879 			pdev->stats.rx.non_ampdu_cnt);
6880 	DP_PRINT_STATS("Number of Msdu's Part of Amsdu: %d",
6881 			pdev->stats.rx.amsdu_cnt);
6882 	DP_PRINT_STATS("Number of Msdu's With No Msdu Level Aggregation: %d",
6883 			pdev->stats.rx.non_amsdu_cnt);
6884 }
6885 
6886 /**
6887  * dp_print_tx_rates(): Print tx rates
6888  * @vdev: DP_VDEV handle
6889  *
6890  * Return:void
6891  */
6892 static inline void
6893 dp_print_tx_rates(struct dp_vdev *vdev)
6894 {
6895 	struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;
6896 	uint8_t index;
6897 	char nss[DP_NSS_LENGTH];
6898 	int nss_index;
6899 
6900 	DP_PRINT_STATS("Tx Rate Info:\n");
6901 	dp_print_common_rates_info(pdev->stats.tx.pkt_type);
6902 
6903 	DP_PRINT_STATS("SGI ="
6904 			" 0.8us %d"
6905 			" 0.4us %d"
6906 			" 1.6us %d"
6907 			" 3.2us %d",
6908 			pdev->stats.tx.sgi_count[0],
6909 			pdev->stats.tx.sgi_count[1],
6910 			pdev->stats.tx.sgi_count[2],
6911 			pdev->stats.tx.sgi_count[3]);
6912 
6913 	DP_PRINT_STATS("BW Counts = 20MHZ %d, 40MHZ %d, 80MHZ %d, 160MHZ %d",
6914 			pdev->stats.tx.bw[0], pdev->stats.tx.bw[1],
6915 			pdev->stats.tx.bw[2], pdev->stats.tx.bw[3]);
6916 
6917 	index = 0;
6918 	for (nss_index = 0; nss_index < SS_COUNT; nss_index++) {
6919 		index += qdf_snprint(&nss[index], DP_NSS_LENGTH - index,
6920 				" %d", pdev->stats.tx.nss[nss_index]);
6921 	}
6922 
6923 	DP_PRINT_STATS("NSS(1-8) = %s", nss);
6924 	DP_PRINT_STATS("OFDMA = %d", pdev->stats.tx.ofdma);
6925 	DP_PRINT_STATS("STBC = %d", pdev->stats.tx.stbc);
6926 	DP_PRINT_STATS("LDPC = %d", pdev->stats.tx.ldpc);
6927 	DP_PRINT_STATS("Retries = %d", pdev->stats.tx.retries);
6928 	DP_PRINT_STATS("Last ack rssi = %d\n", pdev->stats.tx.last_ack_rssi);
6929 
6930 	DP_PRINT_STATS("Aggregation:\n");
6931 	DP_PRINT_STATS("Number of Msdu's Part of Amsdu = %d",
6932 			pdev->stats.tx.amsdu_cnt);
6933 	DP_PRINT_STATS("Number of Msdu's With No Msdu Level Aggregation = %d",
6934 			pdev->stats.tx.non_amsdu_cnt);
6935 }
6936 
6937 /**
6938  * dp_print_peer_stats():print peer stats
6939  * @peer: DP_PEER handle
6940  *
6941  * return void
6942  */
6943 static inline void dp_print_peer_stats(struct dp_peer *peer)
6944 {
6945 	uint8_t i;
6946 	uint32_t index;
6947 	uint32_t j;
6948 	char nss[DP_NSS_LENGTH];
6949 	char mu_group_id[DP_MU_GROUP_LENGTH];
6950 
6951 	DP_PRINT_STATS("Node Tx Stats:\n");
6952 	DP_PRINT_STATS("Total Packet Completions = %d",
6953 			peer->stats.tx.comp_pkt.num);
6954 	DP_PRINT_STATS("Total Bytes Completions = %llu",
6955 			peer->stats.tx.comp_pkt.bytes);
6956 	DP_PRINT_STATS("Success Packets = %d",
6957 			peer->stats.tx.tx_success.num);
6958 	DP_PRINT_STATS("Success Bytes = %llu",
6959 			peer->stats.tx.tx_success.bytes);
6960 	DP_PRINT_STATS("Unicast Success Packets = %d",
6961 			peer->stats.tx.ucast.num);
6962 	DP_PRINT_STATS("Unicast Success Bytes = %llu",
6963 			peer->stats.tx.ucast.bytes);
6964 	DP_PRINT_STATS("Multicast Success Packets = %d",
6965 			peer->stats.tx.mcast.num);
6966 	DP_PRINT_STATS("Multicast Success Bytes = %llu",
6967 			peer->stats.tx.mcast.bytes);
6968 	DP_PRINT_STATS("Broadcast Success Packets = %d",
6969 			peer->stats.tx.bcast.num);
6970 	DP_PRINT_STATS("Broadcast Success Bytes = %llu",
6971 			peer->stats.tx.bcast.bytes);
6972 	DP_PRINT_STATS("Packets Failed = %d",
6973 			peer->stats.tx.tx_failed);
6974 	DP_PRINT_STATS("Packets In OFDMA = %d",
6975 			peer->stats.tx.ofdma);
6976 	DP_PRINT_STATS("Packets In STBC = %d",
6977 			peer->stats.tx.stbc);
6978 	DP_PRINT_STATS("Packets In LDPC = %d",
6979 			peer->stats.tx.ldpc);
6980 	DP_PRINT_STATS("Packet Retries = %d",
6981 			peer->stats.tx.retries);
6982 	DP_PRINT_STATS("MSDU's Part of AMSDU = %d",
6983 			peer->stats.tx.amsdu_cnt);
6984 	DP_PRINT_STATS("Last Packet RSSI = %d",
6985 			peer->stats.tx.last_ack_rssi);
6986 	DP_PRINT_STATS("Dropped At FW: Removed Pkts = %u",
6987 		       peer->stats.tx.dropped.fw_rem.num);
6988 	DP_PRINT_STATS("Dropped At FW: Removed bytes = %llu",
6989 		       peer->stats.tx.dropped.fw_rem.bytes);
6990 	DP_PRINT_STATS("Dropped At FW: Removed transmitted = %d",
6991 			peer->stats.tx.dropped.fw_rem_tx);
6992 	DP_PRINT_STATS("Dropped At FW: Removed Untransmitted = %d",
6993 			peer->stats.tx.dropped.fw_rem_notx);
6994 	DP_PRINT_STATS("Dropped : Age Out = %d",
6995 			peer->stats.tx.dropped.age_out);
6996 	DP_PRINT_STATS("NAWDS : ");
6997 	DP_PRINT_STATS("	Nawds multicast Drop Tx Packet = %d",
6998 			peer->stats.tx.nawds_mcast_drop);
	DP_PRINT_STATS("	Nawds multicast Tx Packet Count = %d",
			peer->stats.tx.nawds_mcast.num);
	DP_PRINT_STATS("	Nawds multicast Tx Packet Bytes = %llu",
			peer->stats.tx.nawds_mcast.bytes);
7003 
7004 	DP_PRINT_STATS("Rate Info:");
7005 	dp_print_common_rates_info(peer->stats.tx.pkt_type);
7006 
7007 
7008 	DP_PRINT_STATS("SGI = "
7009 			" 0.8us %d"
7010 			" 0.4us %d"
7011 			" 1.6us %d"
7012 			" 3.2us %d",
7013 			peer->stats.tx.sgi_count[0],
7014 			peer->stats.tx.sgi_count[1],
7015 			peer->stats.tx.sgi_count[2],
7016 			peer->stats.tx.sgi_count[3]);
7017 	DP_PRINT_STATS("Excess Retries per AC ");
7018 	DP_PRINT_STATS("	 Best effort = %d",
7019 			peer->stats.tx.excess_retries_per_ac[0]);
	DP_PRINT_STATS("	 Background = %d",
7021 			peer->stats.tx.excess_retries_per_ac[1]);
7022 	DP_PRINT_STATS("	 Video = %d",
7023 			peer->stats.tx.excess_retries_per_ac[2]);
7024 	DP_PRINT_STATS("	 Voice = %d",
7025 			peer->stats.tx.excess_retries_per_ac[3]);
7026 	DP_PRINT_STATS("BW Counts = 20MHZ %d 40MHZ %d 80MHZ %d 160MHZ %d\n",
7027 			peer->stats.tx.bw[0], peer->stats.tx.bw[1],
7028 			peer->stats.tx.bw[2], peer->stats.tx.bw[3]);
7029 
7030 	index = 0;
7031 	for (i = 0; i < SS_COUNT; i++) {
7032 		index += qdf_snprint(&nss[index], DP_NSS_LENGTH - index,
7033 				" %d", peer->stats.tx.nss[i]);
7034 	}
7035 	DP_PRINT_STATS("NSS(1-8) = %s", nss);
7036 
7037 	DP_PRINT_STATS("Transmit Type :");
7038 	DP_PRINT_STATS("SU %d, MU_MIMO %d, MU_OFDMA %d, MU_MIMO_OFDMA %d",
7039 		       peer->stats.tx.transmit_type[0],
7040 		       peer->stats.tx.transmit_type[1],
7041 		       peer->stats.tx.transmit_type[2],
7042 		       peer->stats.tx.transmit_type[3]);
7043 
7044 	for (i = 0; i < MAX_MU_GROUP_ID;) {
7045 		index = 0;
7046 		for (j = 0; j < DP_MU_GROUP_SHOW && i < MAX_MU_GROUP_ID;
7047 		     j++) {
7048 			index += qdf_snprint(&mu_group_id[index],
7049 					     DP_MU_GROUP_LENGTH - index,
7050 					     " %d",
7051 					     peer->stats.tx.mu_group_id[i]);
7052 			i++;
7053 		}
7054 
7055 		DP_PRINT_STATS("User position list for GID %02d->%d: [%s]",
7056 			       i - DP_MU_GROUP_SHOW, i - 1, mu_group_id);
7057 	}
7058 
7059 	DP_PRINT_STATS("Last Packet RU index [%d], Size [%d]",
7060 		       peer->stats.tx.ru_start, peer->stats.tx.ru_tones);
7061 	DP_PRINT_STATS("RU Locations RU[26 52 106 242 484 996]:");
	DP_PRINT_STATS("RU 26: %d", peer->stats.tx.ru_loc[0]);
7063 	DP_PRINT_STATS("RU 52: %d", peer->stats.tx.ru_loc[1]);
7064 	DP_PRINT_STATS("RU 106: %d", peer->stats.tx.ru_loc[2]);
7065 	DP_PRINT_STATS("RU 242: %d", peer->stats.tx.ru_loc[3]);
7066 	DP_PRINT_STATS("RU 484: %d", peer->stats.tx.ru_loc[4]);
7067 	DP_PRINT_STATS("RU 996: %d", peer->stats.tx.ru_loc[5]);
7068 
7069 	DP_PRINT_STATS("Aggregation:");
7070 	DP_PRINT_STATS("	Number of Msdu's Part of Amsdu = %d",
7071 			peer->stats.tx.amsdu_cnt);
7072 	DP_PRINT_STATS("	Number of Msdu's With No Msdu Level Aggregation = %d\n",
7073 			peer->stats.tx.non_amsdu_cnt);
7074 
	DP_PRINT_STATS("Bytes and Packets transmitted in last one sec:");
7076 	DP_PRINT_STATS("	Bytes transmitted in last sec: %d",
7077 		       peer->stats.tx.tx_byte_rate);
7078 	DP_PRINT_STATS("	Data transmitted in last sec: %d",
7079 		       peer->stats.tx.tx_data_rate);
7080 
7081 	DP_PRINT_STATS("Node Rx Stats:");
7082 	DP_PRINT_STATS("Packets Sent To Stack = %d",
7083 			peer->stats.rx.to_stack.num);
7084 	DP_PRINT_STATS("Bytes Sent To Stack = %llu",
7085 			peer->stats.rx.to_stack.bytes);
7086 	for (i = 0; i <  CDP_MAX_RX_RINGS; i++) {
7087 		DP_PRINT_STATS("Ring Id = %d", i);
7088 		DP_PRINT_STATS("	Packets Received = %d",
7089 				peer->stats.rx.rcvd_reo[i].num);
7090 		DP_PRINT_STATS("	Bytes Received = %llu",
7091 				peer->stats.rx.rcvd_reo[i].bytes);
7092 	}
7093 	DP_PRINT_STATS("Multicast Packets Received = %d",
7094 			peer->stats.rx.multicast.num);
7095 	DP_PRINT_STATS("Multicast Bytes Received = %llu",
7096 			peer->stats.rx.multicast.bytes);
7097 	DP_PRINT_STATS("Broadcast Packets Received = %d",
7098 			peer->stats.rx.bcast.num);
7099 	DP_PRINT_STATS("Broadcast Bytes Received = %llu",
7100 			peer->stats.rx.bcast.bytes);
7101 	DP_PRINT_STATS("Intra BSS Packets Received = %d",
7102 			peer->stats.rx.intra_bss.pkts.num);
7103 	DP_PRINT_STATS("Intra BSS Bytes Received = %llu",
7104 			peer->stats.rx.intra_bss.pkts.bytes);
7105 	DP_PRINT_STATS("Raw Packets Received = %d",
7106 			peer->stats.rx.raw.num);
7107 	DP_PRINT_STATS("Raw Bytes Received = %llu",
7108 			peer->stats.rx.raw.bytes);
7109 	DP_PRINT_STATS("Errors: MIC Errors = %d",
7110 			peer->stats.rx.err.mic_err);
	DP_PRINT_STATS("Errors: Decryption Errors = %d",
7112 			peer->stats.rx.err.decrypt_err);
	DP_PRINT_STATS("Msdu's Received Not Part of Ampdu = %d",
			peer->stats.rx.non_ampdu_cnt);
	DP_PRINT_STATS("Msdu's Received As Part of Ampdu = %d",
			peer->stats.rx.ampdu_cnt);
7117 	DP_PRINT_STATS("Msdu's Received Not Part of Amsdu's = %d",
7118 			peer->stats.rx.non_amsdu_cnt);
7119 	DP_PRINT_STATS("MSDUs Received As Part of Amsdu = %d",
7120 			peer->stats.rx.amsdu_cnt);
7121 	DP_PRINT_STATS("NAWDS : ");
7122 	DP_PRINT_STATS("	Nawds multicast Drop Rx Packet = %d",
7123 			peer->stats.rx.nawds_mcast_drop);
7124 	DP_PRINT_STATS("SGI ="
7125 			" 0.8us %d"
7126 			" 0.4us %d"
7127 			" 1.6us %d"
7128 			" 3.2us %d",
7129 			peer->stats.rx.sgi_count[0],
7130 			peer->stats.rx.sgi_count[1],
7131 			peer->stats.rx.sgi_count[2],
7132 			peer->stats.rx.sgi_count[3]);
7133 	DP_PRINT_STATS("BW Counts = 20MHZ %d 40MHZ %d 80MHZ %d 160MHZ %d",
7134 			peer->stats.rx.bw[0], peer->stats.rx.bw[1],
7135 			peer->stats.rx.bw[2], peer->stats.rx.bw[3]);
7136 	DP_PRINT_STATS("Reception Type ="
7137 			" SU %d,"
7138 			" MU_MIMO %d,"
7139 			" MU_OFDMA %d,"
7140 			" MU_OFDMA_MIMO %d",
7141 			peer->stats.rx.reception_type[0],
7142 			peer->stats.rx.reception_type[1],
7143 			peer->stats.rx.reception_type[2],
7144 			peer->stats.rx.reception_type[3]);
7145 
7146 	dp_print_common_rates_info(peer->stats.rx.pkt_type);
7147 
7148 	index = 0;
7149 	for (i = 0; i < SS_COUNT; i++) {
7150 		index += qdf_snprint(&nss[index], DP_NSS_LENGTH - index,
7151 				" %d", peer->stats.rx.nss[i]);
7152 	}
7153 	DP_PRINT_STATS("NSS(1-8) = %s",
7154 			nss);
7155 
7156 	DP_PRINT_STATS("Aggregation:");
7157 	DP_PRINT_STATS("	Msdu's Part of Ampdu = %d",
7158 			peer->stats.rx.ampdu_cnt);
7159 	DP_PRINT_STATS("	Msdu's With No Mpdu Level Aggregation = %d",
7160 			peer->stats.rx.non_ampdu_cnt);
7161 	DP_PRINT_STATS("	Msdu's Part of Amsdu = %d",
7162 			peer->stats.rx.amsdu_cnt);
7163 	DP_PRINT_STATS("	Msdu's With No Msdu Level Aggregation = %d",
7164 			peer->stats.rx.non_amsdu_cnt);
7165 
7166 	DP_PRINT_STATS("Bytes and Packets received in last one sec:");
7167 	DP_PRINT_STATS("	Bytes received in last sec: %d",
7168 		       peer->stats.rx.rx_byte_rate);
7169 	DP_PRINT_STATS("	Data received in last sec: %d",
7170 		       peer->stats.rx.rx_data_rate);
7171 }
7172 
7173 /*
7174  * dp_get_host_peer_stats()- function to print peer stats
7175  * @pdev_handle: DP_PDEV handle
7176  * @mac_addr: mac address of the peer
7177  *
7178  * Return: void
7179  */
7180 static void
7181 dp_get_host_peer_stats(struct cdp_pdev *pdev_handle, char *mac_addr)
7182 {
7183 	struct dp_peer *peer;
7184 	uint8_t local_id;
7185 
7186 	if (!mac_addr) {
7187 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
7188 			  "Invalid MAC address\n");
7189 		return;
7190 	}
7191 
7192 	peer = (struct dp_peer *)dp_find_peer_by_addr(pdev_handle, mac_addr,
7193 			&local_id);
7194 
7195 	if (!peer) {
7196 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
7197 			  "%s: Invalid peer\n", __func__);
7198 		return;
7199 	}
7200 
7201 	dp_print_peer_stats(peer);
7202 	dp_peer_rxtid_stats(peer, dp_rx_tid_stats_cb, NULL);
7203 }
7204 
7205 /**
7206  * dp_print_soc_cfg_params()- Dump soc wlan config parameters
 * @soc: DP_SOC handle
7208  *
7209  * Return: void
7210  */
7211 static void
7212 dp_print_soc_cfg_params(struct dp_soc *soc)
7213 {
7214 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
7215 	uint8_t index = 0, i = 0;
7216 	char ring_mask[DP_MAX_INT_CONTEXTS_STRING_LENGTH];
7217 	int num_of_int_contexts;
7218 
7219 	if (!soc) {
7220 		dp_err("Context is null");
7221 		return;
7222 	}
7223 
7224 	soc_cfg_ctx = soc->wlan_cfg_ctx;
7225 
7226 	if (!soc_cfg_ctx) {
7227 		dp_err("Context is null");
7228 		return;
7229 	}
7230 
7231 	num_of_int_contexts =
7232 			wlan_cfg_get_num_contexts(soc_cfg_ctx);
7233 
7234 	DP_TRACE_STATS(DEBUG, "No. of interrupt contexts: %u",
7235 		       soc_cfg_ctx->num_int_ctxts);
7236 	DP_TRACE_STATS(DEBUG, "Max clients: %u",
7237 		       soc_cfg_ctx->max_clients);
7238 	DP_TRACE_STATS(DEBUG, "Max alloc size: %u ",
7239 		       soc_cfg_ctx->max_alloc_size);
7240 	DP_TRACE_STATS(DEBUG, "Per pdev tx ring: %u ",
7241 		       soc_cfg_ctx->per_pdev_tx_ring);
7242 	DP_TRACE_STATS(DEBUG, "Num tcl data rings: %u ",
7243 		       soc_cfg_ctx->num_tcl_data_rings);
7244 	DP_TRACE_STATS(DEBUG, "Per pdev rx ring: %u ",
7245 		       soc_cfg_ctx->per_pdev_rx_ring);
7246 	DP_TRACE_STATS(DEBUG, "Per pdev lmac ring: %u ",
7247 		       soc_cfg_ctx->per_pdev_lmac_ring);
7248 	DP_TRACE_STATS(DEBUG, "Num of reo dest rings: %u ",
7249 		       soc_cfg_ctx->num_reo_dest_rings);
7250 	DP_TRACE_STATS(DEBUG, "Num tx desc pool: %u ",
7251 		       soc_cfg_ctx->num_tx_desc_pool);
7252 	DP_TRACE_STATS(DEBUG, "Num tx ext desc pool: %u ",
7253 		       soc_cfg_ctx->num_tx_ext_desc_pool);
7254 	DP_TRACE_STATS(DEBUG, "Num tx desc: %u ",
7255 		       soc_cfg_ctx->num_tx_desc);
7256 	DP_TRACE_STATS(DEBUG, "Num tx ext desc: %u ",
7257 		       soc_cfg_ctx->num_tx_ext_desc);
7258 	DP_TRACE_STATS(DEBUG, "Htt packet type: %u ",
7259 		       soc_cfg_ctx->htt_packet_type);
7260 	DP_TRACE_STATS(DEBUG, "Max peer_ids: %u ",
7261 		       soc_cfg_ctx->max_peer_id);
7262 	DP_TRACE_STATS(DEBUG, "Tx ring size: %u ",
7263 		       soc_cfg_ctx->tx_ring_size);
7264 	DP_TRACE_STATS(DEBUG, "Tx comp ring size: %u ",
7265 		       soc_cfg_ctx->tx_comp_ring_size);
7266 	DP_TRACE_STATS(DEBUG, "Tx comp ring size nss: %u ",
7267 		       soc_cfg_ctx->tx_comp_ring_size_nss);
7268 	DP_TRACE_STATS(DEBUG, "Int batch threshold tx: %u ",
7269 		       soc_cfg_ctx->int_batch_threshold_tx);
7270 	DP_TRACE_STATS(DEBUG, "Int timer threshold tx: %u ",
7271 		       soc_cfg_ctx->int_timer_threshold_tx);
7272 	DP_TRACE_STATS(DEBUG, "Int batch threshold rx: %u ",
7273 		       soc_cfg_ctx->int_batch_threshold_rx);
7274 	DP_TRACE_STATS(DEBUG, "Int timer threshold rx: %u ",
7275 		       soc_cfg_ctx->int_timer_threshold_rx);
7276 	DP_TRACE_STATS(DEBUG, "Int batch threshold other: %u ",
7277 		       soc_cfg_ctx->int_batch_threshold_other);
7278 	DP_TRACE_STATS(DEBUG, "Int timer threshold other: %u ",
7279 		       soc_cfg_ctx->int_timer_threshold_other);
7280 
7281 	for (i = 0; i < num_of_int_contexts; i++) {
7282 		index += qdf_snprint(&ring_mask[index],
7283 				     DP_MAX_INT_CONTEXTS_STRING_LENGTH - index,
7284 				     " %d",
7285 				     soc_cfg_ctx->int_tx_ring_mask[i]);
7286 	}
7287 
7288 	DP_TRACE_STATS(DEBUG, "Tx ring mask (0-%d):%s",
7289 		       num_of_int_contexts, ring_mask);
7290 
7291 	index = 0;
7292 	for (i = 0; i < num_of_int_contexts; i++) {
7293 		index += qdf_snprint(&ring_mask[index],
7294 				     DP_MAX_INT_CONTEXTS_STRING_LENGTH - index,
7295 				     " %d",
7296 				     soc_cfg_ctx->int_rx_ring_mask[i]);
7297 	}
7298 
7299 	DP_TRACE_STATS(DEBUG, "Rx ring mask (0-%d):%s",
7300 		       num_of_int_contexts, ring_mask);
7301 
7302 	index = 0;
7303 	for (i = 0; i < num_of_int_contexts; i++) {
7304 		index += qdf_snprint(&ring_mask[index],
7305 				     DP_MAX_INT_CONTEXTS_STRING_LENGTH - index,
7306 				     " %d",
7307 				     soc_cfg_ctx->int_rx_mon_ring_mask[i]);
7308 	}
7309 
7310 	DP_TRACE_STATS(DEBUG, "Rx mon ring mask (0-%d):%s",
7311 		       num_of_int_contexts, ring_mask);
7312 
7313 	index = 0;
7314 	for (i = 0; i < num_of_int_contexts; i++) {
7315 		index += qdf_snprint(&ring_mask[index],
7316 				     DP_MAX_INT_CONTEXTS_STRING_LENGTH - index,
7317 				     " %d",
7318 				     soc_cfg_ctx->int_rx_err_ring_mask[i]);
7319 	}
7320 
7321 	DP_TRACE_STATS(DEBUG, "Rx err ring mask (0-%d):%s",
7322 		       num_of_int_contexts, ring_mask);
7323 
7324 	index = 0;
7325 	for (i = 0; i < num_of_int_contexts; i++) {
7326 		index += qdf_snprint(&ring_mask[index],
7327 				     DP_MAX_INT_CONTEXTS_STRING_LENGTH - index,
7328 				     " %d",
7329 				     soc_cfg_ctx->int_rx_wbm_rel_ring_mask[i]);
7330 	}
7331 
7332 	DP_TRACE_STATS(DEBUG, "Rx wbm rel ring mask (0-%d):%s",
7333 		       num_of_int_contexts, ring_mask);
7334 
7335 	index = 0;
7336 	for (i = 0; i < num_of_int_contexts; i++) {
7337 		index += qdf_snprint(&ring_mask[index],
7338 				     DP_MAX_INT_CONTEXTS_STRING_LENGTH - index,
7339 				     " %d",
7340 				     soc_cfg_ctx->int_reo_status_ring_mask[i]);
7341 	}
7342 
	DP_TRACE_STATS(DEBUG, "Reo status ring mask (0-%d):%s",
7344 		       num_of_int_contexts, ring_mask);
7345 
7346 	index = 0;
7347 	for (i = 0; i < num_of_int_contexts; i++) {
7348 		index += qdf_snprint(&ring_mask[index],
7349 				     DP_MAX_INT_CONTEXTS_STRING_LENGTH - index,
7350 				     " %d",
7351 				     soc_cfg_ctx->int_rxdma2host_ring_mask[i]);
7352 	}
7353 
7354 	DP_TRACE_STATS(DEBUG, "Rxdma2host ring mask (0-%d):%s",
7355 		       num_of_int_contexts, ring_mask);
7356 
7357 	index = 0;
7358 	for (i = 0; i < num_of_int_contexts; i++) {
7359 		index += qdf_snprint(&ring_mask[index],
7360 				     DP_MAX_INT_CONTEXTS_STRING_LENGTH - index,
7361 				     " %d",
7362 				     soc_cfg_ctx->int_host2rxdma_ring_mask[i]);
7363 	}
7364 
7365 	DP_TRACE_STATS(DEBUG, "Host2rxdma ring mask (0-%d):%s",
7366 		       num_of_int_contexts, ring_mask);
7367 
7368 	DP_TRACE_STATS(DEBUG, "Rx hash: %u ",
7369 		       soc_cfg_ctx->rx_hash);
7370 	DP_TRACE_STATS(DEBUG, "Tso enabled: %u ",
7371 		       soc_cfg_ctx->tso_enabled);
7372 	DP_TRACE_STATS(DEBUG, "Lro enabled: %u ",
7373 		       soc_cfg_ctx->lro_enabled);
7374 	DP_TRACE_STATS(DEBUG, "Sg enabled: %u ",
7375 		       soc_cfg_ctx->sg_enabled);
7376 	DP_TRACE_STATS(DEBUG, "Gro enabled: %u ",
7377 		       soc_cfg_ctx->gro_enabled);
7378 	DP_TRACE_STATS(DEBUG, "rawmode enabled: %u ",
7379 		       soc_cfg_ctx->rawmode_enabled);
7380 	DP_TRACE_STATS(DEBUG, "peer flow ctrl enabled: %u ",
7381 		       soc_cfg_ctx->peer_flow_ctrl_enabled);
7382 	DP_TRACE_STATS(DEBUG, "napi enabled: %u ",
7383 		       soc_cfg_ctx->napi_enabled);
7384 	DP_TRACE_STATS(DEBUG, "Tcp Udp checksum offload: %u ",
7385 		       soc_cfg_ctx->tcp_udp_checksumoffload);
7386 	DP_TRACE_STATS(DEBUG, "Defrag timeout check: %u ",
7387 		       soc_cfg_ctx->defrag_timeout_check);
7388 	DP_TRACE_STATS(DEBUG, "Rx defrag min timeout: %u ",
7389 		       soc_cfg_ctx->rx_defrag_min_timeout);
7390 	DP_TRACE_STATS(DEBUG, "WBM release ring: %u ",
7391 		       soc_cfg_ctx->wbm_release_ring);
7392 	DP_TRACE_STATS(DEBUG, "TCL CMD ring: %u ",
7393 		       soc_cfg_ctx->tcl_cmd_ring);
7394 	DP_TRACE_STATS(DEBUG, "TCL Status ring: %u ",
7395 		       soc_cfg_ctx->tcl_status_ring);
7396 	DP_TRACE_STATS(DEBUG, "REO Reinject ring: %u ",
7397 		       soc_cfg_ctx->reo_reinject_ring);
7398 	DP_TRACE_STATS(DEBUG, "RX release ring: %u ",
7399 		       soc_cfg_ctx->rx_release_ring);
7400 	DP_TRACE_STATS(DEBUG, "REO Exception ring: %u ",
7401 		       soc_cfg_ctx->reo_exception_ring);
7402 	DP_TRACE_STATS(DEBUG, "REO CMD ring: %u ",
7403 		       soc_cfg_ctx->reo_cmd_ring);
7404 	DP_TRACE_STATS(DEBUG, "REO STATUS ring: %u ",
7405 		       soc_cfg_ctx->reo_status_ring);
7406 	DP_TRACE_STATS(DEBUG, "RXDMA refill ring: %u ",
7407 		       soc_cfg_ctx->rxdma_refill_ring);
7408 	DP_TRACE_STATS(DEBUG, "RXDMA err dst ring: %u ",
7409 		       soc_cfg_ctx->rxdma_err_dst_ring);
7410 }
7411 
7412 /**
 * dp_print_pdev_cfg_params() - Print the pdev cfg parameters
 * @pdev: DP pdev handle
 *
 * Return: void
7417  */
7418 static void
7419 dp_print_pdev_cfg_params(struct dp_pdev *pdev)
7420 {
7421 	struct wlan_cfg_dp_pdev_ctxt *pdev_cfg_ctx;
7422 
7423 	if (!pdev) {
7424 		dp_err("Context is null");
7425 		return;
7426 	}
7427 
7428 	pdev_cfg_ctx = pdev->wlan_cfg_ctx;
7429 
7430 	if (!pdev_cfg_ctx) {
7431 		dp_err("Context is null");
7432 		return;
7433 	}
7434 
7435 	DP_TRACE_STATS(DEBUG, "Rx dma buf ring size: %d ",
7436 		       pdev_cfg_ctx->rx_dma_buf_ring_size);
7437 	DP_TRACE_STATS(DEBUG, "DMA Mon buf ring size: %d ",
7438 		       pdev_cfg_ctx->dma_mon_buf_ring_size);
7439 	DP_TRACE_STATS(DEBUG, "DMA Mon dest ring size: %d ",
7440 		       pdev_cfg_ctx->dma_mon_dest_ring_size);
7441 	DP_TRACE_STATS(DEBUG, "DMA Mon status ring size: %d ",
7442 		       pdev_cfg_ctx->dma_mon_status_ring_size);
7443 	DP_TRACE_STATS(DEBUG, "Rxdma monitor desc ring: %d",
7444 		       pdev_cfg_ctx->rxdma_monitor_desc_ring);
7445 	DP_TRACE_STATS(DEBUG, "Num mac rings: %d ",
7446 		       pdev_cfg_ctx->num_mac_rings);
7447 }
7448 
7449 /**
7450  * dp_txrx_stats_help() - Helper function for Txrx_Stats
7451  *
7452  * Return: None
7453  */
7454 static void dp_txrx_stats_help(void)
7455 {
7456 	dp_info("Command: iwpriv wlan0 txrx_stats <stats_option> <mac_id>");
7457 	dp_info("stats_option:");
7458 	dp_info("  1 -- HTT Tx Statistics");
7459 	dp_info("  2 -- HTT Rx Statistics");
7460 	dp_info("  3 -- HTT Tx HW Queue Statistics");
7461 	dp_info("  4 -- HTT Tx HW Sched Statistics");
7462 	dp_info("  5 -- HTT Error Statistics");
7463 	dp_info("  6 -- HTT TQM Statistics");
7464 	dp_info("  7 -- HTT TQM CMDQ Statistics");
7465 	dp_info("  8 -- HTT TX_DE_CMN Statistics");
7466 	dp_info("  9 -- HTT Tx Rate Statistics");
7467 	dp_info(" 10 -- HTT Rx Rate Statistics");
7468 	dp_info(" 11 -- HTT Peer Statistics");
7469 	dp_info(" 12 -- HTT Tx SelfGen Statistics");
7470 	dp_info(" 13 -- HTT Tx MU HWQ Statistics");
7471 	dp_info(" 14 -- HTT RING_IF_INFO Statistics");
7472 	dp_info(" 15 -- HTT SRNG Statistics");
7473 	dp_info(" 16 -- HTT SFM Info Statistics");
7474 	dp_info(" 17 -- HTT PDEV_TX_MU_MIMO_SCHED INFO Statistics");
7475 	dp_info(" 18 -- HTT Peer List Details");
7476 	dp_info(" 20 -- Clear Host Statistics");
7477 	dp_info(" 21 -- Host Rx Rate Statistics");
7478 	dp_info(" 22 -- Host Tx Rate Statistics");
7479 	dp_info(" 23 -- Host Tx Statistics");
7480 	dp_info(" 24 -- Host Rx Statistics");
7481 	dp_info(" 25 -- Host AST Statistics");
7482 	dp_info(" 26 -- Host SRNG PTR Statistics");
7483 	dp_info(" 27 -- Host Mon Statistics");
7484 	dp_info(" 28 -- Host REO Queue Statistics");
7485 	dp_info(" 29 -- Host Soc cfg param Statistics");
7486 	dp_info(" 30 -- Host pdev cfg param Statistics");
7487 }
7488 
7489 /**
7490  * dp_print_host_stats()- Function to print the stats aggregated at host
7491  * @vdev_handle: DP_VDEV handle
 * @req: stats request carrying the host stats type
 *
 * Return: 0 on success; prints an error message for invalid input
7495  */
7496 static int
7497 dp_print_host_stats(struct cdp_vdev *vdev_handle,
7498 		    struct cdp_txrx_stats_req *req)
7499 {
7500 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
7501 	struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;
7502 	enum cdp_host_txrx_stats type =
7503 			dp_stats_mapping_table[req->stats][STATS_HOST];
7504 
7505 	dp_aggregate_pdev_stats(pdev);
7506 
7507 	switch (type) {
7508 	case TXRX_CLEAR_STATS:
7509 		dp_txrx_host_stats_clr(vdev);
7510 		break;
7511 	case TXRX_RX_RATE_STATS:
7512 		dp_print_rx_rates(vdev);
7513 		break;
7514 	case TXRX_TX_RATE_STATS:
7515 		dp_print_tx_rates(vdev);
7516 		break;
7517 	case TXRX_TX_HOST_STATS:
7518 		dp_print_pdev_tx_stats(pdev);
7519 		dp_print_soc_tx_stats(pdev->soc);
7520 		break;
7521 	case TXRX_RX_HOST_STATS:
7522 		dp_print_pdev_rx_stats(pdev);
7523 		dp_print_soc_rx_stats(pdev->soc);
7524 		break;
7525 	case TXRX_AST_STATS:
7526 		dp_print_ast_stats(pdev->soc);
7527 		dp_print_peer_table(vdev);
7528 		break;
7529 	case TXRX_SRNG_PTR_STATS:
7530 		dp_print_ring_stats(pdev);
7531 		break;
7532 	case TXRX_RX_MON_STATS:
7533 		dp_print_pdev_rx_mon_stats(pdev);
7534 		break;
7535 	case TXRX_REO_QUEUE_STATS:
7536 		dp_get_host_peer_stats((struct cdp_pdev *)pdev, req->peer_addr);
7537 		break;
7538 	case TXRX_SOC_CFG_PARAMS:
7539 		dp_print_soc_cfg_params(pdev->soc);
7540 		break;
7541 	case TXRX_PDEV_CFG_PARAMS:
7542 		dp_print_pdev_cfg_params(pdev);
7543 		break;
7544 	default:
7545 		dp_info("Wrong Input For TxRx Host Stats");
7546 		dp_txrx_stats_help();
7547 		break;
7548 	}
7549 	return 0;
7550 }
7551 
7552 /*
7553  * dp_ppdu_ring_reset()- Reset PPDU Stats ring
7554  * @pdev: DP_PDEV handle
7555  *
7556  * Return: void
7557  */
7558 static void
7559 dp_ppdu_ring_reset(struct dp_pdev *pdev)
7560 {
7561 	struct htt_rx_ring_tlv_filter htt_tlv_filter;
7562 	int mac_id;
7563 
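	/*
	 * A zeroed TLV filter unsubscribes every TLV type; pushing it to the
	 * monitor status ring below effectively stops PPDU stats delivery.
	 */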
7564 	qdf_mem_set(&(htt_tlv_filter), sizeof(htt_tlv_filter), 0x0);
7565 
7566 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
7567 		int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id,
7568 							pdev->pdev_id);
7569 
7570 		htt_h2t_rx_ring_cfg(pdev->soc->htt_handle, mac_for_pdev,
7571 			pdev->rxdma_mon_status_ring[mac_id].hal_srng,
7572 			RXDMA_MONITOR_STATUS, RX_BUFFER_SIZE, &htt_tlv_filter);
7573 	}
7574 }
7575 
7576 /*
7577  * dp_ppdu_ring_cfg()- Configure PPDU Stats ring
7578  * @pdev: DP_PDEV handle
7579  *
7580  * Return: void
7581  */
7582 static void
7583 dp_ppdu_ring_cfg(struct dp_pdev *pdev)
7584 {
7585 	struct htt_rx_ring_tlv_filter htt_tlv_filter = {0};
7586 	int mac_id;
7587 
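	/*
	 * Subscribe only to the PPDU-level TLVs on the monitor status ring;
	 * the per-MSDU TLVs stay disabled, and the packet header is requested
	 * additionally only for hw NAC monitoring or m-copy mode below.
	 */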
7588 	htt_tlv_filter.mpdu_start = 1;
7589 	htt_tlv_filter.msdu_start = 0;
7590 	htt_tlv_filter.packet = 0;
7591 	htt_tlv_filter.msdu_end = 0;
7592 	htt_tlv_filter.mpdu_end = 0;
7593 	htt_tlv_filter.attention = 0;
7594 	htt_tlv_filter.ppdu_start = 1;
7595 	htt_tlv_filter.ppdu_end = 1;
7596 	htt_tlv_filter.ppdu_end_user_stats = 1;
7597 	htt_tlv_filter.ppdu_end_user_stats_ext = 1;
7598 	htt_tlv_filter.ppdu_end_status_done = 1;
7599 	htt_tlv_filter.enable_fp = 1;
7600 	htt_tlv_filter.enable_md = 0;
7601 	if (pdev->neighbour_peers_added &&
7602 	    pdev->soc->hw_nac_monitor_support) {
7603 		htt_tlv_filter.enable_md = 1;
7604 		htt_tlv_filter.packet_header = 1;
7605 	}
7606 	if (pdev->mcopy_mode) {
7607 		htt_tlv_filter.packet_header = 1;
7608 		htt_tlv_filter.enable_mo = 1;
7609 	}
7610 	htt_tlv_filter.fp_mgmt_filter = FILTER_MGMT_ALL;
7611 	htt_tlv_filter.fp_ctrl_filter = FILTER_CTRL_ALL;
7612 	htt_tlv_filter.fp_data_filter = FILTER_DATA_ALL;
7613 	htt_tlv_filter.mo_mgmt_filter = FILTER_MGMT_ALL;
7614 	htt_tlv_filter.mo_ctrl_filter = FILTER_CTRL_ALL;
7615 	htt_tlv_filter.mo_data_filter = FILTER_DATA_ALL;
7616 	if (pdev->neighbour_peers_added &&
7617 	    pdev->soc->hw_nac_monitor_support)
7618 		htt_tlv_filter.md_data_filter = FILTER_DATA_ALL;
7619 
7620 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
7621 		int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id,
7622 						pdev->pdev_id);
7623 
7624 		htt_h2t_rx_ring_cfg(pdev->soc->htt_handle, mac_for_pdev,
7625 			pdev->rxdma_mon_status_ring[mac_id].hal_srng,
7626 			RXDMA_MONITOR_STATUS, RX_BUFFER_SIZE, &htt_tlv_filter);
7627 	}
7628 }
7629 
7630 /*
 * is_ppdu_txrx_capture_enabled() - API to check whether the pktlog,
 *                              tx_sniffer and m-copy capture modes are
 *                              all disabled.
 * @pdev: dp pdev handle.
 *
 * Return: true if no capture mode is enabled, false otherwise
 */
static inline bool is_ppdu_txrx_capture_enabled(struct dp_pdev *pdev)
{
	return !pdev->pktlog_ppdu_stats && !pdev->tx_sniffer_enable &&
	       !pdev->mcopy_mode;
}
7645 
7646 /*
 * dp_set_bpr_enable() - API to enable/disable bpr feature
 * @pdev_handle: DP_PDEV handle.
 * @val: Provided value.
 *
 * Return: 0 for success, nonzero for failure.
7652  */
7653 static QDF_STATUS
7654 dp_set_bpr_enable(struct cdp_pdev *pdev_handle, int val)
7655 {
7656 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
7657 
7658 	switch (val) {
7659 	case CDP_BPR_DISABLE:
7660 		pdev->bpr_enable = CDP_BPR_DISABLE;
7661 		if (!pdev->pktlog_ppdu_stats && !pdev->enhanced_stats_en &&
7662 		    !pdev->tx_sniffer_enable && !pdev->mcopy_mode) {
7663 			dp_h2t_cfg_stats_msg_send(pdev, 0, pdev->pdev_id);
7664 		} else if (pdev->enhanced_stats_en &&
7665 			   !pdev->tx_sniffer_enable && !pdev->mcopy_mode &&
7666 			   !pdev->pktlog_ppdu_stats) {
7667 			dp_h2t_cfg_stats_msg_send(pdev,
7668 						  DP_PPDU_STATS_CFG_ENH_STATS,
7669 						  pdev->pdev_id);
7670 		}
7671 		break;
7672 	case CDP_BPR_ENABLE:
7673 		pdev->bpr_enable = CDP_BPR_ENABLE;
7674 		if (!pdev->enhanced_stats_en && !pdev->tx_sniffer_enable &&
7675 		    !pdev->mcopy_mode && !pdev->pktlog_ppdu_stats) {
7676 			dp_h2t_cfg_stats_msg_send(pdev,
7677 						  DP_PPDU_STATS_CFG_BPR,
7678 						  pdev->pdev_id);
7679 		} else if (pdev->enhanced_stats_en &&
7680 			   !pdev->tx_sniffer_enable && !pdev->mcopy_mode &&
7681 			   !pdev->pktlog_ppdu_stats) {
7682 			dp_h2t_cfg_stats_msg_send(pdev,
7683 						  DP_PPDU_STATS_CFG_BPR_ENH,
7684 						  pdev->pdev_id);
7685 		} else if (pdev->pktlog_ppdu_stats) {
7686 			dp_h2t_cfg_stats_msg_send(pdev,
7687 						  DP_PPDU_STATS_CFG_BPR_PKTLOG,
7688 						  pdev->pdev_id);
7689 		}
7690 		break;
7691 	default:
7692 		break;
7693 	}
7694 
7695 	return QDF_STATUS_SUCCESS;
7696 }
7697 
7698 /*
7699  * dp_config_debug_sniffer()- API to enable/disable debug sniffer
7700  * @pdev_handle: DP_PDEV handle
7701  * @val: user provided value
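 *	0: disable tx sniffer and m-copy modes
 *	1: enable tx sniffer
 *	2: enable m-copy mode (fails if a monitor vdev is already up)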
7702  *
7703  * Return: 0 for success. nonzero for failure.
7704  */
7705 static QDF_STATUS
7706 dp_config_debug_sniffer(struct cdp_pdev *pdev_handle, int val)
7707 {
7708 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
7709 	QDF_STATUS status = QDF_STATUS_SUCCESS;
7710 
7711 	if (pdev->mcopy_mode)
7712 		dp_reset_monitor_mode(pdev_handle);
7713 
7714 	switch (val) {
7715 	case 0:
7716 		pdev->tx_sniffer_enable = 0;
7717 		pdev->mcopy_mode = 0;
7718 		pdev->monitor_configured = false;
7719 
7720 		if (!pdev->pktlog_ppdu_stats && !pdev->enhanced_stats_en &&
7721 		    !pdev->bpr_enable) {
7722 			dp_h2t_cfg_stats_msg_send(pdev, 0, pdev->pdev_id);
7723 			dp_ppdu_ring_reset(pdev);
7724 		} else if (pdev->enhanced_stats_en && !pdev->bpr_enable) {
7725 			dp_h2t_cfg_stats_msg_send(pdev,
7726 				DP_PPDU_STATS_CFG_ENH_STATS, pdev->pdev_id);
7727 		} else if (!pdev->enhanced_stats_en && pdev->bpr_enable) {
7728 			dp_h2t_cfg_stats_msg_send(pdev,
7729 						  DP_PPDU_STATS_CFG_BPR_ENH,
7730 						  pdev->pdev_id);
7731 		} else {
7732 			dp_h2t_cfg_stats_msg_send(pdev,
7733 						  DP_PPDU_STATS_CFG_BPR,
7734 						  pdev->pdev_id);
7735 		}
7736 		break;
7737 
7738 	case 1:
7739 		pdev->tx_sniffer_enable = 1;
7740 		pdev->mcopy_mode = 0;
7741 		pdev->monitor_configured = false;
7742 
7743 		if (!pdev->pktlog_ppdu_stats)
7744 			dp_h2t_cfg_stats_msg_send(pdev,
7745 				DP_PPDU_STATS_CFG_SNIFFER, pdev->pdev_id);
7746 		break;
7747 	case 2:
7748 		if (pdev->monitor_vdev) {
7749 			status = QDF_STATUS_E_RESOURCES;
7750 			break;
7751 		}
7752 
7753 		pdev->mcopy_mode = 1;
7754 		dp_pdev_configure_monitor_rings(pdev);
7755 		pdev->monitor_configured = true;
7756 		pdev->tx_sniffer_enable = 0;
7757 
7758 		if (!pdev->pktlog_ppdu_stats)
7759 			dp_h2t_cfg_stats_msg_send(pdev,
7760 				DP_PPDU_STATS_CFG_SNIFFER, pdev->pdev_id);
7761 		break;
7762 	default:
7763 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
7764 			"Invalid value");
7765 		break;
7766 	}
7767 	return status;
7768 }
7769 
7770 /*
 * dp_enable_enhanced_stats()- API to enable enhanced statistics
7772  * @pdev_handle: DP_PDEV handle
7773  *
7774  * Return: void
7775  */
7776 static void
7777 dp_enable_enhanced_stats(struct cdp_pdev *pdev_handle)
7778 {
7779 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
7780 
7781 	if (pdev->enhanced_stats_en == 0)
7782 		dp_cal_client_timer_start(pdev->cal_client_ctx);
7783 
7784 	pdev->enhanced_stats_en = 1;
7785 
7786 	if (!pdev->mcopy_mode && !pdev->neighbour_peers_added &&
7787 	    !pdev->monitor_vdev)
7788 		dp_ppdu_ring_cfg(pdev);
7789 
7790 	if (is_ppdu_txrx_capture_enabled(pdev) && !pdev->bpr_enable) {
7791 		dp_h2t_cfg_stats_msg_send(pdev, DP_PPDU_STATS_CFG_ENH_STATS, pdev->pdev_id);
7792 	} else if (is_ppdu_txrx_capture_enabled(pdev) && pdev->bpr_enable) {
7793 		dp_h2t_cfg_stats_msg_send(pdev,
7794 					  DP_PPDU_STATS_CFG_BPR_ENH,
7795 					  pdev->pdev_id);
7796 	}
7797 }
7798 
7799 /*
 * dp_disable_enhanced_stats()- API to disable enhanced statistics
7801  * @pdev_handle: DP_PDEV handle
7802  *
7803  * Return: void
7804  */
7805 static void
7806 dp_disable_enhanced_stats(struct cdp_pdev *pdev_handle)
7807 {
7808 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
7809 
7810 	if (pdev->enhanced_stats_en == 1)
7811 		dp_cal_client_timer_stop(pdev->cal_client_ctx);
7812 
7813 	pdev->enhanced_stats_en = 0;
7814 
7815 	if (is_ppdu_txrx_capture_enabled(pdev) && !pdev->bpr_enable) {
7816 		dp_h2t_cfg_stats_msg_send(pdev, 0, pdev->pdev_id);
7817 	} else if (is_ppdu_txrx_capture_enabled(pdev) && pdev->bpr_enable) {
7818 		dp_h2t_cfg_stats_msg_send(pdev,
7819 					  DP_PPDU_STATS_CFG_BPR,
7820 					  pdev->pdev_id);
7821 	}
7822 
7823 	if (!pdev->mcopy_mode && !pdev->neighbour_peers_added &&
7824 	    !pdev->monitor_vdev)
7825 		dp_ppdu_ring_reset(pdev);
7826 }
7827 
7828 /*
7829  * dp_get_fw_peer_stats()- function to print peer stats
7830  * @pdev_handle: DP_PDEV handle
7831  * @mac_addr: mac address of the peer
7832  * @cap: Type of htt stats requested
7833  *
 * Currently supports only MAC ID based requests:
7835  *	1: HTT_PEER_STATS_REQ_MODE_NO_QUERY
7836  *	2: HTT_PEER_STATS_REQ_MODE_QUERY_TQM
7837  *	3: HTT_PEER_STATS_REQ_MODE_FLUSH_TQM
7838  *
7839  * Return: void
7840  */
7841 static void
7842 dp_get_fw_peer_stats(struct cdp_pdev *pdev_handle, uint8_t *mac_addr,
7843 		uint32_t cap)
7844 {
7845 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
7846 	int i;
7847 	uint32_t config_param0 = 0;
7848 	uint32_t config_param1 = 0;
7849 	uint32_t config_param2 = 0;
7850 	uint32_t config_param3 = 0;
7851 
7852 	HTT_DBG_EXT_STATS_PEER_INFO_IS_MAC_ADDR_SET(config_param0, 1);
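	/* cap values 1..3 select an HTT_PEER_STATS_REQ_MODE_* via bits 2..4 */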
7853 	config_param0 |= (1 << (cap + 1));
7854 
7855 	for (i = 0; i < HTT_PEER_STATS_MAX_TLV; i++) {
7856 		config_param1 |= (1 << i);
7857 	}
7858 
7859 	config_param2 |= (mac_addr[0] & 0x000000ff);
7860 	config_param2 |= ((mac_addr[1] << 8) & 0x0000ff00);
7861 	config_param2 |= ((mac_addr[2] << 16) & 0x00ff0000);
7862 	config_param2 |= ((mac_addr[3] << 24) & 0xff000000);
7863 
7864 	config_param3 |= (mac_addr[4] & 0x000000ff);
7865 	config_param3 |= ((mac_addr[5] << 8) & 0x0000ff00);
7866 
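	/*
	 * Illustrative example: for MAC 00:03:7f:12:34:56 the packing above
	 * yields config_param2 = 0x127f0300 and config_param3 = 0x00005634.
	 */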
7867 	dp_h2t_ext_stats_msg_send(pdev, HTT_DBG_EXT_STATS_PEER_INFO,
7868 			config_param0, config_param1, config_param2,
7869 			config_param3, 0, 0, 0);
7870 
7871 }
7872 
/* This struct definition will be removed from here
 * once it gets added to the FW headers.
 */
struct httstats_cmd_req {
	uint32_t config_param0;
	uint32_t config_param1;
	uint32_t config_param2;
	uint32_t config_param3;
	int cookie;
	u_int8_t stats_id;
};
7883 
7884 /*
 * dp_get_htt_stats: function to process the httstats request
7886  * @pdev_handle: DP pdev handle
7887  * @data: pointer to request data
7888  * @data_len: length for request data
7889  *
7890  * return: void
7891  */
7892 static void
7893 dp_get_htt_stats(struct cdp_pdev *pdev_handle, void *data, uint32_t data_len)
7894 {
7895 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
7896 	struct httstats_cmd_req *req = (struct httstats_cmd_req *)data;
7897 
7898 	QDF_ASSERT(data_len == sizeof(struct httstats_cmd_req));
7899 	dp_h2t_ext_stats_msg_send(pdev, req->stats_id,
7900 				req->config_param0, req->config_param1,
7901 				req->config_param2, req->config_param3,
7902 				req->cookie, 0, 0);
7903 }
7904 
7905 /*
7906  * dp_set_pdev_param: function to set parameters in pdev
7907  * @pdev_handle: DP pdev handle
7908  * @param: parameter type to be set
7909  * @val: value of parameter to be set
7910  *
7911  * Return: 0 for success. nonzero for failure.
7912  */
7913 static QDF_STATUS dp_set_pdev_param(struct cdp_pdev *pdev_handle,
7914 				    enum cdp_pdev_param_type param,
7915 				    uint8_t val)
7916 {
7917 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
7918 	switch (param) {
7919 	case CDP_CONFIG_DEBUG_SNIFFER:
7920 		return dp_config_debug_sniffer(pdev_handle, val);
7921 	case CDP_CONFIG_BPR_ENABLE:
7922 		return dp_set_bpr_enable(pdev_handle, val);
7923 	case CDP_CONFIG_PRIMARY_RADIO:
7924 		pdev->is_primary = val;
7925 		break;
7926 	default:
7927 		return QDF_STATUS_E_INVAL;
7928 	}
7929 	return QDF_STATUS_SUCCESS;
7930 }
7931 
7932 /*
 * dp_get_vdev_param: function to get parameters from vdev
 * @vdev_handle: DP vdev handle
 * @param: parameter type to get value
 *
 * return: parameter value
7937  */
7938 static uint32_t dp_get_vdev_param(struct cdp_vdev *vdev_handle,
7939 				  enum cdp_vdev_param_type param)
7940 {
7941 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
7942 	uint32_t val;
7943 
7944 	switch (param) {
7945 	case CDP_ENABLE_WDS:
7946 		val = vdev->wds_enabled;
7947 		break;
7948 	case CDP_ENABLE_MEC:
7949 		val = vdev->mec_enabled;
7950 		break;
7951 	case CDP_ENABLE_DA_WAR:
7952 		val = vdev->pdev->soc->da_war_enabled;
7953 		break;
7954 	default:
7955 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
7956 			  "param value %d is wrong\n",
7957 			  param);
7958 		val = -1;
7959 		break;
7960 	}
7961 
7962 	return val;
7963 }
7964 
7965 /*
 * dp_set_vdev_param: function to set parameters in vdev
 * @vdev_handle: DP vdev handle
 * @param: parameter type to be set
 * @val: value of parameter to be set
7969  *
7970  * return: void
7971  */
7972 static void dp_set_vdev_param(struct cdp_vdev *vdev_handle,
7973 		enum cdp_vdev_param_type param, uint32_t val)
7974 {
7975 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
7976 	switch (param) {
7977 	case CDP_ENABLE_WDS:
7978 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
7979 			  "wds_enable %d for vdev(%p) id(%d)\n",
7980 			  val, vdev, vdev->vdev_id);
7981 		vdev->wds_enabled = val;
7982 		break;
7983 	case CDP_ENABLE_MEC:
7984 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
7985 			  "mec_enable %d for vdev(%p) id(%d)\n",
7986 			  val, vdev, vdev->vdev_id);
7987 		vdev->mec_enabled = val;
7988 		break;
7989 	case CDP_ENABLE_DA_WAR:
7990 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
7991 			  "da_war_enable %d for vdev(%p) id(%d)\n",
7992 			  val, vdev, vdev->vdev_id);
7993 		vdev->pdev->soc->da_war_enabled = val;
7994 		dp_wds_flush_ast_table_wifi3(((struct cdp_soc_t *)
7995 					     vdev->pdev->soc));
7996 		break;
7997 	case CDP_ENABLE_NAWDS:
7998 		vdev->nawds_enabled = val;
7999 		break;
8000 	case CDP_ENABLE_MCAST_EN:
8001 		vdev->mcast_enhancement_en = val;
8002 		break;
8003 	case CDP_ENABLE_PROXYSTA:
8004 		vdev->proxysta_vdev = val;
8005 		break;
8006 	case CDP_UPDATE_TDLS_FLAGS:
8007 		vdev->tdls_link_connected = val;
8008 		break;
8009 	case CDP_CFG_WDS_AGING_TIMER:
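		/*
		 * val == 0 stops AST aging altogether; any other value that
		 * differs from the current one re-arms the aging timer with
		 * the new period.
		 */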
8010 		if (val == 0)
8011 			qdf_timer_stop(&vdev->pdev->soc->ast_aging_timer);
8012 		else if (val != vdev->wds_aging_timer_val)
8013 			qdf_timer_mod(&vdev->pdev->soc->ast_aging_timer, val);
8014 
8015 		vdev->wds_aging_timer_val = val;
8016 		break;
8017 	case CDP_ENABLE_AP_BRIDGE:
8018 		if (wlan_op_mode_sta != vdev->opmode)
8019 			vdev->ap_bridge_enabled = val;
8020 		else
8021 			vdev->ap_bridge_enabled = false;
8022 		break;
8023 	case CDP_ENABLE_CIPHER:
8024 		vdev->sec_type = val;
8025 		break;
8026 	case CDP_ENABLE_QWRAP_ISOLATION:
8027 		vdev->isolation_vdev = val;
8028 		break;
8029 	default:
8030 		break;
8031 	}
8032 
8033 	dp_tx_vdev_update_search_flags(vdev);
8034 }
8035 
8036 /**
8037  * dp_peer_set_nawds: set nawds bit in peer
8038  * @peer_handle: pointer to peer
8039  * @value: enable/disable nawds
8040  *
8041  * return: void
8042  */
8043 static void dp_peer_set_nawds(struct cdp_peer *peer_handle, uint8_t value)
8044 {
8045 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
8046 	peer->nawds_enabled = value;
8047 }
8048 
8049 /*
8050  * dp_set_vdev_dscp_tid_map_wifi3(): Update Map ID selected for particular vdev
8051  * @vdev_handle: DP_VDEV handle
 * @map_id: ID of map that needs to be updated
8053  *
8054  * Return: void
8055  */
8056 static void dp_set_vdev_dscp_tid_map_wifi3(struct cdp_vdev *vdev_handle,
8057 		uint8_t map_id)
8058 {
8059 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
8060 	vdev->dscp_tid_map_id = map_id;
8062 }
8063 
8064 /* dp_txrx_get_pdev_stats - Returns cdp_pdev_stats
 * @pdev_handle: DP pdev handle
8066  *
8067  * return : cdp_pdev_stats pointer
8068  */
8069 static struct cdp_pdev_stats*
8070 dp_txrx_get_pdev_stats(struct cdp_pdev *pdev_handle)
8071 {
8072 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
8073 
8074 	dp_aggregate_pdev_stats(pdev);
8075 
8076 	return &pdev->stats;
8077 }
8078 
/* dp_txrx_get_peer_stats - Returns cdp_peer_stats
8080  * @peer_handle: DP_PEER handle
8081  *
8082  * return : cdp_peer_stats pointer
8083  */
static struct cdp_peer_stats *
dp_txrx_get_peer_stats(struct cdp_peer *peer_handle)
8086 {
8087 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
8088 
8089 	qdf_assert(peer);
8090 
8091 	return &peer->stats;
8092 }
8093 
8094 /* dp_txrx_reset_peer_stats - reset cdp_peer_stats for particular peer
8095  * @peer_handle: DP_PEER handle
8096  *
8097  * return : void
8098  */
8099 static void dp_txrx_reset_peer_stats(struct cdp_peer *peer_handle)
8100 {
8101 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
8102 
8103 	qdf_assert(peer);
8104 
8105 	qdf_mem_set(&peer->stats, sizeof(peer->stats), 0);
8106 }
8107 
8108 /* dp_txrx_get_vdev_stats - Update buffer with cdp_vdev_stats
8109  * @vdev_handle: DP_VDEV handle
 * @buf: buffer for vdev stats
 * @is_aggregate: if true, aggregate the stats of all peers of the vdev
 *
 * return: 0 on success, 1 on failure
8113  */
8114 static int  dp_txrx_get_vdev_stats(struct cdp_vdev *vdev_handle, void *buf,
8115 				   bool is_aggregate)
8116 {
8117 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
8118 	struct cdp_vdev_stats *vdev_stats;
8119 	struct dp_pdev *pdev;
8120 	struct dp_soc *soc;
8121 
8122 	if (!vdev)
8123 		return 1;
8124 
8125 	pdev = vdev->pdev;
8126 	if (!pdev)
8127 		return 1;
8128 
8129 	soc = pdev->soc;
8130 	vdev_stats = (struct cdp_vdev_stats *)buf;
8131 
8132 	if (is_aggregate) {
8133 		qdf_spin_lock_bh(&soc->peer_ref_mutex);
8134 		dp_aggregate_vdev_stats(vdev, buf);
8135 		qdf_spin_unlock_bh(&soc->peer_ref_mutex);
8136 	} else {
8137 		qdf_mem_copy(vdev_stats, &vdev->stats, sizeof(vdev->stats));
8138 	}
8139 
8140 	return 0;
8141 }
8142 
8143 /*
 * dp_get_total_per(): get total PER (packet error rate)
8145  * @pdev_handle: DP_PDEV handle
8146  *
8147  * Return: % error rate using retries per packet and success packets
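 *
 * e.g. 100 retries against 900 successful packets give
 * (100 * 100) / (900 + 100) = 10, i.e. a 10% PER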
8148  */
8149 static int dp_get_total_per(struct cdp_pdev *pdev_handle)
8150 {
8151 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
8152 
8153 	dp_aggregate_pdev_stats(pdev);
8154 	if ((pdev->stats.tx.tx_success.num + pdev->stats.tx.retries) == 0)
8155 		return 0;
8156 	return ((pdev->stats.tx.retries * 100) /
8157 		((pdev->stats.tx.tx_success.num) + (pdev->stats.tx.retries)));
8158 }
8159 
8160 /*
8161  * dp_txrx_stats_publish(): publish pdev stats into a buffer
8162  * @pdev_handle: DP_PDEV handle
8163  * @buf: to hold pdev_stats
8164  *
8165  * Return: int
8166  */
8167 static int
8168 dp_txrx_stats_publish(struct cdp_pdev *pdev_handle, void *buf)
8169 {
8170 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
8171 	struct cdp_pdev_stats *buffer = (struct cdp_pdev_stats *) buf;
8172 	struct cdp_txrx_stats_req req = {0,};
8173 
8174 	dp_aggregate_pdev_stats(pdev);
8175 	req.stats = (enum cdp_stats)HTT_DBG_EXT_STATS_PDEV_TX;
8176 	req.cookie_val = 1;
8177 	dp_h2t_ext_stats_msg_send(pdev, req.stats, req.param0,
8178 				req.param1, req.param2, req.param3, 0,
8179 				req.cookie_val, 0);
8180 
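	/*
	 * Give the target time to respond; the HTT ext-stats reply matched
	 * by cookie_val is expected to update pdev->stats before the
	 * aggregate is copied out below.
	 */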
8181 	msleep(DP_MAX_SLEEP_TIME);
8182 
8183 	req.stats = (enum cdp_stats)HTT_DBG_EXT_STATS_PDEV_RX;
8184 	req.cookie_val = 1;
8185 	dp_h2t_ext_stats_msg_send(pdev, req.stats, req.param0,
8186 				req.param1, req.param2, req.param3, 0,
8187 				req.cookie_val, 0);
8188 
8189 	msleep(DP_MAX_SLEEP_TIME);
8190 	qdf_mem_copy(buffer, &pdev->stats, sizeof(pdev->stats));
8191 
8192 	return TXRX_STATS_LEVEL;
8193 }
8194 
8195 /**
8196  * dp_set_pdev_dscp_tid_map_wifi3(): update dscp tid map in pdev
 * @pdev_handle: DP_PDEV handle
 * @map_id: ID of map that needs to be updated
 * @tos: IP TOS field from which the DSCP index is derived
8200  * @tid: tid value passed by the user
8201  *
8202  * Return: void
8203  */
8204 static void dp_set_pdev_dscp_tid_map_wifi3(struct cdp_pdev *pdev_handle,
8205 		uint8_t map_id, uint8_t tos, uint8_t tid)
8206 {
8207 	uint8_t dscp;
8208 	struct dp_pdev *pdev = (struct dp_pdev *) pdev_handle;
8209 	struct dp_soc *soc = pdev->soc;
8210 
8211 	if (!soc)
8212 		return;
8213 
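	/*
	 * Illustrative example, assuming DP_IP_DSCP_SHIFT is 2 and
	 * DP_IP_DSCP_MASK is 0x3f: tos 0xb8 (DSCP EF) gives
	 * dscp = (0xb8 >> 2) & 0x3f = 46.
	 */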
8214 	dscp = (tos >> DP_IP_DSCP_SHIFT) & DP_IP_DSCP_MASK;
8215 	pdev->dscp_tid_map[map_id][dscp] = tid;
8216 
8217 	if (map_id < soc->num_hw_dscp_tid_map)
8218 		hal_tx_update_dscp_tid(soc->hal_soc, tid,
8219 				       map_id, dscp);
8221 }
8222 
8223 /**
8224  * dp_hmmc_tid_override_en_wifi3(): Function to enable hmmc tid override.
8225  * @pdev_handle: pdev handle
8226  * @val: hmmc-dscp flag value
8227  *
8228  * Return: void
8229  */
8230 static void dp_hmmc_tid_override_en_wifi3(struct cdp_pdev *pdev_handle,
8231 					  bool val)
8232 {
8233 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
8234 
8235 	pdev->hmmc_tid_override_en = val;
8236 }
8237 
8238 /**
8239  * dp_set_hmmc_tid_val_wifi3(): Function to set hmmc tid value.
8240  * @pdev_handle: pdev handle
8241  * @tid: tid value
8242  *
8243  * Return: void
8244  */
8245 static void dp_set_hmmc_tid_val_wifi3(struct cdp_pdev *pdev_handle,
8246 				      uint8_t tid)
8247 {
8248 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
8249 
8250 	pdev->hmmc_tid = tid;
8251 }
8252 
8253 /**
8254  * dp_fw_stats_process(): Process TxRX FW stats request
8255  * @vdev_handle: DP VDEV handle
8256  * @req: stats request
8257  *
8258  * return: int
8259  */
8260 static int dp_fw_stats_process(struct cdp_vdev *vdev_handle,
8261 		struct cdp_txrx_stats_req *req)
8262 {
8263 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
8264 	struct dp_pdev *pdev = NULL;
8265 	uint32_t stats = req->stats;
8266 	uint8_t mac_id = req->mac_id;
8267 
8268 	if (!vdev) {
8269 		DP_TRACE(NONE, "VDEV not found");
8270 		return 1;
8271 	}
8272 	pdev = vdev->pdev;
8273 
8274 	/*
	 * For the HTT_DBG_EXT_STATS_RESET command, the FW expects
	 * config_param0 to config_param3 to be set per the rule below:
8277 	 *
8278 	 * PARAM:
8279 	 *   - config_param0 : start_offset (stats type)
8280 	 *   - config_param1 : stats bmask from start offset
8281 	 *   - config_param2 : stats bmask from start offset + 32
8282 	 *   - config_param3 : stats bmask from start offset + 64
8283 	 */
8284 	if (req->stats == CDP_TXRX_STATS_0) {
8285 		req->param0 = HTT_DBG_EXT_STATS_PDEV_TX;
8286 		req->param1 = 0xFFFFFFFF;
8287 		req->param2 = 0xFFFFFFFF;
8288 		req->param3 = 0xFFFFFFFF;
8289 	} else if (req->stats == (uint8_t)HTT_DBG_EXT_STATS_PDEV_TX_MU) {
8290 		req->param0 = HTT_DBG_EXT_STATS_SET_VDEV_MASK(vdev->vdev_id);
8291 	}
8292 
8293 	return dp_h2t_ext_stats_msg_send(pdev, stats, req->param0,
8294 				req->param1, req->param2, req->param3,
8295 				0, 0, mac_id);
8296 }
8297 
8298 /**
8299  * dp_txrx_stats_request - function to map to firmware and host stats
8300  * @vdev: virtual handle
8301  * @req: stats request
8302  *
8303  * Return: QDF_STATUS
8304  */
8305 static
8306 QDF_STATUS dp_txrx_stats_request(struct cdp_vdev *vdev,
8307 				 struct cdp_txrx_stats_req *req)
8308 {
8309 	int host_stats;
8310 	int fw_stats;
8311 	enum cdp_stats stats;
8312 	int num_stats;
8313 
8314 	if (!vdev || !req) {
8315 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
8316 				"Invalid vdev/req instance");
8317 		return QDF_STATUS_E_INVAL;
8318 	}
8319 
8320 	stats = req->stats;
8321 	if (stats >= CDP_TXRX_MAX_STATS)
8322 		return QDF_STATUS_E_INVAL;
8323 
8324 	/*
	 * DP_CURR_FW_STATS_AVAIL: number of FW stats currently available;
	 *			it has to be updated whenever new FW HTT
	 *			stats are added
8327 	 */
8328 	if (stats > CDP_TXRX_STATS_HTT_MAX)
8329 		stats = stats + DP_CURR_FW_STATS_AVAIL - DP_HTT_DBG_EXT_STATS_MAX;
8330 
8331 	num_stats  = QDF_ARRAY_SIZE(dp_stats_mapping_table);
8332 
8333 	if (stats >= num_stats) {
8334 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
8335 			  "%s: Invalid stats option: %d", __func__, stats);
8336 		return QDF_STATUS_E_INVAL;
8337 	}
8338 
8339 	req->stats = stats;
8340 	fw_stats = dp_stats_mapping_table[stats][STATS_FW];
8341 	host_stats = dp_stats_mapping_table[stats][STATS_HOST];
8342 
8343 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
8344 		 "stats: %u fw_stats_type: %d host_stats: %d",
8345 		  stats, fw_stats, host_stats);
8346 
8347 	if (fw_stats != TXRX_FW_STATS_INVALID) {
8348 		/* update request with FW stats type */
8349 		req->stats = fw_stats;
8350 		return dp_fw_stats_process(vdev, req);
8351 	}
8352 
8353 	if ((host_stats != TXRX_HOST_STATS_INVALID) &&
8354 			(host_stats <= TXRX_HOST_STATS_MAX))
8355 		return dp_print_host_stats(vdev, req);
8356 	else
8357 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
8358 				"Wrong Input for TxRx Stats");
8359 
8360 	return QDF_STATUS_SUCCESS;
8361 }
8362 
8363 /*
8364  * dp_print_napi_stats(): NAPI stats
8365  * @soc - soc handle
8366  */
8367 static void dp_print_napi_stats(struct dp_soc *soc)
8368 {
8369 	hif_print_napi_stats(soc->hif_handle);
8370 }
8371 
8372 /*
8373  * dp_print_per_ring_stats(): Packet count per ring
8374  * @soc - soc handle
8375  */
8376 static void dp_print_per_ring_stats(struct dp_soc *soc)
8377 {
8378 	uint8_t ring;
8379 	uint16_t core;
8380 	uint64_t total_packets;
8381 
8382 	DP_TRACE_STATS(INFO_HIGH, "Reo packets per ring:");
8383 	for (ring = 0; ring < MAX_REO_DEST_RINGS; ring++) {
8384 		total_packets = 0;
8385 		DP_TRACE_STATS(INFO_HIGH,
8386 			       "Packets on ring %u:", ring);
8387 		for (core = 0; core < NR_CPUS; core++) {
8388 			DP_TRACE_STATS(INFO_HIGH,
8389 				       "Packets arriving on core %u: %llu",
8390 				       core,
8391 				       soc->stats.rx.ring_packets[core][ring]);
8392 			total_packets += soc->stats.rx.ring_packets[core][ring];
8393 		}
8394 		DP_TRACE_STATS(INFO_HIGH,
8395 			       "Total packets on ring %u: %llu",
8396 			       ring, total_packets);
8397 	}
8398 }
8399 
8400 /*
8401  * dp_txrx_path_stats() - Function to display dump stats
8402  * @soc - soc handle
8403  *
8404  * return: none
8405  */
8406 static void dp_txrx_path_stats(struct dp_soc *soc)
8407 {
8408 	uint8_t error_code;
8409 	uint8_t loop_pdev;
8410 	struct dp_pdev *pdev;
8411 	uint8_t i;
8412 
8413 	if (!soc) {
8414 		DP_TRACE(ERROR, "%s: Invalid access",
8415 			 __func__);
8416 		return;
8417 	}
8418 
8419 	for (loop_pdev = 0; loop_pdev < soc->pdev_count; loop_pdev++) {
8420 
8421 		pdev = soc->pdev_list[loop_pdev];
8422 		dp_aggregate_pdev_stats(pdev);
8423 		DP_TRACE_STATS(INFO_HIGH, "Tx path Statistics:");
8424 		DP_TRACE_STATS(INFO_HIGH, "from stack: %u msdus (%llu bytes)",
8425 			       pdev->stats.tx_i.rcvd.num,
8426 			       pdev->stats.tx_i.rcvd.bytes);
8427 		DP_TRACE_STATS(INFO_HIGH,
8428 			       "processed from host: %u msdus (%llu bytes)",
8429 			       pdev->stats.tx_i.processed.num,
8430 			       pdev->stats.tx_i.processed.bytes);
8431 		DP_TRACE_STATS(INFO_HIGH,
8432 			       "successfully transmitted: %u msdus (%llu bytes)",
8433 			       pdev->stats.tx.tx_success.num,
8434 			       pdev->stats.tx.tx_success.bytes);
8435 
8436 		DP_TRACE_STATS(INFO_HIGH, "Dropped in host:");
8437 		DP_TRACE_STATS(INFO_HIGH, "Total packets dropped: %u,",
8438 			       pdev->stats.tx_i.dropped.dropped_pkt.num);
8439 		DP_TRACE_STATS(INFO_HIGH, "Descriptor not available: %u",
8440 			       pdev->stats.tx_i.dropped.desc_na.num);
8441 		DP_TRACE_STATS(INFO_HIGH, "Ring full: %u",
8442 			       pdev->stats.tx_i.dropped.ring_full);
8443 		DP_TRACE_STATS(INFO_HIGH, "Enqueue fail: %u",
8444 			       pdev->stats.tx_i.dropped.enqueue_fail);
8445 		DP_TRACE_STATS(INFO_HIGH, "DMA Error: %u",
8446 			       pdev->stats.tx_i.dropped.dma_error);
8447 
8448 		DP_TRACE_STATS(INFO_HIGH, "Dropped in hardware:");
8449 		DP_TRACE_STATS(INFO_HIGH, "total packets dropped: %u",
8450 			       pdev->stats.tx.tx_failed);
8451 		DP_TRACE_STATS(INFO_HIGH, "mpdu age out: %u",
8452 			       pdev->stats.tx.dropped.age_out);
8453 		DP_TRACE_STATS(INFO_HIGH, "firmware removed packets: %u",
8454 			       pdev->stats.tx.dropped.fw_rem.num);
8455 		DP_TRACE_STATS(INFO_HIGH, "firmware removed bytes: %llu",
8456 			       pdev->stats.tx.dropped.fw_rem.bytes);
8457 		DP_TRACE_STATS(INFO_HIGH, "firmware removed tx: %u",
8458 			       pdev->stats.tx.dropped.fw_rem_tx);
		DP_TRACE_STATS(INFO_HIGH, "firmware removed notx: %u",
8460 			       pdev->stats.tx.dropped.fw_rem_notx);
8461 		DP_TRACE_STATS(INFO_HIGH, "Invalid peer on tx path: %u",
8462 			       pdev->soc->stats.tx.tx_invalid_peer.num);
8463 
8464 		DP_TRACE_STATS(INFO_HIGH, "Tx packets sent per interrupt:");
8465 		DP_TRACE_STATS(INFO_HIGH, "Single Packet: %u",
8466 			       pdev->stats.tx_comp_histogram.pkts_1);
8467 		DP_TRACE_STATS(INFO_HIGH, "2-20 Packets:  %u",
8468 			       pdev->stats.tx_comp_histogram.pkts_2_20);
8469 		DP_TRACE_STATS(INFO_HIGH, "21-40 Packets: %u",
8470 			       pdev->stats.tx_comp_histogram.pkts_21_40);
8471 		DP_TRACE_STATS(INFO_HIGH, "41-60 Packets: %u",
8472 			       pdev->stats.tx_comp_histogram.pkts_41_60);
8473 		DP_TRACE_STATS(INFO_HIGH, "61-80 Packets: %u",
8474 			       pdev->stats.tx_comp_histogram.pkts_61_80);
8475 		DP_TRACE_STATS(INFO_HIGH, "81-100 Packets: %u",
8476 			       pdev->stats.tx_comp_histogram.pkts_81_100);
8477 		DP_TRACE_STATS(INFO_HIGH, "101-200 Packets: %u",
8478 			       pdev->stats.tx_comp_histogram.pkts_101_200);
8479 		DP_TRACE_STATS(INFO_HIGH, "   201+ Packets: %u",
8480 			       pdev->stats.tx_comp_histogram.pkts_201_plus);
8481 
8482 		DP_TRACE_STATS(INFO_HIGH, "Rx path statistics");
8483 
8484 		DP_TRACE_STATS(INFO_HIGH,
8485 			       "delivered %u msdus ( %llu bytes),",
8486 			       pdev->stats.rx.to_stack.num,
8487 			       pdev->stats.rx.to_stack.bytes);
8488 		for (i = 0; i <  CDP_MAX_RX_RINGS; i++)
8489 			DP_TRACE_STATS(INFO_HIGH,
8490 				       "received on reo[%d] %u msdus( %llu bytes),",
8491 				       i, pdev->stats.rx.rcvd_reo[i].num,
8492 				       pdev->stats.rx.rcvd_reo[i].bytes);
8493 		DP_TRACE_STATS(INFO_HIGH,
8494 			       "intra-bss packets %u msdus ( %llu bytes),",
8495 			       pdev->stats.rx.intra_bss.pkts.num,
8496 			       pdev->stats.rx.intra_bss.pkts.bytes);
8497 		DP_TRACE_STATS(INFO_HIGH,
8498 			       "intra-bss fails %u msdus ( %llu bytes),",
8499 			       pdev->stats.rx.intra_bss.fail.num,
8500 			       pdev->stats.rx.intra_bss.fail.bytes);
8501 		DP_TRACE_STATS(INFO_HIGH,
8502 			       "raw packets %u msdus ( %llu bytes),",
8503 			       pdev->stats.rx.raw.num,
8504 			       pdev->stats.rx.raw.bytes);
		DP_TRACE_STATS(INFO_HIGH, "dropped: MIC errors %u msdus",
8506 			       pdev->stats.rx.err.mic_err);
8507 		DP_TRACE_STATS(INFO_HIGH, "Invalid peer on rx path: %u",
8508 			       pdev->soc->stats.rx.err.rx_invalid_peer.num);
8509 
8510 		DP_TRACE_STATS(INFO_HIGH, "Reo Statistics");
8511 		DP_TRACE_STATS(INFO_HIGH, "rbm error: %u msdus",
8512 			       pdev->soc->stats.rx.err.invalid_rbm);
8513 		DP_TRACE_STATS(INFO_HIGH, "hal ring access fail: %u msdus",
8514 			       pdev->soc->stats.rx.err.hal_ring_access_fail);
8515 
8516 		for (error_code = 0; error_code < HAL_REO_ERR_MAX;
8517 				error_code++) {
8518 			if (!pdev->soc->stats.rx.err.reo_error[error_code])
8519 				continue;
8520 			DP_TRACE_STATS(INFO_HIGH,
8521 				       "Reo error number (%u): %u msdus",
8522 				       error_code,
8523 				       pdev->soc->stats.rx.err
8524 				       .reo_error[error_code]);
8525 		}
8526 
8527 		for (error_code = 0; error_code < HAL_RXDMA_ERR_MAX;
8528 				error_code++) {
8529 			if (!pdev->soc->stats.rx.err.rxdma_error[error_code])
8530 				continue;
8531 			DP_TRACE_STATS(INFO_HIGH,
8532 				       "Rxdma error number (%u): %u msdus",
8533 				       error_code,
8534 				       pdev->soc->stats.rx.err
8535 				       .rxdma_error[error_code]);
8536 		}
8537 
8538 		DP_TRACE_STATS(INFO_HIGH, "Rx packets reaped per interrupt:");
8539 		DP_TRACE_STATS(INFO_HIGH, "Single Packet: %u",
8540 			       pdev->stats.rx_ind_histogram.pkts_1);
8541 		DP_TRACE_STATS(INFO_HIGH, "2-20 Packets:  %u",
8542 			       pdev->stats.rx_ind_histogram.pkts_2_20);
8543 		DP_TRACE_STATS(INFO_HIGH, "21-40 Packets: %u",
8544 			       pdev->stats.rx_ind_histogram.pkts_21_40);
8545 		DP_TRACE_STATS(INFO_HIGH, "41-60 Packets: %u",
8546 			       pdev->stats.rx_ind_histogram.pkts_41_60);
8547 		DP_TRACE_STATS(INFO_HIGH, "61-80 Packets: %u",
8548 			       pdev->stats.rx_ind_histogram.pkts_61_80);
8549 		DP_TRACE_STATS(INFO_HIGH, "81-100 Packets: %u",
8550 			       pdev->stats.rx_ind_histogram.pkts_81_100);
8551 		DP_TRACE_STATS(INFO_HIGH, "101-200 Packets: %u",
8552 			       pdev->stats.rx_ind_histogram.pkts_101_200);
8553 		DP_TRACE_STATS(INFO_HIGH, "   201+ Packets: %u",
8554 			       pdev->stats.rx_ind_histogram.pkts_201_plus);
8555 
8556 		DP_TRACE_STATS(INFO_HIGH, "%s: tso_enable: %u lro_enable: %u rx_hash: %u napi_enable: %u",
8557 			       __func__,
8558 			       pdev->soc->wlan_cfg_ctx
8559 			       ->tso_enabled,
8560 			       pdev->soc->wlan_cfg_ctx
8561 			       ->lro_enabled,
8562 			       pdev->soc->wlan_cfg_ctx
8563 			       ->rx_hash,
8564 			       pdev->soc->wlan_cfg_ctx
8565 			       ->napi_enabled);
8566 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
8567 		DP_TRACE_STATS(INFO_HIGH, "%s: Tx flow stop queue: %u tx flow start queue offset: %u",
8568 			       __func__,
8569 			       pdev->soc->wlan_cfg_ctx
8570 			       ->tx_flow_stop_queue_threshold,
8571 			       pdev->soc->wlan_cfg_ctx
8572 			       ->tx_flow_start_queue_offset);
8573 #endif
8574 	}
8575 }
8576 
8577 /*
 * dp_txrx_dump_stats() - Dump statistics
 * @psoc - soc handle
 * @value - Statistics option
 * @level - verbosity level for the dump
8580  */
8581 static QDF_STATUS dp_txrx_dump_stats(void *psoc, uint16_t value,
8582 				     enum qdf_stats_verbosity_level level)
8583 {
8584 	struct dp_soc *soc =
8585 		(struct dp_soc *)psoc;
8586 	QDF_STATUS status = QDF_STATUS_SUCCESS;
8587 
8588 	if (!soc) {
8589 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
8590 			"%s: soc is NULL", __func__);
8591 		return QDF_STATUS_E_INVAL;
8592 	}
8593 
8594 	switch (value) {
8595 	case CDP_TXRX_PATH_STATS:
8596 		dp_txrx_path_stats(soc);
8597 		break;
8598 
8599 	case CDP_RX_RING_STATS:
8600 		dp_print_per_ring_stats(soc);
8601 		break;
8602 
8603 	case CDP_TXRX_TSO_STATS:
8604 		/* TODO: NOT IMPLEMENTED */
8605 		break;
8606 
8607 	case CDP_DUMP_TX_FLOW_POOL_INFO:
8608 		cdp_dump_flow_pool_info((struct cdp_soc_t *)soc);
8609 		break;
8610 
8611 	case CDP_DP_NAPI_STATS:
8612 		dp_print_napi_stats(soc);
8613 		break;
8614 
8615 	case CDP_TXRX_DESC_STATS:
8616 		/* TODO: NOT IMPLEMENTED */
8617 		break;
8618 
8619 	default:
8620 		status = QDF_STATUS_E_INVAL;
8621 		break;
8622 	}
8623 
8624 	return status;
8625 
8626 }
8627 
8628 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
8629 /**
8630  * dp_update_flow_control_parameters() - API to store datapath
8631  *                            config parameters
8632  * @soc: soc handle
 * @params: ini parameter handle
8634  *
8635  * Return: void
8636  */
8637 static inline
8638 void dp_update_flow_control_parameters(struct dp_soc *soc,
8639 				struct cdp_config_params *params)
8640 {
8641 	soc->wlan_cfg_ctx->tx_flow_stop_queue_threshold =
8642 					params->tx_flow_stop_queue_threshold;
8643 	soc->wlan_cfg_ctx->tx_flow_start_queue_offset =
8644 					params->tx_flow_start_queue_offset;
8645 }
8646 #else
8647 static inline
8648 void dp_update_flow_control_parameters(struct dp_soc *soc,
8649 				struct cdp_config_params *params)
8650 {
8651 }
8652 #endif
8653 
8654 /**
8655  * dp_update_config_parameters() - API to store datapath
8656  *                            config parameters
 * @psoc: soc handle
 * @params: ini parameter handle
8659  *
8660  * Return: status
8661  */
8662 static
8663 QDF_STATUS dp_update_config_parameters(struct cdp_soc *psoc,
8664 				struct cdp_config_params *params)
8665 {
8666 	struct dp_soc *soc = (struct dp_soc *)psoc;
8667 
8668 	if (!(soc)) {
8669 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
8670 				"%s: Invalid handle", __func__);
8671 		return QDF_STATUS_E_INVAL;
8672 	}
8673 
8674 	soc->wlan_cfg_ctx->tso_enabled = params->tso_enable;
8675 	soc->wlan_cfg_ctx->lro_enabled = params->lro_enable;
8676 	soc->wlan_cfg_ctx->rx_hash = params->flow_steering_enable;
8677 	soc->wlan_cfg_ctx->tcp_udp_checksumoffload =
8678 				params->tcp_udp_checksumoffload;
8679 	soc->wlan_cfg_ctx->napi_enabled = params->napi_enable;
8680 	soc->wlan_cfg_ctx->ipa_enabled = params->ipa_enable;
8681 	soc->wlan_cfg_ctx->gro_enabled = params->gro_enable;
8682 
8683 	dp_update_flow_control_parameters(soc, params);
8684 
8685 	return QDF_STATUS_SUCCESS;
8686 }
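/*
 * Illustrative sketch (assumes a valid psoc handle obtained at attach
 * time): the ini-derived knobs are packed into a cdp_config_params
 * struct and pushed down in a single call, e.g.
 *
 *	struct cdp_config_params params = {0};
 *
 *	params.tso_enable = 1;
 *	params.napi_enable = 1;
 *	params.tcp_udp_checksumoffload = 1;
 *	params.flow_steering_enable = 1;
 *	if (dp_update_config_parameters(psoc, &params) != QDF_STATUS_SUCCESS)
 *		return;		// invalid soc handle
 */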
8687 
8688 /**
 * dp_txrx_set_wds_rx_policy() - API to set the WDS rx policy on a vdev
 * @vdev_handle: datapath vdev handle
 * @val: bitmap of WDS_POLICY_RX_* flags to apply
 *
 * Return: void
8695  */
8696 #ifdef WDS_VENDOR_EXTENSION
8697 void
8698 dp_txrx_set_wds_rx_policy(
8699 		struct cdp_vdev *vdev_handle,
8700 		u_int32_t val)
8701 {
8702 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
	struct dp_peer *peer;

	if (vdev->opmode == wlan_op_mode_ap) {
8705 		/* for ap, set it on bss_peer */
8706 		TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
8707 			if (peer->bss_peer) {
8708 				peer->wds_ecm.wds_rx_filter = 1;
8709 				peer->wds_ecm.wds_rx_ucast_4addr = (val & WDS_POLICY_RX_UCAST_4ADDR) ? 1:0;
8710 				peer->wds_ecm.wds_rx_mcast_4addr = (val & WDS_POLICY_RX_MCAST_4ADDR) ? 1:0;
8711 				break;
8712 			}
8713 		}
8714 	} else if (vdev->opmode == wlan_op_mode_sta) {
		peer = TAILQ_FIRST(&vdev->peer_list);
		if (!peer)
			return;
		peer->wds_ecm.wds_rx_filter = 1;
8717 		peer->wds_ecm.wds_rx_ucast_4addr = (val & WDS_POLICY_RX_UCAST_4ADDR) ? 1:0;
8718 		peer->wds_ecm.wds_rx_mcast_4addr = (val & WDS_POLICY_RX_MCAST_4ADDR) ? 1:0;
8719 	}
8720 }
8721 
8722 /**
8723  * dp_txrx_peer_wds_tx_policy_update() - API to set tx wds policy
8724  *
 * @peer_handle: datapath peer handle
8726  * @wds_tx_ucast: policy for unicast transmission
8727  * @wds_tx_mcast: policy for multicast transmission
8728  *
8729  * Return: void
8730  */
8731 void
8732 dp_txrx_peer_wds_tx_policy_update(struct cdp_peer *peer_handle,
8733 		int wds_tx_ucast, int wds_tx_mcast)
8734 {
	struct dp_peer *peer = (struct dp_peer *)peer_handle;

	if (wds_tx_ucast || wds_tx_mcast) {
8737 		peer->wds_enabled = 1;
8738 		peer->wds_ecm.wds_tx_ucast_4addr = wds_tx_ucast;
8739 		peer->wds_ecm.wds_tx_mcast_4addr = wds_tx_mcast;
8740 	} else {
8741 		peer->wds_enabled = 0;
8742 		peer->wds_ecm.wds_tx_ucast_4addr = 0;
8743 		peer->wds_ecm.wds_tx_mcast_4addr = 0;
8744 	}
8745 
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
		  FL("Policy Update set to: peer->wds_enabled %d "
		     "peer->wds_ecm.wds_tx_ucast_4addr %d "
		     "peer->wds_ecm.wds_tx_mcast_4addr %d"),
		  peer->wds_enabled, peer->wds_ecm.wds_tx_ucast_4addr,
		  peer->wds_ecm.wds_tx_mcast_4addr);
8754 }
8755 #endif
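/*
 * Illustrative sketch (WDS_VENDOR_EXTENSION builds only): the rx policy
 * value is a bitmap of the WDS_POLICY_RX_* flags tested above, so a
 * caller enabling 4-address handling for both unicast and multicast
 * receive, plus 4-address transmit, would do:
 *
 *	dp_txrx_set_wds_rx_policy(vdev_handle,
 *				  WDS_POLICY_RX_UCAST_4ADDR |
 *				  WDS_POLICY_RX_MCAST_4ADDR);
 *	dp_txrx_peer_wds_tx_policy_update(peer_handle, 1, 1);
 */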
8756 
8757 static struct cdp_wds_ops dp_ops_wds = {
8758 	.vdev_set_wds = dp_vdev_set_wds,
8759 #ifdef WDS_VENDOR_EXTENSION
8760 	.txrx_set_wds_rx_policy = dp_txrx_set_wds_rx_policy,
8761 	.txrx_wds_peer_tx_policy_update = dp_txrx_peer_wds_tx_policy_update,
8762 #endif
8763 };
8764 
8765 /*
8766  * dp_txrx_data_tx_cb_set(): set the callback for non standard tx
 * @vdev_handle: datapath vdev handle
 * @callback: callback function
 * @ctxt: callback context
 *
 * Return: void
8771  */
8772 static void
8773 dp_txrx_data_tx_cb_set(struct cdp_vdev *vdev_handle,
8774 		       ol_txrx_data_tx_cb callback, void *ctxt)
8775 {
8776 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
8777 
8778 	vdev->tx_non_std_data_callback.func = callback;
8779 	vdev->tx_non_std_data_callback.ctxt = ctxt;
8780 }
8781 
8782 /**
8783  * dp_pdev_get_dp_txrx_handle() - get dp handle from pdev
8784  * @pdev_hdl: datapath pdev handle
8785  *
8786  * Return: opaque pointer to dp txrx handle
8787  */
8788 static void *dp_pdev_get_dp_txrx_handle(struct cdp_pdev *pdev_hdl)
8789 {
8790 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_hdl;
8791 
8792 	return pdev->dp_txrx_handle;
8793 }
8794 
8795 /**
8796  * dp_pdev_set_dp_txrx_handle() - set dp handle in pdev
8797  * @pdev_hdl: datapath pdev handle
8798  * @dp_txrx_hdl: opaque pointer for dp_txrx_handle
8799  *
8800  * Return: void
8801  */
8802 static void
8803 dp_pdev_set_dp_txrx_handle(struct cdp_pdev *pdev_hdl, void *dp_txrx_hdl)
8804 {
8805 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_hdl;
8806 
8807 	pdev->dp_txrx_handle = dp_txrx_hdl;
8808 }
8809 
8810 /**
8811  * dp_soc_get_dp_txrx_handle() - get context for external-dp from dp soc
8812  * @soc_handle: datapath soc handle
8813  *
8814  * Return: opaque pointer to external dp (non-core DP)
8815  */
8816 static void *dp_soc_get_dp_txrx_handle(struct cdp_soc *soc_handle)
8817 {
8818 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
8819 
8820 	return soc->external_txrx_handle;
8821 }
8822 
8823 /**
8824  * dp_soc_set_dp_txrx_handle() - set external dp handle in soc
8825  * @soc_handle: datapath soc handle
8826  * @txrx_handle: opaque pointer to external dp (non-core DP)
8827  *
8828  * Return: void
8829  */
8830 static void
8831 dp_soc_set_dp_txrx_handle(struct cdp_soc *soc_handle, void *txrx_handle)
8832 {
8833 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
8834 
8835 	soc->external_txrx_handle = txrx_handle;
8836 }
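/*
 * Illustrative sketch: the pdev/soc txrx handles are opaque cookies
 * owned by an external (non-core) DP module; DP merely stores and
 * returns them. A hypothetical module context could be stashed as:
 *
 *	struct my_ext_dp_ctx *ctx = qdf_mem_malloc(sizeof(*ctx));
 *
 *	if (ctx)
 *		dp_soc_set_dp_txrx_handle(soc_handle, ctx);
 *	...
 *	ctx = dp_soc_get_dp_txrx_handle(soc_handle);
 */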
8837 
8838 /**
8839  * dp_get_cfg_capabilities() - get dp capabilities
8840  * @soc_handle: datapath soc handle
8841  * @dp_caps: enum for dp capabilities
8842  *
8843  * Return: bool to determine if dp caps is enabled
8844  */
8845 static bool
8846 dp_get_cfg_capabilities(struct cdp_soc_t *soc_handle,
8847 			enum cdp_capabilities dp_caps)
8848 {
8849 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
8850 
8851 	return wlan_cfg_get_dp_caps(soc->wlan_cfg_ctx, dp_caps);
8852 }
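/*
 * Illustrative sketch: capabilities are queried one bit at a time; the
 * enum member shown is a placeholder for any real cdp_capabilities
 * value defined in the cdp headers.
 *
 *	if (dp_get_cfg_capabilities(soc_handle, some_dp_capability))
 *		;	// feature is enabled in this DP configuration
 */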
8853 
8854 #ifdef FEATURE_AST
8855 static void dp_peer_teardown_wifi3(struct cdp_vdev *vdev_hdl, void *peer_hdl)
8856 {
8857 	struct dp_vdev *vdev = (struct dp_vdev *) vdev_hdl;
8858 	struct dp_peer *peer = (struct dp_peer *) peer_hdl;
8859 	struct dp_soc *soc = (struct dp_soc *) vdev->pdev->soc;
8860 
8861 	/*
	 * For the BSS peer, a new peer is not created on alloc_node if a
	 * peer with the same address already exists; instead the refcnt of
	 * the existing peer is increased. Correspondingly, in the delete
	 * path only the refcnt is decreased, and the peer is deleted only
	 * when all references are dropped. So delete_in_progress should not
	 * be set for the bss_peer, unless only 2 references remain (the peer
	 * map reference and the peer hash table reference).
8869 	 */
	if (peer->bss_peer && (qdf_atomic_read(&peer->ref_cnt) > 2))
		return;
8873 
8874 	peer->delete_in_progress = true;
8875 	dp_peer_delete_ast_entries(soc, peer);
8876 }
8877 #endif
8878 
8879 #ifdef ATH_SUPPORT_NAC_RSSI
8880 /**
 * dp_vdev_get_neighbour_rssi(): Get RSSI of the configured NAC client
 * @vdev_hdl: DP vdev handle
 * @mac_addr: neighbour mac address
 * @rssi: out parameter to return the rssi value
 *
 * Return: QDF_STATUS_SUCCESS if the neighbour is found, error status otherwise.
8886  */
8887 static QDF_STATUS  dp_vdev_get_neighbour_rssi(struct cdp_vdev *vdev_hdl,
8888 					      char *mac_addr,
8889 					      uint8_t *rssi)
8890 {
8891 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_hdl;
8892 	struct dp_pdev *pdev = vdev->pdev;
8893 	struct dp_neighbour_peer *peer = NULL;
8894 	QDF_STATUS status = QDF_STATUS_E_FAILURE;
8895 
8896 	*rssi = 0;
8897 	qdf_spin_lock_bh(&pdev->neighbour_peer_mutex);
8898 	TAILQ_FOREACH(peer, &pdev->neighbour_peers_list,
8899 		      neighbour_peer_list_elem) {
8900 		if (qdf_mem_cmp(&peer->neighbour_peers_macaddr.raw[0],
8901 				mac_addr, DP_MAC_ADDR_LEN) == 0) {
8902 			*rssi = peer->rssi;
8903 			status = QDF_STATUS_SUCCESS;
8904 			break;
8905 		}
8906 	}
8907 	qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex);
8908 	return status;
8909 }
8910 
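/**
 * dp_config_for_nac_rssi() - configure RSSI measurement for a NAC client
 * @vdev_handle: DP vdev handle
 * @cmd: add/del command for the NAC entry
 * @bssid: BSSID to be configured in FW for the RSSI measurement
 * @client_macaddr: MAC address of the NAC client
 * @chan_num: channel number (currently unused here)
 *
 * Return: QDF_STATUS
 */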
8911 static QDF_STATUS dp_config_for_nac_rssi(struct cdp_vdev *vdev_handle,
8912 		enum cdp_nac_param_cmd cmd, char *bssid, char *client_macaddr,
8913 		uint8_t chan_num)
8914 {
8916 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
8917 	struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;
8918 	struct dp_soc *soc = (struct dp_soc *) vdev->pdev->soc;
8919 
8920 	pdev->nac_rssi_filtering = 1;
8921 	/* Store address of NAC (neighbour peer) which will be checked
8922 	 * against TA of received packets.
8923 	 */
8924 
8925 	if (cmd == CDP_NAC_PARAM_ADD) {
8926 		dp_update_filter_neighbour_peers(vdev_handle, DP_NAC_PARAM_ADD,
8927 						 client_macaddr);
8928 	} else if (cmd == CDP_NAC_PARAM_DEL) {
8929 		dp_update_filter_neighbour_peers(vdev_handle,
8930 						 DP_NAC_PARAM_DEL,
8931 						 client_macaddr);
8932 	}
8933 
8934 	if (soc->cdp_soc.ol_ops->config_bssid_in_fw_for_nac_rssi)
8935 		soc->cdp_soc.ol_ops->config_bssid_in_fw_for_nac_rssi
8936 			((void *)vdev->pdev->ctrl_pdev,
8937 			 vdev->vdev_id, cmd, bssid);
8938 
8939 	return QDF_STATUS_SUCCESS;
8940 }
8941 #endif
8942 
8943 /**
8944  * dp_enable_peer_based_pktlog() - Set Flag for peer based filtering
8945  * for pktlog
 * @txrx_pdev_handle: cdp_pdev handle
 * @mac_addr: mac address of the peer whose pktlog filter is updated
8947  * @enb_dsb: Enable or disable peer based filtering
8948  *
8949  * Return: QDF_STATUS
8950  */
8951 static int
8952 dp_enable_peer_based_pktlog(
8953 	struct cdp_pdev *txrx_pdev_handle,
8954 	char *mac_addr, uint8_t enb_dsb)
8955 {
8956 	struct dp_peer *peer;
8957 	uint8_t local_id;
8958 	struct dp_pdev *pdev = (struct dp_pdev *)txrx_pdev_handle;
8959 
8960 	peer = (struct dp_peer *)dp_find_peer_by_addr(txrx_pdev_handle,
8961 			mac_addr, &local_id);
8962 
8963 	if (!peer) {
8964 		dp_err("Invalid Peer");
8965 		return QDF_STATUS_E_FAILURE;
8966 	}
8967 
8968 	peer->peer_based_pktlog_filter = enb_dsb;
8969 	pdev->dp_peer_based_pktlog = enb_dsb;
8970 
8971 	return QDF_STATUS_SUCCESS;
8972 }
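/*
 * Illustrative sketch: peer based pktlog filtering is keyed on the peer
 * mac address; the address below is a made-up example.
 *
 *	uint8_t mac[] = {0x00, 0x11, 0x22, 0x33, 0x44, 0x55};
 *
 *	dp_enable_peer_based_pktlog(txrx_pdev_handle, (char *)mac, 1);
 *	...
 *	dp_enable_peer_based_pktlog(txrx_pdev_handle, (char *)mac, 0);
 */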
8973 
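/**
 * dp_peer_map_attach_wifi3() - store peer map params and attach peer
 * find data structures
 * @soc_hdl: DP soc handle
 * @max_peers: max number of peers supported by the target
 * @peer_map_unmap_v2: flag whether FW uses the v2 peer map/unmap format
 *
 * Return: QDF_STATUS
 */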
8974 static QDF_STATUS dp_peer_map_attach_wifi3(struct cdp_soc_t  *soc_hdl,
8975 					   uint32_t max_peers,
8976 					   bool peer_map_unmap_v2)
8977 {
8978 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
8979 
8980 	soc->max_peers = max_peers;
8981 
	qdf_print("%s max_peers %u\n", __func__, max_peers);
8983 
8984 	if (dp_peer_find_attach(soc))
8985 		return QDF_STATUS_E_FAILURE;
8986 
8987 	soc->is_peer_map_unmap_v2 = peer_map_unmap_v2;
8988 
8989 	return QDF_STATUS_SUCCESS;
8990 }
8991 
8992 /**
8993  * dp_pdev_set_ctrl_pdev() - set ctrl pdev handle in dp pdev
8994  * @dp_pdev: dp pdev handle
8995  * @ctrl_pdev: UMAC ctrl pdev handle
8996  *
8997  * Return: void
8998  */
8999 static void dp_pdev_set_ctrl_pdev(struct cdp_pdev *dp_pdev,
9000 				  struct cdp_ctrl_objmgr_pdev *ctrl_pdev)
9001 {
9002 	struct dp_pdev *pdev = (struct dp_pdev *)dp_pdev;
9003 
9004 	pdev->ctrl_pdev = ctrl_pdev;
9005 }
9006 
9007 /*
9008  * dp_get_cfg() - get dp cfg
9009  * @soc: cdp soc handle
9010  * @cfg: cfg enum
9011  *
9012  * Return: cfg value
9013  */
9014 static uint32_t dp_get_cfg(void *soc, enum cdp_dp_cfg cfg)
9015 {
9016 	struct dp_soc *dpsoc = (struct dp_soc *)soc;
9017 	uint32_t value = 0;
9018 
9019 	switch (cfg) {
9020 	case cfg_dp_enable_data_stall:
9021 		value = dpsoc->wlan_cfg_ctx->enable_data_stall_detection;
9022 		break;
9023 	case cfg_dp_enable_ip_tcp_udp_checksum_offload:
9024 		value = dpsoc->wlan_cfg_ctx->tcp_udp_checksumoffload;
9025 		break;
9026 	case cfg_dp_tso_enable:
9027 		value = dpsoc->wlan_cfg_ctx->tso_enabled;
9028 		break;
9029 	case cfg_dp_lro_enable:
9030 		value = dpsoc->wlan_cfg_ctx->lro_enabled;
9031 		break;
9032 	case cfg_dp_gro_enable:
9033 		value = dpsoc->wlan_cfg_ctx->gro_enabled;
9034 		break;
9035 	case cfg_dp_tx_flow_start_queue_offset:
9036 		value = dpsoc->wlan_cfg_ctx->tx_flow_start_queue_offset;
9037 		break;
9038 	case cfg_dp_tx_flow_stop_queue_threshold:
9039 		value = dpsoc->wlan_cfg_ctx->tx_flow_stop_queue_threshold;
9040 		break;
9041 	case cfg_dp_disable_intra_bss_fwd:
9042 		value = dpsoc->wlan_cfg_ctx->disable_intra_bss_fwd;
9043 		break;
9044 	default:
9045 		value =  0;
9046 	}
9047 
9048 	return value;
9049 }
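/*
 * Illustrative sketch: dp_get_cfg() lets upper layers read individual
 * config knobs without reaching into wlan_cfg_ctx directly, e.g.
 *
 *	if (dp_get_cfg(soc, cfg_dp_tso_enable))
 *		;	// TSO is enabled in this DP configuration
 */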
9050 
9051 static struct cdp_cmn_ops dp_ops_cmn = {
9052 	.txrx_soc_attach_target = dp_soc_attach_target_wifi3,
9053 	.txrx_vdev_attach = dp_vdev_attach_wifi3,
9054 	.txrx_vdev_detach = dp_vdev_detach_wifi3,
9055 	.txrx_pdev_attach = dp_pdev_attach_wifi3,
9056 	.txrx_pdev_detach = dp_pdev_detach_wifi3,
9057 	.txrx_pdev_deinit = dp_pdev_deinit_wifi3,
9058 	.txrx_peer_create = dp_peer_create_wifi3,
9059 	.txrx_peer_setup = dp_peer_setup_wifi3,
9060 #ifdef FEATURE_AST
9061 	.txrx_peer_teardown = dp_peer_teardown_wifi3,
9062 #else
9063 	.txrx_peer_teardown = NULL,
9064 #endif
9065 	.txrx_peer_add_ast = dp_peer_add_ast_wifi3,
9066 	.txrx_peer_update_ast = dp_peer_update_ast_wifi3,
9067 	.txrx_peer_get_ast_info_by_soc = dp_peer_get_ast_info_by_soc_wifi3,
9068 	.txrx_peer_get_ast_info_by_pdev =
9069 		dp_peer_get_ast_info_by_pdevid_wifi3,
9070 	.txrx_peer_ast_delete_by_soc =
9071 		dp_peer_ast_entry_del_by_soc,
9072 	.txrx_peer_ast_delete_by_pdev =
9073 		dp_peer_ast_entry_del_by_pdev,
9074 	.txrx_peer_delete = dp_peer_delete_wifi3,
9075 	.txrx_vdev_register = dp_vdev_register_wifi3,
9076 	.txrx_vdev_flush_peers = dp_vdev_flush_peers,
9077 	.txrx_soc_detach = dp_soc_detach_wifi3,
9078 	.txrx_soc_deinit = dp_soc_deinit_wifi3,
9079 	.txrx_soc_init = dp_soc_init_wifi3,
9080 	.txrx_tso_soc_attach = dp_tso_soc_attach,
9081 	.txrx_tso_soc_detach = dp_tso_soc_detach,
9082 	.txrx_get_vdev_mac_addr = dp_get_vdev_mac_addr_wifi3,
9083 	.txrx_get_vdev_from_vdev_id = dp_get_vdev_from_vdev_id_wifi3,
9084 	.txrx_get_mon_vdev_from_pdev = dp_get_mon_vdev_from_pdev_wifi3,
9085 	.txrx_get_ctrl_pdev_from_vdev = dp_get_ctrl_pdev_from_vdev_wifi3,
9086 	.txrx_ath_getstats = dp_get_device_stats,
9087 	.addba_requestprocess = dp_addba_requestprocess_wifi3,
9088 	.addba_responsesetup = dp_addba_responsesetup_wifi3,
9089 	.addba_resp_tx_completion = dp_addba_resp_tx_completion_wifi3,
9090 	.delba_process = dp_delba_process_wifi3,
9091 	.set_addba_response = dp_set_addba_response,
9092 	.get_peer_mac_addr_frm_id = dp_get_peer_mac_addr_frm_id,
9093 	.flush_cache_rx_queue = NULL,
9094 	/* TODO: get API's for dscp-tid need to be added*/
9095 	.set_vdev_dscp_tid_map = dp_set_vdev_dscp_tid_map_wifi3,
9096 	.set_pdev_dscp_tid_map = dp_set_pdev_dscp_tid_map_wifi3,
9097 	.hmmc_tid_override_en = dp_hmmc_tid_override_en_wifi3,
9098 	.set_hmmc_tid_val = dp_set_hmmc_tid_val_wifi3,
9099 	.txrx_get_total_per = dp_get_total_per,
9100 	.txrx_stats_request = dp_txrx_stats_request,
9101 	.txrx_set_monitor_mode = dp_vdev_set_monitor_mode,
9102 	.txrx_get_pdev_id_frm_pdev = dp_get_pdev_id_frm_pdev,
9103 	.txrx_get_vow_config_frm_pdev = NULL,
9104 	.txrx_pdev_set_chan_noise_floor = dp_pdev_set_chan_noise_floor,
9105 	.txrx_set_nac = dp_set_nac,
9106 	.txrx_get_tx_pending = dp_get_tx_pending,
9107 	.txrx_set_pdev_tx_capture = dp_config_debug_sniffer,
9108 	.txrx_get_peer_mac_from_peer_id = dp_get_peer_mac_from_peer_id,
9109 	.display_stats = dp_txrx_dump_stats,
9110 	.txrx_soc_set_nss_cfg = dp_soc_set_nss_cfg_wifi3,
9111 	.txrx_soc_get_nss_cfg = dp_soc_get_nss_cfg_wifi3,
9112 	.txrx_intr_attach = dp_soc_interrupt_attach_wrapper,
9113 	.txrx_intr_detach = dp_soc_interrupt_detach,
9114 	.set_pn_check = dp_set_pn_check_wifi3,
9115 	.update_config_parameters = dp_update_config_parameters,
9116 	/* TODO: Add other functions */
9117 	.txrx_data_tx_cb_set = dp_txrx_data_tx_cb_set,
9118 	.get_dp_txrx_handle = dp_pdev_get_dp_txrx_handle,
9119 	.set_dp_txrx_handle = dp_pdev_set_dp_txrx_handle,
9120 	.get_soc_dp_txrx_handle = dp_soc_get_dp_txrx_handle,
9121 	.set_soc_dp_txrx_handle = dp_soc_set_dp_txrx_handle,
9122 	.txrx_set_ba_aging_timeout = dp_set_ba_aging_timeout,
9123 	.txrx_get_ba_aging_timeout = dp_get_ba_aging_timeout,
9124 	.tx_send = dp_tx_send,
9125 	.txrx_peer_reset_ast = dp_wds_reset_ast_wifi3,
9126 	.txrx_peer_reset_ast_table = dp_wds_reset_ast_table_wifi3,
9127 	.txrx_peer_flush_ast_table = dp_wds_flush_ast_table_wifi3,
9128 	.txrx_peer_map_attach = dp_peer_map_attach_wifi3,
9129 	.txrx_pdev_set_ctrl_pdev = dp_pdev_set_ctrl_pdev,
9130 	.txrx_get_os_rx_handles_from_vdev =
9131 					dp_get_os_rx_handles_from_vdev_wifi3,
9132 	.delba_tx_completion = dp_delba_tx_completion_wifi3,
9133 	.get_dp_capabilities = dp_get_cfg_capabilities,
9134 	.txrx_get_cfg = dp_get_cfg,
9135 };
9136 
9137 static struct cdp_ctrl_ops dp_ops_ctrl = {
9138 	.txrx_peer_authorize = dp_peer_authorize,
9139 	.txrx_set_vdev_rx_decap_type = dp_set_vdev_rx_decap_type,
9140 	.txrx_set_tx_encap_type = dp_set_vdev_tx_encap_type,
9141 #ifdef MESH_MODE_SUPPORT
9142 	.txrx_set_mesh_mode  = dp_peer_set_mesh_mode,
9143 	.txrx_set_mesh_rx_filter = dp_peer_set_mesh_rx_filter,
9144 #endif
9145 	.txrx_set_vdev_param = dp_set_vdev_param,
9146 	.txrx_peer_set_nawds = dp_peer_set_nawds,
9147 	.txrx_set_pdev_reo_dest = dp_set_pdev_reo_dest,
9148 	.txrx_get_pdev_reo_dest = dp_get_pdev_reo_dest,
9149 	.txrx_set_filter_neighbour_peers = dp_set_filter_neighbour_peers,
9150 	.txrx_update_filter_neighbour_peers =
9151 		dp_update_filter_neighbour_peers,
9152 	.txrx_get_sec_type = dp_get_sec_type,
9153 	/* TODO: Add other functions */
9154 	.txrx_wdi_event_sub = dp_wdi_event_sub,
9155 	.txrx_wdi_event_unsub = dp_wdi_event_unsub,
9156 #ifdef WDI_EVENT_ENABLE
9157 	.txrx_get_pldev = dp_get_pldev,
9158 #endif
9159 	.txrx_set_pdev_param = dp_set_pdev_param,
9160 #ifdef ATH_SUPPORT_NAC_RSSI
9161 	.txrx_vdev_config_for_nac_rssi = dp_config_for_nac_rssi,
9162 	.txrx_vdev_get_neighbour_rssi = dp_vdev_get_neighbour_rssi,
9163 #endif
9164 	.set_key = dp_set_michael_key,
9165 	.txrx_get_vdev_param = dp_get_vdev_param,
9166 	.enable_peer_based_pktlog = dp_enable_peer_based_pktlog,
9167 };
9168 
9169 static struct cdp_me_ops dp_ops_me = {
9170 #ifdef ATH_SUPPORT_IQUE
9171 	.tx_me_alloc_descriptor = dp_tx_me_alloc_descriptor,
9172 	.tx_me_free_descriptor = dp_tx_me_free_descriptor,
9173 	.tx_me_convert_ucast = dp_tx_me_send_convert_ucast,
9174 #endif
9175 	.tx_me_find_ast_entry = NULL,
9176 };
9177 
9178 static struct cdp_mon_ops dp_ops_mon = {
9179 	.txrx_monitor_set_filter_ucast_data = NULL,
9180 	.txrx_monitor_set_filter_mcast_data = NULL,
9181 	.txrx_monitor_set_filter_non_data = NULL,
9182 	.txrx_monitor_get_filter_ucast_data = dp_vdev_get_filter_ucast_data,
9183 	.txrx_monitor_get_filter_mcast_data = dp_vdev_get_filter_mcast_data,
9184 	.txrx_monitor_get_filter_non_data = dp_vdev_get_filter_non_data,
9185 	.txrx_reset_monitor_mode = dp_reset_monitor_mode,
9186 	/* Added support for HK advance filter */
9187 	.txrx_set_advance_monitor_filter = dp_pdev_set_advance_monitor_filter,
9188 };
9189 
9190 static struct cdp_host_stats_ops dp_ops_host_stats = {
9191 	.txrx_per_peer_stats = dp_get_host_peer_stats,
9192 	.get_fw_peer_stats = dp_get_fw_peer_stats,
9193 	.get_htt_stats = dp_get_htt_stats,
9194 	.txrx_enable_enhanced_stats = dp_enable_enhanced_stats,
9195 	.txrx_disable_enhanced_stats = dp_disable_enhanced_stats,
9196 	.txrx_stats_publish = dp_txrx_stats_publish,
9197 	.txrx_get_vdev_stats  = dp_txrx_get_vdev_stats,
9198 	.txrx_get_peer_stats = dp_txrx_get_peer_stats,
9199 	.txrx_reset_peer_stats = dp_txrx_reset_peer_stats,
9200 	.txrx_get_pdev_stats = dp_txrx_get_pdev_stats,
9201 	/* TODO */
9202 };
9203 
9204 static struct cdp_raw_ops dp_ops_raw = {
9205 	/* TODO */
9206 };
9207 
9208 #ifdef CONFIG_WIN
9209 static struct cdp_pflow_ops dp_ops_pflow = {
9210 	/* TODO */
9211 };
9212 #endif /* CONFIG_WIN */
9213 
9214 #ifdef FEATURE_RUNTIME_PM
9215 /**
9216  * dp_runtime_suspend() - ensure DP is ready to runtime suspend
9217  * @opaque_pdev: DP pdev context
9218  *
9219  * DP is ready to runtime suspend if there are no pending TX packets.
9220  *
9221  * Return: QDF_STATUS
9222  */
9223 static QDF_STATUS dp_runtime_suspend(struct cdp_pdev *opaque_pdev)
9224 {
9225 	struct dp_pdev *pdev = (struct dp_pdev *)opaque_pdev;
9226 	struct dp_soc *soc = pdev->soc;
9227 
9228 	/* Abort if there are any pending TX packets */
9229 	if (dp_get_tx_pending(opaque_pdev) > 0) {
9230 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
9231 			  FL("Abort suspend due to pending TX packets"));
9232 		return QDF_STATUS_E_AGAIN;
9233 	}
9234 
9235 	if (soc->intr_mode == DP_INTR_POLL)
9236 		qdf_timer_stop(&soc->int_timer);
9237 
9238 	return QDF_STATUS_SUCCESS;
9239 }
9240 
9241 /**
9242  * dp_runtime_resume() - ensure DP is ready to runtime resume
9243  * @opaque_pdev: DP pdev context
9244  *
9245  * Resume DP for runtime PM.
9246  *
9247  * Return: QDF_STATUS
9248  */
9249 static QDF_STATUS dp_runtime_resume(struct cdp_pdev *opaque_pdev)
9250 {
9251 	struct dp_pdev *pdev = (struct dp_pdev *)opaque_pdev;
9252 	struct dp_soc *soc = pdev->soc;
9253 	void *hal_srng;
9254 	int i;
9255 
9256 	if (soc->intr_mode == DP_INTR_POLL)
9257 		qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
9258 
9259 	for (i = 0; i < MAX_TCL_DATA_RINGS; i++) {
9260 		hal_srng = soc->tcl_data_ring[i].hal_srng;
9261 		if (hal_srng) {
9262 			/* We actually only need to acquire the lock */
9263 			hal_srng_access_start(soc->hal_soc, hal_srng);
9264 			/* Update SRC ring head pointer for HW to send
9265 			   all pending packets */
9266 			hal_srng_access_end(soc->hal_soc, hal_srng);
9267 		}
9268 	}
9269 
9270 	return QDF_STATUS_SUCCESS;
9271 }
9272 #endif /* FEATURE_RUNTIME_PM */
9273 
9274 #ifndef CONFIG_WIN
9275 static struct cdp_misc_ops dp_ops_misc = {
9276 	.tx_non_std = dp_tx_non_std,
9277 	.get_opmode = dp_get_opmode,
9278 #ifdef FEATURE_RUNTIME_PM
9279 	.runtime_suspend = dp_runtime_suspend,
9280 	.runtime_resume = dp_runtime_resume,
9281 #endif /* FEATURE_RUNTIME_PM */
9282 	.pkt_log_init = dp_pkt_log_init,
9283 	.pkt_log_con_service = dp_pkt_log_con_service,
9284 	.get_num_rx_contexts = dp_get_num_rx_contexts,
9285 };
9286 
9287 static struct cdp_flowctl_ops dp_ops_flowctl = {
9288 	/* WIFI 3.0 DP implement as required. */
9289 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
9290 	.flow_pool_map_handler = dp_tx_flow_pool_map,
9291 	.flow_pool_unmap_handler = dp_tx_flow_pool_unmap,
9292 	.register_pause_cb = dp_txrx_register_pause_cb,
9293 	.dump_flow_pool_info = dp_tx_dump_flow_pool_info,
9294 	.tx_desc_thresh_reached = dp_tx_desc_thresh_reached,
9295 #endif /* QCA_LL_TX_FLOW_CONTROL_V2 */
9296 };
9297 
9298 static struct cdp_lflowctl_ops dp_ops_l_flowctl = {
9299 	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
9300 };
9301 
9302 #ifdef IPA_OFFLOAD
9303 static struct cdp_ipa_ops dp_ops_ipa = {
9304 	.ipa_get_resource = dp_ipa_get_resource,
9305 	.ipa_set_doorbell_paddr = dp_ipa_set_doorbell_paddr,
9306 	.ipa_op_response = dp_ipa_op_response,
9307 	.ipa_register_op_cb = dp_ipa_register_op_cb,
9308 	.ipa_get_stat = dp_ipa_get_stat,
9309 	.ipa_tx_data_frame = dp_tx_send_ipa_data_frame,
9310 	.ipa_enable_autonomy = dp_ipa_enable_autonomy,
9311 	.ipa_disable_autonomy = dp_ipa_disable_autonomy,
9312 	.ipa_setup = dp_ipa_setup,
9313 	.ipa_cleanup = dp_ipa_cleanup,
9314 	.ipa_setup_iface = dp_ipa_setup_iface,
9315 	.ipa_cleanup_iface = dp_ipa_cleanup_iface,
9316 	.ipa_enable_pipes = dp_ipa_enable_pipes,
9317 	.ipa_disable_pipes = dp_ipa_disable_pipes,
9318 	.ipa_set_perf_level = dp_ipa_set_perf_level
9319 };
9320 #endif
9321 
9322 static QDF_STATUS dp_bus_suspend(struct cdp_pdev *opaque_pdev)
9323 {
9324 	struct dp_pdev *pdev = (struct dp_pdev *)opaque_pdev;
9325 	struct dp_soc *soc = pdev->soc;
9326 	int timeout = SUSPEND_DRAIN_WAIT;
9327 	int drain_wait_delay = 50; /* 50 ms */
9328 
9329 	/* Abort if there are any pending TX packets */
9330 	while (dp_get_tx_pending(opaque_pdev) > 0) {
9331 		qdf_sleep(drain_wait_delay);
9332 		if (timeout <= 0) {
9333 			dp_err("TX frames are pending, abort suspend");
9334 			return QDF_STATUS_E_TIMEOUT;
9335 		}
9336 		timeout = timeout - drain_wait_delay;
9337 	}
9338 
9339 	if (soc->intr_mode == DP_INTR_POLL)
9340 		qdf_timer_stop(&soc->int_timer);
9341 
9342 	return QDF_STATUS_SUCCESS;
9343 }
9344 
9345 static QDF_STATUS dp_bus_resume(struct cdp_pdev *opaque_pdev)
9346 {
9347 	struct dp_pdev *pdev = (struct dp_pdev *)opaque_pdev;
9348 	struct dp_soc *soc = pdev->soc;
9349 
9350 	if (soc->intr_mode == DP_INTR_POLL)
9351 		qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
9352 
9353 	return QDF_STATUS_SUCCESS;
9354 }
9355 
9356 static struct cdp_bus_ops dp_ops_bus = {
9357 	.bus_suspend = dp_bus_suspend,
9358 	.bus_resume = dp_bus_resume
9359 };
9360 
9361 static struct cdp_ocb_ops dp_ops_ocb = {
9362 	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
9363 };
9364 
9365 
9366 static struct cdp_throttle_ops dp_ops_throttle = {
9367 	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
9368 };
9369 
9370 static struct cdp_mob_stats_ops dp_ops_mob_stats = {
9371 	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
9372 };
9373 
9374 static struct cdp_cfg_ops dp_ops_cfg = {
9375 	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
9376 };
9377 
9378 /*
 * dp_peer_get_ref_find_by_addr - find peer by mac address, incrementing its ref count
9380  * @dev: physical device instance
9381  * @peer_mac_addr: peer mac address
9382  * @local_id: local id for the peer
9383  * @debug_id: to track enum peer access
9384  *
9385  * Return: peer instance pointer
9386  */
9387 static inline void *
9388 dp_peer_get_ref_find_by_addr(struct cdp_pdev *dev, uint8_t *peer_mac_addr,
9389 			     uint8_t *local_id,
9390 			     enum peer_debug_id_type debug_id)
9391 {
9392 	struct dp_pdev *pdev = (struct dp_pdev *)dev;
9393 	struct dp_peer *peer;
9394 
9395 	peer = dp_peer_find_hash_find(pdev->soc, peer_mac_addr, 0, DP_VDEV_ALL);
9396 
9397 	if (!peer)
9398 		return NULL;
9399 
9400 	*local_id = peer->local_id;
9401 	DP_TRACE(INFO, "%s: peer %pK id %d", __func__, peer, *local_id);
9402 
9403 	return peer;
9404 }
9405 
9406 /*
9407  * dp_peer_release_ref - release peer ref count
9408  * @peer: peer handle
9409  * @debug_id: to track enum peer access
9410  *
9411  * Return: None
9412  */
9413 static inline
9414 void dp_peer_release_ref(void *peer, enum peer_debug_id_type debug_id)
9415 {
9416 	dp_peer_unref_delete(peer);
9417 }
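/*
 * Illustrative sketch: every successful peer_get_ref_by_addr lookup must
 * be balanced by peer_release_ref with the same debug id (the debug id
 * below is a placeholder enumerator).
 *
 *	uint8_t local_id;
 *	struct dp_peer *peer;
 *
 *	peer = dp_peer_get_ref_find_by_addr(dev, peer_mac, &local_id,
 *					    some_debug_id);
 *	if (peer) {
 *		// peer cannot be freed while this reference is held
 *		dp_peer_release_ref(peer, some_debug_id);
 *	}
 */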
9418 
9419 static struct cdp_peer_ops dp_ops_peer = {
9420 	.register_peer = dp_register_peer,
9421 	.clear_peer = dp_clear_peer,
9422 	.find_peer_by_addr = dp_find_peer_by_addr,
9423 	.find_peer_by_addr_and_vdev = dp_find_peer_by_addr_and_vdev,
9424 	.peer_get_ref_by_addr = dp_peer_get_ref_find_by_addr,
9425 	.peer_release_ref = dp_peer_release_ref,
9426 	.local_peer_id = dp_local_peer_id,
9427 	.peer_find_by_local_id = dp_peer_find_by_local_id,
9428 	.peer_state_update = dp_peer_state_update,
9429 	.get_vdevid = dp_get_vdevid,
9430 	.get_vdev_by_sta_id = dp_get_vdev_by_sta_id,
9431 	.peer_get_peer_mac_addr = dp_peer_get_peer_mac_addr,
9432 	.get_vdev_for_peer = dp_get_vdev_for_peer,
9433 	.get_peer_state = dp_get_peer_state,
9434 };
9435 #endif
9436 
9437 static struct cdp_ops dp_txrx_ops = {
9438 	.cmn_drv_ops = &dp_ops_cmn,
9439 	.ctrl_ops = &dp_ops_ctrl,
9440 	.me_ops = &dp_ops_me,
9441 	.mon_ops = &dp_ops_mon,
9442 	.host_stats_ops = &dp_ops_host_stats,
9443 	.wds_ops = &dp_ops_wds,
9444 	.raw_ops = &dp_ops_raw,
9445 #ifdef CONFIG_WIN
9446 	.pflow_ops = &dp_ops_pflow,
9447 #endif /* CONFIG_WIN */
9448 #ifndef CONFIG_WIN
9449 	.misc_ops = &dp_ops_misc,
9450 	.cfg_ops = &dp_ops_cfg,
9451 	.flowctl_ops = &dp_ops_flowctl,
9452 	.l_flowctl_ops = &dp_ops_l_flowctl,
9453 #ifdef IPA_OFFLOAD
9454 	.ipa_ops = &dp_ops_ipa,
9455 #endif
9456 	.bus_ops = &dp_ops_bus,
9457 	.ocb_ops = &dp_ops_ocb,
9458 	.peer_ops = &dp_ops_peer,
9459 	.throttle_ops = &dp_ops_throttle,
9460 	.mob_stats_ops = &dp_ops_mob_stats,
9461 #endif
9462 };
9463 
9464 /*
 * dp_soc_set_txrx_ring_map() - set the default tx to rx ring map
 * @soc: DP handle for the soc
9467  *
9468  * Return: Void
9469  */
9470 static void dp_soc_set_txrx_ring_map(struct dp_soc *soc)
9471 {
	uint32_t i;

	for (i = 0; i < WLAN_CFG_INT_NUM_CONTEXTS; i++)
		soc->tx_ring_map[i] = dp_cpu_ring_map[DP_NSS_DEFAULT_MAP][i];
9476 }
9477 
9478 #ifdef QCA_WIFI_QCA8074
9479 
9480 #ifndef QCA_MEM_ATTACH_ON_WIFI3
9481 
9482 /**
9483  * dp_soc_attach_wifi3() - Attach txrx SOC
9484  * @ctrl_psoc: Opaque SOC handle from control plane
 * @hif_handle: Opaque HIF handle
 * @htc_handle: Opaque HTC handle
9487  * @qdf_osdev: QDF device
9488  * @ol_ops: Offload Operations
9489  * @device_id: Device ID
9490  *
9491  * Return: DP SOC handle on success, NULL on failure
9492  */
9493 void *dp_soc_attach_wifi3(void *ctrl_psoc, void *hif_handle,
9494 			  HTC_HANDLE htc_handle, qdf_device_t qdf_osdev,
9495 			  struct ol_if_ops *ol_ops, uint16_t device_id)
9496 {
9497 	struct dp_soc *dp_soc =  NULL;
9498 
9499 	dp_soc = dp_soc_attach(ctrl_psoc, htc_handle, qdf_osdev,
9500 			       ol_ops, device_id);
9501 	if (!dp_soc)
9502 		return NULL;
9503 
9504 	if (!dp_soc_init(dp_soc, htc_handle, hif_handle))
9505 		return NULL;
9506 
9507 	return (void *)dp_soc;
9508 }
9509 #else
9510 
9511 /**
9512  * dp_soc_attach_wifi3() - Attach txrx SOC
9513  * @ctrl_psoc: Opaque SOC handle from control plane
 * @hif_handle: Opaque HIF handle
 * @htc_handle: Opaque HTC handle
9516  * @qdf_osdev: QDF device
9517  * @ol_ops: Offload Operations
9518  * @device_id: Device ID
9519  *
9520  * Return: DP SOC handle on success, NULL on failure
9521  */
9522 void *dp_soc_attach_wifi3(void *ctrl_psoc, void *hif_handle,
9523 			  HTC_HANDLE htc_handle, qdf_device_t qdf_osdev,
9524 			  struct ol_if_ops *ol_ops, uint16_t device_id)
9525 {
9526 	struct dp_soc *dp_soc = NULL;
9527 
9528 	dp_soc = dp_soc_attach(ctrl_psoc, htc_handle, qdf_osdev,
9529 			       ol_ops, device_id);
9530 	return (void *)dp_soc;
9531 }
9532 
9533 #endif
9534 
9535 /**
9536  * dp_soc_attach() - Attach txrx SOC
9537  * @ctrl_psoc: Opaque SOC handle from control plane
9538  * @htc_handle: Opaque HTC handle
9539  * @qdf_osdev: QDF device
9540  * @ol_ops: Offload Operations
9541  * @device_id: Device ID
9542  *
9543  * Return: DP SOC handle on success, NULL on failure
9544  */
9545 static struct dp_soc *
9546 dp_soc_attach(void *ctrl_psoc, HTC_HANDLE htc_handle, qdf_device_t qdf_osdev,
9547 	      struct ol_if_ops *ol_ops, uint16_t device_id)
9548 {
	struct dp_soc *soc = NULL;
9551 	struct htt_soc *htt_soc = NULL;
9552 
9553 	soc = qdf_mem_malloc(sizeof(*soc));
9554 
9555 	if (!soc) {
9556 		dp_err("DP SOC memory allocation failed");
9557 		goto fail0;
9558 	}
9559 
9561 	soc->device_id = device_id;
9562 	soc->cdp_soc.ops = &dp_txrx_ops;
9563 	soc->cdp_soc.ol_ops = ol_ops;
9564 	soc->ctrl_psoc = ctrl_psoc;
9565 	soc->osdev = qdf_osdev;
9566 	soc->num_hw_dscp_tid_map = HAL_MAX_HW_DSCP_TID_MAPS;
9567 
9568 	soc->wlan_cfg_ctx = wlan_cfg_soc_attach(soc->ctrl_psoc);
9569 	if (!soc->wlan_cfg_ctx) {
		dp_err("wlan_cfg_ctx failed");
9571 		goto fail1;
9572 	}
9573 	htt_soc = qdf_mem_malloc(sizeof(*htt_soc));
9574 	if (!htt_soc) {
9575 		dp_err("HTT attach failed");
9576 		goto fail1;
9577 	}
9578 	soc->htt_handle = htt_soc;
9579 	htt_soc->dp_soc = soc;
9580 	htt_soc->htc_soc = htc_handle;
9581 
9582 	if (htt_soc_htc_prealloc(htt_soc) != QDF_STATUS_SUCCESS)
9583 		goto fail2;
9584 
	return soc;
9586 fail2:
9587 	qdf_mem_free(htt_soc);
9588 fail1:
9589 	qdf_mem_free(soc);
9590 fail0:
9591 	return NULL;
9592 }
9593 
9594 /**
9595  * dp_soc_init() - Initialize txrx SOC
 * @dpsoc: Opaque DP SOC handle
9597  * @htc_handle: Opaque HTC handle
9598  * @hif_handle: Opaque HIF handle
9599  *
9600  * Return: DP SOC handle on success, NULL on failure
9601  */
9602 void *dp_soc_init(void *dpsoc, HTC_HANDLE htc_handle, void *hif_handle)
9603 {
9604 	int target_type;
9605 	struct dp_soc *soc = (struct dp_soc *)dpsoc;
9606 	struct htt_soc *htt_soc = (struct htt_soc *)soc->htt_handle;
9607 
9608 	htt_soc->htc_soc = htc_handle;
9609 	soc->hif_handle = hif_handle;
9610 
9611 	soc->hal_soc = hif_get_hal_handle(soc->hif_handle);
9612 	if (!soc->hal_soc)
9613 		return NULL;
9614 
9615 	htt_soc_initialize(soc->htt_handle, soc->ctrl_psoc, htt_soc->htc_soc,
9616 			   soc->hal_soc, soc->osdev);
9617 	target_type = hal_get_target_type(soc->hal_soc);
9618 	switch (target_type) {
9619 	case TARGET_TYPE_QCA6290:
9620 		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
9621 					       REO_DST_RING_SIZE_QCA6290);
9622 		soc->ast_override_support = 1;
9623 		break;
9624 #ifdef QCA_WIFI_QCA6390
9625 	case TARGET_TYPE_QCA6390:
9626 		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
9627 					       REO_DST_RING_SIZE_QCA6290);
9628 		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, true);
9629 		soc->ast_override_support = 1;
9630 		if (con_mode_monitor == QDF_GLOBAL_MONITOR_MODE) {
9631 			int int_ctx;
9632 
9633 			for (int_ctx = 0; int_ctx < WLAN_CFG_INT_NUM_CONTEXTS; int_ctx++) {
9634 				soc->wlan_cfg_ctx->int_rx_ring_mask[int_ctx] = 0;
9635 				soc->wlan_cfg_ctx->int_rxdma2host_ring_mask[int_ctx] = 0;
9636 			}
9637 		}
9638 		soc->wlan_cfg_ctx->rxdma1_enable = 0;
9639 		break;
9640 #endif
9641 	case TARGET_TYPE_QCA8074:
9642 		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
9643 					       REO_DST_RING_SIZE_QCA8074);
9644 		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, true);
9645 		break;
9646 	case TARGET_TYPE_QCA8074V2:
9647 	case TARGET_TYPE_QCA6018:
9648 		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
9649 					       REO_DST_RING_SIZE_QCA8074);
9650 		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, false);
9651 		soc->hw_nac_monitor_support = 1;
9652 		soc->ast_override_support = 1;
9653 		soc->per_tid_basize_max_tid = 8;
9654 		soc->num_hw_dscp_tid_map = HAL_MAX_HW_DSCP_TID_V2_MAPS;
9655 		break;
9656 	default:
9657 		qdf_print("%s: Unknown tgt type %d\n", __func__, target_type);
9658 		qdf_assert_always(0);
9659 		break;
9660 	}
9661 
9662 	wlan_cfg_set_rx_hash(soc->wlan_cfg_ctx,
9663 			     cfg_get(soc->ctrl_psoc, CFG_DP_RX_HASH));
9664 	soc->cce_disable = false;
9665 
9666 	if (soc->cdp_soc.ol_ops->get_dp_cfg_param) {
9667 		int ret = soc->cdp_soc.ol_ops->get_dp_cfg_param(soc->ctrl_psoc,
9668 				CDP_CFG_MAX_PEER_ID);
9669 
		if (ret != -EINVAL)
			wlan_cfg_set_max_peer_id(soc->wlan_cfg_ctx, ret);
9673 
9674 		ret = soc->cdp_soc.ol_ops->get_dp_cfg_param(soc->ctrl_psoc,
9675 				CDP_CFG_CCE_DISABLE);
9676 		if (ret == 1)
9677 			soc->cce_disable = true;
9678 	}
9679 
9680 	qdf_spinlock_create(&soc->peer_ref_mutex);
9681 	qdf_spinlock_create(&soc->ast_lock);
9682 
9683 	qdf_spinlock_create(&soc->reo_desc_freelist_lock);
9684 	qdf_list_create(&soc->reo_desc_freelist, REO_DESC_FREELIST_SIZE);
9685 
9686 	/* fill the tx/rx cpu ring map*/
9687 	dp_soc_set_txrx_ring_map(soc);
9688 
9689 	qdf_spinlock_create(&soc->htt_stats.lock);
9690 	/* initialize work queue for stats processing */
9691 	qdf_create_work(0, &soc->htt_stats.work, htt_t2h_stats_handler, soc);
9692 
9693 	return soc;
9695 }
9696 
9697 /**
9698  * dp_soc_init_wifi3() - Initialize txrx SOC
 * @dpsoc: Opaque DP SOC handle
9700  * @ctrl_psoc: Opaque SOC handle from control plane(Unused)
9701  * @hif_handle: Opaque HIF handle
9702  * @htc_handle: Opaque HTC handle
9703  * @qdf_osdev: QDF device (Unused)
9704  * @ol_ops: Offload Operations (Unused)
9705  * @device_id: Device ID (Unused)
9706  *
9707  * Return: DP SOC handle on success, NULL on failure
9708  */
9709 void *dp_soc_init_wifi3(void *dpsoc, void *ctrl_psoc, void *hif_handle,
9710 			HTC_HANDLE htc_handle, qdf_device_t qdf_osdev,
9711 			struct ol_if_ops *ol_ops, uint16_t device_id)
9712 {
9713 	return dp_soc_init(dpsoc, htc_handle, hif_handle);
9714 }
9715 
9716 #endif
9717 
9718 /*
9719  * dp_get_pdev_for_mac_id() -  Return pdev for mac_id
9720  *
9721  * @soc: handle to DP soc
9722  * @mac_id: MAC id
9723  *
 * Return: pdev corresponding to the MAC id
9725  */
9726 void *dp_get_pdev_for_mac_id(struct dp_soc *soc, uint32_t mac_id)
9727 {
9728 	if (wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx))
9729 		return soc->pdev_list[mac_id];
9730 
	/* Typically for MCL, as there is only 1 PDEV */
9732 	return soc->pdev_list[0];
9733 }
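/*
 * Illustrative sketch: ring-level code resolves its pdev from a MAC id
 * before touching per-pdev state, e.g.
 *
 *	struct dp_pdev *pdev = dp_get_pdev_for_mac_id(soc, mac_id);
 *
 *	if (pdev)
 *		;	// operate on the rings of this MAC's pdev
 */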
9734 
9735 /*
 * dp_is_hw_dbs_enable() - check if DBS is supported and adjust ring count
 * @soc:		DP SoC context
 * @max_mac_rings:	in/out pointer to the number of MAC rings; reduced
 *			to 1 when the target is not DBS capable
9739  *
9740  * Return: None
9741  */
9742 static
9743 void dp_is_hw_dbs_enable(struct dp_soc *soc,
9744 				int *max_mac_rings)
9745 {
	bool dbs_enable = false;

	if (soc->cdp_soc.ol_ops->is_hw_dbs_2x2_capable)
9748 		dbs_enable = soc->cdp_soc.ol_ops->
9749 		is_hw_dbs_2x2_capable(soc->ctrl_psoc);
9750 
	*max_mac_rings = dbs_enable ? *max_mac_rings : 1;
9752 }
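/*
 * Illustrative sketch, mirroring the usage in dp_set_pktlog_wifi3()
 * below: the configured ring count is clamped to 1 when the target is
 * not DBS (dual band simultaneous) capable.
 *
 *	int max_mac_rings = wlan_cfg_get_num_mac_rings(pdev->wlan_cfg_ctx);
 *
 *	dp_is_hw_dbs_enable(soc, &max_mac_rings);
 *	// max_mac_rings is 1 here unless DBS is supported
 */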
9753 
9754 /*
 * dp_set_pktlog_wifi3() - enable/disable packet log for a WDI event
 * @pdev: Datapath PDEV handle
 * @event: which event's notifications are being subscribed to
 * @enable: WDI event subscribe or not. (True or False)
 *
 * Return: 0 on success
 */
9762 #ifdef WDI_EVENT_ENABLE
9763 int dp_set_pktlog_wifi3(struct dp_pdev *pdev, uint32_t event,
9764 	bool enable)
9765 {
9766 	struct dp_soc *soc = NULL;
9767 	struct htt_rx_ring_tlv_filter htt_tlv_filter = {0};
9768 	int max_mac_rings = wlan_cfg_get_num_mac_rings
9769 					(pdev->wlan_cfg_ctx);
9770 	uint8_t mac_id = 0;
9771 
9772 	soc = pdev->soc;
9773 	dp_is_hw_dbs_enable(soc, &max_mac_rings);
9774 
9775 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
		  FL("Max_mac_rings %d"), max_mac_rings);
9778 
9779 	if (enable) {
9780 		switch (event) {
9781 		case WDI_EVENT_RX_DESC:
9782 			if (pdev->monitor_vdev) {
9783 				/* Nothing needs to be done if monitor mode is
9784 				 * enabled
9785 				 */
9786 				return 0;
9787 			}
9788 			if (pdev->rx_pktlog_mode != DP_RX_PKTLOG_FULL) {
9789 				pdev->rx_pktlog_mode = DP_RX_PKTLOG_FULL;
9790 				htt_tlv_filter.mpdu_start = 1;
9791 				htt_tlv_filter.msdu_start = 1;
9792 				htt_tlv_filter.msdu_end = 1;
9793 				htt_tlv_filter.mpdu_end = 1;
9794 				htt_tlv_filter.packet_header = 1;
9795 				htt_tlv_filter.attention = 1;
9796 				htt_tlv_filter.ppdu_start = 1;
9797 				htt_tlv_filter.ppdu_end = 1;
9798 				htt_tlv_filter.ppdu_end_user_stats = 1;
9799 				htt_tlv_filter.ppdu_end_user_stats_ext = 1;
9800 				htt_tlv_filter.ppdu_end_status_done = 1;
9801 				htt_tlv_filter.enable_fp = 1;
9802 				htt_tlv_filter.fp_mgmt_filter = FILTER_MGMT_ALL;
9803 				htt_tlv_filter.fp_ctrl_filter = FILTER_CTRL_ALL;
9804 				htt_tlv_filter.fp_data_filter = FILTER_DATA_ALL;
9805 				htt_tlv_filter.mo_mgmt_filter = FILTER_MGMT_ALL;
9806 				htt_tlv_filter.mo_ctrl_filter = FILTER_CTRL_ALL;
9807 				htt_tlv_filter.mo_data_filter = FILTER_DATA_ALL;
9808 
9809 				for (mac_id = 0; mac_id < max_mac_rings;
9810 								mac_id++) {
9811 					int mac_for_pdev =
9812 						dp_get_mac_id_for_pdev(mac_id,
9813 								pdev->pdev_id);
9814 
9815 					htt_h2t_rx_ring_cfg(soc->htt_handle,
9816 					 mac_for_pdev,
9817 					 pdev->rxdma_mon_status_ring[mac_id]
9818 					 .hal_srng,
9819 					 RXDMA_MONITOR_STATUS,
9820 					 RX_BUFFER_SIZE,
9821 					 &htt_tlv_filter);
9822 
9823 				}
9824 
9825 				if (soc->reap_timer_init)
9826 					qdf_timer_mod(&soc->mon_reap_timer,
9827 					DP_INTR_POLL_TIMER_MS);
9828 			}
9829 			break;
9830 
9831 		case WDI_EVENT_LITE_RX:
9832 			if (pdev->monitor_vdev) {
9833 				/* Nothing needs to be done if monitor mode is
9834 				 * enabled
9835 				 */
9836 				return 0;
9837 			}
9838 
9839 			if (pdev->rx_pktlog_mode != DP_RX_PKTLOG_LITE) {
9840 				pdev->rx_pktlog_mode = DP_RX_PKTLOG_LITE;
9841 
9842 				htt_tlv_filter.ppdu_start = 1;
9843 				htt_tlv_filter.ppdu_end = 1;
9844 				htt_tlv_filter.ppdu_end_user_stats = 1;
9845 				htt_tlv_filter.ppdu_end_user_stats_ext = 1;
9846 				htt_tlv_filter.ppdu_end_status_done = 1;
9847 				htt_tlv_filter.mpdu_start = 1;
9848 				htt_tlv_filter.enable_fp = 1;
9849 				htt_tlv_filter.fp_mgmt_filter = FILTER_MGMT_ALL;
9850 				htt_tlv_filter.fp_ctrl_filter = FILTER_CTRL_ALL;
9851 				htt_tlv_filter.fp_data_filter = FILTER_DATA_ALL;
9852 				htt_tlv_filter.mo_mgmt_filter = FILTER_MGMT_ALL;
9853 				htt_tlv_filter.mo_ctrl_filter = FILTER_CTRL_ALL;
9854 				htt_tlv_filter.mo_data_filter = FILTER_DATA_ALL;
9855 
9856 				for (mac_id = 0; mac_id < max_mac_rings;
9857 								mac_id++) {
9858 					int mac_for_pdev =
9859 						dp_get_mac_id_for_pdev(mac_id,
9860 								pdev->pdev_id);
9861 
9862 					htt_h2t_rx_ring_cfg(soc->htt_handle,
9863 					mac_for_pdev,
9864 					pdev->rxdma_mon_status_ring[mac_id]
9865 					.hal_srng,
9866 					RXDMA_MONITOR_STATUS,
9867 					RX_BUFFER_SIZE_PKTLOG_LITE,
9868 					&htt_tlv_filter);
9869 				}
9870 
9871 				if (soc->reap_timer_init)
9872 					qdf_timer_mod(&soc->mon_reap_timer,
9873 					DP_INTR_POLL_TIMER_MS);
9874 			}
9875 			break;
9876 
9877 		case WDI_EVENT_LITE_T2H:
9878 			if (pdev->monitor_vdev) {
9879 				/* Nothing needs to be done if monitor mode is
9880 				 * enabled
9881 				 */
9882 				return 0;
9883 			}
9884 
9885 			for (mac_id = 0; mac_id < max_mac_rings; mac_id++) {
9886 				int mac_for_pdev = dp_get_mac_id_for_pdev(
9887 							mac_id,	pdev->pdev_id);
9888 
9889 				pdev->pktlog_ppdu_stats = true;
9890 				dp_h2t_cfg_stats_msg_send(pdev,
9891 					DP_PPDU_TXLITE_STATS_BITMASK_CFG,
9892 					mac_for_pdev);
9893 			}
9894 			break;
9895 
9896 		default:
9897 			/* Nothing needs to be done for other pktlog types */
9898 			break;
9899 		}
9900 	} else {
9901 		switch (event) {
9902 		case WDI_EVENT_RX_DESC:
9903 		case WDI_EVENT_LITE_RX:
9904 			if (pdev->monitor_vdev) {
9905 				/* Nothing needs to be done if monitor mode is
9906 				 * enabled
9907 				 */
9908 				return 0;
9909 			}
9910 			if (pdev->rx_pktlog_mode != DP_RX_PKTLOG_DISABLED) {
9911 				pdev->rx_pktlog_mode = DP_RX_PKTLOG_DISABLED;
9912 
9913 				for (mac_id = 0; mac_id < max_mac_rings;
9914 								mac_id++) {
9915 					int mac_for_pdev =
9916 						dp_get_mac_id_for_pdev(mac_id,
9917 								pdev->pdev_id);
9918 
9919 					htt_h2t_rx_ring_cfg(soc->htt_handle,
9920 					  mac_for_pdev,
9921 					  pdev->rxdma_mon_status_ring[mac_id]
9922 					  .hal_srng,
9923 					  RXDMA_MONITOR_STATUS,
9924 					  RX_BUFFER_SIZE,
9925 					  &htt_tlv_filter);
9926 				}
9927 
9928 				if (soc->reap_timer_init)
9929 					qdf_timer_stop(&soc->mon_reap_timer);
9930 			}
9931 			break;
9932 		case WDI_EVENT_LITE_T2H:
9933 			if (pdev->monitor_vdev) {
9934 				/* Nothing needs to be done if monitor mode is
9935 				 * enabled
9936 				 */
9937 				return 0;
9938 			}
			/* Pass value 0 to disable HTT_H2T_MSG_TYPE_PPDU_STATS_CFG
			 * in FW. Switch to the proper macros once they are
			 * defined in the htt header file.
			 */
9943 			for (mac_id = 0; mac_id < max_mac_rings; mac_id++) {
9944 				int mac_for_pdev =
9945 						dp_get_mac_id_for_pdev(mac_id,
9946 								pdev->pdev_id);
9947 
9948 				pdev->pktlog_ppdu_stats = false;
9949 				if (!pdev->enhanced_stats_en && !pdev->tx_sniffer_enable && !pdev->mcopy_mode) {
9950 					dp_h2t_cfg_stats_msg_send(pdev, 0,
9951 								mac_for_pdev);
9952 				} else if (pdev->tx_sniffer_enable || pdev->mcopy_mode) {
9953 					dp_h2t_cfg_stats_msg_send(pdev, DP_PPDU_STATS_CFG_SNIFFER,
9954 								mac_for_pdev);
9955 				} else if (pdev->enhanced_stats_en) {
9956 					dp_h2t_cfg_stats_msg_send(pdev, DP_PPDU_STATS_CFG_ENH_STATS,
9957 								mac_for_pdev);
9958 				}
9959 			}
9960 
9961 			break;
9962 		default:
9963 			/* Nothing needs to be done for other pktlog types */
9964 			break;
9965 		}
9966 	}
9967 	return 0;
9968 }
9969 #endif
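/*
 * Illustrative sketch (WDI_EVENT_ENABLE builds only): pktlog control
 * code toggles one event type at a time on a pdev:
 *
 *	dp_set_pktlog_wifi3(pdev, WDI_EVENT_LITE_T2H, true);	// enable
 *	...
 *	dp_set_pktlog_wifi3(pdev, WDI_EVENT_LITE_T2H, false);	// disable
 */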
9970