xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/dp_main.c (revision 8ddef7dd9a290d4a9b1efd5d3efacf51d78a1a0d)
1 /*
2  * Copyright (c) 2016-2019 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include <qdf_types.h>
20 #include <qdf_lock.h>
21 #include <qdf_net_types.h>
22 #include <qdf_lro.h>
23 #include <qdf_module.h>
24 #include <hal_hw_headers.h>
25 #include <hal_api.h>
26 #include <hif.h>
27 #include <htt.h>
28 #include <wdi_event.h>
29 #include <queue.h>
30 #include "dp_htt.h"
31 #include "dp_types.h"
32 #include "dp_internal.h"
33 #include "dp_tx.h"
34 #include "dp_tx_desc.h"
35 #include "dp_rx.h"
36 #ifdef DP_RATETABLE_SUPPORT
37 #include "dp_ratetable.h"
38 #endif
39 #include <cdp_txrx_handle.h>
40 #include <wlan_cfg.h>
41 #include "cdp_txrx_cmn_struct.h"
42 #include "cdp_txrx_stats_struct.h"
43 #include "cdp_txrx_cmn_reg.h"
44 #include <qdf_util.h>
45 #include "dp_peer.h"
46 #include "dp_rx_mon.h"
47 #include "htt_stats.h"
48 #include "qdf_mem.h"   /* qdf_mem_malloc,free */
49 #include "cfg_ucfg_api.h"
50 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
51 #include "cdp_txrx_flow_ctrl_v2.h"
52 #else
53 static inline void
54 cdp_dump_flow_pool_info(struct cdp_soc_t *soc)
55 {
56 	return;
57 }
58 #endif
59 #include "dp_ipa.h"
60 #include "dp_cal_client_api.h"
61 #ifdef CONFIG_MCL
62 extern int con_mode_monitor;
63 #ifndef REMOVE_PKT_LOG
64 #include <pktlog_ac_api.h>
65 #include <pktlog_ac.h>
66 #endif
67 #endif
68 void *dp_soc_init(void *dpsoc, HTC_HANDLE htc_handle, void *hif_handle);
69 static void dp_pdev_detach(struct cdp_pdev *txrx_pdev, int force);
70 static struct dp_soc *
71 dp_soc_attach(void *ctrl_psoc, HTC_HANDLE htc_handle, qdf_device_t qdf_osdev,
72 	      struct ol_if_ops *ol_ops, uint16_t device_id);
73 static void dp_pktlogmod_exit(struct dp_pdev *handle);
74 static void *dp_peer_create_wifi3(struct cdp_vdev *vdev_handle,
75 				uint8_t *peer_mac_addr,
76 				struct cdp_ctrl_objmgr_peer *ctrl_peer);
77 static void dp_peer_delete_wifi3(void *peer_handle, uint32_t bitmap);
78 static void dp_ppdu_ring_reset(struct dp_pdev *pdev);
79 static void dp_ppdu_ring_cfg(struct dp_pdev *pdev);
80 
81 #define DP_INTR_POLL_TIMER_MS	10
82 /* Generic AST entry aging timer value */
83 #define DP_AST_AGING_TIMER_DEFAULT_MS	1000
84 /* WDS AST entry aging timer value */
85 #define DP_WDS_AST_AGING_TIMER_DEFAULT_MS	120000
86 #define DP_WDS_AST_AGING_TIMER_CNT \
87 ((DP_WDS_AST_AGING_TIMER_DEFAULT_MS / DP_AST_AGING_TIMER_DEFAULT_MS) - 1)
88 #define DP_MCS_LENGTH (6*MAX_MCS)
89 #define DP_NSS_LENGTH (6*SS_COUNT)
90 #define DP_MU_GROUP_SHOW 16
91 #define DP_MU_GROUP_LENGTH (6 * DP_MU_GROUP_SHOW)
92 #define DP_RXDMA_ERR_LENGTH (6*HAL_RXDMA_ERR_MAX)
93 #define DP_MAX_INT_CONTEXTS_STRING_LENGTH (6 * WLAN_CFG_INT_NUM_CONTEXTS)
94 #define DP_REO_ERR_LENGTH (6*HAL_REO_ERR_MAX)
95 #define DP_MAX_MCS_STRING_LEN 30
96 #define DP_CURR_FW_STATS_AVAIL 19
97 #define DP_HTT_DBG_EXT_STATS_MAX 256
98 #define DP_MAX_SLEEP_TIME 100
99 #ifndef QCA_WIFI_3_0_EMU
100 #define SUSPEND_DRAIN_WAIT 500
101 #else
102 #define SUSPEND_DRAIN_WAIT 3000
103 #endif
104 
105 #ifdef IPA_OFFLOAD
106 /* Exclude IPA rings from the interrupt context */
107 #define TX_RING_MASK_VAL	0xb
108 #define RX_RING_MASK_VAL	0x7
109 #else
110 #define TX_RING_MASK_VAL	0xF
111 #define RX_RING_MASK_VAL	0xF
112 #endif
113 
114 #define STR_MAXLEN	64
115 
116 #define DP_PPDU_STATS_CFG_ALL 0xFFFF
117 
118 /* PPDU stats mask sent to FW to enable enhanced stats */
119 #define DP_PPDU_STATS_CFG_ENH_STATS 0xE67
120 /* PPDU stats mask sent to FW to support debug sniffer feature */
121 #define DP_PPDU_STATS_CFG_SNIFFER 0x2FFF
122 /* PPDU stats mask sent to FW to support BPR feature*/
123 #define DP_PPDU_STATS_CFG_BPR 0x2000
124 /* PPDU stats mask sent to FW to support BPR and enhanced stats feature */
125 #define DP_PPDU_STATS_CFG_BPR_ENH (DP_PPDU_STATS_CFG_BPR | \
126 				   DP_PPDU_STATS_CFG_ENH_STATS)
127 /* PPDU stats mask sent to FW to support BPR and pcktlog stats feature */
128 #define DP_PPDU_STATS_CFG_BPR_PKTLOG (DP_PPDU_STATS_CFG_BPR | \
129 				      DP_PPDU_TXLITE_STATS_BITMASK_CFG)
130 
131 #define RNG_ERR		"SRNG setup failed for"
132 /**
133  * default_dscp_tid_map - Default DSCP-TID mapping
134  *
135  * DSCP        TID
136  * 000000      0
137  * 001000      1
138  * 010000      2
139  * 011000      3
140  * 100000      4
141  * 101000      5
142  * 110000      6
143  * 111000      7
144  */
145 static uint8_t default_dscp_tid_map[DSCP_TID_MAP_MAX] = {
146 	0, 0, 0, 0, 0, 0, 0, 0,
147 	1, 1, 1, 1, 1, 1, 1, 1,
148 	2, 2, 2, 2, 2, 2, 2, 2,
149 	3, 3, 3, 3, 3, 3, 3, 3,
150 	4, 4, 4, 4, 4, 4, 4, 4,
151 	5, 5, 5, 5, 5, 5, 5, 5,
152 	6, 6, 6, 6, 6, 6, 6, 6,
153 	7, 7, 7, 7, 7, 7, 7, 7,
154 };
155 
156 /*
157  * struct dp_rate_debug
158  *
159  * @mcs_type: print string for a given mcs
160  * @valid: valid mcs rate?
161  */
162 struct dp_rate_debug {
163 	char mcs_type[DP_MAX_MCS_STRING_LEN];
164 	uint8_t valid;
165 };
166 
167 #define MCS_VALID 1
168 #define MCS_INVALID 0
169 
170 static const struct dp_rate_debug dp_rate_string[DOT11_MAX][MAX_MCS] = {
171 
172 	{
173 		{"OFDM 48 Mbps", MCS_VALID},
174 		{"OFDM 24 Mbps", MCS_VALID},
175 		{"OFDM 12 Mbps", MCS_VALID},
176 		{"OFDM 6 Mbps ", MCS_VALID},
177 		{"OFDM 54 Mbps", MCS_VALID},
178 		{"OFDM 36 Mbps", MCS_VALID},
179 		{"OFDM 18 Mbps", MCS_VALID},
180 		{"OFDM 9 Mbps ", MCS_VALID},
181 		{"INVALID ", MCS_INVALID},
182 		{"INVALID ", MCS_INVALID},
183 		{"INVALID ", MCS_INVALID},
184 		{"INVALID ", MCS_INVALID},
185 		{"INVALID ", MCS_VALID},
186 	},
187 	{
188 		{"CCK 11 Mbps Long  ", MCS_VALID},
189 		{"CCK 5.5 Mbps Long ", MCS_VALID},
190 		{"CCK 2 Mbps Long   ", MCS_VALID},
191 		{"CCK 1 Mbps Long   ", MCS_VALID},
192 		{"CCK 11 Mbps Short ", MCS_VALID},
193 		{"CCK 5.5 Mbps Short", MCS_VALID},
194 		{"CCK 2 Mbps Short  ", MCS_VALID},
195 		{"INVALID ", MCS_INVALID},
196 		{"INVALID ", MCS_INVALID},
197 		{"INVALID ", MCS_INVALID},
198 		{"INVALID ", MCS_INVALID},
199 		{"INVALID ", MCS_INVALID},
200 		{"INVALID ", MCS_VALID},
201 	},
202 	{
203 		{"HT MCS 0 (BPSK 1/2)  ", MCS_VALID},
204 		{"HT MCS 1 (QPSK 1/2)  ", MCS_VALID},
205 		{"HT MCS 2 (QPSK 3/4)  ", MCS_VALID},
206 		{"HT MCS 3 (16-QAM 1/2)", MCS_VALID},
207 		{"HT MCS 4 (16-QAM 3/4)", MCS_VALID},
208 		{"HT MCS 5 (64-QAM 2/3)", MCS_VALID},
209 		{"HT MCS 6 (64-QAM 3/4)", MCS_VALID},
210 		{"HT MCS 7 (64-QAM 5/6)", MCS_VALID},
211 		{"INVALID ", MCS_INVALID},
212 		{"INVALID ", MCS_INVALID},
213 		{"INVALID ", MCS_INVALID},
214 		{"INVALID ", MCS_INVALID},
215 		{"INVALID ", MCS_VALID},
216 	},
217 	{
218 		{"VHT MCS 0 (BPSK 1/2)     ", MCS_VALID},
219 		{"VHT MCS 1 (QPSK 1/2)     ", MCS_VALID},
220 		{"VHT MCS 2 (QPSK 3/4)     ", MCS_VALID},
221 		{"VHT MCS 3 (16-QAM 1/2)   ", MCS_VALID},
222 		{"VHT MCS 4 (16-QAM 3/4)   ", MCS_VALID},
223 		{"VHT MCS 5 (64-QAM 2/3)   ", MCS_VALID},
224 		{"VHT MCS 6 (64-QAM 3/4)   ", MCS_VALID},
225 		{"VHT MCS 7 (64-QAM 5/6)   ", MCS_VALID},
226 		{"VHT MCS 8 (256-QAM 3/4)  ", MCS_VALID},
227 		{"VHT MCS 9 (256-QAM 5/6)  ", MCS_VALID},
228 		{"VHT MCS 10 (1024-QAM 3/4)", MCS_VALID},
229 		{"VHT MCS 11 (1024-QAM 5/6)", MCS_VALID},
230 		{"INVALID ", MCS_VALID},
231 	},
232 	{
233 		{"HE MCS 0 (BPSK 1/2)     ", MCS_VALID},
234 		{"HE MCS 1 (QPSK 1/2)     ", MCS_VALID},
235 		{"HE MCS 2 (QPSK 3/4)     ", MCS_VALID},
236 		{"HE MCS 3 (16-QAM 1/2)   ", MCS_VALID},
237 		{"HE MCS 4 (16-QAM 3/4)   ", MCS_VALID},
238 		{"HE MCS 5 (64-QAM 2/3)   ", MCS_VALID},
239 		{"HE MCS 6 (64-QAM 3/4)   ", MCS_VALID},
240 		{"HE MCS 7 (64-QAM 5/6)   ", MCS_VALID},
241 		{"HE MCS 8 (256-QAM 3/4)  ", MCS_VALID},
242 		{"HE MCS 9 (256-QAM 5/6)  ", MCS_VALID},
243 		{"HE MCS 10 (1024-QAM 3/4)", MCS_VALID},
244 		{"HE MCS 11 (1024-QAM 5/6)", MCS_VALID},
245 		{"INVALID ", MCS_VALID},
246 	}
247 };
248 
249 /**
250  * dp_cpu_ring_map_type - dp tx cpu ring map
251  * @DP_NSS_DEFAULT_MAP: Default mode with no NSS offloaded
252  * @DP_NSS_FIRST_RADIO_OFFLOADED_MAP: Only First Radio is offloaded
253  * @DP_NSS_SECOND_RADIO_OFFLOADED_MAP: Only second radio is offloaded
254  * @DP_NSS_DBDC_OFFLOADED_MAP: Both radios are offloaded
255  * @DP_NSS_DBTC_OFFLOADED_MAP: All three radios are offloaded
256  * @DP_NSS_CPU_RING_MAP_MAX: Max cpu ring map val
257  */
258 enum dp_cpu_ring_map_types {
259 	DP_NSS_DEFAULT_MAP,
260 	DP_NSS_FIRST_RADIO_OFFLOADED_MAP,
261 	DP_NSS_SECOND_RADIO_OFFLOADED_MAP,
262 	DP_NSS_DBDC_OFFLOADED_MAP,
263 	DP_NSS_DBTC_OFFLOADED_MAP,
264 	DP_NSS_CPU_RING_MAP_MAX
265 };
266 
267 /**
268  * @brief Cpu to tx ring map
269  */
270 #ifdef CONFIG_WIN
271 static uint8_t
272 dp_cpu_ring_map[DP_NSS_CPU_RING_MAP_MAX][WLAN_CFG_INT_NUM_CONTEXTS] = {
273 	{0x0, 0x1, 0x2, 0x0, 0x0, 0x1, 0x2, 0x0, 0x0, 0x1, 0x2},
274 	{0x1, 0x2, 0x1, 0x2, 0x1, 0x2, 0x1, 0x2, 0x1, 0x2, 0x1},
275 	{0x0, 0x2, 0x0, 0x2, 0x0, 0x2, 0x0, 0x2, 0x0, 0x2, 0x0},
276 	{0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2},
277 	{0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3}
278 };
279 #else
280 static uint8_t
281 dp_cpu_ring_map[DP_NSS_CPU_RING_MAP_MAX][WLAN_CFG_INT_NUM_CONTEXTS] = {
282 	{0x0, 0x1, 0x2, 0x0, 0x0, 0x1, 0x2},
283 	{0x1, 0x2, 0x1, 0x2, 0x1, 0x2, 0x1},
284 	{0x0, 0x2, 0x0, 0x2, 0x0, 0x2, 0x0},
285 	{0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2},
286 	{0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3}
287 };
288 #endif
289 
290 /**
291  * @brief Select the type of statistics
292  */
293 enum dp_stats_type {
294 	STATS_FW = 0,
295 	STATS_HOST = 1,
296 	STATS_TYPE_MAX = 2,
297 };
298 
299 /**
300  * @brief General Firmware statistics options
301  *
302  */
303 enum dp_fw_stats {
304 	TXRX_FW_STATS_INVALID	= -1,
305 };
306 
307 /**
308  * dp_stats_mapping_table - Firmware and Host statistics
309  * currently supported
310  */
311 const int dp_stats_mapping_table[][STATS_TYPE_MAX] = {
312 	{HTT_DBG_EXT_STATS_RESET, TXRX_HOST_STATS_INVALID},
313 	{HTT_DBG_EXT_STATS_PDEV_TX, TXRX_HOST_STATS_INVALID},
314 	{HTT_DBG_EXT_STATS_PDEV_RX, TXRX_HOST_STATS_INVALID},
315 	{HTT_DBG_EXT_STATS_PDEV_TX_HWQ, TXRX_HOST_STATS_INVALID},
316 	{HTT_DBG_EXT_STATS_PDEV_TX_SCHED, TXRX_HOST_STATS_INVALID},
317 	{HTT_DBG_EXT_STATS_PDEV_ERROR, TXRX_HOST_STATS_INVALID},
318 	{HTT_DBG_EXT_STATS_PDEV_TQM, TXRX_HOST_STATS_INVALID},
319 	{HTT_DBG_EXT_STATS_TQM_CMDQ, TXRX_HOST_STATS_INVALID},
320 	{HTT_DBG_EXT_STATS_TX_DE_INFO, TXRX_HOST_STATS_INVALID},
321 	{HTT_DBG_EXT_STATS_PDEV_TX_RATE, TXRX_HOST_STATS_INVALID},
322 	{HTT_DBG_EXT_STATS_PDEV_RX_RATE, TXRX_HOST_STATS_INVALID},
323 	{TXRX_FW_STATS_INVALID, TXRX_HOST_STATS_INVALID},
324 	{HTT_DBG_EXT_STATS_TX_SELFGEN_INFO, TXRX_HOST_STATS_INVALID},
325 	{HTT_DBG_EXT_STATS_TX_MU_HWQ, TXRX_HOST_STATS_INVALID},
326 	{HTT_DBG_EXT_STATS_RING_IF_INFO, TXRX_HOST_STATS_INVALID},
327 	{HTT_DBG_EXT_STATS_SRNG_INFO, TXRX_HOST_STATS_INVALID},
328 	{HTT_DBG_EXT_STATS_SFM_INFO, TXRX_HOST_STATS_INVALID},
329 	{HTT_DBG_EXT_STATS_PDEV_TX_MU, TXRX_HOST_STATS_INVALID},
330 	{HTT_DBG_EXT_STATS_ACTIVE_PEERS_LIST, TXRX_HOST_STATS_INVALID},
331 	/* Last ENUM for HTT FW STATS */
332 	{DP_HTT_DBG_EXT_STATS_MAX, TXRX_HOST_STATS_INVALID},
333 	{TXRX_FW_STATS_INVALID, TXRX_CLEAR_STATS},
334 	{TXRX_FW_STATS_INVALID, TXRX_RX_RATE_STATS},
335 	{TXRX_FW_STATS_INVALID, TXRX_TX_RATE_STATS},
336 	{TXRX_FW_STATS_INVALID, TXRX_TX_HOST_STATS},
337 	{TXRX_FW_STATS_INVALID, TXRX_RX_HOST_STATS},
338 	{TXRX_FW_STATS_INVALID, TXRX_AST_STATS},
339 	{TXRX_FW_STATS_INVALID, TXRX_SRNG_PTR_STATS},
340 	{TXRX_FW_STATS_INVALID, TXRX_RX_MON_STATS},
341 	{TXRX_FW_STATS_INVALID, TXRX_REO_QUEUE_STATS},
342 	{TXRX_FW_STATS_INVALID, TXRX_SOC_CFG_PARAMS},
343 	{TXRX_FW_STATS_INVALID, TXRX_PDEV_CFG_PARAMS},
344 };
345 
346 /* MCL specific functions */
347 #ifdef CONFIG_MCL
348 /**
349  * dp_soc_get_mon_mask_for_interrupt_mode() - get mon mode mask for intr mode
350  * @soc: pointer to dp_soc handle
351  * @intr_ctx_num: interrupt context number for which mon mask is needed
352  *
353  * For MCL, monitor mode rings are being processed in timer contexts (polled).
354  * This function is returning 0, since in interrupt mode(softirq based RX),
355  * we donot want to process monitor mode rings in a softirq.
356  *
357  * So, in case packet log is enabled for SAP/STA/P2P modes,
358  * regular interrupt processing will not process monitor mode rings. It would be
359  * done in a separate timer context.
360  *
361  * Return: 0
362  */
363 static inline
364 uint32_t dp_soc_get_mon_mask_for_interrupt_mode(struct dp_soc *soc, int intr_ctx_num)
365 {
366 	return 0;
367 }
368 
369 /*
370  * dp_service_mon_rings()- timer to reap monitor rings
371  * reqd as we are not getting ppdu end interrupts
372  * @arg: SoC Handle
373  *
374  * Return:
375  *
376  */
377 static void dp_service_mon_rings(void *arg)
378 {
379 	struct dp_soc *soc = (struct dp_soc *)arg;
380 	int ring = 0, work_done, mac_id;
381 	struct dp_pdev *pdev = NULL;
382 
383 	for  (ring = 0 ; ring < MAX_PDEV_CNT; ring++) {
384 		pdev = soc->pdev_list[ring];
385 		if (!pdev)
386 			continue;
387 		for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
388 			int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id,
389 								pdev->pdev_id);
390 			work_done = dp_mon_process(soc, mac_for_pdev,
391 						   QCA_NAPI_BUDGET);
392 
393 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
394 				  FL("Reaped %d descs from Monitor rings"),
395 				  work_done);
396 		}
397 	}
398 
399 	qdf_timer_mod(&soc->mon_reap_timer, DP_INTR_POLL_TIMER_MS);
400 }
401 
402 #ifndef REMOVE_PKT_LOG
403 /**
404  * dp_pkt_log_init() - API to initialize packet log
405  * @ppdev: physical device handle
406  * @scn: HIF context
407  *
408  * Return: none
409  */
410 void dp_pkt_log_init(struct cdp_pdev *ppdev, void *scn)
411 {
412 	struct dp_pdev *handle = (struct dp_pdev *)ppdev;
413 
414 	if (handle->pkt_log_init) {
415 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
416 			  "%s: Packet log not initialized", __func__);
417 		return;
418 	}
419 
420 	pktlog_sethandle(&handle->pl_dev, scn);
421 	pktlog_set_callback_regtype(PKTLOG_DEFAULT_CALLBACK_REGISTRATION);
422 
423 	if (pktlogmod_init(scn)) {
424 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
425 			  "%s: pktlogmod_init failed", __func__);
426 		handle->pkt_log_init = false;
427 	} else {
428 		handle->pkt_log_init = true;
429 	}
430 }
431 
432 /**
433  * dp_pkt_log_con_service() - connect packet log service
434  * @ppdev: physical device handle
435  * @scn: device context
436  *
437  * Return: none
438  */
439 static void dp_pkt_log_con_service(struct cdp_pdev *ppdev, void *scn)
440 {
441 	struct dp_pdev *pdev = (struct dp_pdev *)ppdev;
442 
443 	dp_pkt_log_init((struct cdp_pdev *)pdev, scn);
444 	pktlog_htc_attach();
445 }
446 
447 /**
448  * dp_get_num_rx_contexts() - get number of RX contexts
449  * @soc_hdl: cdp opaque soc handle
450  *
451  * Return: number of RX contexts
452  */
453 static int dp_get_num_rx_contexts(struct cdp_soc_t *soc_hdl)
454 {
455 	int i;
456 	int num_rx_contexts = 0;
457 
458 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
459 
460 	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++)
461 		if (wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, i))
462 			num_rx_contexts++;
463 
464 	return num_rx_contexts;
465 }
466 
467 /**
468  * dp_pktlogmod_exit() - API to cleanup pktlog info
469  * @handle: Pdev handle
470  *
471  * Return: none
472  */
473 static void dp_pktlogmod_exit(struct dp_pdev *handle)
474 {
475 	void *scn = (void *)handle->soc->hif_handle;
476 
477 	if (!scn) {
478 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
479 			  "%s: Invalid hif(scn) handle", __func__);
480 		return;
481 	}
482 
483 	pktlogmod_exit(scn);
484 	handle->pkt_log_init = false;
485 }
486 #endif
487 #else
488 static void dp_pktlogmod_exit(struct dp_pdev *handle) { }
489 
490 /**
491  * dp_soc_get_mon_mask_for_interrupt_mode() - get mon mode mask for intr mode
492  * @soc: pointer to dp_soc handle
493  * @intr_ctx_num: interrupt context number for which mon mask is needed
494  *
495  * Return: mon mask value
496  */
497 static inline
498 uint32_t dp_soc_get_mon_mask_for_interrupt_mode(struct dp_soc *soc, int intr_ctx_num)
499 {
500 	return wlan_cfg_get_rx_mon_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
501 }
502 #endif
503 
504 /**
505  * dp_get_dp_vdev_from_cdp_vdev() - get dp_vdev from cdp_vdev by type-casting
506  * @cdp_opaque_vdev: pointer to cdp_vdev
507  *
508  * Return: pointer to dp_vdev
509  */
510 static
511 struct dp_vdev *dp_get_dp_vdev_from_cdp_vdev(struct cdp_vdev *cdp_opaque_vdev)
512 {
513 	return (struct dp_vdev *)cdp_opaque_vdev;
514 }
515 
516 
517 static int dp_peer_add_ast_wifi3(struct cdp_soc_t *soc_hdl,
518 					struct cdp_peer *peer_hdl,
519 					uint8_t *mac_addr,
520 					enum cdp_txrx_ast_entry_type type,
521 					uint32_t flags)
522 {
523 
524 	return dp_peer_add_ast((struct dp_soc *)soc_hdl,
525 				(struct dp_peer *)peer_hdl,
526 				mac_addr,
527 				type,
528 				flags);
529 }
530 
531 static int dp_peer_update_ast_wifi3(struct cdp_soc_t *soc_hdl,
532 						struct cdp_peer *peer_hdl,
533 						uint8_t *wds_macaddr,
534 						uint32_t flags)
535 {
536 	int status = -1;
537 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
538 	struct dp_ast_entry  *ast_entry = NULL;
539 	struct dp_peer *peer = (struct dp_peer *)peer_hdl;
540 
541 	qdf_spin_lock_bh(&soc->ast_lock);
542 	ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, wds_macaddr,
543 						    peer->vdev->pdev->pdev_id);
544 
545 	if (ast_entry) {
546 		status = dp_peer_update_ast(soc,
547 					    peer,
548 					    ast_entry, flags);
549 	}
550 
551 	qdf_spin_unlock_bh(&soc->ast_lock);
552 
553 	return status;
554 }
555 
556 /*
557  * dp_wds_reset_ast_wifi3() - Reset the is_active param for ast entry
558  * @soc_handle:		Datapath SOC handle
559  * @wds_macaddr:	WDS entry MAC Address
560  * Return: None
561  */
562 static void dp_wds_reset_ast_wifi3(struct cdp_soc_t *soc_hdl,
563 				   uint8_t *wds_macaddr, void *vdev_handle)
564 {
565 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
566 	struct dp_ast_entry *ast_entry = NULL;
567 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
568 
569 	qdf_spin_lock_bh(&soc->ast_lock);
570 	ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, wds_macaddr,
571 						    vdev->pdev->pdev_id);
572 
573 	if (ast_entry) {
574 		if ((ast_entry->type != CDP_TXRX_AST_TYPE_STATIC) &&
575 			(ast_entry->type != CDP_TXRX_AST_TYPE_SELF) &&
576 			(ast_entry->type != CDP_TXRX_AST_TYPE_STA_BSS)) {
577 			ast_entry->is_active = TRUE;
578 		}
579 	}
580 
581 	qdf_spin_unlock_bh(&soc->ast_lock);
582 }
583 
584 /*
585  * dp_wds_reset_ast_table_wifi3() - Reset the is_active param for all ast entry
586  * @soc:		Datapath SOC handle
587  *
588  * Return: None
589  */
590 static void dp_wds_reset_ast_table_wifi3(struct cdp_soc_t  *soc_hdl,
591 					 void *vdev_hdl)
592 {
593 	struct dp_soc *soc = (struct dp_soc *) soc_hdl;
594 	struct dp_pdev *pdev;
595 	struct dp_vdev *vdev;
596 	struct dp_peer *peer;
597 	struct dp_ast_entry *ase, *temp_ase;
598 	int i;
599 
600 	qdf_spin_lock_bh(&soc->ast_lock);
601 
602 	for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) {
603 		pdev = soc->pdev_list[i];
604 		qdf_spin_lock_bh(&pdev->vdev_list_lock);
605 		DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
606 			DP_VDEV_ITERATE_PEER_LIST(vdev, peer) {
607 				DP_PEER_ITERATE_ASE_LIST(peer, ase, temp_ase) {
608 					if ((ase->type ==
609 						CDP_TXRX_AST_TYPE_STATIC) ||
610 						(ase->type ==
611 						CDP_TXRX_AST_TYPE_SELF) ||
612 						(ase->type ==
613 						CDP_TXRX_AST_TYPE_STA_BSS))
614 						continue;
615 					ase->is_active = TRUE;
616 				}
617 			}
618 		}
619 		qdf_spin_unlock_bh(&pdev->vdev_list_lock);
620 	}
621 
622 	qdf_spin_unlock_bh(&soc->ast_lock);
623 }
624 
625 /*
626  * dp_wds_flush_ast_table_wifi3() - Delete all wds and hmwds ast entry
627  * @soc:		Datapath SOC handle
628  *
629  * Return: None
630  */
631 static void dp_wds_flush_ast_table_wifi3(struct cdp_soc_t  *soc_hdl)
632 {
633 	struct dp_soc *soc = (struct dp_soc *) soc_hdl;
634 	struct dp_pdev *pdev;
635 	struct dp_vdev *vdev;
636 	struct dp_peer *peer;
637 	struct dp_ast_entry *ase, *temp_ase;
638 	int i;
639 
640 	qdf_spin_lock_bh(&soc->ast_lock);
641 
642 	for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) {
643 		pdev = soc->pdev_list[i];
644 		qdf_spin_lock_bh(&pdev->vdev_list_lock);
645 		DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
646 			DP_VDEV_ITERATE_PEER_LIST(vdev, peer) {
647 				DP_PEER_ITERATE_ASE_LIST(peer, ase, temp_ase) {
648 					if ((ase->type ==
649 						CDP_TXRX_AST_TYPE_STATIC) ||
650 						(ase->type ==
651 						 CDP_TXRX_AST_TYPE_SELF) ||
652 						(ase->type ==
653 						 CDP_TXRX_AST_TYPE_STA_BSS))
654 						continue;
655 					dp_peer_del_ast(soc, ase);
656 				}
657 			}
658 		}
659 		qdf_spin_unlock_bh(&pdev->vdev_list_lock);
660 	}
661 
662 	qdf_spin_unlock_bh(&soc->ast_lock);
663 }
664 
665 /**
666  * dp_peer_get_ast_info_by_soc_wifi3() - search the soc AST hash table
667  *                                       and return ast entry information
668  *                                       of first ast entry found in the
669  *                                       table with given mac address
670  *
671  * @soc : data path soc handle
672  * @ast_mac_addr : AST entry mac address
673  * @ast_entry_info : ast entry information
674  *
675  * return : true if ast entry found with ast_mac_addr
676  *          false if ast entry not found
677  */
678 static bool dp_peer_get_ast_info_by_soc_wifi3
679 	(struct cdp_soc_t *soc_hdl,
680 	 uint8_t *ast_mac_addr,
681 	 struct cdp_ast_entry_info *ast_entry_info)
682 {
683 	struct dp_ast_entry *ast_entry;
684 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
685 
686 	qdf_spin_lock_bh(&soc->ast_lock);
687 
688 	ast_entry = dp_peer_ast_hash_find_soc(soc, ast_mac_addr);
689 
690 	if (ast_entry && !ast_entry->delete_in_progress) {
691 		ast_entry_info->type = ast_entry->type;
692 		ast_entry_info->pdev_id = ast_entry->pdev_id;
693 		ast_entry_info->vdev_id = ast_entry->vdev_id;
694 		ast_entry_info->peer_id = ast_entry->peer->peer_ids[0];
695 		qdf_mem_copy(&ast_entry_info->peer_mac_addr[0],
696 			     &ast_entry->peer->mac_addr.raw[0],
697 			     DP_MAC_ADDR_LEN);
698 		qdf_spin_unlock_bh(&soc->ast_lock);
699 		return true;
700 	}
701 
702 	qdf_spin_unlock_bh(&soc->ast_lock);
703 	return false;
704 }
705 
706 /**
707  * dp_peer_get_ast_info_by_pdevid_wifi3() - search the soc AST hash table
708  *                                          and return ast entry information
709  *                                          if mac address and pdev_id matches
710  *
711  * @soc : data path soc handle
712  * @ast_mac_addr : AST entry mac address
713  * @pdev_id : pdev_id
714  * @ast_entry_info : ast entry information
715  *
716  * return : true if ast entry found with ast_mac_addr
717  *          false if ast entry not found
718  */
719 static bool dp_peer_get_ast_info_by_pdevid_wifi3
720 		(struct cdp_soc_t *soc_hdl,
721 		 uint8_t *ast_mac_addr,
722 		 uint8_t pdev_id,
723 		 struct cdp_ast_entry_info *ast_entry_info)
724 {
725 	struct dp_ast_entry *ast_entry;
726 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
727 
728 	qdf_spin_lock_bh(&soc->ast_lock);
729 
730 	ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, ast_mac_addr, pdev_id);
731 
732 	if (ast_entry && !ast_entry->delete_in_progress) {
733 		ast_entry_info->type = ast_entry->type;
734 		ast_entry_info->pdev_id = ast_entry->pdev_id;
735 		ast_entry_info->vdev_id = ast_entry->vdev_id;
736 		ast_entry_info->peer_id = ast_entry->peer->peer_ids[0];
737 		qdf_mem_copy(&ast_entry_info->peer_mac_addr[0],
738 			     &ast_entry->peer->mac_addr.raw[0],
739 			     DP_MAC_ADDR_LEN);
740 		qdf_spin_unlock_bh(&soc->ast_lock);
741 		return true;
742 	}
743 
744 	qdf_spin_unlock_bh(&soc->ast_lock);
745 	return false;
746 }
747 
748 /**
749  * dp_peer_ast_entry_del_by_soc() - delete the ast entry from soc AST hash table
750  *                            with given mac address
751  *
752  * @soc : data path soc handle
753  * @ast_mac_addr : AST entry mac address
754  * @callback : callback function to called on ast delete response from FW
755  * @cookie : argument to be passed to callback
756  *
757  * return : QDF_STATUS_SUCCESS if ast entry found with ast_mac_addr and delete
758  *          is sent
759  *          QDF_STATUS_E_INVAL false if ast entry not found
760  */
761 static QDF_STATUS dp_peer_ast_entry_del_by_soc(struct cdp_soc_t *soc_handle,
762 					       uint8_t *mac_addr,
763 					       txrx_ast_free_cb callback,
764 					       void *cookie)
765 
766 {
767 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
768 	struct dp_ast_entry *ast_entry;
769 	txrx_ast_free_cb cb = NULL;
770 	void *arg = NULL;
771 
772 	qdf_spin_lock_bh(&soc->ast_lock);
773 	ast_entry = dp_peer_ast_hash_find_soc(soc, mac_addr);
774 	if (!ast_entry) {
775 		qdf_spin_unlock_bh(&soc->ast_lock);
776 		return -QDF_STATUS_E_INVAL;
777 	}
778 
779 	if (ast_entry->callback) {
780 		cb = ast_entry->callback;
781 		arg = ast_entry->cookie;
782 	}
783 
784 	ast_entry->callback = callback;
785 	ast_entry->cookie = cookie;
786 
787 	/*
788 	 * if delete_in_progress is set AST delete is sent to target
789 	 * and host is waiting for response should not send delete
790 	 * again
791 	 */
792 	if (!ast_entry->delete_in_progress)
793 		dp_peer_del_ast(soc, ast_entry);
794 
795 	qdf_spin_unlock_bh(&soc->ast_lock);
796 	if (cb) {
797 		cb(soc->ctrl_psoc,
798 		   soc,
799 		   arg,
800 		   CDP_TXRX_AST_DELETE_IN_PROGRESS);
801 	}
802 	return QDF_STATUS_SUCCESS;
803 }
804 
805 /**
806  * dp_peer_ast_entry_del_by_pdev() - delete the ast entry from soc AST hash
807  *                                   table if mac address and pdev_id matches
808  *
809  * @soc : data path soc handle
810  * @ast_mac_addr : AST entry mac address
811  * @pdev_id : pdev id
812  * @callback : callback function to called on ast delete response from FW
813  * @cookie : argument to be passed to callback
814  *
815  * return : QDF_STATUS_SUCCESS if ast entry found with ast_mac_addr and delete
816  *          is sent
817  *          QDF_STATUS_E_INVAL false if ast entry not found
818  */
819 
820 static QDF_STATUS dp_peer_ast_entry_del_by_pdev(struct cdp_soc_t *soc_handle,
821 						uint8_t *mac_addr,
822 						uint8_t pdev_id,
823 						txrx_ast_free_cb callback,
824 						void *cookie)
825 
826 {
827 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
828 	struct dp_ast_entry *ast_entry;
829 	txrx_ast_free_cb cb = NULL;
830 	void *arg = NULL;
831 
832 	qdf_spin_lock_bh(&soc->ast_lock);
833 	ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, mac_addr, pdev_id);
834 
835 	if (!ast_entry) {
836 		qdf_spin_unlock_bh(&soc->ast_lock);
837 		return -QDF_STATUS_E_INVAL;
838 	}
839 
840 	if (ast_entry->callback) {
841 		cb = ast_entry->callback;
842 		arg = ast_entry->cookie;
843 	}
844 
845 	ast_entry->callback = callback;
846 	ast_entry->cookie = cookie;
847 
848 	/*
849 	 * if delete_in_progress is set AST delete is sent to target
850 	 * and host is waiting for response should not sent delete
851 	 * again
852 	 */
853 	if (!ast_entry->delete_in_progress)
854 		dp_peer_del_ast(soc, ast_entry);
855 
856 	qdf_spin_unlock_bh(&soc->ast_lock);
857 
858 	if (cb) {
859 		cb(soc->ctrl_psoc,
860 		   soc,
861 		   arg,
862 		   CDP_TXRX_AST_DELETE_IN_PROGRESS);
863 	}
864 	return QDF_STATUS_SUCCESS;
865 }
866 
867 /**
868  * dp_srng_find_ring_in_mask() - find which ext_group a ring belongs
869  * @ring_num: ring num of the ring being queried
870  * @grp_mask: the grp_mask array for the ring type in question.
871  *
872  * The grp_mask array is indexed by group number and the bit fields correspond
873  * to ring numbers.  We are finding which interrupt group a ring belongs to.
874  *
875  * Return: the index in the grp_mask array with the ring number.
876  * -QDF_STATUS_E_NOENT if no entry is found
877  */
878 static int dp_srng_find_ring_in_mask(int ring_num, int *grp_mask)
879 {
880 	int ext_group_num;
881 	int mask = 1 << ring_num;
882 
883 	for (ext_group_num = 0; ext_group_num < WLAN_CFG_INT_NUM_CONTEXTS;
884 	     ext_group_num++) {
885 		if (mask & grp_mask[ext_group_num])
886 			return ext_group_num;
887 	}
888 
889 	return -QDF_STATUS_E_NOENT;
890 }
891 
892 static int dp_srng_calculate_msi_group(struct dp_soc *soc,
893 				       enum hal_ring_type ring_type,
894 				       int ring_num)
895 {
896 	int *grp_mask;
897 
898 	switch (ring_type) {
899 	case WBM2SW_RELEASE:
900 		/* dp_tx_comp_handler - soc->tx_comp_ring */
901 		if (ring_num < 3)
902 			grp_mask = &soc->wlan_cfg_ctx->int_tx_ring_mask[0];
903 
904 		/* dp_rx_wbm_err_process - soc->rx_rel_ring */
905 		else if (ring_num == 3) {
906 			/* sw treats this as a separate ring type */
907 			grp_mask = &soc->wlan_cfg_ctx->
908 				int_rx_wbm_rel_ring_mask[0];
909 			ring_num = 0;
910 		} else {
911 			qdf_assert(0);
912 			return -QDF_STATUS_E_NOENT;
913 		}
914 	break;
915 
916 	case REO_EXCEPTION:
917 		/* dp_rx_err_process - &soc->reo_exception_ring */
918 		grp_mask = &soc->wlan_cfg_ctx->int_rx_err_ring_mask[0];
919 	break;
920 
921 	case REO_DST:
922 		/* dp_rx_process - soc->reo_dest_ring */
923 		grp_mask = &soc->wlan_cfg_ctx->int_rx_ring_mask[0];
924 	break;
925 
926 	case REO_STATUS:
927 		/* dp_reo_status_ring_handler - soc->reo_status_ring */
928 		grp_mask = &soc->wlan_cfg_ctx->int_reo_status_ring_mask[0];
929 	break;
930 
931 	/* dp_rx_mon_status_srng_process - pdev->rxdma_mon_status_ring*/
932 	case RXDMA_MONITOR_STATUS:
933 	/* dp_rx_mon_dest_process - pdev->rxdma_mon_dst_ring */
934 	case RXDMA_MONITOR_DST:
935 		/* dp_mon_process */
936 		grp_mask = &soc->wlan_cfg_ctx->int_rx_mon_ring_mask[0];
937 	break;
938 	case RXDMA_DST:
939 		/* dp_rxdma_err_process */
940 		grp_mask = &soc->wlan_cfg_ctx->int_rxdma2host_ring_mask[0];
941 	break;
942 
943 	case RXDMA_BUF:
944 		grp_mask = &soc->wlan_cfg_ctx->int_host2rxdma_ring_mask[0];
945 	break;
946 
947 	case RXDMA_MONITOR_BUF:
948 		/* TODO: support low_thresh interrupt */
949 		return -QDF_STATUS_E_NOENT;
950 	break;
951 
952 	case TCL_DATA:
953 	case TCL_CMD:
954 	case REO_CMD:
955 	case SW2WBM_RELEASE:
956 	case WBM_IDLE_LINK:
957 		/* normally empty SW_TO_HW rings */
958 		return -QDF_STATUS_E_NOENT;
959 	break;
960 
961 	case TCL_STATUS:
962 	case REO_REINJECT:
963 		/* misc unused rings */
964 		return -QDF_STATUS_E_NOENT;
965 	break;
966 
967 	case CE_SRC:
968 	case CE_DST:
969 	case CE_DST_STATUS:
970 		/* CE_rings - currently handled by hif */
971 	default:
972 		return -QDF_STATUS_E_NOENT;
973 	break;
974 	}
975 
976 	return dp_srng_find_ring_in_mask(ring_num, grp_mask);
977 }
978 
979 static void dp_srng_msi_setup(struct dp_soc *soc, struct hal_srng_params
980 			      *ring_params, int ring_type, int ring_num)
981 {
982 	int msi_group_number;
983 	int msi_data_count;
984 	int ret;
985 	uint32_t msi_data_start, msi_irq_start, addr_low, addr_high;
986 
987 	ret = pld_get_user_msi_assignment(soc->osdev->dev, "DP",
988 					    &msi_data_count, &msi_data_start,
989 					    &msi_irq_start);
990 
991 	if (ret)
992 		return;
993 
994 	msi_group_number = dp_srng_calculate_msi_group(soc, ring_type,
995 						       ring_num);
996 	if (msi_group_number < 0) {
997 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW,
998 			FL("ring not part of an ext_group; ring_type: %d,ring_num %d"),
999 			ring_type, ring_num);
1000 		ring_params->msi_addr = 0;
1001 		ring_params->msi_data = 0;
1002 		return;
1003 	}
1004 
1005 	if (msi_group_number > msi_data_count) {
1006 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_WARN,
1007 			FL("2 msi_groups will share an msi; msi_group_num %d"),
1008 			msi_group_number);
1009 
1010 		QDF_ASSERT(0);
1011 	}
1012 
1013 	pld_get_msi_address(soc->osdev->dev, &addr_low, &addr_high);
1014 
1015 	ring_params->msi_addr = addr_low;
1016 	ring_params->msi_addr |= (qdf_dma_addr_t)(((uint64_t)addr_high) << 32);
1017 	ring_params->msi_data = (msi_group_number % msi_data_count)
1018 		+ msi_data_start;
1019 	ring_params->flags |= HAL_SRNG_MSI_INTR;
1020 }
1021 
1022 /**
1023  * dp_print_ast_stats() - Dump AST table contents
1024  * @soc: Datapath soc handle
1025  *
1026  * return void
1027  */
1028 #ifdef FEATURE_AST
1029 void dp_print_ast_stats(struct dp_soc *soc)
1030 {
1031 	uint8_t i;
1032 	uint8_t num_entries = 0;
1033 	struct dp_vdev *vdev;
1034 	struct dp_pdev *pdev;
1035 	struct dp_peer *peer;
1036 	struct dp_ast_entry *ase, *tmp_ase;
1037 	char type[CDP_TXRX_AST_TYPE_MAX][10] = {
1038 			"NONE", "STATIC", "SELF", "WDS", "MEC", "HMWDS", "BSS",
1039 			"DA", "HMWDS_SEC"};
1040 
1041 	DP_PRINT_STATS("AST Stats:");
1042 	DP_PRINT_STATS("	Entries Added   = %d", soc->stats.ast.added);
1043 	DP_PRINT_STATS("	Entries Deleted = %d", soc->stats.ast.deleted);
1044 	DP_PRINT_STATS("	Entries Agedout = %d", soc->stats.ast.aged_out);
1045 	DP_PRINT_STATS("AST Table:");
1046 
1047 	qdf_spin_lock_bh(&soc->ast_lock);
1048 	for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) {
1049 		pdev = soc->pdev_list[i];
1050 		qdf_spin_lock_bh(&pdev->vdev_list_lock);
1051 		DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
1052 			DP_VDEV_ITERATE_PEER_LIST(vdev, peer) {
1053 				DP_PEER_ITERATE_ASE_LIST(peer, ase, tmp_ase) {
1054 					DP_PRINT_STATS("%6d mac_addr = %pM"
1055 							" peer_mac_addr = %pM"
1056 							" peer_id = %u"
1057 							" type = %s"
1058 							" next_hop = %d"
1059 							" is_active = %d"
1060 							" is_bss = %d"
1061 							" ast_idx = %d"
1062 							" ast_hash = %d"
1063 							" delete_in_progress = %d"
1064 							" pdev_id = %d"
1065 							" vdev_id = %d",
1066 							++num_entries,
1067 							ase->mac_addr.raw,
1068 							ase->peer->mac_addr.raw,
1069 							ase->peer->peer_ids[0],
1070 							type[ase->type],
1071 							ase->next_hop,
1072 							ase->is_active,
1073 							ase->is_bss,
1074 							ase->ast_idx,
1075 							ase->ast_hash_value,
1076 							ase->delete_in_progress,
1077 							ase->pdev_id,
1078 							ase->vdev_id);
1079 				}
1080 			}
1081 		}
1082 		qdf_spin_unlock_bh(&pdev->vdev_list_lock);
1083 	}
1084 	qdf_spin_unlock_bh(&soc->ast_lock);
1085 }
1086 #else
1087 void dp_print_ast_stats(struct dp_soc *soc)
1088 {
1089 	DP_PRINT_STATS("AST Stats not available.Enable FEATURE_AST");
1090 	return;
1091 }
1092 #endif
1093 
1094 /**
1095  *  dp_print_peer_table() - Dump all Peer stats
1096  * @vdev: Datapath Vdev handle
1097  *
1098  * return void
1099  */
1100 static void dp_print_peer_table(struct dp_vdev *vdev)
1101 {
1102 	struct dp_peer *peer = NULL;
1103 
1104 	DP_PRINT_STATS("Dumping Peer Table  Stats:");
1105 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
1106 		if (!peer) {
1107 			DP_PRINT_STATS("Invalid Peer");
1108 			return;
1109 		}
1110 		DP_PRINT_STATS("    peer_mac_addr = %pM"
1111 			       " nawds_enabled = %d"
1112 			       " bss_peer = %d"
1113 			       " wapi = %d"
1114 			       " wds_enabled = %d"
1115 			       " delete in progress = %d"
1116 			       " peer id = %d",
1117 			       peer->mac_addr.raw,
1118 			       peer->nawds_enabled,
1119 			       peer->bss_peer,
1120 			       peer->wapi,
1121 			       peer->wds_enabled,
1122 			       peer->delete_in_progress,
1123 			       peer->peer_ids[0]);
1124 	}
1125 }
1126 
1127 /*
1128  * dp_setup_srng - Internal function to setup SRNG rings used by data path
1129  */
1130 static int dp_srng_setup(struct dp_soc *soc, struct dp_srng *srng,
1131 	int ring_type, int ring_num, int mac_id, uint32_t num_entries)
1132 {
1133 	void *hal_soc = soc->hal_soc;
1134 	uint32_t entry_size = hal_srng_get_entrysize(hal_soc, ring_type);
1135 	/* TODO: See if we should get align size from hal */
1136 	uint32_t ring_base_align = 8;
1137 	struct hal_srng_params ring_params;
1138 	uint32_t max_entries = hal_srng_max_entries(hal_soc, ring_type);
1139 
1140 	/* TODO: Currently hal layer takes care of endianness related settings.
1141 	 * See if these settings need to passed from DP layer
1142 	 */
1143 	ring_params.flags = 0;
1144 
1145 	num_entries = (num_entries > max_entries) ? max_entries : num_entries;
1146 	srng->hal_srng = NULL;
1147 	srng->alloc_size = (num_entries * entry_size) + ring_base_align - 1;
1148 	srng->num_entries = num_entries;
1149 
1150 	if (!dp_is_soc_reinit(soc)) {
1151 		srng->base_vaddr_unaligned =
1152 			qdf_mem_alloc_consistent(soc->osdev,
1153 						 soc->osdev->dev,
1154 						 srng->alloc_size,
1155 						 &srng->base_paddr_unaligned);
1156 	}
1157 
1158 	if (!srng->base_vaddr_unaligned) {
1159 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1160 			FL("alloc failed - ring_type: %d, ring_num %d"),
1161 			ring_type, ring_num);
1162 		return QDF_STATUS_E_NOMEM;
1163 	}
1164 
1165 	ring_params.ring_base_vaddr = srng->base_vaddr_unaligned +
1166 		((unsigned long)srng->base_vaddr_unaligned % ring_base_align);
1167 	ring_params.ring_base_paddr = srng->base_paddr_unaligned +
1168 		((unsigned long)(ring_params.ring_base_vaddr) -
1169 		(unsigned long)srng->base_vaddr_unaligned);
1170 	ring_params.num_entries = num_entries;
1171 
1172 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW,
1173 		  FL("Ring type: %d, num:%d vaddr %pK paddr %pK entries %u"),
1174 		  ring_type, ring_num, (void *)ring_params.ring_base_vaddr,
1175 		  (void *)ring_params.ring_base_paddr, ring_params.num_entries);
1176 
1177 	if (soc->intr_mode == DP_INTR_MSI) {
1178 		dp_srng_msi_setup(soc, &ring_params, ring_type, ring_num);
1179 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
1180 			  FL("Using MSI for ring_type: %d, ring_num %d"),
1181 			  ring_type, ring_num);
1182 
1183 	} else {
1184 		ring_params.msi_data = 0;
1185 		ring_params.msi_addr = 0;
1186 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
1187 			  FL("Skipping MSI for ring_type: %d, ring_num %d"),
1188 			  ring_type, ring_num);
1189 	}
1190 
1191 	/*
1192 	 * Setup interrupt timer and batch counter thresholds for
1193 	 * interrupt mitigation based on ring type
1194 	 */
1195 	if (ring_type == REO_DST) {
1196 		ring_params.intr_timer_thres_us =
1197 			wlan_cfg_get_int_timer_threshold_rx(soc->wlan_cfg_ctx);
1198 		ring_params.intr_batch_cntr_thres_entries =
1199 			wlan_cfg_get_int_batch_threshold_rx(soc->wlan_cfg_ctx);
1200 	} else if (ring_type == WBM2SW_RELEASE && (ring_num < 3)) {
1201 		ring_params.intr_timer_thres_us =
1202 			wlan_cfg_get_int_timer_threshold_tx(soc->wlan_cfg_ctx);
1203 		ring_params.intr_batch_cntr_thres_entries =
1204 			wlan_cfg_get_int_batch_threshold_tx(soc->wlan_cfg_ctx);
1205 	} else {
1206 		ring_params.intr_timer_thres_us =
1207 			wlan_cfg_get_int_timer_threshold_other(soc->wlan_cfg_ctx);
1208 		ring_params.intr_batch_cntr_thres_entries =
1209 			wlan_cfg_get_int_batch_threshold_other(soc->wlan_cfg_ctx);
1210 	}
1211 
1212 	/* Enable low threshold interrupts for rx buffer rings (regular and
1213 	 * monitor buffer rings.
1214 	 * TODO: See if this is required for any other ring
1215 	 */
1216 	if ((ring_type == RXDMA_BUF) || (ring_type == RXDMA_MONITOR_BUF) ||
1217 		(ring_type == RXDMA_MONITOR_STATUS)) {
1218 		/* TODO: Setting low threshold to 1/8th of ring size
1219 		 * see if this needs to be configurable
1220 		 */
1221 		ring_params.low_threshold = num_entries >> 3;
1222 		ring_params.flags |= HAL_SRNG_LOW_THRES_INTR_ENABLE;
1223 		ring_params.intr_timer_thres_us =
1224 			wlan_cfg_get_int_timer_threshold_rx(soc->wlan_cfg_ctx);
1225 		ring_params.intr_batch_cntr_thres_entries = 0;
1226 	}
1227 
1228 	srng->hal_srng = hal_srng_setup(hal_soc, ring_type, ring_num,
1229 		mac_id, &ring_params);
1230 
1231 	if (!srng->hal_srng) {
1232 		qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
1233 				srng->alloc_size,
1234 				srng->base_vaddr_unaligned,
1235 				srng->base_paddr_unaligned, 0);
1236 	}
1237 
1238 	return 0;
1239 }
1240 
1241 /*
1242  * dp_srng_deinit() - Internal function to deinit SRNG rings used by data path
1243  * @soc: DP SOC handle
1244  * @srng: source ring structure
1245  * @ring_type: type of ring
1246  * @ring_num: ring number
1247  *
1248  * Return: None
1249  */
1250 static void dp_srng_deinit(struct dp_soc *soc, struct dp_srng *srng,
1251 			   int ring_type, int ring_num)
1252 {
1253 	if (!srng->hal_srng) {
1254 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1255 			  FL("Ring type: %d, num:%d not setup"),
1256 			  ring_type, ring_num);
1257 		return;
1258 	}
1259 
1260 	hal_srng_cleanup(soc->hal_soc, srng->hal_srng);
1261 	srng->hal_srng = NULL;
1262 }
1263 
1264 /**
1265  * dp_srng_cleanup - Internal function to cleanup SRNG rings used by data path
1266  * Any buffers allocated and attached to ring entries are expected to be freed
1267  * before calling this function.
1268  */
1269 static void dp_srng_cleanup(struct dp_soc *soc, struct dp_srng *srng,
1270 	int ring_type, int ring_num)
1271 {
1272 	if (!dp_is_soc_reinit(soc)) {
1273 		if (!srng->hal_srng && (srng->alloc_size == 0)) {
1274 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1275 				  FL("Ring type: %d, num:%d not setup"),
1276 				  ring_type, ring_num);
1277 			return;
1278 		}
1279 
1280 		if (srng->hal_srng) {
1281 			hal_srng_cleanup(soc->hal_soc, srng->hal_srng);
1282 			srng->hal_srng = NULL;
1283 		}
1284 	}
1285 
1286 	if (srng->alloc_size) {
1287 		qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
1288 					srng->alloc_size,
1289 					srng->base_vaddr_unaligned,
1290 					srng->base_paddr_unaligned, 0);
1291 		srng->alloc_size = 0;
1292 	}
1293 }
1294 
1295 /* TODO: Need this interface from HIF */
1296 void *hif_get_hal_handle(void *hif_handle);
1297 
1298 /*
1299  * dp_service_srngs() - Top level interrupt handler for DP Ring interrupts
1300  * @dp_ctx: DP SOC handle
1301  * @budget: Number of frames/descriptors that can be processed in one shot
1302  *
1303  * Return: remaining budget/quota for the soc device
1304  */
1305 static uint32_t dp_service_srngs(void *dp_ctx, uint32_t dp_budget)
1306 {
1307 	struct dp_intr *int_ctx = (struct dp_intr *)dp_ctx;
1308 	struct dp_soc *soc = int_ctx->soc;
1309 	int ring = 0;
1310 	uint32_t work_done  = 0;
1311 	int budget = dp_budget;
1312 	uint8_t tx_mask = int_ctx->tx_ring_mask;
1313 	uint8_t rx_mask = int_ctx->rx_ring_mask;
1314 	uint8_t rx_err_mask = int_ctx->rx_err_ring_mask;
1315 	uint8_t rx_wbm_rel_mask = int_ctx->rx_wbm_rel_ring_mask;
1316 	uint8_t reo_status_mask = int_ctx->reo_status_ring_mask;
1317 	uint32_t remaining_quota = dp_budget;
1318 	struct dp_pdev *pdev = NULL;
1319 	int mac_id;
1320 
1321 	/* Process Tx completion interrupts first to return back buffers */
1322 	while (tx_mask) {
1323 		if (tx_mask & 0x1) {
1324 			work_done = dp_tx_comp_handler(soc,
1325 					soc->tx_comp_ring[ring].hal_srng,
1326 					remaining_quota);
1327 
1328 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
1329 				  "tx mask 0x%x ring %d, budget %d, work_done %d",
1330 				  tx_mask, ring, budget, work_done);
1331 
1332 			budget -= work_done;
1333 			if (budget <= 0)
1334 				goto budget_done;
1335 
1336 			remaining_quota = budget;
1337 		}
1338 		tx_mask = tx_mask >> 1;
1339 		ring++;
1340 	}
1341 
1342 
1343 	/* Process REO Exception ring interrupt */
1344 	if (rx_err_mask) {
1345 		work_done = dp_rx_err_process(soc,
1346 				soc->reo_exception_ring.hal_srng,
1347 				remaining_quota);
1348 
1349 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
1350 			"REO Exception Ring: work_done %d budget %d",
1351 			work_done, budget);
1352 
1353 		budget -=  work_done;
1354 		if (budget <= 0) {
1355 			goto budget_done;
1356 		}
1357 		remaining_quota = budget;
1358 	}
1359 
1360 	/* Process Rx WBM release ring interrupt */
1361 	if (rx_wbm_rel_mask) {
1362 		work_done = dp_rx_wbm_err_process(soc,
1363 				soc->rx_rel_ring.hal_srng, remaining_quota);
1364 
1365 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
1366 			"WBM Release Ring: work_done %d budget %d",
1367 			work_done, budget);
1368 
1369 		budget -=  work_done;
1370 		if (budget <= 0) {
1371 			goto budget_done;
1372 		}
1373 		remaining_quota = budget;
1374 	}
1375 
1376 	/* Process Rx interrupts */
1377 	if (rx_mask) {
1378 		for (ring = 0; ring < soc->num_reo_dest_rings; ring++) {
1379 			if (rx_mask & (1 << ring)) {
1380 				work_done = dp_rx_process(int_ctx,
1381 					    soc->reo_dest_ring[ring].hal_srng,
1382 					    ring,
1383 					    remaining_quota);
1384 
1385 				QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
1386 					"rx mask 0x%x ring %d, work_done %d budget %d",
1387 					rx_mask, ring, work_done, budget);
1388 
1389 				budget -=  work_done;
1390 				if (budget <= 0)
1391 					goto budget_done;
1392 				remaining_quota = budget;
1393 			}
1394 		}
1395 	}
1396 
1397 	if (reo_status_mask)
1398 		dp_reo_status_ring_handler(soc);
1399 
1400 	/* Process LMAC interrupts */
1401 	for  (ring = 0 ; ring < MAX_PDEV_CNT; ring++) {
1402 		pdev = soc->pdev_list[ring];
1403 		if (pdev == NULL)
1404 			continue;
1405 		for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
1406 			int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id,
1407 								pdev->pdev_id);
1408 
1409 			if (int_ctx->rx_mon_ring_mask & (1 << mac_for_pdev)) {
1410 				work_done = dp_mon_process(soc, mac_for_pdev,
1411 						remaining_quota);
1412 				budget -= work_done;
1413 				if (budget <= 0)
1414 					goto budget_done;
1415 				remaining_quota = budget;
1416 			}
1417 
1418 			if (int_ctx->rxdma2host_ring_mask &
1419 					(1 << mac_for_pdev)) {
1420 				work_done = dp_rxdma_err_process(soc,
1421 							mac_for_pdev,
1422 							remaining_quota);
1423 				budget -=  work_done;
1424 				if (budget <= 0)
1425 					goto budget_done;
1426 				remaining_quota = budget;
1427 			}
1428 
1429 			if (int_ctx->host2rxdma_ring_mask &
1430 						(1 << mac_for_pdev)) {
1431 				union dp_rx_desc_list_elem_t *desc_list = NULL;
1432 				union dp_rx_desc_list_elem_t *tail = NULL;
1433 				struct dp_srng *rx_refill_buf_ring =
1434 					&pdev->rx_refill_buf_ring;
1435 
1436 				DP_STATS_INC(pdev, replenish.low_thresh_intrs,
1437 						1);
1438 				dp_rx_buffers_replenish(soc, mac_for_pdev,
1439 					rx_refill_buf_ring,
1440 					&soc->rx_desc_buf[mac_for_pdev], 0,
1441 					&desc_list, &tail);
1442 			}
1443 		}
1444 	}
1445 
1446 	qdf_lro_flush(int_ctx->lro_ctx);
1447 
1448 budget_done:
1449 	return dp_budget - budget;
1450 }
1451 
1452 /* dp_interrupt_timer()- timer poll for interrupts
1453  *
1454  * @arg: SoC Handle
1455  *
1456  * Return:
1457  *
1458  */
1459 static void dp_interrupt_timer(void *arg)
1460 {
1461 	struct dp_soc *soc = (struct dp_soc *) arg;
1462 	int i;
1463 
1464 	if (qdf_atomic_read(&soc->cmn_init_done)) {
1465 		for (i = 0;
1466 			i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++)
1467 			dp_service_srngs(&soc->intr_ctx[i], 0xffff);
1468 
1469 		qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
1470 	}
1471 }
1472 
1473 /*
1474  * dp_soc_attach_poll() - Register handlers for DP interrupts
1475  * @txrx_soc: DP SOC handle
1476  *
1477  * Host driver will register for “DP_NUM_INTERRUPT_CONTEXTS” number of NAPI
1478  * contexts. Each NAPI context will have a tx_ring_mask , rx_ring_mask ,and
1479  * rx_monitor_ring mask to indicate the rings that are processed by the handler.
1480  *
1481  * Return: 0 for success, nonzero for failure.
1482  */
1483 static QDF_STATUS dp_soc_attach_poll(void *txrx_soc)
1484 {
1485 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
1486 	int i;
1487 
1488 	soc->intr_mode = DP_INTR_POLL;
1489 
1490 	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
1491 		soc->intr_ctx[i].dp_intr_id = i;
1492 		soc->intr_ctx[i].tx_ring_mask =
1493 			wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, i);
1494 		soc->intr_ctx[i].rx_ring_mask =
1495 			wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, i);
1496 		soc->intr_ctx[i].rx_mon_ring_mask =
1497 			wlan_cfg_get_rx_mon_ring_mask(soc->wlan_cfg_ctx, i);
1498 		soc->intr_ctx[i].rx_err_ring_mask =
1499 			wlan_cfg_get_rx_err_ring_mask(soc->wlan_cfg_ctx, i);
1500 		soc->intr_ctx[i].rx_wbm_rel_ring_mask =
1501 			wlan_cfg_get_rx_wbm_rel_ring_mask(soc->wlan_cfg_ctx, i);
1502 		soc->intr_ctx[i].reo_status_ring_mask =
1503 			wlan_cfg_get_reo_status_ring_mask(soc->wlan_cfg_ctx, i);
1504 		soc->intr_ctx[i].rxdma2host_ring_mask =
1505 			wlan_cfg_get_rxdma2host_ring_mask(soc->wlan_cfg_ctx, i);
1506 		soc->intr_ctx[i].soc = soc;
1507 		soc->intr_ctx[i].lro_ctx = qdf_lro_init();
1508 	}
1509 
1510 	qdf_timer_init(soc->osdev, &soc->int_timer,
1511 			dp_interrupt_timer, (void *)soc,
1512 			QDF_TIMER_TYPE_WAKE_APPS);
1513 
1514 	return QDF_STATUS_SUCCESS;
1515 }
1516 
1517 static QDF_STATUS dp_soc_interrupt_attach(void *txrx_soc);
1518 #if defined(CONFIG_MCL)
1519 /*
1520  * dp_soc_interrupt_attach_wrapper() - Register handlers for DP interrupts
1521  * @txrx_soc: DP SOC handle
1522  *
1523  * Call the appropriate attach function based on the mode of operation.
1524  * This is a WAR for enabling monitor mode.
1525  *
1526  * Return: 0 for success. nonzero for failure.
1527  */
1528 static QDF_STATUS dp_soc_interrupt_attach_wrapper(void *txrx_soc)
1529 {
1530 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
1531 
1532 	if (!(soc->wlan_cfg_ctx->napi_enabled) ||
1533 	     con_mode_monitor == QDF_GLOBAL_MONITOR_MODE) {
1534 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
1535 				  "%s: Poll mode", __func__);
1536 		return dp_soc_attach_poll(txrx_soc);
1537 	} else {
1538 
1539 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
1540 				  "%s: Interrupt  mode", __func__);
1541 		return dp_soc_interrupt_attach(txrx_soc);
1542 	}
1543 }
1544 #else
1545 #if defined(DP_INTR_POLL_BASED) && DP_INTR_POLL_BASED
1546 static QDF_STATUS dp_soc_interrupt_attach_wrapper(void *txrx_soc)
1547 {
1548 	return dp_soc_attach_poll(txrx_soc);
1549 }
1550 #else
1551 static QDF_STATUS dp_soc_interrupt_attach_wrapper(void *txrx_soc)
1552 {
1553 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
1554 
1555 	if (hif_is_polled_mode_enabled(soc->hif_handle))
1556 		return dp_soc_attach_poll(txrx_soc);
1557 	else
1558 		return dp_soc_interrupt_attach(txrx_soc);
1559 }
1560 #endif
1561 #endif
1562 
1563 static void dp_soc_interrupt_map_calculate_integrated(struct dp_soc *soc,
1564 		int intr_ctx_num, int *irq_id_map, int *num_irq_r)
1565 {
1566 	int j;
1567 	int num_irq = 0;
1568 
1569 	int tx_mask =
1570 		wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
1571 	int rx_mask =
1572 		wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
1573 	int rx_mon_mask =
1574 		wlan_cfg_get_rx_mon_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
1575 	int rx_err_ring_mask = wlan_cfg_get_rx_err_ring_mask(
1576 					soc->wlan_cfg_ctx, intr_ctx_num);
1577 	int rx_wbm_rel_ring_mask = wlan_cfg_get_rx_wbm_rel_ring_mask(
1578 					soc->wlan_cfg_ctx, intr_ctx_num);
1579 	int reo_status_ring_mask = wlan_cfg_get_reo_status_ring_mask(
1580 					soc->wlan_cfg_ctx, intr_ctx_num);
1581 	int rxdma2host_ring_mask = wlan_cfg_get_rxdma2host_ring_mask(
1582 					soc->wlan_cfg_ctx, intr_ctx_num);
1583 	int host2rxdma_ring_mask = wlan_cfg_get_host2rxdma_ring_mask(
1584 					soc->wlan_cfg_ctx, intr_ctx_num);
1585 	int host2rxdma_mon_ring_mask = wlan_cfg_get_host2rxdma_mon_ring_mask(
1586 					soc->wlan_cfg_ctx, intr_ctx_num);
1587 
1588 	for (j = 0; j < HIF_MAX_GRP_IRQ; j++) {
1589 
1590 		if (tx_mask & (1 << j)) {
1591 			irq_id_map[num_irq++] =
1592 				(wbm2host_tx_completions_ring1 - j);
1593 		}
1594 
1595 		if (rx_mask & (1 << j)) {
1596 			irq_id_map[num_irq++] =
1597 				(reo2host_destination_ring1 - j);
1598 		}
1599 
1600 		if (rxdma2host_ring_mask & (1 << j)) {
1601 			irq_id_map[num_irq++] =
1602 				rxdma2host_destination_ring_mac1 -
1603 				wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, j);
1604 		}
1605 
1606 		if (host2rxdma_ring_mask & (1 << j)) {
1607 			irq_id_map[num_irq++] =
1608 				host2rxdma_host_buf_ring_mac1 -
1609 				wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, j);
1610 		}
1611 
1612 		if (host2rxdma_mon_ring_mask & (1 << j)) {
1613 			irq_id_map[num_irq++] =
1614 				host2rxdma_monitor_ring1 -
1615 				wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, j);
1616 		}
1617 
1618 		if (rx_mon_mask & (1 << j)) {
1619 			irq_id_map[num_irq++] =
1620 				ppdu_end_interrupts_mac1 -
1621 				wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, j);
1622 			irq_id_map[num_irq++] =
1623 				rxdma2host_monitor_status_ring_mac1 -
1624 				wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, j);
1625 		}
1626 
1627 		if (rx_wbm_rel_ring_mask & (1 << j))
1628 			irq_id_map[num_irq++] = wbm2host_rx_release;
1629 
1630 		if (rx_err_ring_mask & (1 << j))
1631 			irq_id_map[num_irq++] = reo2host_exception;
1632 
1633 		if (reo_status_ring_mask & (1 << j))
1634 			irq_id_map[num_irq++] = reo2host_status;
1635 
1636 	}
1637 	*num_irq_r = num_irq;
1638 }
1639 
1640 static void dp_soc_interrupt_map_calculate_msi(struct dp_soc *soc,
1641 		int intr_ctx_num, int *irq_id_map, int *num_irq_r,
1642 		int msi_vector_count, int msi_vector_start)
1643 {
1644 	int tx_mask = wlan_cfg_get_tx_ring_mask(
1645 					soc->wlan_cfg_ctx, intr_ctx_num);
1646 	int rx_mask = wlan_cfg_get_rx_ring_mask(
1647 					soc->wlan_cfg_ctx, intr_ctx_num);
1648 	int rx_mon_mask = wlan_cfg_get_rx_mon_ring_mask(
1649 					soc->wlan_cfg_ctx, intr_ctx_num);
1650 	int rx_err_ring_mask = wlan_cfg_get_rx_err_ring_mask(
1651 					soc->wlan_cfg_ctx, intr_ctx_num);
1652 	int rx_wbm_rel_ring_mask = wlan_cfg_get_rx_wbm_rel_ring_mask(
1653 					soc->wlan_cfg_ctx, intr_ctx_num);
1654 	int reo_status_ring_mask = wlan_cfg_get_reo_status_ring_mask(
1655 					soc->wlan_cfg_ctx, intr_ctx_num);
1656 	int rxdma2host_ring_mask = wlan_cfg_get_rxdma2host_ring_mask(
1657 					soc->wlan_cfg_ctx, intr_ctx_num);
1658 
1659 	unsigned int vector =
1660 		(intr_ctx_num % msi_vector_count) + msi_vector_start;
1661 	int num_irq = 0;
1662 
1663 	soc->intr_mode = DP_INTR_MSI;
1664 
1665 	if (tx_mask | rx_mask | rx_mon_mask | rx_err_ring_mask |
1666 	    rx_wbm_rel_ring_mask | reo_status_ring_mask | rxdma2host_ring_mask)
1667 		irq_id_map[num_irq++] =
1668 			pld_get_msi_irq(soc->osdev->dev, vector);
1669 
1670 	*num_irq_r = num_irq;
1671 }
1672 
1673 static void dp_soc_interrupt_map_calculate(struct dp_soc *soc, int intr_ctx_num,
1674 				    int *irq_id_map, int *num_irq)
1675 {
1676 	int msi_vector_count, ret;
1677 	uint32_t msi_base_data, msi_vector_start;
1678 
1679 	ret = pld_get_user_msi_assignment(soc->osdev->dev, "DP",
1680 					    &msi_vector_count,
1681 					    &msi_base_data,
1682 					    &msi_vector_start);
1683 	if (ret)
1684 		return dp_soc_interrupt_map_calculate_integrated(soc,
1685 				intr_ctx_num, irq_id_map, num_irq);
1686 
1687 	else
1688 		dp_soc_interrupt_map_calculate_msi(soc,
1689 				intr_ctx_num, irq_id_map, num_irq,
1690 				msi_vector_count, msi_vector_start);
1691 }
1692 
1693 /*
1694  * dp_soc_interrupt_attach() - Register handlers for DP interrupts
1695  * @txrx_soc: DP SOC handle
1696  *
1697  * Host driver will register for “DP_NUM_INTERRUPT_CONTEXTS” number of NAPI
1698  * contexts. Each NAPI context will have a tx_ring_mask , rx_ring_mask ,and
1699  * rx_monitor_ring mask to indicate the rings that are processed by the handler.
1700  *
1701  * Return: 0 for success. nonzero for failure.
1702  */
1703 static QDF_STATUS dp_soc_interrupt_attach(void *txrx_soc)
1704 {
1705 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
1706 
1707 	int i = 0;
1708 	int num_irq = 0;
1709 
1710 	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
1711 		int ret = 0;
1712 
1713 		/* Map of IRQ ids registered with one interrupt context */
1714 		int irq_id_map[HIF_MAX_GRP_IRQ];
1715 
1716 		int tx_mask =
1717 			wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, i);
1718 		int rx_mask =
1719 			wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, i);
1720 		int rx_mon_mask =
1721 			dp_soc_get_mon_mask_for_interrupt_mode(soc, i);
1722 		int rx_err_ring_mask =
1723 			wlan_cfg_get_rx_err_ring_mask(soc->wlan_cfg_ctx, i);
1724 		int rx_wbm_rel_ring_mask =
1725 			wlan_cfg_get_rx_wbm_rel_ring_mask(soc->wlan_cfg_ctx, i);
1726 		int reo_status_ring_mask =
1727 			wlan_cfg_get_reo_status_ring_mask(soc->wlan_cfg_ctx, i);
1728 		int rxdma2host_ring_mask =
1729 			wlan_cfg_get_rxdma2host_ring_mask(soc->wlan_cfg_ctx, i);
1730 		int host2rxdma_ring_mask =
1731 			wlan_cfg_get_host2rxdma_ring_mask(soc->wlan_cfg_ctx, i);
1732 		int host2rxdma_mon_ring_mask =
1733 			wlan_cfg_get_host2rxdma_mon_ring_mask(
1734 				soc->wlan_cfg_ctx, i);
1735 
1736 		soc->intr_ctx[i].dp_intr_id = i;
1737 		soc->intr_ctx[i].tx_ring_mask = tx_mask;
1738 		soc->intr_ctx[i].rx_ring_mask = rx_mask;
1739 		soc->intr_ctx[i].rx_mon_ring_mask = rx_mon_mask;
1740 		soc->intr_ctx[i].rx_err_ring_mask = rx_err_ring_mask;
1741 		soc->intr_ctx[i].rxdma2host_ring_mask = rxdma2host_ring_mask;
1742 		soc->intr_ctx[i].host2rxdma_ring_mask = host2rxdma_ring_mask;
1743 		soc->intr_ctx[i].rx_wbm_rel_ring_mask = rx_wbm_rel_ring_mask;
1744 		soc->intr_ctx[i].reo_status_ring_mask = reo_status_ring_mask;
1745 		soc->intr_ctx[i].host2rxdma_mon_ring_mask =
1746 			 host2rxdma_mon_ring_mask;
1747 
1748 		soc->intr_ctx[i].soc = soc;
1749 
1750 		num_irq = 0;
1751 
1752 		dp_soc_interrupt_map_calculate(soc, i, &irq_id_map[0],
1753 					       &num_irq);
1754 
1755 		ret = hif_register_ext_group(soc->hif_handle,
1756 				num_irq, irq_id_map, dp_service_srngs,
1757 				&soc->intr_ctx[i], "dp_intr",
1758 				HIF_EXEC_NAPI_TYPE, QCA_NAPI_DEF_SCALE_BIN_SHIFT);
1759 
1760 		if (ret) {
1761 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1762 			FL("failed, ret = %d"), ret);
1763 
1764 			return QDF_STATUS_E_FAILURE;
1765 		}
1766 		soc->intr_ctx[i].lro_ctx = qdf_lro_init();
1767 	}
1768 
1769 	hif_configure_ext_group_interrupts(soc->hif_handle);
1770 
1771 	return QDF_STATUS_SUCCESS;
1772 }
1773 
1774 /*
1775  * dp_soc_interrupt_detach() - Deregister any allocations done for interrupts
1776  * @txrx_soc: DP SOC handle
1777  *
1778  * Return: void
1779  */
1780 static void dp_soc_interrupt_detach(void *txrx_soc)
1781 {
1782 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
1783 	int i;
1784 
1785 	if (soc->intr_mode == DP_INTR_POLL) {
1786 		qdf_timer_stop(&soc->int_timer);
1787 		qdf_timer_free(&soc->int_timer);
1788 	} else {
1789 		hif_deregister_exec_group(soc->hif_handle, "dp_intr");
1790 	}
1791 
1792 	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
1793 		soc->intr_ctx[i].tx_ring_mask = 0;
1794 		soc->intr_ctx[i].rx_ring_mask = 0;
1795 		soc->intr_ctx[i].rx_mon_ring_mask = 0;
1796 		soc->intr_ctx[i].rx_err_ring_mask = 0;
1797 		soc->intr_ctx[i].rx_wbm_rel_ring_mask = 0;
1798 		soc->intr_ctx[i].reo_status_ring_mask = 0;
1799 		soc->intr_ctx[i].rxdma2host_ring_mask = 0;
1800 		soc->intr_ctx[i].host2rxdma_ring_mask = 0;
1801 		soc->intr_ctx[i].host2rxdma_mon_ring_mask = 0;
1802 
1803 		qdf_lro_deinit(soc->intr_ctx[i].lro_ctx);
1804 	}
1805 }
1806 
1807 #define AVG_MAX_MPDUS_PER_TID 128
1808 #define AVG_TIDS_PER_CLIENT 2
1809 #define AVG_FLOWS_PER_TID 2
1810 #define AVG_MSDUS_PER_FLOW 128
1811 #define AVG_MSDUS_PER_MPDU 4
1812 
1813 /*
1814  * Allocate and setup link descriptor pool that will be used by HW for
1815  * various link and queue descriptors and managed by WBM
1816  */
1817 static int dp_hw_link_desc_pool_setup(struct dp_soc *soc)
1818 {
1819 	int link_desc_size = hal_get_link_desc_size(soc->hal_soc);
1820 	int link_desc_align = hal_get_link_desc_align(soc->hal_soc);
1821 	uint32_t max_clients = wlan_cfg_get_max_clients(soc->wlan_cfg_ctx);
1822 	uint32_t num_mpdus_per_link_desc =
1823 		hal_num_mpdus_per_link_desc(soc->hal_soc);
1824 	uint32_t num_msdus_per_link_desc =
1825 		hal_num_msdus_per_link_desc(soc->hal_soc);
1826 	uint32_t num_mpdu_links_per_queue_desc =
1827 		hal_num_mpdu_links_per_queue_desc(soc->hal_soc);
1828 	uint32_t max_alloc_size = wlan_cfg_max_alloc_size(soc->wlan_cfg_ctx);
1829 	uint32_t total_link_descs, total_mem_size;
1830 	uint32_t num_mpdu_link_descs, num_mpdu_queue_descs;
1831 	uint32_t num_tx_msdu_link_descs, num_rx_msdu_link_descs;
1832 	uint32_t num_link_desc_banks;
1833 	uint32_t last_bank_size = 0;
1834 	uint32_t entry_size, num_entries;
1835 	int i;
1836 	uint32_t desc_id = 0;
1837 	qdf_dma_addr_t *baseaddr = NULL;
1838 
1839 	/* Only Tx queue descriptors are allocated from common link descriptor
1840 	 * pool Rx queue descriptors are not included in this because (REO queue
1841 	 * extension descriptors) they are expected to be allocated contiguously
1842 	 * with REO queue descriptors
1843 	 */
1844 	num_mpdu_link_descs = (max_clients * AVG_TIDS_PER_CLIENT *
1845 		AVG_MAX_MPDUS_PER_TID) / num_mpdus_per_link_desc;
1846 
1847 	num_mpdu_queue_descs = num_mpdu_link_descs /
1848 		num_mpdu_links_per_queue_desc;
1849 
1850 	num_tx_msdu_link_descs = (max_clients * AVG_TIDS_PER_CLIENT *
1851 		AVG_FLOWS_PER_TID * AVG_MSDUS_PER_FLOW) /
1852 		num_msdus_per_link_desc;
1853 
1854 	num_rx_msdu_link_descs = (max_clients * AVG_TIDS_PER_CLIENT *
1855 		AVG_MAX_MPDUS_PER_TID * AVG_MSDUS_PER_MPDU) / 6;
1856 
1857 	num_entries = num_mpdu_link_descs + num_mpdu_queue_descs +
1858 		num_tx_msdu_link_descs + num_rx_msdu_link_descs;
1859 
1860 	/* Round up to power of 2 */
1861 	total_link_descs = 1;
1862 	while (total_link_descs < num_entries)
1863 		total_link_descs <<= 1;
1864 
1865 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
1866 		FL("total_link_descs: %u, link_desc_size: %d"),
1867 		total_link_descs, link_desc_size);
1868 	total_mem_size =  total_link_descs * link_desc_size;
1869 
1870 	total_mem_size += link_desc_align;
1871 
1872 	if (total_mem_size <= max_alloc_size) {
1873 		num_link_desc_banks = 0;
1874 		last_bank_size = total_mem_size;
1875 	} else {
1876 		num_link_desc_banks = (total_mem_size) /
1877 			(max_alloc_size - link_desc_align);
1878 		last_bank_size = total_mem_size %
1879 			(max_alloc_size - link_desc_align);
1880 	}
1881 
1882 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
1883 		FL("total_mem_size: %d, num_link_desc_banks: %u"),
1884 		total_mem_size, num_link_desc_banks);
1885 
1886 	for (i = 0; i < num_link_desc_banks; i++) {
1887 		if (!dp_is_soc_reinit(soc)) {
1888 			baseaddr = &soc->link_desc_banks[i].
1889 					base_paddr_unaligned;
1890 			soc->link_desc_banks[i].base_vaddr_unaligned =
1891 				qdf_mem_alloc_consistent(soc->osdev,
1892 							 soc->osdev->dev,
1893 							 max_alloc_size,
1894 							 baseaddr);
1895 		}
1896 		soc->link_desc_banks[i].size = max_alloc_size;
1897 
1898 		soc->link_desc_banks[i].base_vaddr = (void *)((unsigned long)(
1899 			soc->link_desc_banks[i].base_vaddr_unaligned) +
1900 			((unsigned long)(
1901 			soc->link_desc_banks[i].base_vaddr_unaligned) %
1902 			link_desc_align));
1903 
1904 		soc->link_desc_banks[i].base_paddr = (unsigned long)(
1905 			soc->link_desc_banks[i].base_paddr_unaligned) +
1906 			((unsigned long)(soc->link_desc_banks[i].base_vaddr) -
1907 			(unsigned long)(
1908 			soc->link_desc_banks[i].base_vaddr_unaligned));
1909 
1910 		if (!soc->link_desc_banks[i].base_vaddr_unaligned) {
1911 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1912 				FL("Link descriptor memory alloc failed"));
1913 			goto fail;
1914 		}
1915 	}
1916 
1917 	if (last_bank_size) {
1918 		/* Allocate last bank in case total memory required is not exact
1919 		 * multiple of max_alloc_size
1920 		 */
1921 		if (!dp_is_soc_reinit(soc)) {
1922 			baseaddr = &soc->link_desc_banks[i].
1923 					base_paddr_unaligned;
1924 			soc->link_desc_banks[i].base_vaddr_unaligned =
1925 				qdf_mem_alloc_consistent(soc->osdev,
1926 							 soc->osdev->dev,
1927 							 last_bank_size,
1928 							 baseaddr);
1929 		}
1930 		soc->link_desc_banks[i].size = last_bank_size;
1931 
1932 		soc->link_desc_banks[i].base_vaddr = (void *)((unsigned long)
1933 			(soc->link_desc_banks[i].base_vaddr_unaligned) +
1934 			((unsigned long)(
1935 			soc->link_desc_banks[i].base_vaddr_unaligned) %
1936 			link_desc_align));
1937 
1938 		soc->link_desc_banks[i].base_paddr =
1939 			(unsigned long)(
1940 			soc->link_desc_banks[i].base_paddr_unaligned) +
1941 			((unsigned long)(soc->link_desc_banks[i].base_vaddr) -
1942 			(unsigned long)(
1943 			soc->link_desc_banks[i].base_vaddr_unaligned));
1944 	}
1945 
1946 
1947 	/* Allocate and setup link descriptor idle list for HW internal use */
1948 	entry_size = hal_srng_get_entrysize(soc->hal_soc, WBM_IDLE_LINK);
1949 	total_mem_size = entry_size * total_link_descs;
1950 
1951 	if (total_mem_size <= max_alloc_size) {
1952 		void *desc;
1953 
1954 		if (dp_srng_setup(soc, &soc->wbm_idle_link_ring,
1955 			WBM_IDLE_LINK, 0, 0, total_link_descs)) {
1956 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1957 				FL("Link desc idle ring setup failed"));
1958 			goto fail;
1959 		}
1960 
1961 		hal_srng_access_start_unlocked(soc->hal_soc,
1962 			soc->wbm_idle_link_ring.hal_srng);
1963 
1964 		for (i = 0; i < MAX_LINK_DESC_BANKS &&
1965 			soc->link_desc_banks[i].base_paddr; i++) {
1966 			uint32_t num_entries = (soc->link_desc_banks[i].size -
1967 				((unsigned long)(
1968 				soc->link_desc_banks[i].base_vaddr) -
1969 				(unsigned long)(
1970 				soc->link_desc_banks[i].base_vaddr_unaligned)))
1971 				/ link_desc_size;
1972 			unsigned long paddr = (unsigned long)(
1973 				soc->link_desc_banks[i].base_paddr);
1974 
1975 			while (num_entries && (desc = hal_srng_src_get_next(
1976 				soc->hal_soc,
1977 				soc->wbm_idle_link_ring.hal_srng))) {
1978 				hal_set_link_desc_addr(desc,
1979 					LINK_DESC_COOKIE(desc_id, i), paddr);
1980 				num_entries--;
1981 				desc_id++;
1982 				paddr += link_desc_size;
1983 			}
1984 		}
1985 		hal_srng_access_end_unlocked(soc->hal_soc,
1986 			soc->wbm_idle_link_ring.hal_srng);
1987 	} else {
1988 		uint32_t num_scatter_bufs;
1989 		uint32_t num_entries_per_buf;
1990 		uint32_t rem_entries;
1991 		uint8_t *scatter_buf_ptr;
1992 		uint16_t scatter_buf_num;
1993 		uint32_t buf_size = 0;
1994 
1995 		soc->wbm_idle_scatter_buf_size =
1996 			hal_idle_list_scatter_buf_size(soc->hal_soc);
1997 		num_entries_per_buf = hal_idle_scatter_buf_num_entries(
1998 			soc->hal_soc, soc->wbm_idle_scatter_buf_size);
1999 		num_scatter_bufs = hal_idle_list_num_scatter_bufs(
2000 					soc->hal_soc, total_mem_size,
2001 					soc->wbm_idle_scatter_buf_size);
2002 
2003 		if (num_scatter_bufs > MAX_IDLE_SCATTER_BUFS) {
2004 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2005 					FL("scatter bufs size out of bounds"));
2006 			goto fail;
2007 		}
2008 
2009 		for (i = 0; i < num_scatter_bufs; i++) {
2010 			baseaddr = &soc->wbm_idle_scatter_buf_base_paddr[i];
2011 			if (!dp_is_soc_reinit(soc)) {
2012 				buf_size = soc->wbm_idle_scatter_buf_size;
2013 				soc->wbm_idle_scatter_buf_base_vaddr[i] =
2014 					qdf_mem_alloc_consistent(soc->osdev,
2015 								 soc->osdev->
2016 								 dev,
2017 								 buf_size,
2018 								 baseaddr);
2019 			}
2020 			if (soc->wbm_idle_scatter_buf_base_vaddr[i] == NULL) {
2021 				QDF_TRACE(QDF_MODULE_ID_DP,
2022 					  QDF_TRACE_LEVEL_ERROR,
2023 					  FL("Scatter lst memory alloc fail"));
2024 				goto fail;
2025 			}
2026 		}
2027 
2028 		/* Populate idle list scatter buffers with link descriptor
2029 		 * pointers
2030 		 */
2031 		scatter_buf_num = 0;
2032 		scatter_buf_ptr = (uint8_t *)(
2033 			soc->wbm_idle_scatter_buf_base_vaddr[scatter_buf_num]);
2034 		rem_entries = num_entries_per_buf;
2035 
2036 		for (i = 0; i < MAX_LINK_DESC_BANKS &&
2037 			soc->link_desc_banks[i].base_paddr; i++) {
2038 			uint32_t num_link_descs =
2039 				(soc->link_desc_banks[i].size -
2040 				((unsigned long)(
2041 				soc->link_desc_banks[i].base_vaddr) -
2042 				(unsigned long)(
2043 				soc->link_desc_banks[i].base_vaddr_unaligned)))
2044 				/ link_desc_size;
2045 			unsigned long paddr = (unsigned long)(
2046 				soc->link_desc_banks[i].base_paddr);
2047 
2048 			while (num_link_descs) {
2049 				hal_set_link_desc_addr((void *)scatter_buf_ptr,
2050 					LINK_DESC_COOKIE(desc_id, i), paddr);
2051 				num_link_descs--;
2052 				desc_id++;
2053 				paddr += link_desc_size;
2054 				rem_entries--;
2055 				if (rem_entries) {
2056 					scatter_buf_ptr += entry_size;
2057 				} else {
2058 					rem_entries = num_entries_per_buf;
2059 					scatter_buf_num++;
2060 
2061 					if (scatter_buf_num >= num_scatter_bufs)
2062 						break;
2063 
2064 					scatter_buf_ptr = (uint8_t *)(
2065 						soc->wbm_idle_scatter_buf_base_vaddr[
2066 						scatter_buf_num]);
2067 				}
2068 			}
2069 		}
2070 		/* Setup link descriptor idle list in HW */
2071 		hal_setup_link_idle_list(soc->hal_soc,
2072 			soc->wbm_idle_scatter_buf_base_paddr,
2073 			soc->wbm_idle_scatter_buf_base_vaddr,
2074 			num_scatter_bufs, soc->wbm_idle_scatter_buf_size,
2075 			(uint32_t)(scatter_buf_ptr -
2076 			(uint8_t *)(soc->wbm_idle_scatter_buf_base_vaddr[
2077 			scatter_buf_num-1])), total_link_descs);
2078 	}
2079 	return 0;
2080 
2081 fail:
2082 	if (soc->wbm_idle_link_ring.hal_srng) {
2083 		dp_srng_cleanup(soc, &soc->wbm_idle_link_ring,
2084 				WBM_IDLE_LINK, 0);
2085 	}
2086 
2087 	for (i = 0; i < MAX_IDLE_SCATTER_BUFS; i++) {
2088 		if (soc->wbm_idle_scatter_buf_base_vaddr[i]) {
2089 			qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
2090 				soc->wbm_idle_scatter_buf_size,
2091 				soc->wbm_idle_scatter_buf_base_vaddr[i],
2092 				soc->wbm_idle_scatter_buf_base_paddr[i], 0);
2093 			soc->wbm_idle_scatter_buf_base_vaddr[i] = NULL;
2094 		}
2095 	}
2096 
2097 	for (i = 0; i < MAX_LINK_DESC_BANKS; i++) {
2098 		if (soc->link_desc_banks[i].base_vaddr_unaligned) {
2099 			qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
2100 				soc->link_desc_banks[i].size,
2101 				soc->link_desc_banks[i].base_vaddr_unaligned,
2102 				soc->link_desc_banks[i].base_paddr_unaligned,
2103 				0);
2104 			soc->link_desc_banks[i].base_vaddr_unaligned = NULL;
2105 		}
2106 	}
2107 	return QDF_STATUS_E_FAILURE;
2108 }
2109 
2110 /*
2111  * Free link descriptor pool that was setup HW
2112  */
2113 static void dp_hw_link_desc_pool_cleanup(struct dp_soc *soc)
2114 {
2115 	int i;
2116 
2117 	if (soc->wbm_idle_link_ring.hal_srng) {
2118 		dp_srng_cleanup(soc, &soc->wbm_idle_link_ring,
2119 			WBM_IDLE_LINK, 0);
2120 	}
2121 
2122 	for (i = 0; i < MAX_IDLE_SCATTER_BUFS; i++) {
2123 		if (soc->wbm_idle_scatter_buf_base_vaddr[i]) {
2124 			qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
2125 				soc->wbm_idle_scatter_buf_size,
2126 				soc->wbm_idle_scatter_buf_base_vaddr[i],
2127 				soc->wbm_idle_scatter_buf_base_paddr[i], 0);
2128 			soc->wbm_idle_scatter_buf_base_vaddr[i] = NULL;
2129 		}
2130 	}
2131 
2132 	for (i = 0; i < MAX_LINK_DESC_BANKS; i++) {
2133 		if (soc->link_desc_banks[i].base_vaddr_unaligned) {
2134 			qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
2135 				soc->link_desc_banks[i].size,
2136 				soc->link_desc_banks[i].base_vaddr_unaligned,
2137 				soc->link_desc_banks[i].base_paddr_unaligned,
2138 				0);
2139 			soc->link_desc_banks[i].base_vaddr_unaligned = NULL;
2140 		}
2141 	}
2142 }
2143 
2144 #ifdef IPA_OFFLOAD
2145 #define REO_DST_RING_SIZE_QCA6290 1023
2146 #ifndef QCA_WIFI_QCA8074_VP
2147 #define REO_DST_RING_SIZE_QCA8074 1023
2148 #else
2149 #define REO_DST_RING_SIZE_QCA8074 8
2150 #endif /* QCA_WIFI_QCA8074_VP */
2151 
2152 #else
2153 
2154 #define REO_DST_RING_SIZE_QCA6290 1024
2155 #ifndef QCA_WIFI_QCA8074_VP
2156 #define REO_DST_RING_SIZE_QCA8074 2048
2157 #else
2158 #define REO_DST_RING_SIZE_QCA8074 8
2159 #endif /* QCA_WIFI_QCA8074_VP */
2160 #endif /* IPA_OFFLOAD */
2161 
2162 /*
2163  * dp_ast_aging_timer_fn() - Timer callback function for WDS aging
2164  * @soc: Datapath SOC handle
2165  *
2166  * This is a timer function used to age out stale AST nodes from
2167  * AST table
2168  */
2169 #ifdef FEATURE_WDS
2170 static void dp_ast_aging_timer_fn(void *soc_hdl)
2171 {
2172 	struct dp_soc *soc = (struct dp_soc *) soc_hdl;
2173 	struct dp_pdev *pdev;
2174 	struct dp_vdev *vdev;
2175 	struct dp_peer *peer;
2176 	struct dp_ast_entry *ase, *temp_ase;
2177 	int i;
2178 	bool check_wds_ase = false;
2179 
2180 	if (soc->wds_ast_aging_timer_cnt++ >= DP_WDS_AST_AGING_TIMER_CNT) {
2181 		soc->wds_ast_aging_timer_cnt = 0;
2182 		check_wds_ase = true;
2183 	}
2184 
2185 	 /* Peer list access lock */
2186 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
2187 
2188 	/* AST list access lock */
2189 	qdf_spin_lock_bh(&soc->ast_lock);
2190 
2191 	for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) {
2192 		pdev = soc->pdev_list[i];
2193 		qdf_spin_lock_bh(&pdev->vdev_list_lock);
2194 		DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
2195 			DP_VDEV_ITERATE_PEER_LIST(vdev, peer) {
2196 				DP_PEER_ITERATE_ASE_LIST(peer, ase, temp_ase) {
2197 					/*
2198 					 * Do not expire static ast entries
2199 					 * and HM WDS entries
2200 					 */
2201 					if (ase->type !=
2202 					    CDP_TXRX_AST_TYPE_WDS &&
2203 					    ase->type !=
2204 					    CDP_TXRX_AST_TYPE_MEC &&
2205 					    ase->type !=
2206 					    CDP_TXRX_AST_TYPE_DA)
2207 						continue;
2208 
2209 					/* Expire MEC entry every n sec.
2210 					 * This needs to be expired in
2211 					 * case if STA backbone is made as
2212 					 * AP backbone, In this case it needs
2213 					 * to be re-added as a WDS entry.
2214 					 */
2215 					if (ase->is_active && ase->type ==
2216 					    CDP_TXRX_AST_TYPE_MEC) {
2217 						ase->is_active = FALSE;
2218 						continue;
2219 					} else if (ase->is_active &&
2220 						   check_wds_ase) {
2221 						ase->is_active = FALSE;
2222 						continue;
2223 					}
2224 
2225 					if (ase->type ==
2226 					    CDP_TXRX_AST_TYPE_MEC) {
2227 						DP_STATS_INC(soc,
2228 							     ast.aged_out, 1);
2229 						dp_peer_del_ast(soc, ase);
2230 					} else if (check_wds_ase) {
2231 						DP_STATS_INC(soc,
2232 							     ast.aged_out, 1);
2233 						dp_peer_del_ast(soc, ase);
2234 					}
2235 				}
2236 			}
2237 		}
2238 		qdf_spin_unlock_bh(&pdev->vdev_list_lock);
2239 	}
2240 
2241 	qdf_spin_unlock_bh(&soc->ast_lock);
2242 	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
2243 
2244 	if (qdf_atomic_read(&soc->cmn_init_done))
2245 		qdf_timer_mod(&soc->ast_aging_timer,
2246 			      DP_AST_AGING_TIMER_DEFAULT_MS);
2247 }
2248 
2249 
2250 /*
2251  * dp_soc_wds_attach() - Setup WDS timer and AST table
2252  * @soc:		Datapath SOC handle
2253  *
2254  * Return: None
2255  */
2256 static void dp_soc_wds_attach(struct dp_soc *soc)
2257 {
2258 	soc->wds_ast_aging_timer_cnt = 0;
2259 	qdf_timer_init(soc->osdev, &soc->ast_aging_timer,
2260 		       dp_ast_aging_timer_fn, (void *)soc,
2261 		       QDF_TIMER_TYPE_WAKE_APPS);
2262 
2263 	qdf_timer_mod(&soc->ast_aging_timer, DP_AST_AGING_TIMER_DEFAULT_MS);
2264 }
2265 
2266 /*
2267  * dp_soc_wds_detach() - Detach WDS data structures and timers
2268  * @txrx_soc: DP SOC handle
2269  *
2270  * Return: None
2271  */
2272 static void dp_soc_wds_detach(struct dp_soc *soc)
2273 {
2274 	qdf_timer_stop(&soc->ast_aging_timer);
2275 	qdf_timer_free(&soc->ast_aging_timer);
2276 }
2277 #else
2278 static void dp_soc_wds_attach(struct dp_soc *soc)
2279 {
2280 }
2281 
2282 static void dp_soc_wds_detach(struct dp_soc *soc)
2283 {
2284 }
2285 #endif
2286 
2287 /*
2288  * dp_soc_reset_ring_map() - Reset cpu ring map
2289  * @soc: Datapath soc handler
2290  *
2291  * This api resets the default cpu ring map
2292  */
2293 
2294 static void dp_soc_reset_cpu_ring_map(struct dp_soc *soc)
2295 {
2296 	uint8_t i;
2297 	int nss_config = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
2298 
2299 	for (i = 0; i < WLAN_CFG_INT_NUM_CONTEXTS; i++) {
2300 		switch (nss_config) {
2301 		case dp_nss_cfg_first_radio:
2302 			/*
2303 			 * Setting Tx ring map for one nss offloaded radio
2304 			 */
2305 			soc->tx_ring_map[i] = dp_cpu_ring_map[DP_NSS_FIRST_RADIO_OFFLOADED_MAP][i];
2306 			break;
2307 
2308 		case dp_nss_cfg_second_radio:
2309 			/*
2310 			 * Setting Tx ring for two nss offloaded radios
2311 			 */
2312 			soc->tx_ring_map[i] = dp_cpu_ring_map[DP_NSS_SECOND_RADIO_OFFLOADED_MAP][i];
2313 			break;
2314 
2315 		case dp_nss_cfg_dbdc:
2316 			/*
2317 			 * Setting Tx ring map for 2 nss offloaded radios
2318 			 */
2319 			soc->tx_ring_map[i] =
2320 				dp_cpu_ring_map[DP_NSS_DBDC_OFFLOADED_MAP][i];
2321 			break;
2322 
2323 		case dp_nss_cfg_dbtc:
2324 			/*
2325 			 * Setting Tx ring map for 3 nss offloaded radios
2326 			 */
2327 			soc->tx_ring_map[i] =
2328 				dp_cpu_ring_map[DP_NSS_DBTC_OFFLOADED_MAP][i];
2329 			break;
2330 
2331 		default:
2332 			dp_err("tx_ring_map failed due to invalid nss cfg");
2333 			break;
2334 		}
2335 	}
2336 }
2337 
2338 /*
2339  * dp_soc_ring_if_nss_offloaded() - find if ring is offloaded to NSS
2340  * @dp_soc - DP soc handle
2341  * @ring_type - ring type
2342  * @ring_num - ring_num
2343  *
2344  * return 0 or 1
2345  */
2346 static uint8_t dp_soc_ring_if_nss_offloaded(struct dp_soc *soc, enum hal_ring_type ring_type, int ring_num)
2347 {
2348 	uint8_t nss_config = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
2349 	uint8_t status = 0;
2350 
2351 	switch (ring_type) {
2352 	case WBM2SW_RELEASE:
2353 	case REO_DST:
2354 	case RXDMA_BUF:
2355 		status = ((nss_config) & (1 << ring_num));
2356 		break;
2357 	default:
2358 		break;
2359 	}
2360 
2361 	return status;
2362 }
2363 
2364 /*
2365  * dp_soc_reset_intr_mask() - reset interrupt mask
2366  * @dp_soc - DP Soc handle
2367  *
2368  * Return: Return void
2369  */
2370 static void dp_soc_reset_intr_mask(struct dp_soc *soc)
2371 {
2372 	uint8_t j;
2373 	int *grp_mask = NULL;
2374 	int group_number, mask, num_ring;
2375 
2376 	/* number of tx ring */
2377 	num_ring = wlan_cfg_num_tcl_data_rings(soc->wlan_cfg_ctx);
2378 
2379 	/*
2380 	 * group mask for tx completion  ring.
2381 	 */
2382 	grp_mask =  &soc->wlan_cfg_ctx->int_tx_ring_mask[0];
2383 
2384 	/* loop and reset the mask for only offloaded ring */
2385 	for (j = 0; j < num_ring; j++) {
2386 		if (!dp_soc_ring_if_nss_offloaded(soc, WBM2SW_RELEASE, j)) {
2387 			continue;
2388 		}
2389 
2390 		/*
2391 		 * Group number corresponding to tx offloaded ring.
2392 		 */
2393 		group_number = dp_srng_find_ring_in_mask(j, grp_mask);
2394 		if (group_number < 0) {
2395 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2396 					FL("ring not part of any group; ring_type: %d,ring_num %d"),
2397 					WBM2SW_RELEASE, j);
2398 			return;
2399 		}
2400 
2401 		/* reset the tx mask for offloaded ring */
2402 		mask = wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, group_number);
2403 		mask &= (~(1 << j));
2404 
2405 		/*
2406 		 * reset the interrupt mask for offloaded ring.
2407 		 */
2408 		wlan_cfg_set_tx_ring_mask(soc->wlan_cfg_ctx, group_number, mask);
2409 	}
2410 
2411 	/* number of rx rings */
2412 	num_ring = wlan_cfg_num_reo_dest_rings(soc->wlan_cfg_ctx);
2413 
2414 	/*
2415 	 * group mask for reo destination ring.
2416 	 */
2417 	grp_mask = &soc->wlan_cfg_ctx->int_rx_ring_mask[0];
2418 
2419 	/* loop and reset the mask for only offloaded ring */
2420 	for (j = 0; j < num_ring; j++) {
2421 		if (!dp_soc_ring_if_nss_offloaded(soc, REO_DST, j)) {
2422 			continue;
2423 		}
2424 
2425 		/*
2426 		 * Group number corresponding to rx offloaded ring.
2427 		 */
2428 		group_number = dp_srng_find_ring_in_mask(j, grp_mask);
2429 		if (group_number < 0) {
2430 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2431 					FL("ring not part of any group; ring_type: %d,ring_num %d"),
2432 					REO_DST, j);
2433 			return;
2434 		}
2435 
2436 		/* set the interrupt mask for offloaded ring */
2437 		mask =  wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, group_number);
2438 		mask &= (~(1 << j));
2439 
2440 		/*
2441 		 * set the interrupt mask to zero for rx offloaded radio.
2442 		 */
2443 		wlan_cfg_set_rx_ring_mask(soc->wlan_cfg_ctx, group_number, mask);
2444 	}
2445 
2446 	/*
2447 	 * group mask for Rx buffer refill ring
2448 	 */
2449 	grp_mask = &soc->wlan_cfg_ctx->int_host2rxdma_ring_mask[0];
2450 
2451 	/* loop and reset the mask for only offloaded ring */
2452 	for (j = 0; j < MAX_PDEV_CNT; j++) {
2453 		if (!dp_soc_ring_if_nss_offloaded(soc, RXDMA_BUF, j)) {
2454 			continue;
2455 		}
2456 
2457 		/*
2458 		 * Group number corresponding to rx offloaded ring.
2459 		 */
2460 		group_number = dp_srng_find_ring_in_mask(j, grp_mask);
2461 		if (group_number < 0) {
2462 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2463 					FL("ring not part of any group; ring_type: %d,ring_num %d"),
2464 					REO_DST, j);
2465 			return;
2466 		}
2467 
2468 		/* set the interrupt mask for offloaded ring */
2469 		mask =  wlan_cfg_get_host2rxdma_ring_mask(soc->wlan_cfg_ctx,
2470 				group_number);
2471 		mask &= (~(1 << j));
2472 
2473 		/*
2474 		 * set the interrupt mask to zero for rx offloaded radio.
2475 		 */
2476 		wlan_cfg_set_host2rxdma_ring_mask(soc->wlan_cfg_ctx,
2477 			group_number, mask);
2478 	}
2479 }
2480 
2481 #ifdef IPA_OFFLOAD
2482 /**
2483  * dp_reo_remap_config() - configure reo remap register value based
2484  *                         nss configuration.
2485  *		based on offload_radio value below remap configuration
2486  *		get applied.
2487  *		0 - both Radios handled by host (remap rings 1, 2, 3 & 4)
2488  *		1 - 1st Radio handled by NSS (remap rings 2, 3 & 4)
2489  *		2 - 2nd Radio handled by NSS (remap rings 1, 2 & 4)
2490  *		3 - both Radios handled by NSS (remap not required)
2491  *		4 - IPA OFFLOAD enabled (remap rings 1,2 & 3)
2492  *
2493  * @remap1: output parameter indicates reo remap 1 register value
2494  * @remap2: output parameter indicates reo remap 2 register value
2495  * Return: bool type, true if remap is configured else false.
2496  */
2497 static bool dp_reo_remap_config(struct dp_soc *soc,
2498 				uint32_t *remap1,
2499 				uint32_t *remap2)
2500 {
2501 	*remap1 = ((0x1 << 0) | (0x2 << 3) | (0x3 << 6) | (0x1 << 9) |
2502 		(0x2 << 12) | (0x3 << 15) | (0x1 << 18) | (0x2 << 21)) << 8;
2503 
2504 	*remap2 = ((0x3 << 0) | (0x1 << 3) | (0x2 << 6) | (0x3 << 9) |
2505 		(0x1 << 12) | (0x2 << 15) | (0x3 << 18) | (0x1 << 21)) << 8;
2506 
2507 	dp_debug("remap1 %x remap2 %x", *remap1, *remap2);
2508 
2509 	return true;
2510 }
2511 #else
2512 static bool dp_reo_remap_config(struct dp_soc *soc,
2513 				uint32_t *remap1,
2514 				uint32_t *remap2)
2515 {
2516 	uint8_t offload_radio = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
2517 
2518 	switch (offload_radio) {
2519 	case dp_nss_cfg_default:
2520 		*remap1 = ((0x1 << 0) | (0x2 << 3) | (0x3 << 6) |
2521 			(0x4 << 9) | (0x1 << 12) | (0x2 << 15) |
2522 			(0x3 << 18) | (0x4 << 21)) << 8;
2523 
2524 		*remap2 = ((0x1 << 0) | (0x2 << 3) | (0x3 << 6) |
2525 			(0x4 << 9) | (0x1 << 12) | (0x2 << 15) |
2526 			(0x3 << 18) | (0x4 << 21)) << 8;
2527 		break;
2528 	case dp_nss_cfg_first_radio:
2529 		*remap1 = ((0x2 << 0) | (0x3 << 3) | (0x4 << 6) |
2530 			(0x2 << 9) | (0x3 << 12) | (0x4 << 15) |
2531 			(0x2 << 18) | (0x3 << 21)) << 8;
2532 
2533 		*remap2 = ((0x4 << 0) | (0x2 << 3) | (0x3 << 6) |
2534 			(0x4 << 9) | (0x2 << 12) | (0x3 << 15) |
2535 			(0x4 << 18) | (0x2 << 21)) << 8;
2536 		break;
2537 
2538 	case dp_nss_cfg_second_radio:
2539 		*remap1 = ((0x1 << 0) | (0x3 << 3) | (0x4 << 6) |
2540 			(0x1 << 9) | (0x3 << 12) | (0x4 << 15) |
2541 			(0x1 << 18) | (0x3 << 21)) << 8;
2542 
2543 		*remap2 = ((0x4 << 0) | (0x1 << 3) | (0x3 << 6) |
2544 			(0x4 << 9) | (0x1 << 12) | (0x3 << 15) |
2545 			(0x4 << 18) | (0x1 << 21)) << 8;
2546 		break;
2547 
2548 	case dp_nss_cfg_dbdc:
2549 	case dp_nss_cfg_dbtc:
2550 		/* return false if both or all are offloaded to NSS */
2551 		return false;
2552 	}
2553 
2554 	dp_debug("remap1 %x remap2 %x offload_radio %u",
2555 		 *remap1, *remap2, offload_radio);
2556 	return true;
2557 }
2558 #endif
2559 
2560 /*
2561  * dp_reo_frag_dst_set() - configure reo register to set the
2562  *                        fragment destination ring
2563  * @soc : Datapath soc
2564  * @frag_dst_ring : output parameter to set fragment destination ring
2565  *
2566  * Based on offload_radio below fragment destination rings is selected
2567  * 0 - TCL
2568  * 1 - SW1
2569  * 2 - SW2
2570  * 3 - SW3
2571  * 4 - SW4
2572  * 5 - Release
2573  * 6 - FW
2574  * 7 - alternate select
2575  *
2576  * return: void
2577  */
2578 static void dp_reo_frag_dst_set(struct dp_soc *soc, uint8_t *frag_dst_ring)
2579 {
2580 	uint8_t offload_radio = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
2581 
2582 	switch (offload_radio) {
2583 	case dp_nss_cfg_default:
2584 		*frag_dst_ring = HAL_SRNG_REO_EXCEPTION;
2585 		break;
2586 	case dp_nss_cfg_dbdc:
2587 	case dp_nss_cfg_dbtc:
2588 		*frag_dst_ring = HAL_SRNG_REO_ALTERNATE_SELECT;
2589 		break;
2590 	default:
2591 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2592 				FL("dp_reo_frag_dst_set invalid offload radio config"));
2593 		break;
2594 	}
2595 }
2596 
2597 /*
2598  * dp_soc_cmn_setup() - Common SoC level initializion
2599  * @soc:		Datapath SOC handle
2600  *
2601  * This is an internal function used to setup common SOC data structures,
2602  * to be called from PDEV attach after receiving HW mode capabilities from FW
2603  */
2604 static int dp_soc_cmn_setup(struct dp_soc *soc)
2605 {
2606 	int i;
2607 	struct hal_reo_params reo_params;
2608 	int tx_ring_size;
2609 	int tx_comp_ring_size;
2610 	int reo_dst_ring_size;
2611 	uint32_t entries;
2612 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
2613 
2614 	if (qdf_atomic_read(&soc->cmn_init_done))
2615 		return 0;
2616 
2617 	if (dp_hw_link_desc_pool_setup(soc))
2618 		goto fail1;
2619 
2620 	soc_cfg_ctx = soc->wlan_cfg_ctx;
2621 	/* Setup SRNG rings */
2622 	/* Common rings */
2623 	if (dp_srng_setup(soc, &soc->wbm_desc_rel_ring, SW2WBM_RELEASE, 0, 0,
2624 		wlan_cfg_get_dp_soc_wbm_release_ring_size(soc_cfg_ctx))) {
2625 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2626 			FL("dp_srng_setup failed for wbm_desc_rel_ring"));
2627 		goto fail1;
2628 	}
2629 
2630 	soc->num_tcl_data_rings = 0;
2631 	/* Tx data rings */
2632 	if (!wlan_cfg_per_pdev_tx_ring(soc_cfg_ctx)) {
2633 		soc->num_tcl_data_rings =
2634 			wlan_cfg_num_tcl_data_rings(soc_cfg_ctx);
2635 		tx_comp_ring_size =
2636 			wlan_cfg_tx_comp_ring_size(soc_cfg_ctx);
2637 		tx_ring_size =
2638 			wlan_cfg_tx_ring_size(soc_cfg_ctx);
2639 		for (i = 0; i < soc->num_tcl_data_rings; i++) {
2640 			if (dp_srng_setup(soc, &soc->tcl_data_ring[i],
2641 				TCL_DATA, i, 0, tx_ring_size)) {
2642 				QDF_TRACE(QDF_MODULE_ID_DP,
2643 					QDF_TRACE_LEVEL_ERROR,
2644 					FL("dp_srng_setup failed for tcl_data_ring[%d]"), i);
2645 				goto fail1;
2646 			}
2647 			/*
2648 			 * TBD: Set IPA WBM ring size with ini IPA UC tx buffer
2649 			 * count
2650 			 */
2651 			if (dp_srng_setup(soc, &soc->tx_comp_ring[i],
2652 				WBM2SW_RELEASE, i, 0, tx_comp_ring_size)) {
2653 				QDF_TRACE(QDF_MODULE_ID_DP,
2654 					QDF_TRACE_LEVEL_ERROR,
2655 					FL("dp_srng_setup failed for tx_comp_ring[%d]"), i);
2656 				goto fail1;
2657 			}
2658 		}
2659 	} else {
2660 		/* This will be incremented during per pdev ring setup */
2661 		soc->num_tcl_data_rings = 0;
2662 	}
2663 
2664 	if (dp_tx_soc_attach(soc)) {
2665 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2666 				FL("dp_tx_soc_attach failed"));
2667 		goto fail1;
2668 	}
2669 
2670 	entries = wlan_cfg_get_dp_soc_tcl_cmd_ring_size(soc_cfg_ctx);
2671 	/* TCL command and status rings */
2672 	if (dp_srng_setup(soc, &soc->tcl_cmd_ring, TCL_CMD, 0, 0,
2673 			  entries)) {
2674 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2675 			FL("dp_srng_setup failed for tcl_cmd_ring"));
2676 		goto fail1;
2677 	}
2678 
2679 	entries = wlan_cfg_get_dp_soc_tcl_status_ring_size(soc_cfg_ctx);
2680 	if (dp_srng_setup(soc, &soc->tcl_status_ring, TCL_STATUS, 0, 0,
2681 			  entries)) {
2682 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2683 			FL("dp_srng_setup failed for tcl_status_ring"));
2684 		goto fail1;
2685 	}
2686 
2687 	reo_dst_ring_size = wlan_cfg_get_reo_dst_ring_size(soc->wlan_cfg_ctx);
2688 
2689 	/* TBD: call dp_tx_init to setup Tx SW descriptors and MSDU extension
2690 	 * descriptors
2691 	 */
2692 
2693 	/* Rx data rings */
2694 	if (!wlan_cfg_per_pdev_rx_ring(soc_cfg_ctx)) {
2695 		soc->num_reo_dest_rings =
2696 			wlan_cfg_num_reo_dest_rings(soc_cfg_ctx);
2697 		QDF_TRACE(QDF_MODULE_ID_DP,
2698 			QDF_TRACE_LEVEL_INFO,
2699 			FL("num_reo_dest_rings %d"), soc->num_reo_dest_rings);
2700 		for (i = 0; i < soc->num_reo_dest_rings; i++) {
2701 			if (dp_srng_setup(soc, &soc->reo_dest_ring[i], REO_DST,
2702 				i, 0, reo_dst_ring_size)) {
2703 				QDF_TRACE(QDF_MODULE_ID_DP,
2704 					  QDF_TRACE_LEVEL_ERROR,
2705 					  FL(RNG_ERR "reo_dest_ring [%d]"), i);
2706 				goto fail1;
2707 			}
2708 		}
2709 	} else {
2710 		/* This will be incremented during per pdev ring setup */
2711 		soc->num_reo_dest_rings = 0;
2712 	}
2713 
2714 	entries = wlan_cfg_get_dp_soc_rxdma_err_dst_ring_size(soc_cfg_ctx);
2715 	/* LMAC RxDMA to SW Rings configuration */
2716 	if (!wlan_cfg_per_pdev_lmac_ring(soc_cfg_ctx)) {
2717 		/* Only valid for MCL */
2718 		struct dp_pdev *pdev = soc->pdev_list[0];
2719 
2720 		for (i = 0; i < MAX_RX_MAC_RINGS; i++) {
2721 			if (dp_srng_setup(soc, &pdev->rxdma_err_dst_ring[i],
2722 					  RXDMA_DST, 0, i,
2723 					  entries)) {
2724 				QDF_TRACE(QDF_MODULE_ID_DP,
2725 					  QDF_TRACE_LEVEL_ERROR,
2726 					  FL(RNG_ERR "rxdma_err_dst_ring"));
2727 				goto fail1;
2728 			}
2729 		}
2730 	}
2731 	/* TBD: call dp_rx_init to setup Rx SW descriptors */
2732 
2733 	/* REO reinjection ring */
2734 	entries = wlan_cfg_get_dp_soc_reo_reinject_ring_size(soc_cfg_ctx);
2735 	if (dp_srng_setup(soc, &soc->reo_reinject_ring, REO_REINJECT, 0, 0,
2736 			  entries)) {
2737 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2738 			  FL("dp_srng_setup failed for reo_reinject_ring"));
2739 		goto fail1;
2740 	}
2741 
2742 
2743 	/* Rx release ring */
2744 	if (dp_srng_setup(soc, &soc->rx_rel_ring, WBM2SW_RELEASE, 3, 0,
2745 		wlan_cfg_get_dp_soc_rx_release_ring_size(soc_cfg_ctx))) {
2746 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2747 			  FL("dp_srng_setup failed for rx_rel_ring"));
2748 		goto fail1;
2749 	}
2750 
2751 
2752 	/* Rx exception ring */
2753 	entries = wlan_cfg_get_dp_soc_reo_exception_ring_size(soc_cfg_ctx);
2754 	if (dp_srng_setup(soc, &soc->reo_exception_ring,
2755 			  REO_EXCEPTION, 0, MAX_REO_DEST_RINGS, entries)) {
2756 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2757 			  FL("dp_srng_setup failed for reo_exception_ring"));
2758 		goto fail1;
2759 	}
2760 
2761 
2762 	/* REO command and status rings */
2763 	if (dp_srng_setup(soc, &soc->reo_cmd_ring, REO_CMD, 0, 0,
2764 		wlan_cfg_get_dp_soc_reo_cmd_ring_size(soc_cfg_ctx))) {
2765 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2766 			FL("dp_srng_setup failed for reo_cmd_ring"));
2767 		goto fail1;
2768 	}
2769 
2770 	hal_reo_init_cmd_ring(soc->hal_soc, soc->reo_cmd_ring.hal_srng);
2771 	TAILQ_INIT(&soc->rx.reo_cmd_list);
2772 	qdf_spinlock_create(&soc->rx.reo_cmd_lock);
2773 
2774 	if (dp_srng_setup(soc, &soc->reo_status_ring, REO_STATUS, 0, 0,
2775 		wlan_cfg_get_dp_soc_reo_status_ring_size(soc_cfg_ctx))) {
2776 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2777 			FL("dp_srng_setup failed for reo_status_ring"));
2778 		goto fail1;
2779 	}
2780 
2781 
2782 	/* Reset the cpu ring map if radio is NSS offloaded */
2783 	if (wlan_cfg_get_dp_soc_nss_cfg(soc_cfg_ctx)) {
2784 		dp_soc_reset_cpu_ring_map(soc);
2785 		dp_soc_reset_intr_mask(soc);
2786 	}
2787 
2788 	/* Setup HW REO */
2789 	qdf_mem_zero(&reo_params, sizeof(reo_params));
2790 
2791 	if (wlan_cfg_is_rx_hash_enabled(soc_cfg_ctx)) {
2792 
2793 		/*
2794 		 * Reo ring remap is not required if both radios
2795 		 * are offloaded to NSS
2796 		 */
2797 		if (!dp_reo_remap_config(soc,
2798 					&reo_params.remap1,
2799 					&reo_params.remap2))
2800 			goto out;
2801 
2802 		reo_params.rx_hash_enabled = true;
2803 	}
2804 
2805 	/* setup the global rx defrag waitlist */
2806 	TAILQ_INIT(&soc->rx.defrag.waitlist);
2807 	soc->rx.defrag.timeout_ms =
2808 		wlan_cfg_get_rx_defrag_min_timeout(soc_cfg_ctx);
2809 	soc->rx.defrag.next_flush_ms = 0;
2810 	soc->rx.flags.defrag_timeout_check =
2811 		wlan_cfg_get_defrag_timeout_check(soc_cfg_ctx);
2812 	qdf_spinlock_create(&soc->rx.defrag.defrag_lock);
2813 
2814 out:
2815 	/*
2816 	 * set the fragment destination ring
2817 	 */
2818 	dp_reo_frag_dst_set(soc, &reo_params.frag_dst_ring);
2819 
2820 	hal_reo_setup(soc->hal_soc, &reo_params);
2821 
2822 	qdf_atomic_set(&soc->cmn_init_done, 1);
2823 	dp_soc_wds_attach(soc);
2824 
2825 	qdf_nbuf_queue_init(&soc->htt_stats.msg);
2826 	return 0;
2827 fail1:
2828 	/*
2829 	 * Cleanup will be done as part of soc_detach, which will
2830 	 * be called on pdev attach failure
2831 	 */
2832 	return QDF_STATUS_E_FAILURE;
2833 }
2834 
2835 static void dp_pdev_detach_wifi3(struct cdp_pdev *txrx_pdev, int force);
2836 
2837 static QDF_STATUS dp_lro_hash_setup(struct dp_soc *soc, struct dp_pdev *pdev)
2838 {
2839 	struct cdp_lro_hash_config lro_hash;
2840 	QDF_STATUS status;
2841 
2842 	if (!wlan_cfg_is_lro_enabled(soc->wlan_cfg_ctx) &&
2843 	    !wlan_cfg_is_gro_enabled(soc->wlan_cfg_ctx) &&
2844 	    !wlan_cfg_is_rx_hash_enabled(soc->wlan_cfg_ctx)) {
2845 		dp_err("LRO, GRO and RX hash disabled");
2846 		return QDF_STATUS_E_FAILURE;
2847 	}
2848 
2849 	qdf_mem_zero(&lro_hash, sizeof(lro_hash));
2850 
2851 	if (wlan_cfg_is_lro_enabled(soc->wlan_cfg_ctx) ||
2852 	    wlan_cfg_is_gro_enabled(soc->wlan_cfg_ctx)) {
2853 		lro_hash.lro_enable = 1;
2854 		lro_hash.tcp_flag = QDF_TCPHDR_ACK;
2855 		lro_hash.tcp_flag_mask = QDF_TCPHDR_FIN | QDF_TCPHDR_SYN |
2856 			 QDF_TCPHDR_RST | QDF_TCPHDR_ACK | QDF_TCPHDR_URG |
2857 			 QDF_TCPHDR_ECE | QDF_TCPHDR_CWR;
2858 	}
2859 
2860 	qdf_get_random_bytes(lro_hash.toeplitz_hash_ipv4,
2861 		 (sizeof(lro_hash.toeplitz_hash_ipv4[0]) *
2862 		 LRO_IPV4_SEED_ARR_SZ));
2863 	qdf_get_random_bytes(lro_hash.toeplitz_hash_ipv6,
2864 		 (sizeof(lro_hash.toeplitz_hash_ipv6[0]) *
2865 		 LRO_IPV6_SEED_ARR_SZ));
2866 
2867 	qdf_assert(soc->cdp_soc.ol_ops->lro_hash_config);
2868 
2869 	if (!soc->cdp_soc.ol_ops->lro_hash_config) {
2870 		QDF_BUG(0);
2871 		dp_err("lro_hash_config not configured");
2872 		return QDF_STATUS_E_FAILURE;
2873 	}
2874 
2875 	status = soc->cdp_soc.ol_ops->lro_hash_config(pdev->ctrl_pdev,
2876 						      &lro_hash);
2877 	if (!QDF_IS_STATUS_SUCCESS(status)) {
2878 		dp_err("failed to send lro_hash_config to FW %u", status);
2879 		return status;
2880 	}
2881 
2882 	dp_info("LRO CMD config: lro_enable: 0x%x tcp_flag 0x%x tcp_flag_mask 0x%x",
2883 		lro_hash.lro_enable, lro_hash.tcp_flag,
2884 		lro_hash.tcp_flag_mask);
2885 
2886 	dp_info("toeplitz_hash_ipv4:");
2887 	qdf_trace_hex_dump(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
2888 			   (void *)lro_hash.toeplitz_hash_ipv4,
2889 			   (sizeof(lro_hash.toeplitz_hash_ipv4[0]) *
2890 			   LRO_IPV4_SEED_ARR_SZ));
2891 
2892 	dp_info("toeplitz_hash_ipv6:");
2893 	qdf_trace_hex_dump(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
2894 			   (void *)lro_hash.toeplitz_hash_ipv6,
2895 			   (sizeof(lro_hash.toeplitz_hash_ipv6[0]) *
2896 			   LRO_IPV6_SEED_ARR_SZ));
2897 
2898 	return status;
2899 }
2900 
2901 /*
2902 * dp_rxdma_ring_setup() - configure the RX DMA rings
2903 * @soc: data path SoC handle
2904 * @pdev: Physical device handle
2905 *
2906 * Return: 0 - success, > 0 - failure
2907 */
2908 #ifdef QCA_HOST2FW_RXBUF_RING
2909 static int dp_rxdma_ring_setup(struct dp_soc *soc,
2910 	 struct dp_pdev *pdev)
2911 {
2912 	struct wlan_cfg_dp_pdev_ctxt *pdev_cfg_ctx;
2913 	int max_mac_rings;
2914 	int i;
2915 
2916 	pdev_cfg_ctx = pdev->wlan_cfg_ctx;
2917 	max_mac_rings = wlan_cfg_get_num_mac_rings(pdev_cfg_ctx);
2918 
2919 	for (i = 0; i < max_mac_rings; i++) {
2920 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
2921 			 "%s: pdev_id %d mac_id %d",
2922 			 __func__, pdev->pdev_id, i);
2923 		if (dp_srng_setup(soc, &pdev->rx_mac_buf_ring[i],
2924 			RXDMA_BUF, 1, i,
2925 			wlan_cfg_get_rx_dma_buf_ring_size(pdev_cfg_ctx))) {
2926 			QDF_TRACE(QDF_MODULE_ID_DP,
2927 				 QDF_TRACE_LEVEL_ERROR,
2928 				 FL("failed rx mac ring setup"));
2929 			return QDF_STATUS_E_FAILURE;
2930 		}
2931 	}
2932 	return QDF_STATUS_SUCCESS;
2933 }
2934 #else
2935 static int dp_rxdma_ring_setup(struct dp_soc *soc,
2936 	 struct dp_pdev *pdev)
2937 {
2938 	return QDF_STATUS_SUCCESS;
2939 }
2940 #endif
2941 
2942 /**
2943  * dp_dscp_tid_map_setup(): Initialize the dscp-tid maps
2944  * @pdev - DP_PDEV handle
2945  *
2946  * Return: void
2947  */
2948 static inline void
2949 dp_dscp_tid_map_setup(struct dp_pdev *pdev)
2950 {
2951 	uint8_t map_id;
2952 	struct dp_soc *soc = pdev->soc;
2953 
2954 	if (!soc)
2955 		return;
2956 
2957 	for (map_id = 0; map_id < DP_MAX_TID_MAPS; map_id++) {
2958 		qdf_mem_copy(pdev->dscp_tid_map[map_id],
2959 			     default_dscp_tid_map,
2960 			     sizeof(default_dscp_tid_map));
2961 	}
2962 
2963 	for (map_id = 0; map_id < soc->num_hw_dscp_tid_map; map_id++) {
2964 		hal_tx_set_dscp_tid_map(soc->hal_soc,
2965 					default_dscp_tid_map,
2966 					map_id);
2967 	}
2968 }
2969 
2970 #ifdef IPA_OFFLOAD
2971 /**
2972  * dp_setup_ipa_rx_refill_buf_ring - Setup second Rx refill buffer ring
2973  * @soc: data path instance
2974  * @pdev: core txrx pdev context
2975  *
2976  * Return: QDF_STATUS_SUCCESS: success
2977  *         QDF_STATUS_E_RESOURCES: Error return
2978  */
2979 static int dp_setup_ipa_rx_refill_buf_ring(struct dp_soc *soc,
2980 					   struct dp_pdev *pdev)
2981 {
2982 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
2983 	int entries;
2984 
2985 	soc_cfg_ctx = soc->wlan_cfg_ctx;
2986 	entries = wlan_cfg_get_dp_soc_rxdma_refill_ring_size(soc_cfg_ctx);
2987 
2988 	/* Setup second Rx refill buffer ring */
2989 	if (dp_srng_setup(soc, &pdev->rx_refill_buf_ring2, RXDMA_BUF,
2990 			  IPA_RX_REFILL_BUF_RING_IDX,
2991 			  pdev->pdev_id,
2992 			  entries)) {
2993 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2994 			FL("dp_srng_setup failed second rx refill ring"));
2995 		return QDF_STATUS_E_FAILURE;
2996 	}
2997 	return QDF_STATUS_SUCCESS;
2998 }
2999 
3000 /**
3001  * dp_cleanup_ipa_rx_refill_buf_ring - Cleanup second Rx refill buffer ring
3002  * @soc: data path instance
3003  * @pdev: core txrx pdev context
3004  *
3005  * Return: void
3006  */
3007 static void dp_cleanup_ipa_rx_refill_buf_ring(struct dp_soc *soc,
3008 					      struct dp_pdev *pdev)
3009 {
3010 	dp_srng_cleanup(soc, &pdev->rx_refill_buf_ring2, RXDMA_BUF,
3011 			IPA_RX_REFILL_BUF_RING_IDX);
3012 }
3013 
3014 #else
3015 static int dp_setup_ipa_rx_refill_buf_ring(struct dp_soc *soc,
3016 					   struct dp_pdev *pdev)
3017 {
3018 	return QDF_STATUS_SUCCESS;
3019 }
3020 
3021 static void dp_cleanup_ipa_rx_refill_buf_ring(struct dp_soc *soc,
3022 					      struct dp_pdev *pdev)
3023 {
3024 }
3025 #endif
3026 
3027 #if !defined(DISABLE_MON_CONFIG)
3028 /**
3029  * dp_mon_rings_setup() - Initialize Monitor rings based on target
3030  * @soc: soc handle
3031  * @pdev: physical device handle
3032  *
3033  * Return: nonzero on failure and zero on success
3034  */
3035 static
3036 QDF_STATUS dp_mon_rings_setup(struct dp_soc *soc, struct dp_pdev *pdev)
3037 {
3038 	int mac_id = 0;
3039 	int pdev_id = pdev->pdev_id;
3040 	int entries;
3041 	struct wlan_cfg_dp_pdev_ctxt *pdev_cfg_ctx;
3042 
3043 	pdev_cfg_ctx = pdev->wlan_cfg_ctx;
3044 
3045 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
3046 		int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, pdev_id);
3047 
3048 		if (soc->wlan_cfg_ctx->rxdma1_enable) {
3049 			entries =
3050 			   wlan_cfg_get_dma_mon_buf_ring_size(pdev_cfg_ctx);
3051 			if (dp_srng_setup(soc,
3052 					  &pdev->rxdma_mon_buf_ring[mac_id],
3053 					  RXDMA_MONITOR_BUF, 0, mac_for_pdev,
3054 					  entries)) {
3055 				QDF_TRACE(QDF_MODULE_ID_DP,
3056 					  QDF_TRACE_LEVEL_ERROR,
3057 					  FL(RNG_ERR "rxdma_mon_buf_ring "));
3058 				return QDF_STATUS_E_NOMEM;
3059 			}
3060 
3061 			entries =
3062 			   wlan_cfg_get_dma_mon_dest_ring_size(pdev_cfg_ctx);
3063 			if (dp_srng_setup(soc,
3064 					  &pdev->rxdma_mon_dst_ring[mac_id],
3065 					  RXDMA_MONITOR_DST, 0, mac_for_pdev,
3066 					  entries)) {
3067 				QDF_TRACE(QDF_MODULE_ID_DP,
3068 					  QDF_TRACE_LEVEL_ERROR,
3069 					  FL(RNG_ERR "rxdma_mon_dst_ring"));
3070 				return QDF_STATUS_E_NOMEM;
3071 			}
3072 
3073 			entries =
3074 			    wlan_cfg_get_dma_mon_stat_ring_size(pdev_cfg_ctx);
3075 			if (dp_srng_setup(soc,
3076 					  &pdev->rxdma_mon_status_ring[mac_id],
3077 					  RXDMA_MONITOR_STATUS, 0, mac_for_pdev,
3078 					  entries)) {
3079 				QDF_TRACE(QDF_MODULE_ID_DP,
3080 					  QDF_TRACE_LEVEL_ERROR,
3081 					  FL(RNG_ERR "rxdma_mon_status_ring"));
3082 				return QDF_STATUS_E_NOMEM;
3083 			}
3084 
3085 			entries =
3086 			   wlan_cfg_get_dma_mon_desc_ring_size(pdev_cfg_ctx);
3087 			if (dp_srng_setup(soc,
3088 					  &pdev->rxdma_mon_desc_ring[mac_id],
3089 					  RXDMA_MONITOR_DESC, 0, mac_for_pdev,
3090 					  entries)) {
3091 				QDF_TRACE(QDF_MODULE_ID_DP,
3092 					  QDF_TRACE_LEVEL_ERROR,
3093 					  FL(RNG_ERR "rxdma_mon_desc_ring"));
3094 				return QDF_STATUS_E_NOMEM;
3095 			}
3096 		} else {
3097 			entries =
3098 			   wlan_cfg_get_dma_mon_stat_ring_size(pdev_cfg_ctx);
3099 			if (dp_srng_setup(soc,
3100 					  &pdev->rxdma_mon_status_ring[mac_id],
3101 					  RXDMA_MONITOR_STATUS, 0, mac_for_pdev,
3102 					  entries)) {
3103 				QDF_TRACE(QDF_MODULE_ID_DP,
3104 					  QDF_TRACE_LEVEL_ERROR,
3105 					  FL(RNG_ERR "rxdma_mon_status_ring"));
3106 				return QDF_STATUS_E_NOMEM;
3107 			}
3108 		}
3109 	}
3110 
3111 	return QDF_STATUS_SUCCESS;
3112 }
3113 #else
3114 static
3115 QDF_STATUS dp_mon_rings_setup(struct dp_soc *soc, struct dp_pdev *pdev)
3116 {
3117 	return QDF_STATUS_SUCCESS;
3118 }
3119 #endif
3120 
3121 /*dp_iterate_update_peer_list - update peer stats on cal client timer
3122  * @pdev_hdl: pdev handle
3123  */
3124 #ifdef ATH_SUPPORT_EXT_STAT
3125 void  dp_iterate_update_peer_list(void *pdev_hdl)
3126 {
3127 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_hdl;
3128 	struct dp_soc *soc = pdev->soc;
3129 	struct dp_vdev *vdev = NULL;
3130 	struct dp_peer *peer = NULL;
3131 
3132 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
3133 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
3134 	DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
3135 		DP_VDEV_ITERATE_PEER_LIST(vdev, peer) {
3136 			dp_cal_client_update_peer_stats(&peer->stats);
3137 		}
3138 	}
3139 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
3140 	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
3141 }
3142 #else
3143 void  dp_iterate_update_peer_list(void *pdev_hdl)
3144 {
3145 }
3146 #endif
3147 
3148 /*
3149 * dp_pdev_attach_wifi3() - attach txrx pdev
3150 * @ctrl_pdev: Opaque PDEV object
3151 * @txrx_soc: Datapath SOC handle
3152 * @htc_handle: HTC handle for host-target interface
3153 * @qdf_osdev: QDF OS device
3154 * @pdev_id: PDEV ID
3155 *
3156 * Return: DP PDEV handle on success, NULL on failure
3157 */
3158 static struct cdp_pdev *dp_pdev_attach_wifi3(struct cdp_soc_t *txrx_soc,
3159 	struct cdp_ctrl_objmgr_pdev *ctrl_pdev,
3160 	HTC_HANDLE htc_handle, qdf_device_t qdf_osdev, uint8_t pdev_id)
3161 {
3162 	int tx_ring_size;
3163 	int tx_comp_ring_size;
3164 	int reo_dst_ring_size;
3165 	int entries;
3166 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
3167 	int nss_cfg;
3168 
3169 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
3170 	struct dp_pdev *pdev = NULL;
3171 
3172 	if (dp_is_soc_reinit(soc))
3173 		pdev = soc->pdev_list[pdev_id];
3174 	else
3175 		pdev = qdf_mem_malloc(sizeof(*pdev));
3176 
3177 	if (!pdev) {
3178 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3179 			FL("DP PDEV memory allocation failed"));
3180 		goto fail0;
3181 	}
3182 
3183 	/*
3184 	 * Variable to prevent double pdev deinitialization during
3185 	 * radio detach execution .i.e. in the absence of any vdev.
3186 	 */
3187 	pdev->pdev_deinit = 0;
3188 	pdev->invalid_peer = qdf_mem_malloc(sizeof(struct dp_peer));
3189 
3190 	if (!pdev->invalid_peer) {
3191 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3192 			  FL("Invalid peer memory allocation failed"));
3193 		qdf_mem_free(pdev);
3194 		goto fail0;
3195 	}
3196 
3197 	soc_cfg_ctx = soc->wlan_cfg_ctx;
3198 	pdev->wlan_cfg_ctx = wlan_cfg_pdev_attach(soc->ctrl_psoc);
3199 
3200 	if (!pdev->wlan_cfg_ctx) {
3201 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3202 			FL("pdev cfg_attach failed"));
3203 
3204 		qdf_mem_free(pdev->invalid_peer);
3205 		qdf_mem_free(pdev);
3206 		goto fail0;
3207 	}
3208 
3209 	/*
3210 	 * set nss pdev config based on soc config
3211 	 */
3212 	nss_cfg = wlan_cfg_get_dp_soc_nss_cfg(soc_cfg_ctx);
3213 	wlan_cfg_set_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx,
3214 			(nss_cfg & (1 << pdev_id)));
3215 
3216 	pdev->soc = soc;
3217 	pdev->ctrl_pdev = ctrl_pdev;
3218 	pdev->pdev_id = pdev_id;
3219 	soc->pdev_list[pdev_id] = pdev;
3220 
3221 	pdev->lmac_id = wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, pdev_id);
3222 	soc->pdev_count++;
3223 
3224 	TAILQ_INIT(&pdev->vdev_list);
3225 	qdf_spinlock_create(&pdev->vdev_list_lock);
3226 	pdev->vdev_count = 0;
3227 
3228 	qdf_spinlock_create(&pdev->tx_mutex);
3229 	qdf_spinlock_create(&pdev->neighbour_peer_mutex);
3230 	TAILQ_INIT(&pdev->neighbour_peers_list);
3231 	pdev->neighbour_peers_added = false;
3232 	pdev->monitor_configured = false;
3233 
3234 	if (dp_soc_cmn_setup(soc)) {
3235 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3236 			FL("dp_soc_cmn_setup failed"));
3237 		goto fail1;
3238 	}
3239 
3240 	/* Setup per PDEV TCL rings if configured */
3241 	if (wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
3242 		tx_ring_size =
3243 			wlan_cfg_tx_ring_size(soc_cfg_ctx);
3244 		tx_comp_ring_size =
3245 			wlan_cfg_tx_comp_ring_size(soc_cfg_ctx);
3246 
3247 		if (dp_srng_setup(soc, &soc->tcl_data_ring[pdev_id], TCL_DATA,
3248 			pdev_id, pdev_id, tx_ring_size)) {
3249 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3250 				FL("dp_srng_setup failed for tcl_data_ring"));
3251 			goto fail1;
3252 		}
3253 		if (dp_srng_setup(soc, &soc->tx_comp_ring[pdev_id],
3254 			WBM2SW_RELEASE, pdev_id, pdev_id, tx_comp_ring_size)) {
3255 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3256 				FL("dp_srng_setup failed for tx_comp_ring"));
3257 			goto fail1;
3258 		}
3259 		soc->num_tcl_data_rings++;
3260 	}
3261 
3262 	/* Tx specific init */
3263 	if (dp_tx_pdev_attach(pdev)) {
3264 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3265 			FL("dp_tx_pdev_attach failed"));
3266 		goto fail1;
3267 	}
3268 
3269 	reo_dst_ring_size = wlan_cfg_get_reo_dst_ring_size(soc->wlan_cfg_ctx);
3270 	/* Setup per PDEV REO rings if configured */
3271 	if (wlan_cfg_per_pdev_rx_ring(soc_cfg_ctx)) {
3272 		if (dp_srng_setup(soc, &soc->reo_dest_ring[pdev_id], REO_DST,
3273 			pdev_id, pdev_id, reo_dst_ring_size)) {
3274 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3275 				FL("dp_srng_setup failed for reo_dest_ringn"));
3276 			goto fail1;
3277 		}
3278 		soc->num_reo_dest_rings++;
3279 
3280 	}
3281 	if (dp_srng_setup(soc, &pdev->rx_refill_buf_ring, RXDMA_BUF, 0, pdev_id,
3282 		wlan_cfg_get_dp_soc_rxdma_refill_ring_size(soc_cfg_ctx))) {
3283 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3284 			 FL("dp_srng_setup failed rx refill ring"));
3285 		goto fail1;
3286 	}
3287 
3288 	if (dp_rxdma_ring_setup(soc, pdev)) {
3289 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3290 			 FL("RXDMA ring config failed"));
3291 		goto fail1;
3292 	}
3293 
3294 	if (dp_mon_rings_setup(soc, pdev)) {
3295 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3296 			  FL("MONITOR rings setup failed"));
3297 		goto fail1;
3298 	}
3299 
3300 	entries = wlan_cfg_get_dp_soc_rxdma_err_dst_ring_size(soc_cfg_ctx);
3301 	if (wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx)) {
3302 		if (dp_srng_setup(soc, &pdev->rxdma_err_dst_ring[0], RXDMA_DST,
3303 				  0, pdev_id,
3304 				  entries)) {
3305 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3306 				  FL(RNG_ERR "rxdma_err_dst_ring"));
3307 			goto fail1;
3308 		}
3309 	}
3310 
3311 	if (dp_setup_ipa_rx_refill_buf_ring(soc, pdev))
3312 		goto fail1;
3313 
3314 	if (dp_ipa_ring_resource_setup(soc, pdev))
3315 		goto fail1;
3316 
3317 	if (dp_ipa_uc_attach(soc, pdev) != QDF_STATUS_SUCCESS) {
3318 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3319 			FL("dp_ipa_uc_attach failed"));
3320 		goto fail1;
3321 	}
3322 
3323 	/* Rx specific init */
3324 	if (dp_rx_pdev_attach(pdev)) {
3325 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3326 			  FL("dp_rx_pdev_attach failed"));
3327 		goto fail1;
3328 	}
3329 
3330 	DP_STATS_INIT(pdev);
3331 
3332 	/* Monitor filter init */
3333 	pdev->mon_filter_mode = MON_FILTER_ALL;
3334 	pdev->fp_mgmt_filter = FILTER_MGMT_ALL;
3335 	pdev->fp_ctrl_filter = FILTER_CTRL_ALL;
3336 	pdev->fp_data_filter = FILTER_DATA_ALL;
3337 	pdev->mo_mgmt_filter = FILTER_MGMT_ALL;
3338 	pdev->mo_ctrl_filter = FILTER_CTRL_ALL;
3339 	pdev->mo_data_filter = FILTER_DATA_ALL;
3340 
3341 	dp_local_peer_id_pool_init(pdev);
3342 
3343 	dp_dscp_tid_map_setup(pdev);
3344 
3345 	/* Rx monitor mode specific init */
3346 	if (dp_rx_pdev_mon_attach(pdev)) {
3347 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
3348 				"dp_rx_pdev_mon_attach failed");
3349 		goto fail1;
3350 	}
3351 
3352 	if (dp_wdi_event_attach(pdev)) {
3353 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
3354 				"dp_wdi_evet_attach failed");
3355 		goto fail1;
3356 	}
3357 
3358 	/* set the reo destination during initialization */
3359 	pdev->reo_dest = pdev->pdev_id + 1;
3360 
3361 	/*
3362 	 * initialize ppdu tlv list
3363 	 */
3364 	TAILQ_INIT(&pdev->ppdu_info_list);
3365 	pdev->tlv_count = 0;
3366 	pdev->list_depth = 0;
3367 
3368 	qdf_mem_zero(&pdev->sojourn_stats, sizeof(struct cdp_tx_sojourn_stats));
3369 
3370 	pdev->sojourn_buf = qdf_nbuf_alloc(pdev->soc->osdev,
3371 			      sizeof(struct cdp_tx_sojourn_stats), 0, 4,
3372 			      TRUE);
3373 
3374 	/* initlialize cal client timer */
3375 	dp_cal_client_attach(&pdev->cal_client_ctx, pdev, pdev->soc->osdev,
3376 			     &dp_iterate_update_peer_list);
3377 	qdf_event_create(&pdev->fw_peer_stats_event);
3378 
3379 	return (struct cdp_pdev *)pdev;
3380 
3381 fail1:
3382 	dp_pdev_detach((struct cdp_pdev *)pdev, 0);
3383 
3384 fail0:
3385 	return NULL;
3386 }
3387 
3388 /*
3389 * dp_rxdma_ring_cleanup() - configure the RX DMA rings
3390 * @soc: data path SoC handle
3391 * @pdev: Physical device handle
3392 *
3393 * Return: void
3394 */
3395 #ifdef QCA_HOST2FW_RXBUF_RING
3396 static void dp_rxdma_ring_cleanup(struct dp_soc *soc,
3397 	 struct dp_pdev *pdev)
3398 {
3399 	int max_mac_rings =
3400 		 wlan_cfg_get_num_mac_rings(pdev->wlan_cfg_ctx);
3401 	int i;
3402 
3403 	max_mac_rings = max_mac_rings < MAX_RX_MAC_RINGS ?
3404 				max_mac_rings : MAX_RX_MAC_RINGS;
3405 	for (i = 0; i < MAX_RX_MAC_RINGS; i++)
3406 		dp_srng_cleanup(soc, &pdev->rx_mac_buf_ring[i],
3407 			 RXDMA_BUF, 1);
3408 
3409 	qdf_timer_free(&soc->mon_reap_timer);
3410 }
3411 #else
3412 static void dp_rxdma_ring_cleanup(struct dp_soc *soc,
3413 	 struct dp_pdev *pdev)
3414 {
3415 }
3416 #endif
3417 
3418 /*
3419  * dp_neighbour_peers_detach() - Detach neighbour peers(nac clients)
3420  * @pdev: device object
3421  *
3422  * Return: void
3423  */
3424 static void dp_neighbour_peers_detach(struct dp_pdev *pdev)
3425 {
3426 	struct dp_neighbour_peer *peer = NULL;
3427 	struct dp_neighbour_peer *temp_peer = NULL;
3428 
3429 	TAILQ_FOREACH_SAFE(peer, &pdev->neighbour_peers_list,
3430 			neighbour_peer_list_elem, temp_peer) {
3431 		/* delete this peer from the list */
3432 		TAILQ_REMOVE(&pdev->neighbour_peers_list,
3433 				peer, neighbour_peer_list_elem);
3434 		qdf_mem_free(peer);
3435 	}
3436 
3437 	qdf_spinlock_destroy(&pdev->neighbour_peer_mutex);
3438 }
3439 
3440 /**
3441 * dp_htt_ppdu_stats_detach() - detach stats resources
3442 * @pdev: Datapath PDEV handle
3443 *
3444 * Return: void
3445 */
3446 static void dp_htt_ppdu_stats_detach(struct dp_pdev *pdev)
3447 {
3448 	struct ppdu_info *ppdu_info, *ppdu_info_next;
3449 
3450 	TAILQ_FOREACH_SAFE(ppdu_info, &pdev->ppdu_info_list,
3451 			ppdu_info_list_elem, ppdu_info_next) {
3452 		if (!ppdu_info)
3453 			break;
3454 		qdf_assert_always(ppdu_info->nbuf);
3455 		qdf_nbuf_free(ppdu_info->nbuf);
3456 		qdf_mem_free(ppdu_info);
3457 	}
3458 }
3459 
3460 #if !defined(DISABLE_MON_CONFIG)
3461 
3462 static
3463 void dp_mon_ring_cleanup(struct dp_soc *soc, struct dp_pdev *pdev,
3464 			 int mac_id)
3465 {
3466 		if (soc->wlan_cfg_ctx->rxdma1_enable) {
3467 			dp_srng_cleanup(soc,
3468 					&pdev->rxdma_mon_buf_ring[mac_id],
3469 					RXDMA_MONITOR_BUF, 0);
3470 
3471 			dp_srng_cleanup(soc,
3472 					&pdev->rxdma_mon_dst_ring[mac_id],
3473 					RXDMA_MONITOR_DST, 0);
3474 
3475 			dp_srng_cleanup(soc,
3476 					&pdev->rxdma_mon_status_ring[mac_id],
3477 					RXDMA_MONITOR_STATUS, 0);
3478 
3479 			dp_srng_cleanup(soc,
3480 					&pdev->rxdma_mon_desc_ring[mac_id],
3481 					RXDMA_MONITOR_DESC, 0);
3482 
3483 			dp_srng_cleanup(soc,
3484 					&pdev->rxdma_err_dst_ring[mac_id],
3485 					RXDMA_DST, 0);
3486 		} else {
3487 			dp_srng_cleanup(soc,
3488 					&pdev->rxdma_mon_status_ring[mac_id],
3489 					RXDMA_MONITOR_STATUS, 0);
3490 
3491 			dp_srng_cleanup(soc,
3492 					&pdev->rxdma_err_dst_ring[mac_id],
3493 					RXDMA_DST, 0);
3494 		}
3495 
3496 }
3497 #else
3498 static void dp_mon_ring_cleanup(struct dp_soc *soc, struct dp_pdev *pdev,
3499 				int mac_id)
3500 {
3501 }
3502 #endif
3503 
3504 /**
3505  * dp_mon_ring_deinit() - Placeholder to deinitialize Monitor rings
3506  *
3507  * @soc: soc handle
3508  * @pdev: datapath physical dev handle
3509  * @mac_id: mac number
3510  *
3511  * Return: None
3512  */
3513 static void dp_mon_ring_deinit(struct dp_soc *soc, struct dp_pdev *pdev,
3514 			       int mac_id)
3515 {
3516 }
3517 
3518 /**
3519  * dp_pdev_mem_reset() - Reset txrx pdev memory
3520  * @pdev: dp pdev handle
3521  *
3522  * Return: None
3523  */
3524 static void dp_pdev_mem_reset(struct dp_pdev *pdev)
3525 {
3526 	uint16_t len = 0;
3527 	uint8_t *dp_pdev_offset = (uint8_t *)pdev;
3528 
3529 	len = sizeof(struct dp_pdev) -
3530 		offsetof(struct dp_pdev, pdev_deinit) -
3531 		sizeof(pdev->pdev_deinit);
3532 	dp_pdev_offset = dp_pdev_offset +
3533 			 offsetof(struct dp_pdev, pdev_deinit) +
3534 			 sizeof(pdev->pdev_deinit);
3535 
3536 	qdf_mem_zero(dp_pdev_offset, len);
3537 }
3538 
3539 /**
3540  * dp_pdev_deinit() - Deinit txrx pdev
3541  * @txrx_pdev: Datapath PDEV handle
3542  * @force: Force deinit
3543  *
3544  * Return: None
3545  */
3546 static void dp_pdev_deinit(struct cdp_pdev *txrx_pdev, int force)
3547 {
3548 	struct dp_pdev *pdev = (struct dp_pdev *)txrx_pdev;
3549 	struct dp_soc *soc = pdev->soc;
3550 	qdf_nbuf_t curr_nbuf, next_nbuf;
3551 	int mac_id;
3552 
3553 	/*
3554 	 * Prevent double pdev deinitialization during radio detach
3555 	 * execution .i.e. in the absence of any vdev
3556 	 */
3557 	if (pdev->pdev_deinit)
3558 		return;
3559 
3560 	pdev->pdev_deinit = 1;
3561 
3562 	dp_wdi_event_detach(pdev);
3563 
3564 	dp_tx_pdev_detach(pdev);
3565 
3566 	if (wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
3567 		dp_srng_deinit(soc, &soc->tcl_data_ring[pdev->pdev_id],
3568 			       TCL_DATA, pdev->pdev_id);
3569 		dp_srng_deinit(soc, &soc->tx_comp_ring[pdev->pdev_id],
3570 			       WBM2SW_RELEASE, pdev->pdev_id);
3571 	}
3572 
3573 	dp_pktlogmod_exit(pdev);
3574 
3575 	dp_rx_pdev_detach(pdev);
3576 	dp_rx_pdev_mon_detach(pdev);
3577 	dp_neighbour_peers_detach(pdev);
3578 	qdf_spinlock_destroy(&pdev->tx_mutex);
3579 	qdf_spinlock_destroy(&pdev->vdev_list_lock);
3580 
3581 	dp_ipa_uc_detach(soc, pdev);
3582 
3583 	dp_cleanup_ipa_rx_refill_buf_ring(soc, pdev);
3584 
3585 	/* Cleanup per PDEV REO rings if configured */
3586 	if (wlan_cfg_per_pdev_rx_ring(soc->wlan_cfg_ctx)) {
3587 		dp_srng_deinit(soc, &soc->reo_dest_ring[pdev->pdev_id],
3588 			       REO_DST, pdev->pdev_id);
3589 	}
3590 
3591 	dp_srng_deinit(soc, &pdev->rx_refill_buf_ring, RXDMA_BUF, 0);
3592 
3593 	dp_rxdma_ring_cleanup(soc, pdev);
3594 
3595 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
3596 		dp_mon_ring_deinit(soc, pdev, mac_id);
3597 		dp_srng_deinit(soc, &pdev->rxdma_err_dst_ring[mac_id],
3598 			       RXDMA_DST, 0);
3599 	}
3600 
3601 	curr_nbuf = pdev->invalid_peer_head_msdu;
3602 	while (curr_nbuf) {
3603 		next_nbuf = qdf_nbuf_next(curr_nbuf);
3604 		qdf_nbuf_free(curr_nbuf);
3605 		curr_nbuf = next_nbuf;
3606 	}
3607 	pdev->invalid_peer_head_msdu = NULL;
3608 	pdev->invalid_peer_tail_msdu = NULL;
3609 
3610 	dp_htt_ppdu_stats_detach(pdev);
3611 
3612 	qdf_nbuf_free(pdev->sojourn_buf);
3613 
3614 	dp_cal_client_detach(&pdev->cal_client_ctx);
3615 
3616 	soc->pdev_count--;
3617 	wlan_cfg_pdev_detach(pdev->wlan_cfg_ctx);
3618 	qdf_mem_free(pdev->invalid_peer);
3619 	qdf_mem_free(pdev->dp_txrx_handle);
3620 	dp_pdev_mem_reset(pdev);
3621 }
3622 
3623 /**
3624  * dp_pdev_deinit_wifi3() - Deinit txrx pdev
3625  * @txrx_pdev: Datapath PDEV handle
3626  * @force: Force deinit
3627  *
3628  * Return: None
3629  */
3630 static void dp_pdev_deinit_wifi3(struct cdp_pdev *txrx_pdev, int force)
3631 {
3632 	dp_pdev_deinit(txrx_pdev, force);
3633 }
3634 
3635 /*
3636  * dp_pdev_detach() - Complete rest of pdev detach
3637  * @txrx_pdev: Datapath PDEV handle
3638  * @force: Force deinit
3639  *
3640  * Return: None
3641  */
3642 static void dp_pdev_detach(struct cdp_pdev *txrx_pdev, int force)
3643 {
3644 	struct dp_pdev *pdev = (struct dp_pdev *)txrx_pdev;
3645 	struct dp_soc *soc = pdev->soc;
3646 	int mac_id;
3647 
3648 	if (wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
3649 		dp_srng_cleanup(soc, &soc->tcl_data_ring[pdev->pdev_id],
3650 				TCL_DATA, pdev->pdev_id);
3651 		dp_srng_cleanup(soc, &soc->tx_comp_ring[pdev->pdev_id],
3652 				WBM2SW_RELEASE, pdev->pdev_id);
3653 	}
3654 
3655 	dp_mon_link_free(pdev);
3656 
3657 	/* Cleanup per PDEV REO rings if configured */
3658 	if (wlan_cfg_per_pdev_rx_ring(soc->wlan_cfg_ctx)) {
3659 		dp_srng_cleanup(soc, &soc->reo_dest_ring[pdev->pdev_id],
3660 				REO_DST, pdev->pdev_id);
3661 	}
3662 
3663 	dp_srng_cleanup(soc, &pdev->rx_refill_buf_ring, RXDMA_BUF, 0);
3664 
3665 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
3666 		dp_mon_ring_cleanup(soc, pdev, mac_id);
3667 		dp_srng_cleanup(soc, &pdev->rxdma_err_dst_ring[mac_id],
3668 				RXDMA_DST, 0);
3669 	}
3670 
3671 	soc->pdev_list[pdev->pdev_id] = NULL;
3672 	qdf_mem_free(pdev);
3673 }
3674 
3675 /*
3676  * dp_pdev_detach_wifi3() - detach txrx pdev
3677  * @txrx_pdev: Datapath PDEV handle
3678  * @force: Force detach
3679  *
3680  * Return: None
3681  */
3682 static void dp_pdev_detach_wifi3(struct cdp_pdev *txrx_pdev, int force)
3683 {
3684 	struct dp_pdev *pdev = (struct dp_pdev *)txrx_pdev;
3685 	struct dp_soc *soc = pdev->soc;
3686 
3687 	if (dp_is_soc_reinit(soc)) {
3688 		dp_pdev_detach(txrx_pdev, force);
3689 	} else {
3690 		dp_pdev_deinit(txrx_pdev, force);
3691 		dp_pdev_detach(txrx_pdev, force);
3692 	}
3693 }
3694 
3695 /*
3696  * dp_reo_desc_freelist_destroy() - Flush REO descriptors from deferred freelist
3697  * @soc: DP SOC handle
3698  */
3699 static inline void dp_reo_desc_freelist_destroy(struct dp_soc *soc)
3700 {
3701 	struct reo_desc_list_node *desc;
3702 	struct dp_rx_tid *rx_tid;
3703 
3704 	qdf_spin_lock_bh(&soc->reo_desc_freelist_lock);
3705 	while (qdf_list_remove_front(&soc->reo_desc_freelist,
3706 		(qdf_list_node_t **)&desc) == QDF_STATUS_SUCCESS) {
3707 		rx_tid = &desc->rx_tid;
3708 		qdf_mem_unmap_nbytes_single(soc->osdev,
3709 			rx_tid->hw_qdesc_paddr,
3710 			QDF_DMA_BIDIRECTIONAL,
3711 			rx_tid->hw_qdesc_alloc_size);
3712 		qdf_mem_free(rx_tid->hw_qdesc_vaddr_unaligned);
3713 		qdf_mem_free(desc);
3714 	}
3715 	qdf_spin_unlock_bh(&soc->reo_desc_freelist_lock);
3716 	qdf_list_destroy(&soc->reo_desc_freelist);
3717 	qdf_spinlock_destroy(&soc->reo_desc_freelist_lock);
3718 }
3719 
3720 /**
3721  * dp_soc_mem_reset() - Reset Dp Soc memory
3722  * @soc: DP handle
3723  *
3724  * Return: None
3725  */
3726 static void dp_soc_mem_reset(struct dp_soc *soc)
3727 {
3728 	uint16_t len = 0;
3729 	uint8_t *dp_soc_offset = (uint8_t *)soc;
3730 
3731 	len = sizeof(struct dp_soc) -
3732 		offsetof(struct dp_soc, dp_soc_reinit) -
3733 		sizeof(soc->dp_soc_reinit);
3734 	dp_soc_offset = dp_soc_offset +
3735 			offsetof(struct dp_soc, dp_soc_reinit) +
3736 			sizeof(soc->dp_soc_reinit);
3737 
3738 	qdf_mem_zero(dp_soc_offset, len);
3739 }
3740 
3741 /**
3742  * dp_soc_deinit() - Deinitialize txrx SOC
3743  * @txrx_soc: Opaque DP SOC handle
3744  *
3745  * Return: None
3746  */
3747 static void dp_soc_deinit(void *txrx_soc)
3748 {
3749 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
3750 	int i;
3751 
3752 	qdf_atomic_set(&soc->cmn_init_done, 0);
3753 
3754 	for (i = 0; i < MAX_PDEV_CNT; i++) {
3755 		if (soc->pdev_list[i])
3756 			dp_pdev_deinit((struct cdp_pdev *)
3757 					soc->pdev_list[i], 1);
3758 	}
3759 
3760 	qdf_flush_work(&soc->htt_stats.work);
3761 	qdf_disable_work(&soc->htt_stats.work);
3762 
3763 	/* Free pending htt stats messages */
3764 	qdf_nbuf_queue_free(&soc->htt_stats.msg);
3765 
3766 	dp_reo_cmdlist_destroy(soc);
3767 
3768 	dp_peer_find_detach(soc);
3769 
3770 	/* Free the ring memories */
3771 	/* Common rings */
3772 	dp_srng_deinit(soc, &soc->wbm_desc_rel_ring, SW2WBM_RELEASE, 0);
3773 
3774 	/* Tx data rings */
3775 	if (!wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
3776 		for (i = 0; i < soc->num_tcl_data_rings; i++) {
3777 			dp_srng_deinit(soc, &soc->tcl_data_ring[i],
3778 				       TCL_DATA, i);
3779 			dp_srng_deinit(soc, &soc->tx_comp_ring[i],
3780 				       WBM2SW_RELEASE, i);
3781 		}
3782 	}
3783 
3784 	/* TCL command and status rings */
3785 	dp_srng_deinit(soc, &soc->tcl_cmd_ring, TCL_CMD, 0);
3786 	dp_srng_deinit(soc, &soc->tcl_status_ring, TCL_STATUS, 0);
3787 
3788 	/* Rx data rings */
3789 	if (!wlan_cfg_per_pdev_rx_ring(soc->wlan_cfg_ctx)) {
3790 		soc->num_reo_dest_rings =
3791 			wlan_cfg_num_reo_dest_rings(soc->wlan_cfg_ctx);
3792 		for (i = 0; i < soc->num_reo_dest_rings; i++) {
3793 			/* TODO: Get number of rings and ring sizes
3794 			 * from wlan_cfg
3795 			 */
3796 			dp_srng_deinit(soc, &soc->reo_dest_ring[i],
3797 				       REO_DST, i);
3798 		}
3799 	}
3800 	/* REO reinjection ring */
3801 	dp_srng_deinit(soc, &soc->reo_reinject_ring, REO_REINJECT, 0);
3802 
3803 	/* Rx release ring */
3804 	dp_srng_deinit(soc, &soc->rx_rel_ring, WBM2SW_RELEASE, 0);
3805 
3806 	/* Rx exception ring */
3807 	/* TODO: Better to store ring_type and ring_num in
3808 	 * dp_srng during setup
3809 	 */
3810 	dp_srng_deinit(soc, &soc->reo_exception_ring, REO_EXCEPTION, 0);
3811 
3812 	/* REO command and status rings */
3813 	dp_srng_deinit(soc, &soc->reo_cmd_ring, REO_CMD, 0);
3814 	dp_srng_deinit(soc, &soc->reo_status_ring, REO_STATUS, 0);
3815 
3816 	dp_soc_wds_detach(soc);
3817 
3818 	qdf_spinlock_destroy(&soc->peer_ref_mutex);
3819 	qdf_spinlock_destroy(&soc->htt_stats.lock);
3820 
3821 	htt_soc_htc_dealloc(soc->htt_handle);
3822 
3823 	qdf_spinlock_destroy(&soc->rx.defrag.defrag_lock);
3824 
3825 	dp_reo_cmdlist_destroy(soc);
3826 	qdf_spinlock_destroy(&soc->rx.reo_cmd_lock);
3827 	dp_reo_desc_freelist_destroy(soc);
3828 
3829 	qdf_spinlock_destroy(&soc->ast_lock);
3830 
3831 	dp_soc_mem_reset(soc);
3832 }
3833 
3834 /**
3835  * dp_soc_deinit_wifi3() - Deinitialize txrx SOC
3836  * @txrx_soc: Opaque DP SOC handle
3837  *
3838  * Return: None
3839  */
3840 static void dp_soc_deinit_wifi3(void *txrx_soc)
3841 {
3842 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
3843 
3844 	soc->dp_soc_reinit = 1;
3845 	dp_soc_deinit(txrx_soc);
3846 }
3847 
3848 /*
3849  * dp_soc_detach() - Detach rest of txrx SOC
3850  * @txrx_soc: DP SOC handle, struct cdp_soc_t is first element of struct dp_soc.
3851  *
3852  * Return: None
3853  */
3854 static void dp_soc_detach(void *txrx_soc)
3855 {
3856 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
3857 	int i;
3858 
3859 	qdf_atomic_set(&soc->cmn_init_done, 0);
3860 
3861 	/* TBD: Call Tx and Rx cleanup functions to free buffers and
3862 	 * SW descriptors
3863 	 */
3864 
3865 	for (i = 0; i < MAX_PDEV_CNT; i++) {
3866 		if (soc->pdev_list[i])
3867 			dp_pdev_detach((struct cdp_pdev *)
3868 					     soc->pdev_list[i], 1);
3869 	}
3870 
3871 	/* Free the ring memories */
3872 	/* Common rings */
3873 	dp_srng_cleanup(soc, &soc->wbm_desc_rel_ring, SW2WBM_RELEASE, 0);
3874 
3875 	dp_tx_soc_detach(soc);
3876 
3877 	/* Tx data rings */
3878 	if (!wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
3879 		for (i = 0; i < soc->num_tcl_data_rings; i++) {
3880 			dp_srng_cleanup(soc, &soc->tcl_data_ring[i],
3881 				TCL_DATA, i);
3882 			dp_srng_cleanup(soc, &soc->tx_comp_ring[i],
3883 				WBM2SW_RELEASE, i);
3884 		}
3885 	}
3886 
3887 	/* TCL command and status rings */
3888 	dp_srng_cleanup(soc, &soc->tcl_cmd_ring, TCL_CMD, 0);
3889 	dp_srng_cleanup(soc, &soc->tcl_status_ring, TCL_STATUS, 0);
3890 
3891 	/* Rx data rings */
3892 	if (!wlan_cfg_per_pdev_rx_ring(soc->wlan_cfg_ctx)) {
3893 		soc->num_reo_dest_rings =
3894 			wlan_cfg_num_reo_dest_rings(soc->wlan_cfg_ctx);
3895 		for (i = 0; i < soc->num_reo_dest_rings; i++) {
3896 			/* TODO: Get number of rings and ring sizes
3897 			 * from wlan_cfg
3898 			 */
3899 			dp_srng_cleanup(soc, &soc->reo_dest_ring[i],
3900 				REO_DST, i);
3901 		}
3902 	}
3903 	/* REO reinjection ring */
3904 	dp_srng_cleanup(soc, &soc->reo_reinject_ring, REO_REINJECT, 0);
3905 
3906 	/* Rx release ring */
3907 	dp_srng_cleanup(soc, &soc->rx_rel_ring, WBM2SW_RELEASE, 0);
3908 
3909 	/* Rx exception ring */
3910 	/* TODO: Better to store ring_type and ring_num in
3911 	 * dp_srng during setup
3912 	 */
3913 	dp_srng_cleanup(soc, &soc->reo_exception_ring, REO_EXCEPTION, 0);
3914 
3915 	/* REO command and status rings */
3916 	dp_srng_cleanup(soc, &soc->reo_cmd_ring, REO_CMD, 0);
3917 	dp_srng_cleanup(soc, &soc->reo_status_ring, REO_STATUS, 0);
3918 	dp_hw_link_desc_pool_cleanup(soc);
3919 
3920 	htt_soc_detach(soc->htt_handle);
3921 	soc->dp_soc_reinit = 0;
3922 
3923 	wlan_cfg_soc_detach(soc->wlan_cfg_ctx);
3924 
3925 	qdf_mem_free(soc);
3926 }
3927 
3928 /*
3929  * dp_soc_detach_wifi3() - Detach txrx SOC
3930  * @txrx_soc: DP SOC handle, struct cdp_soc_t is first element of struct dp_soc.
3931  *
3932  * Return: None
3933  */
3934 static void dp_soc_detach_wifi3(void *txrx_soc)
3935 {
3936 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
3937 
3938 	if (dp_is_soc_reinit(soc)) {
3939 		dp_soc_detach(txrx_soc);
3940 	} else {
3941 		dp_soc_deinit(txrx_soc);
3942 		dp_soc_detach(txrx_soc);
3943 	}
3944 
3945 }
3946 
3947 #if !defined(DISABLE_MON_CONFIG)
3948 /**
3949  * dp_mon_htt_srng_setup() - Prepare HTT messages for Monitor rings
3950  * @soc: soc handle
3951  * @pdev: physical device handle
3952  * @mac_id: ring number
3953  * @mac_for_pdev: mac_id
3954  *
3955  * Return: non-zero for failure, zero for success
3956  */
3957 static QDF_STATUS dp_mon_htt_srng_setup(struct dp_soc *soc,
3958 					struct dp_pdev *pdev,
3959 					int mac_id,
3960 					int mac_for_pdev)
3961 {
3962 	QDF_STATUS status = QDF_STATUS_SUCCESS;
3963 
3964 	if (soc->wlan_cfg_ctx->rxdma1_enable) {
3965 		status = htt_srng_setup(soc->htt_handle, mac_for_pdev,
3966 					pdev->rxdma_mon_buf_ring[mac_id]
3967 					.hal_srng,
3968 					RXDMA_MONITOR_BUF);
3969 
3970 		if (status != QDF_STATUS_SUCCESS) {
3971 			dp_err("Failed to send htt srng setup message for Rxdma mon buf ring");
3972 			return status;
3973 		}
3974 
3975 		status = htt_srng_setup(soc->htt_handle, mac_for_pdev,
3976 					pdev->rxdma_mon_dst_ring[mac_id]
3977 					.hal_srng,
3978 					RXDMA_MONITOR_DST);
3979 
3980 		if (status != QDF_STATUS_SUCCESS) {
3981 			dp_err("Failed to send htt srng setup message for Rxdma mon dst ring");
3982 			return status;
3983 		}
3984 
3985 		status = htt_srng_setup(soc->htt_handle, mac_for_pdev,
3986 					pdev->rxdma_mon_status_ring[mac_id]
3987 					.hal_srng,
3988 					RXDMA_MONITOR_STATUS);
3989 
3990 		if (status != QDF_STATUS_SUCCESS) {
3991 			dp_err("Failed to send htt srng setup message for Rxdma mon status ring");
3992 			return status;
3993 		}
3994 
3995 		status = htt_srng_setup(soc->htt_handle, mac_for_pdev,
3996 					pdev->rxdma_mon_desc_ring[mac_id]
3997 					.hal_srng,
3998 					RXDMA_MONITOR_DESC);
3999 
4000 		if (status != QDF_STATUS_SUCCESS) {
4001 			dp_err("Failed to send htt srng message for Rxdma mon desc ring");
4002 			return status;
4003 		}
4004 	} else {
4005 		status = htt_srng_setup(soc->htt_handle, mac_for_pdev,
4006 					pdev->rxdma_mon_status_ring[mac_id]
4007 					.hal_srng,
4008 					RXDMA_MONITOR_STATUS);
4009 
4010 		if (status != QDF_STATUS_SUCCESS) {
4011 			dp_err("Failed to send htt srng setup message for Rxdma mon status ring");
4012 			return status;
4013 		}
4014 	}
4015 
4016 	return status;
4017 
4018 }
4019 #else
4020 static QDF_STATUS dp_mon_htt_srng_setup(struct dp_soc *soc,
4021 					struct dp_pdev *pdev,
4022 					int mac_id,
4023 					int mac_for_pdev)
4024 {
4025 	return QDF_STATUS_SUCCESS;
4026 }
4027 #endif
4028 
4029 /*
4030  * dp_rxdma_ring_config() - configure the RX DMA rings
4031  *
4032  * This function is used to configure the MAC rings.
4033  * On MCL host provides buffers in Host2FW ring
4034  * FW refills (copies) buffers to the ring and updates
4035  * ring_idx in register
4036  *
4037  * @soc: data path SoC handle
4038  *
4039  * Return: zero on success, non-zero on failure
4040  */
4041 #ifdef QCA_HOST2FW_RXBUF_RING
4042 static QDF_STATUS dp_rxdma_ring_config(struct dp_soc *soc)
4043 {
4044 	int i;
4045 	QDF_STATUS status = QDF_STATUS_SUCCESS;
4046 	for (i = 0; i < MAX_PDEV_CNT; i++) {
4047 		struct dp_pdev *pdev = soc->pdev_list[i];
4048 
4049 		if (pdev) {
4050 			int mac_id;
4051 			bool dbs_enable = 0;
4052 			int max_mac_rings =
4053 				 wlan_cfg_get_num_mac_rings
4054 				(pdev->wlan_cfg_ctx);
4055 
4056 			htt_srng_setup(soc->htt_handle, 0,
4057 				 pdev->rx_refill_buf_ring.hal_srng,
4058 				 RXDMA_BUF);
4059 
4060 			if (pdev->rx_refill_buf_ring2.hal_srng)
4061 				htt_srng_setup(soc->htt_handle, 0,
4062 					pdev->rx_refill_buf_ring2.hal_srng,
4063 					RXDMA_BUF);
4064 
4065 			if (soc->cdp_soc.ol_ops->
4066 				is_hw_dbs_2x2_capable) {
4067 				dbs_enable = soc->cdp_soc.ol_ops->
4068 					is_hw_dbs_2x2_capable(soc->ctrl_psoc);
4069 			}
4070 
4071 			if (dbs_enable) {
4072 				QDF_TRACE(QDF_MODULE_ID_TXRX,
4073 				QDF_TRACE_LEVEL_ERROR,
4074 				FL("DBS enabled max_mac_rings %d"),
4075 					 max_mac_rings);
4076 			} else {
4077 				max_mac_rings = 1;
4078 				QDF_TRACE(QDF_MODULE_ID_TXRX,
4079 					 QDF_TRACE_LEVEL_ERROR,
4080 					 FL("DBS disabled, max_mac_rings %d"),
4081 					 max_mac_rings);
4082 			}
4083 
4084 			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
4085 					 FL("pdev_id %d max_mac_rings %d"),
4086 					 pdev->pdev_id, max_mac_rings);
4087 
4088 			for (mac_id = 0; mac_id < max_mac_rings; mac_id++) {
4089 				int mac_for_pdev = dp_get_mac_id_for_pdev(
4090 							mac_id, pdev->pdev_id);
4091 
4092 				QDF_TRACE(QDF_MODULE_ID_TXRX,
4093 					 QDF_TRACE_LEVEL_ERROR,
4094 					 FL("mac_id %d"), mac_for_pdev);
4095 
4096 				htt_srng_setup(soc->htt_handle, mac_for_pdev,
4097 					 pdev->rx_mac_buf_ring[mac_id]
4098 						.hal_srng,
4099 					 RXDMA_BUF);
4100 				htt_srng_setup(soc->htt_handle, mac_for_pdev,
4101 					pdev->rxdma_err_dst_ring[mac_id]
4102 						.hal_srng,
4103 					RXDMA_DST);
4104 
4105 				/* Configure monitor mode rings */
4106 				status = dp_mon_htt_srng_setup(soc, pdev,
4107 							       mac_id,
4108 							       mac_for_pdev);
4109 				if (status != QDF_STATUS_SUCCESS) {
4110 					dp_err("Failed to send htt monitor messages to target");
4111 					return status;
4112 				}
4113 
4114 			}
4115 		}
4116 	}
4117 
4118 	/*
4119 	 * Timer to reap rxdma status rings.
4120 	 * Needed until we enable ppdu end interrupts
4121 	 */
4122 	qdf_timer_init(soc->osdev, &soc->mon_reap_timer,
4123 			dp_service_mon_rings, (void *)soc,
4124 			QDF_TIMER_TYPE_WAKE_APPS);
4125 	soc->reap_timer_init = 1;
4126 	return status;
4127 }
4128 #else
4129 /* This is only for WIN */
4130 static QDF_STATUS dp_rxdma_ring_config(struct dp_soc *soc)
4131 {
4132 	int i;
4133 	int mac_id;
4134 	QDF_STATUS status = QDF_STATUS_SUCCESS;
4135 
4136 	for (i = 0; i < MAX_PDEV_CNT; i++) {
4137 		struct dp_pdev *pdev = soc->pdev_list[i];
4138 
4139 		if (pdev == NULL)
4140 			continue;
4141 
4142 		for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
4143 			int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, i);
4144 
4145 			htt_srng_setup(soc->htt_handle, mac_for_pdev,
4146 				pdev->rx_refill_buf_ring.hal_srng, RXDMA_BUF);
4147 #ifndef DISABLE_MON_CONFIG
4148 			htt_srng_setup(soc->htt_handle, mac_for_pdev,
4149 				pdev->rxdma_mon_buf_ring[mac_id].hal_srng,
4150 				RXDMA_MONITOR_BUF);
4151 			htt_srng_setup(soc->htt_handle, mac_for_pdev,
4152 				pdev->rxdma_mon_dst_ring[mac_id].hal_srng,
4153 				RXDMA_MONITOR_DST);
4154 			htt_srng_setup(soc->htt_handle, mac_for_pdev,
4155 				pdev->rxdma_mon_status_ring[mac_id].hal_srng,
4156 				RXDMA_MONITOR_STATUS);
4157 			htt_srng_setup(soc->htt_handle, mac_for_pdev,
4158 				pdev->rxdma_mon_desc_ring[mac_id].hal_srng,
4159 				RXDMA_MONITOR_DESC);
4160 #endif
4161 			htt_srng_setup(soc->htt_handle, mac_for_pdev,
4162 				pdev->rxdma_err_dst_ring[mac_id].hal_srng,
4163 				RXDMA_DST);
4164 		}
4165 	}
4166 	return status;
4167 }
4168 #endif
4169 
4170 /*
4171  * dp_soc_attach_target_wifi3() - SOC initialization in the target
4172  * @cdp_soc: Opaque Datapath SOC handle
4173  *
4174  * Return: zero on success, non-zero on failure
4175  */
4176 static QDF_STATUS
4177 dp_soc_attach_target_wifi3(struct cdp_soc_t *cdp_soc)
4178 {
4179 	struct dp_soc *soc = (struct dp_soc *)cdp_soc;
4180 	QDF_STATUS status = QDF_STATUS_SUCCESS;
4181 
4182 	htt_soc_attach_target(soc->htt_handle);
4183 
4184 	status = dp_rxdma_ring_config(soc);
4185 	if (status != QDF_STATUS_SUCCESS) {
4186 		dp_err("Failed to send htt srng setup messages to target");
4187 		return status;
4188 	}
4189 
4190 	DP_STATS_INIT(soc);
4191 
4192 	/* initialize work queue for stats processing */
4193 	qdf_create_work(0, &soc->htt_stats.work, htt_t2h_stats_handler, soc);
4194 
4195 	return QDF_STATUS_SUCCESS;
4196 }
4197 
4198 /*
4199  * dp_soc_get_nss_cfg_wifi3() - SOC get nss config
4200  * @txrx_soc: Datapath SOC handle
4201  */
4202 static int dp_soc_get_nss_cfg_wifi3(struct cdp_soc_t *cdp_soc)
4203 {
4204 	struct dp_soc *dsoc = (struct dp_soc *)cdp_soc;
4205 	return wlan_cfg_get_dp_soc_nss_cfg(dsoc->wlan_cfg_ctx);
4206 }
4207 /*
4208  * dp_soc_set_nss_cfg_wifi3() - SOC set nss config
4209  * @txrx_soc: Datapath SOC handle
4210  * @nss_cfg: nss config
4211  */
4212 static void dp_soc_set_nss_cfg_wifi3(struct cdp_soc_t *cdp_soc, int config)
4213 {
4214 	struct dp_soc *dsoc = (struct dp_soc *)cdp_soc;
4215 	struct wlan_cfg_dp_soc_ctxt *wlan_cfg_ctx = dsoc->wlan_cfg_ctx;
4216 
4217 	wlan_cfg_set_dp_soc_nss_cfg(wlan_cfg_ctx, config);
4218 
4219 	/*
4220 	 * TODO: masked out based on the per offloaded radio
4221 	 */
4222 	switch (config) {
4223 	case dp_nss_cfg_default:
4224 		break;
4225 	case dp_nss_cfg_dbdc:
4226 	case dp_nss_cfg_dbtc:
4227 		wlan_cfg_set_num_tx_desc_pool(wlan_cfg_ctx, 0);
4228 		wlan_cfg_set_num_tx_ext_desc_pool(wlan_cfg_ctx, 0);
4229 		wlan_cfg_set_num_tx_desc(wlan_cfg_ctx, 0);
4230 		wlan_cfg_set_num_tx_ext_desc(wlan_cfg_ctx, 0);
4231 		break;
4232 	default:
4233 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
4234 			  "Invalid offload config %d", config);
4235 	}
4236 
4237 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
4238 		  FL("nss-wifi<0> nss config is enabled"));
4239 }
4240 /*
4241 * dp_vdev_attach_wifi3() - attach txrx vdev
4242 * @txrx_pdev: Datapath PDEV handle
4243 * @vdev_mac_addr: MAC address of the virtual interface
4244 * @vdev_id: VDEV Id
4245 * @wlan_op_mode: VDEV operating mode
4246 *
4247 * Return: DP VDEV handle on success, NULL on failure
4248 */
4249 static struct cdp_vdev *dp_vdev_attach_wifi3(struct cdp_pdev *txrx_pdev,
4250 	uint8_t *vdev_mac_addr, uint8_t vdev_id, enum wlan_op_mode op_mode)
4251 {
4252 	struct dp_pdev *pdev = (struct dp_pdev *)txrx_pdev;
4253 	struct dp_soc *soc = pdev->soc;
4254 	struct dp_vdev *vdev = qdf_mem_malloc(sizeof(*vdev));
4255 
4256 	if (!vdev) {
4257 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
4258 			FL("DP VDEV memory allocation failed"));
4259 		goto fail0;
4260 	}
4261 
4262 	vdev->pdev = pdev;
4263 	vdev->vdev_id = vdev_id;
4264 	vdev->opmode = op_mode;
4265 	vdev->osdev = soc->osdev;
4266 
4267 	vdev->osif_rx = NULL;
4268 	vdev->osif_rsim_rx_decap = NULL;
4269 	vdev->osif_get_key = NULL;
4270 	vdev->osif_rx_mon = NULL;
4271 	vdev->osif_tx_free_ext = NULL;
4272 	vdev->osif_vdev = NULL;
4273 
4274 	vdev->delete.pending = 0;
4275 	vdev->safemode = 0;
4276 	vdev->drop_unenc = 1;
4277 	vdev->sec_type = cdp_sec_type_none;
4278 #ifdef notyet
4279 	vdev->filters_num = 0;
4280 #endif
4281 
4282 	qdf_mem_copy(
4283 		&vdev->mac_addr.raw[0], vdev_mac_addr, OL_TXRX_MAC_ADDR_LEN);
4284 
4285 	/* TODO: Initialize default HTT meta data that will be used in
4286 	 * TCL descriptors for packets transmitted from this VDEV
4287 	 */
4288 
4289 	TAILQ_INIT(&vdev->peer_list);
4290 
4291 	if ((soc->intr_mode == DP_INTR_POLL) &&
4292 	    wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx) != 0) {
4293 		if ((pdev->vdev_count == 0) ||
4294 		    (wlan_op_mode_monitor == vdev->opmode))
4295 			qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
4296 	}
4297 
4298 	if (wlan_op_mode_monitor == vdev->opmode) {
4299 		pdev->monitor_vdev = vdev;
4300 		return (struct cdp_vdev *)vdev;
4301 	}
4302 
4303 	vdev->tx_encap_type = wlan_cfg_pkt_type(soc->wlan_cfg_ctx);
4304 	vdev->rx_decap_type = wlan_cfg_pkt_type(soc->wlan_cfg_ctx);
4305 	vdev->dscp_tid_map_id = 0;
4306 	vdev->mcast_enhancement_en = 0;
4307 	vdev->raw_mode_war = wlan_cfg_get_raw_mode_war(soc->wlan_cfg_ctx);
4308 
4309 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
4310 	/* add this vdev into the pdev's list */
4311 	TAILQ_INSERT_TAIL(&pdev->vdev_list, vdev, vdev_list_elem);
4312 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
4313 	pdev->vdev_count++;
4314 
4315 	dp_tx_vdev_attach(vdev);
4316 
4317 	if (pdev->vdev_count == 1)
4318 		dp_lro_hash_setup(soc, pdev);
4319 
4320 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
4321 		"Created vdev %pK (%pM)", vdev, vdev->mac_addr.raw);
4322 	DP_STATS_INIT(vdev);
4323 
4324 	if (wlan_op_mode_sta == vdev->opmode)
4325 		dp_peer_create_wifi3((struct cdp_vdev *)vdev,
4326 							vdev->mac_addr.raw,
4327 							NULL);
4328 
4329 	return (struct cdp_vdev *)vdev;
4330 
4331 fail0:
4332 	return NULL;
4333 }
4334 
4335 /**
4336  * dp_vdev_register_wifi3() - Register VDEV operations from osif layer
4337  * @vdev: Datapath VDEV handle
4338  * @osif_vdev: OSIF vdev handle
4339  * @ctrl_vdev: UMAC vdev handle
4340  * @txrx_ops: Tx and Rx operations
4341  *
4342  * Return: DP VDEV handle on success, NULL on failure
4343  */
4344 static void dp_vdev_register_wifi3(struct cdp_vdev *vdev_handle,
4345 	void *osif_vdev, struct cdp_ctrl_objmgr_vdev *ctrl_vdev,
4346 	struct ol_txrx_ops *txrx_ops)
4347 {
4348 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
4349 	vdev->osif_vdev = osif_vdev;
4350 	vdev->ctrl_vdev = ctrl_vdev;
4351 	vdev->osif_rx = txrx_ops->rx.rx;
4352 	vdev->osif_rx_stack = txrx_ops->rx.rx_stack;
4353 	vdev->osif_rsim_rx_decap = txrx_ops->rx.rsim_rx_decap;
4354 	vdev->osif_get_key = txrx_ops->get_key;
4355 	vdev->osif_rx_mon = txrx_ops->rx.mon;
4356 	vdev->osif_tx_free_ext = txrx_ops->tx.tx_free_ext;
4357 #ifdef notyet
4358 #if ATH_SUPPORT_WAPI
4359 	vdev->osif_check_wai = txrx_ops->rx.wai_check;
4360 #endif
4361 #endif
4362 #ifdef UMAC_SUPPORT_PROXY_ARP
4363 	vdev->osif_proxy_arp = txrx_ops->proxy_arp;
4364 #endif
4365 	vdev->me_convert = txrx_ops->me_convert;
4366 
4367 	/* TODO: Enable the following once Tx code is integrated */
4368 	if (vdev->mesh_vdev)
4369 		txrx_ops->tx.tx = dp_tx_send_mesh;
4370 	else
4371 		txrx_ops->tx.tx = dp_tx_send;
4372 
4373 	txrx_ops->tx.tx_exception = dp_tx_send_exception;
4374 
4375 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW,
4376 		"DP Vdev Register success");
4377 }
4378 
4379 /**
4380  * dp_vdev_flush_peers() - Forcibily Flush peers of vdev
4381  * @vdev: Datapath VDEV handle
4382  * @unmap_only: Flag to indicate "only unmap"
4383  *
4384  * Return: void
4385  */
4386 static void dp_vdev_flush_peers(struct cdp_vdev *vdev_handle, bool unmap_only)
4387 {
4388 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
4389 	struct dp_pdev *pdev = vdev->pdev;
4390 	struct dp_soc *soc = pdev->soc;
4391 	struct dp_peer *peer;
4392 	uint16_t *peer_ids;
4393 	uint8_t i = 0, j = 0;
4394 
4395 	peer_ids = qdf_mem_malloc(soc->max_peers * sizeof(peer_ids[0]));
4396 	if (!peer_ids) {
4397 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
4398 			"DP alloc failure - unable to flush peers");
4399 		return;
4400 	}
4401 
4402 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
4403 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
4404 		for (i = 0; i < MAX_NUM_PEER_ID_PER_PEER; i++)
4405 			if (peer->peer_ids[i] != HTT_INVALID_PEER)
4406 				if (j < soc->max_peers)
4407 					peer_ids[j++] = peer->peer_ids[i];
4408 	}
4409 	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
4410 
4411 	for (i = 0; i < j ; i++) {
4412 		if (unmap_only) {
4413 			peer = __dp_peer_find_by_id(soc, peer_ids[i]);
4414 
4415 			if (peer) {
4416 				dp_rx_peer_unmap_handler(soc, peer_ids[i],
4417 							 vdev->vdev_id,
4418 							 peer->mac_addr.raw,
4419 							 0);
4420 			}
4421 		} else {
4422 			peer = dp_peer_find_by_id(soc, peer_ids[i]);
4423 
4424 			if (peer) {
4425 				dp_info("peer: %pM is getting flush",
4426 					peer->mac_addr.raw);
4427 
4428 				dp_peer_delete_wifi3(peer, 0);
4429 				/*
4430 				 * we need to call dp_peer_unref_del_find_by_id
4431 				 * to remove additional ref count incremented
4432 				 * by dp_peer_find_by_id() call.
4433 				 *
4434 				 * Hold the ref count while executing
4435 				 * dp_peer_delete_wifi3() call.
4436 				 *
4437 				 */
4438 				dp_peer_unref_del_find_by_id(peer);
4439 				dp_rx_peer_unmap_handler(soc, peer_ids[i],
4440 							 vdev->vdev_id,
4441 							 peer->mac_addr.raw, 0);
4442 			}
4443 		}
4444 	}
4445 
4446 	qdf_mem_free(peer_ids);
4447 
4448 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
4449 		FL("Flushed peers for vdev object %pK "), vdev);
4450 }
4451 
4452 /*
4453  * dp_vdev_detach_wifi3() - Detach txrx vdev
4454  * @txrx_vdev:		Datapath VDEV handle
4455  * @callback:		Callback OL_IF on completion of detach
4456  * @cb_context:	Callback context
4457  *
4458  */
4459 static void dp_vdev_detach_wifi3(struct cdp_vdev *vdev_handle,
4460 	ol_txrx_vdev_delete_cb callback, void *cb_context)
4461 {
4462 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
4463 	struct dp_pdev *pdev = vdev->pdev;
4464 	struct dp_soc *soc = pdev->soc;
4465 	struct dp_neighbour_peer *peer = NULL;
4466 	struct dp_neighbour_peer *temp_peer = NULL;
4467 
4468 	/* preconditions */
4469 	qdf_assert(vdev);
4470 
4471 	if (wlan_op_mode_monitor == vdev->opmode)
4472 		goto free_vdev;
4473 
4474 	if (wlan_op_mode_sta == vdev->opmode)
4475 		dp_peer_delete_wifi3(vdev->vap_bss_peer, 0);
4476 
4477 	/*
4478 	 * If Target is hung, flush all peers before detaching vdev
4479 	 * this will free all references held due to missing
4480 	 * unmap commands from Target
4481 	 */
4482 	if (!hif_is_target_ready(HIF_GET_SOFTC(soc->hif_handle)))
4483 		dp_vdev_flush_peers((struct cdp_vdev *)vdev, false);
4484 
4485 	/*
4486 	 * Use peer_ref_mutex while accessing peer_list, in case
4487 	 * a peer is in the process of being removed from the list.
4488 	 */
4489 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
4490 	/* check that the vdev has no peers allocated */
4491 	if (!TAILQ_EMPTY(&vdev->peer_list)) {
4492 		/* debug print - will be removed later */
4493 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_WARN,
4494 			FL("not deleting vdev object %pK (%pM)"
4495 			"until deletion finishes for all its peers"),
4496 			vdev, vdev->mac_addr.raw);
4497 		/* indicate that the vdev needs to be deleted */
4498 		vdev->delete.pending = 1;
4499 		vdev->delete.callback = callback;
4500 		vdev->delete.context = cb_context;
4501 		qdf_spin_unlock_bh(&soc->peer_ref_mutex);
4502 		return;
4503 	}
4504 	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
4505 
4506 	qdf_spin_lock_bh(&pdev->neighbour_peer_mutex);
4507 	if (!soc->hw_nac_monitor_support) {
4508 		TAILQ_FOREACH(peer, &pdev->neighbour_peers_list,
4509 			      neighbour_peer_list_elem) {
4510 			QDF_ASSERT(peer->vdev != vdev);
4511 		}
4512 	} else {
4513 		TAILQ_FOREACH_SAFE(peer, &pdev->neighbour_peers_list,
4514 				   neighbour_peer_list_elem, temp_peer) {
4515 			if (peer->vdev == vdev) {
4516 				TAILQ_REMOVE(&pdev->neighbour_peers_list, peer,
4517 					     neighbour_peer_list_elem);
4518 				qdf_mem_free(peer);
4519 			}
4520 		}
4521 	}
4522 	qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex);
4523 
4524 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
4525 	dp_tx_vdev_detach(vdev);
4526 	/* remove the vdev from its parent pdev's list */
4527 	TAILQ_REMOVE(&pdev->vdev_list, vdev, vdev_list_elem);
4528 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
4529 		FL("deleting vdev object %pK (%pM)"), vdev, vdev->mac_addr.raw);
4530 
4531 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
4532 free_vdev:
4533 	qdf_mem_free(vdev);
4534 
4535 	if (callback)
4536 		callback(cb_context);
4537 }
4538 
4539 /*
4540  * dp_peer_delete_ast_entries(): Delete all AST entries for a peer
4541  * @soc - datapath soc handle
4542  * @peer - datapath peer handle
4543  *
4544  * Delete the AST entries belonging to a peer
4545  */
4546 #ifdef FEATURE_AST
4547 static inline void dp_peer_delete_ast_entries(struct dp_soc *soc,
4548 					      struct dp_peer *peer)
4549 {
4550 	struct dp_ast_entry *ast_entry, *temp_ast_entry;
4551 
4552 	DP_PEER_ITERATE_ASE_LIST(peer, ast_entry, temp_ast_entry)
4553 		dp_peer_del_ast(soc, ast_entry);
4554 
4555 	peer->self_ast_entry = NULL;
4556 }
4557 #else
4558 static inline void dp_peer_delete_ast_entries(struct dp_soc *soc,
4559 					      struct dp_peer *peer)
4560 {
4561 }
4562 #endif
4563 
4564 #if ATH_SUPPORT_WRAP
4565 static inline struct dp_peer *dp_peer_can_reuse(struct dp_vdev *vdev,
4566 						uint8_t *peer_mac_addr)
4567 {
4568 	struct dp_peer *peer;
4569 
4570 	peer = dp_peer_find_hash_find(vdev->pdev->soc, peer_mac_addr,
4571 				      0, vdev->vdev_id);
4572 	if (!peer)
4573 		return NULL;
4574 
4575 	if (peer->bss_peer)
4576 		return peer;
4577 
4578 	dp_peer_unref_delete(peer);
4579 	return NULL;
4580 }
4581 #else
4582 static inline struct dp_peer *dp_peer_can_reuse(struct dp_vdev *vdev,
4583 						uint8_t *peer_mac_addr)
4584 {
4585 	struct dp_peer *peer;
4586 
4587 	peer = dp_peer_find_hash_find(vdev->pdev->soc, peer_mac_addr,
4588 				      0, vdev->vdev_id);
4589 	if (!peer)
4590 		return NULL;
4591 
4592 	if (peer->bss_peer && (peer->vdev->vdev_id == vdev->vdev_id))
4593 		return peer;
4594 
4595 	dp_peer_unref_delete(peer);
4596 	return NULL;
4597 }
4598 #endif
4599 
4600 #ifdef FEATURE_AST
4601 static inline void dp_peer_ast_handle_roam_del(struct dp_soc *soc,
4602 					       struct dp_pdev *pdev,
4603 					       uint8_t *peer_mac_addr)
4604 {
4605 	struct dp_ast_entry *ast_entry;
4606 
4607 	qdf_spin_lock_bh(&soc->ast_lock);
4608 	if (soc->ast_override_support)
4609 		ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, peer_mac_addr,
4610 							    pdev->pdev_id);
4611 	else
4612 		ast_entry = dp_peer_ast_hash_find_soc(soc, peer_mac_addr);
4613 
4614 	if (ast_entry && ast_entry->next_hop &&
4615 	    !ast_entry->delete_in_progress)
4616 		dp_peer_del_ast(soc, ast_entry);
4617 
4618 	qdf_spin_unlock_bh(&soc->ast_lock);
4619 }
4620 #endif
4621 
4622 /*
4623  * dp_peer_create_wifi3() - attach txrx peer
4624  * @txrx_vdev: Datapath VDEV handle
4625  * @peer_mac_addr: Peer MAC address
4626  *
4627  * Return: DP peeer handle on success, NULL on failure
4628  */
4629 static void *dp_peer_create_wifi3(struct cdp_vdev *vdev_handle,
4630 		uint8_t *peer_mac_addr, struct cdp_ctrl_objmgr_peer *ctrl_peer)
4631 {
4632 	struct dp_peer *peer;
4633 	int i;
4634 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
4635 	struct dp_pdev *pdev;
4636 	struct dp_soc *soc;
4637 	enum cdp_txrx_ast_entry_type ast_type = CDP_TXRX_AST_TYPE_STATIC;
4638 
4639 	/* preconditions */
4640 	qdf_assert(vdev);
4641 	qdf_assert(peer_mac_addr);
4642 
4643 	pdev = vdev->pdev;
4644 	soc = pdev->soc;
4645 
4646 	/*
4647 	 * If a peer entry with given MAC address already exists,
4648 	 * reuse the peer and reset the state of peer.
4649 	 */
4650 	peer = dp_peer_can_reuse(vdev, peer_mac_addr);
4651 
4652 	if (peer) {
4653 		qdf_atomic_init(&peer->is_default_route_set);
4654 		dp_peer_cleanup(vdev, peer);
4655 
4656 		qdf_spin_lock_bh(&soc->ast_lock);
4657 		dp_peer_delete_ast_entries(soc, peer);
4658 		peer->delete_in_progress = false;
4659 		qdf_spin_unlock_bh(&soc->ast_lock);
4660 
4661 		if ((vdev->opmode == wlan_op_mode_sta) &&
4662 		    !qdf_mem_cmp(peer_mac_addr, &vdev->mac_addr.raw[0],
4663 		     DP_MAC_ADDR_LEN)) {
4664 			ast_type = CDP_TXRX_AST_TYPE_SELF;
4665 		}
4666 
4667 		dp_peer_add_ast(soc, peer, peer_mac_addr, ast_type, 0);
4668 
4669 		/*
4670 		* Control path maintains a node count which is incremented
4671 		* for every new peer create command. Since new peer is not being
4672 		* created and earlier reference is reused here,
4673 		* peer_unref_delete event is sent to control path to
4674 		* increment the count back.
4675 		*/
4676 		if (soc->cdp_soc.ol_ops->peer_unref_delete) {
4677 			soc->cdp_soc.ol_ops->peer_unref_delete(pdev->ctrl_pdev,
4678 				peer->mac_addr.raw, vdev->mac_addr.raw,
4679 				vdev->opmode, peer->ctrl_peer, ctrl_peer);
4680 		}
4681 		peer->ctrl_peer = ctrl_peer;
4682 
4683 		dp_local_peer_id_alloc(pdev, peer);
4684 		DP_STATS_INIT(peer);
4685 
4686 		return (void *)peer;
4687 	} else {
4688 		/*
4689 		 * When a STA roams from RPTR AP to ROOT AP and vice versa, we
4690 		 * need to remove the AST entry which was earlier added as a WDS
4691 		 * entry.
4692 		 * If an AST entry exists, but no peer entry exists with a given
4693 		 * MAC addresses, we could deduce it as a WDS entry
4694 		 */
4695 		dp_peer_ast_handle_roam_del(soc, pdev, peer_mac_addr);
4696 	}
4697 
4698 #ifdef notyet
4699 	peer = (struct dp_peer *)qdf_mempool_alloc(soc->osdev,
4700 		soc->mempool_ol_ath_peer);
4701 #else
4702 	peer = (struct dp_peer *)qdf_mem_malloc(sizeof(*peer));
4703 #endif
4704 
4705 	if (!peer)
4706 		return NULL; /* failure */
4707 
4708 	qdf_mem_zero(peer, sizeof(struct dp_peer));
4709 
4710 	TAILQ_INIT(&peer->ast_entry_list);
4711 
4712 	/* store provided params */
4713 	peer->vdev = vdev;
4714 	peer->ctrl_peer = ctrl_peer;
4715 
4716 	if ((vdev->opmode == wlan_op_mode_sta) &&
4717 	    !qdf_mem_cmp(peer_mac_addr, &vdev->mac_addr.raw[0],
4718 			 DP_MAC_ADDR_LEN)) {
4719 		ast_type = CDP_TXRX_AST_TYPE_SELF;
4720 	}
4721 
4722 	dp_peer_add_ast(soc, peer, peer_mac_addr, ast_type, 0);
4723 
4724 	qdf_spinlock_create(&peer->peer_info_lock);
4725 
4726 	qdf_mem_copy(
4727 		&peer->mac_addr.raw[0], peer_mac_addr, OL_TXRX_MAC_ADDR_LEN);
4728 
4729 	/* TODO: See of rx_opt_proc is really required */
4730 	peer->rx_opt_proc = soc->rx_opt_proc;
4731 
4732 	/* initialize the peer_id */
4733 	for (i = 0; i < MAX_NUM_PEER_ID_PER_PEER; i++)
4734 		peer->peer_ids[i] = HTT_INVALID_PEER;
4735 
4736 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
4737 
4738 	qdf_atomic_init(&peer->ref_cnt);
4739 
4740 	/* keep one reference for attach */
4741 	qdf_atomic_inc(&peer->ref_cnt);
4742 
4743 	/* add this peer into the vdev's list */
4744 	if (wlan_op_mode_sta == vdev->opmode)
4745 		TAILQ_INSERT_HEAD(&vdev->peer_list, peer, peer_list_elem);
4746 	else
4747 		TAILQ_INSERT_TAIL(&vdev->peer_list, peer, peer_list_elem);
4748 
4749 	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
4750 
4751 	/* TODO: See if hash based search is required */
4752 	dp_peer_find_hash_add(soc, peer);
4753 
4754 	/* Initialize the peer state */
4755 	peer->state = OL_TXRX_PEER_STATE_DISC;
4756 
4757 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
4758 		"vdev %pK created peer %pK (%pM) ref_cnt: %d",
4759 		vdev, peer, peer->mac_addr.raw,
4760 		qdf_atomic_read(&peer->ref_cnt));
4761 	/*
4762 	 * For every peer MAp message search and set if bss_peer
4763 	 */
4764 	if (memcmp(peer->mac_addr.raw, vdev->mac_addr.raw, 6) == 0) {
4765 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
4766 			"vdev bss_peer!!!!");
4767 		peer->bss_peer = 1;
4768 		vdev->vap_bss_peer = peer;
4769 	}
4770 	for (i = 0; i < DP_MAX_TIDS; i++)
4771 		qdf_spinlock_create(&peer->rx_tid[i].tid_lock);
4772 
4773 	dp_local_peer_id_alloc(pdev, peer);
4774 	DP_STATS_INIT(peer);
4775 	return (void *)peer;
4776 }
4777 
4778 /*
4779  * dp_vdev_get_default_reo_hash() - get reo dest ring and hash values for a vdev
4780  * @vdev: Datapath VDEV handle
4781  * @reo_dest: pointer to default reo_dest ring for vdev to be populated
4782  * @hash_based: pointer to hash value (enabled/disabled) to be populated
4783  *
4784  * Return: None
4785  */
4786 static
4787 void dp_vdev_get_default_reo_hash(struct dp_vdev *vdev,
4788 				  enum cdp_host_reo_dest_ring *reo_dest,
4789 				  bool *hash_based)
4790 {
4791 	struct dp_soc *soc;
4792 	struct dp_pdev *pdev;
4793 
4794 	pdev = vdev->pdev;
4795 	soc = pdev->soc;
4796 	/*
4797 	 * hash based steering is disabled for Radios which are offloaded
4798 	 * to NSS
4799 	 */
4800 	if (!wlan_cfg_get_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx))
4801 		*hash_based = wlan_cfg_is_rx_hash_enabled(soc->wlan_cfg_ctx);
4802 
4803 	/*
4804 	 * Below line of code will ensure the proper reo_dest ring is chosen
4805 	 * for cases where toeplitz hash cannot be generated (ex: non TCP/UDP)
4806 	 */
4807 	*reo_dest = pdev->reo_dest;
4808 }
4809 
4810 #ifdef IPA_OFFLOAD
4811 /*
4812  * dp_peer_setup_get_reo_hash() - get reo dest ring and hash values for a peer
4813  * @vdev: Datapath VDEV handle
4814  * @reo_dest: pointer to default reo_dest ring for vdev to be populated
4815  * @hash_based: pointer to hash value (enabled/disabled) to be populated
4816  *
4817  * If IPA is enabled in ini, for SAP mode, disable hash based
4818  * steering, use default reo_dst ring for RX. Use config values for other modes.
4819  * Return: None
4820  */
4821 static void dp_peer_setup_get_reo_hash(struct dp_vdev *vdev,
4822 				       enum cdp_host_reo_dest_ring *reo_dest,
4823 				       bool *hash_based)
4824 {
4825 	struct dp_soc *soc;
4826 	struct dp_pdev *pdev;
4827 
4828 	pdev = vdev->pdev;
4829 	soc = pdev->soc;
4830 
4831 	dp_vdev_get_default_reo_hash(vdev, reo_dest, hash_based);
4832 
4833 	/*
4834 	 * If IPA is enabled, disable hash-based flow steering and set
4835 	 * reo_dest_ring_4 as the REO ring to receive packets on.
4836 	 * IPA is configured to reap reo_dest_ring_4.
4837 	 *
4838 	 * Note - REO DST indexes are from 0 - 3, while cdp_host_reo_dest_ring
4839 	 * value enum value is from 1 - 4.
4840 	 * Hence, *reo_dest = IPA_REO_DEST_RING_IDX + 1
4841 	 */
4842 	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) {
4843 		if (vdev->opmode == wlan_op_mode_ap) {
4844 			*reo_dest = IPA_REO_DEST_RING_IDX + 1;
4845 			*hash_based = 0;
4846 		}
4847 	}
4848 }
4849 
4850 #else
4851 
4852 /*
4853  * dp_peer_setup_get_reo_hash() - get reo dest ring and hash values for a peer
4854  * @vdev: Datapath VDEV handle
4855  * @reo_dest: pointer to default reo_dest ring for vdev to be populated
4856  * @hash_based: pointer to hash value (enabled/disabled) to be populated
4857  *
4858  * Use system config values for hash based steering.
4859  * Return: None
4860  */
4861 
4862 static void dp_peer_setup_get_reo_hash(struct dp_vdev *vdev,
4863 				       enum cdp_host_reo_dest_ring *reo_dest,
4864 				       bool *hash_based)
4865 {
4866 	dp_vdev_get_default_reo_hash(vdev, reo_dest, hash_based);
4867 }
4868 #endif /* IPA_OFFLOAD */
4869 
4870 /*
4871  * dp_peer_setup_wifi3() - initialize the peer
4872  * @vdev_hdl: virtual device object
4873  * @peer: Peer object
4874  *
4875  * Return: void
4876  */
4877 static void dp_peer_setup_wifi3(struct cdp_vdev *vdev_hdl, void *peer_hdl)
4878 {
4879 	struct dp_peer *peer = (struct dp_peer *)peer_hdl;
4880 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_hdl;
4881 	struct dp_pdev *pdev;
4882 	struct dp_soc *soc;
4883 	bool hash_based = 0;
4884 	enum cdp_host_reo_dest_ring reo_dest;
4885 
4886 	/* preconditions */
4887 	qdf_assert(vdev);
4888 	qdf_assert(peer);
4889 
4890 	pdev = vdev->pdev;
4891 	soc = pdev->soc;
4892 
4893 	peer->last_assoc_rcvd = 0;
4894 	peer->last_disassoc_rcvd = 0;
4895 	peer->last_deauth_rcvd = 0;
4896 
4897 	dp_peer_setup_get_reo_hash(vdev, &reo_dest, &hash_based);
4898 
4899 	dp_info("pdev: %d vdev :%d opmode:%u hash-based-steering:%d default-reo_dest:%u",
4900 		pdev->pdev_id, vdev->vdev_id,
4901 		vdev->opmode, hash_based, reo_dest);
4902 
4903 
4904 	/*
4905 	 * There are corner cases where the AD1 = AD2 = "VAPs address"
4906 	 * i.e both the devices have same MAC address. In these
4907 	 * cases we want such pkts to be processed in NULL Q handler
4908 	 * which is REO2TCL ring. for this reason we should
4909 	 * not setup reo_queues and default route for bss_peer.
4910 	 */
4911 	if (peer->bss_peer && vdev->opmode == wlan_op_mode_ap)
4912 		return;
4913 
4914 	if (soc->cdp_soc.ol_ops->peer_set_default_routing) {
4915 		/* TODO: Check the destination ring number to be passed to FW */
4916 		soc->cdp_soc.ol_ops->peer_set_default_routing(
4917 				pdev->ctrl_pdev, peer->mac_addr.raw,
4918 				peer->vdev->vdev_id, hash_based, reo_dest);
4919 	}
4920 
4921 	qdf_atomic_set(&peer->is_default_route_set, 1);
4922 
4923 	dp_peer_rx_init(pdev, peer);
4924 	return;
4925 }
4926 
4927 /*
4928  * dp_set_vdev_tx_encap_type() - set the encap type of the vdev
4929  * @vdev_handle: virtual device object
4930  * @htt_pkt_type: type of pkt
4931  *
4932  * Return: void
4933  */
4934 static void dp_set_vdev_tx_encap_type(struct cdp_vdev *vdev_handle,
4935 	 enum htt_cmn_pkt_type val)
4936 {
4937 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
4938 	vdev->tx_encap_type = val;
4939 }
4940 
4941 /*
4942  * dp_set_vdev_rx_decap_type() - set the decap type of the vdev
4943  * @vdev_handle: virtual device object
4944  * @htt_pkt_type: type of pkt
4945  *
4946  * Return: void
4947  */
4948 static void dp_set_vdev_rx_decap_type(struct cdp_vdev *vdev_handle,
4949 	 enum htt_cmn_pkt_type val)
4950 {
4951 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
4952 	vdev->rx_decap_type = val;
4953 }
4954 
4955 /*
4956  * dp_set_ba_aging_timeout() - set ba aging timeout per AC
4957  * @txrx_soc: cdp soc handle
4958  * @ac: Access category
4959  * @value: timeout value in millisec
4960  *
4961  * Return: void
4962  */
4963 static void dp_set_ba_aging_timeout(struct cdp_soc_t *txrx_soc,
4964 				    uint8_t ac, uint32_t value)
4965 {
4966 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
4967 
4968 	hal_set_ba_aging_timeout(soc->hal_soc, ac, value);
4969 }
4970 
4971 /*
4972  * dp_get_ba_aging_timeout() - get ba aging timeout per AC
4973  * @txrx_soc: cdp soc handle
4974  * @ac: access category
4975  * @value: timeout value in millisec
4976  *
4977  * Return: void
4978  */
4979 static void dp_get_ba_aging_timeout(struct cdp_soc_t *txrx_soc,
4980 				    uint8_t ac, uint32_t *value)
4981 {
4982 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
4983 
4984 	hal_get_ba_aging_timeout(soc->hal_soc, ac, value);
4985 }
4986 
4987 /*
4988  * dp_set_pdev_reo_dest() - set the reo destination ring for this pdev
4989  * @pdev_handle: physical device object
4990  * @val: reo destination ring index (1 - 4)
4991  *
4992  * Return: void
4993  */
4994 static void dp_set_pdev_reo_dest(struct cdp_pdev *pdev_handle,
4995 	 enum cdp_host_reo_dest_ring val)
4996 {
4997 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
4998 
4999 	if (pdev)
5000 		pdev->reo_dest = val;
5001 }
5002 
5003 /*
5004  * dp_get_pdev_reo_dest() - get the reo destination for this pdev
5005  * @pdev_handle: physical device object
5006  *
5007  * Return: reo destination ring index
5008  */
5009 static enum cdp_host_reo_dest_ring
5010 dp_get_pdev_reo_dest(struct cdp_pdev *pdev_handle)
5011 {
5012 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
5013 
5014 	if (pdev)
5015 		return pdev->reo_dest;
5016 	else
5017 		return cdp_host_reo_dest_ring_unknown;
5018 }
5019 
5020 /*
5021  * dp_set_filter_neighbour_peers() - set filter neighbour peers for smart mesh
5022  * @pdev_handle: device object
5023  * @val: value to be set
5024  *
5025  * Return: void
5026  */
5027 static int dp_set_filter_neighbour_peers(struct cdp_pdev *pdev_handle,
5028 	 uint32_t val)
5029 {
5030 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
5031 
5032 	/* Enable/Disable smart mesh filtering. This flag will be checked
5033 	 * during rx processing to check if packets are from NAC clients.
5034 	 */
5035 	pdev->filter_neighbour_peers = val;
5036 	return 0;
5037 }
5038 
5039 /*
5040  * dp_update_filter_neighbour_peers() - set neighbour peers(nac clients)
5041  * address for smart mesh filtering
5042  * @vdev_handle: virtual device object
5043  * @cmd: Add/Del command
5044  * @macaddr: nac client mac address
5045  *
5046  * Return: void
5047  */
5048 static int dp_update_filter_neighbour_peers(struct cdp_vdev *vdev_handle,
5049 					    uint32_t cmd, uint8_t *macaddr)
5050 {
5051 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
5052 	struct dp_pdev *pdev = vdev->pdev;
5053 	struct dp_neighbour_peer *peer = NULL;
5054 
5055 	if (!macaddr)
5056 		goto fail0;
5057 
5058 	/* Store address of NAC (neighbour peer) which will be checked
5059 	 * against TA of received packets.
5060 	 */
5061 	if (cmd == DP_NAC_PARAM_ADD) {
5062 		peer = (struct dp_neighbour_peer *) qdf_mem_malloc(
5063 				sizeof(*peer));
5064 
5065 		if (!peer) {
5066 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
5067 				FL("DP neighbour peer node memory allocation failed"));
5068 			goto fail0;
5069 		}
5070 
5071 		qdf_mem_copy(&peer->neighbour_peers_macaddr.raw[0],
5072 			macaddr, DP_MAC_ADDR_LEN);
5073 		peer->vdev = vdev;
5074 
5075 		qdf_spin_lock_bh(&pdev->neighbour_peer_mutex);
5076 
5077 		/* add this neighbour peer into the list */
5078 		TAILQ_INSERT_TAIL(&pdev->neighbour_peers_list, peer,
5079 				neighbour_peer_list_elem);
5080 		qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex);
5081 
5082 		/* first neighbour */
5083 		if (!pdev->neighbour_peers_added) {
5084 			pdev->neighbour_peers_added = true;
5085 			dp_ppdu_ring_cfg(pdev);
5086 		}
5087 		return 1;
5088 
5089 	} else if (cmd == DP_NAC_PARAM_DEL) {
5090 		qdf_spin_lock_bh(&pdev->neighbour_peer_mutex);
5091 		TAILQ_FOREACH(peer, &pdev->neighbour_peers_list,
5092 				neighbour_peer_list_elem) {
5093 			if (!qdf_mem_cmp(&peer->neighbour_peers_macaddr.raw[0],
5094 				macaddr, DP_MAC_ADDR_LEN)) {
5095 				/* delete this peer from the list */
5096 				TAILQ_REMOVE(&pdev->neighbour_peers_list,
5097 					peer, neighbour_peer_list_elem);
5098 				qdf_mem_free(peer);
5099 				break;
5100 			}
5101 		}
5102 		/* last neighbour deleted */
5103 		if (TAILQ_EMPTY(&pdev->neighbour_peers_list)) {
5104 			pdev->neighbour_peers_added = false;
5105 			dp_ppdu_ring_cfg(pdev);
5106 		}
5107 
5108 		qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex);
5109 
5110 		if (!pdev->mcopy_mode && !pdev->neighbour_peers_added &&
5111 		    !pdev->enhanced_stats_en)
5112 			dp_ppdu_ring_reset(pdev);
5113 		return 1;
5114 
5115 	}
5116 
5117 fail0:
5118 	return 0;
5119 }
5120 
5121 /*
5122  * dp_get_sec_type() - Get the security type
5123  * @peer:		Datapath peer handle
5124  * @sec_idx:    Security id (mcast, ucast)
5125  *
5126  * return sec_type: Security type
5127  */
5128 static int dp_get_sec_type(struct cdp_peer *peer, uint8_t sec_idx)
5129 {
5130 	struct dp_peer *dpeer = (struct dp_peer *)peer;
5131 
5132 	return dpeer->security[sec_idx].sec_type;
5133 }
5134 
5135 /*
5136  * dp_peer_authorize() - authorize txrx peer
5137  * @peer_handle:		Datapath peer handle
5138  * @authorize
5139  *
5140  */
5141 static void dp_peer_authorize(struct cdp_peer *peer_handle, uint32_t authorize)
5142 {
5143 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
5144 	struct dp_soc *soc;
5145 
5146 	if (peer != NULL) {
5147 		soc = peer->vdev->pdev->soc;
5148 		qdf_spin_lock_bh(&soc->peer_ref_mutex);
5149 		peer->authorize = authorize ? 1 : 0;
5150 		qdf_spin_unlock_bh(&soc->peer_ref_mutex);
5151 	}
5152 }
5153 
5154 static void dp_reset_and_release_peer_mem(struct dp_soc *soc,
5155 					  struct dp_pdev *pdev,
5156 					  struct dp_peer *peer,
5157 					  uint32_t vdev_id)
5158 {
5159 	struct dp_vdev *vdev = NULL;
5160 	struct dp_peer *bss_peer = NULL;
5161 	uint8_t *m_addr = NULL;
5162 
5163 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
5164 	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
5165 		if (vdev->vdev_id == vdev_id)
5166 			break;
5167 	}
5168 	if (!vdev) {
5169 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
5170 			  "vdev is NULL");
5171 	} else {
5172 		if (vdev->vap_bss_peer == peer)
5173 		    vdev->vap_bss_peer = NULL;
5174 		m_addr = peer->mac_addr.raw;
5175 		if (soc->cdp_soc.ol_ops->peer_unref_delete)
5176 			soc->cdp_soc.ol_ops->peer_unref_delete(pdev->ctrl_pdev,
5177 				m_addr, vdev->mac_addr.raw, vdev->opmode,
5178 				peer->ctrl_peer, NULL);
5179 
5180 		if (vdev && vdev->vap_bss_peer) {
5181 		    bss_peer = vdev->vap_bss_peer;
5182 		    DP_UPDATE_STATS(vdev, peer);
5183 		}
5184 	}
5185 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
5186 
5187 	/*
5188 	 * Peer AST list hast to be empty here
5189 	 */
5190 	DP_AST_ASSERT(TAILQ_EMPTY(&peer->ast_entry_list));
5191 
5192 	qdf_mem_free(peer);
5193 }
5194 
5195 /**
5196  * dp_delete_pending_vdev() - check and process vdev delete
5197  * @pdev: DP specific pdev pointer
5198  * @vdev: DP specific vdev pointer
5199  * @vdev_id: vdev id corresponding to vdev
5200  *
5201  * This API does following:
5202  * 1) It releases tx flow pools buffers as vdev is
5203  *    going down and no peers are associated.
5204  * 2) It also detaches vdev before cleaning vdev (struct dp_vdev) memory
5205  */
5206 static void dp_delete_pending_vdev(struct dp_pdev *pdev, struct dp_vdev *vdev,
5207 				   uint8_t vdev_id)
5208 {
5209 	ol_txrx_vdev_delete_cb vdev_delete_cb = NULL;
5210 	void *vdev_delete_context = NULL;
5211 
5212 	vdev_delete_cb = vdev->delete.callback;
5213 	vdev_delete_context = vdev->delete.context;
5214 
5215 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
5216 		  FL("deleting vdev object %pK (%pM)- its last peer is done"),
5217 		  vdev, vdev->mac_addr.raw);
5218 	/* all peers are gone, go ahead and delete it */
5219 	dp_tx_flow_pool_unmap_handler(pdev, vdev_id,
5220 			FLOW_TYPE_VDEV, vdev_id);
5221 	dp_tx_vdev_detach(vdev);
5222 
5223 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
5224 	TAILQ_REMOVE(&pdev->vdev_list, vdev, vdev_list_elem);
5225 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
5226 
5227 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
5228 		  FL("deleting vdev object %pK (%pM)"),
5229 		  vdev, vdev->mac_addr.raw);
5230 	qdf_mem_free(vdev);
5231 	vdev = NULL;
5232 
5233 	if (vdev_delete_cb)
5234 		vdev_delete_cb(vdev_delete_context);
5235 }
5236 
5237 /*
5238  * dp_peer_unref_delete() - unref and delete peer
5239  * @peer_handle:		Datapath peer handle
5240  *
5241  */
5242 void dp_peer_unref_delete(void *peer_handle)
5243 {
5244 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
5245 	struct dp_vdev *vdev = peer->vdev;
5246 	struct dp_pdev *pdev = vdev->pdev;
5247 	struct dp_soc *soc = pdev->soc;
5248 	struct dp_peer *tmppeer;
5249 	int found = 0;
5250 	uint16_t peer_id;
5251 	uint16_t vdev_id;
5252 	bool delete_vdev;
5253 
5254 	/*
5255 	 * Hold the lock all the way from checking if the peer ref count
5256 	 * is zero until the peer references are removed from the hash
5257 	 * table and vdev list (if the peer ref count is zero).
5258 	 * This protects against a new HL tx operation starting to use the
5259 	 * peer object just after this function concludes it's done being used.
5260 	 * Furthermore, the lock needs to be held while checking whether the
5261 	 * vdev's list of peers is empty, to make sure that list is not modified
5262 	 * concurrently with the empty check.
5263 	 */
5264 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
5265 	if (qdf_atomic_dec_and_test(&peer->ref_cnt)) {
5266 		peer_id = peer->peer_ids[0];
5267 		vdev_id = vdev->vdev_id;
5268 
5269 		/*
5270 		 * Make sure that the reference to the peer in
5271 		 * peer object map is removed
5272 		 */
5273 		if (peer_id != HTT_INVALID_PEER)
5274 			soc->peer_id_to_obj_map[peer_id] = NULL;
5275 
5276 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
5277 			"Deleting peer %pK (%pM)", peer, peer->mac_addr.raw);
5278 
5279 		/* remove the reference to the peer from the hash table */
5280 		dp_peer_find_hash_remove(soc, peer);
5281 
5282 		qdf_spin_lock_bh(&soc->ast_lock);
5283 		if (peer->self_ast_entry) {
5284 			dp_peer_del_ast(soc, peer->self_ast_entry);
5285 			peer->self_ast_entry = NULL;
5286 		}
5287 		qdf_spin_unlock_bh(&soc->ast_lock);
5288 
5289 		TAILQ_FOREACH(tmppeer, &peer->vdev->peer_list, peer_list_elem) {
5290 			if (tmppeer == peer) {
5291 				found = 1;
5292 				break;
5293 			}
5294 		}
5295 
5296 		if (found) {
5297 			TAILQ_REMOVE(&peer->vdev->peer_list, peer,
5298 				peer_list_elem);
5299 		} else {
5300 			/*Ignoring the remove operation as peer not found*/
5301 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
5302 				  "peer:%pK not found in vdev:%pK peerlist:%pK",
5303 				  peer, vdev, &peer->vdev->peer_list);
5304 		}
5305 
5306 		/* cleanup the peer data */
5307 		dp_peer_cleanup(vdev, peer);
5308 
5309 		/* check whether the parent vdev has no peers left */
5310 		if (TAILQ_EMPTY(&vdev->peer_list)) {
5311 			/*
5312 			 * capture vdev delete pending flag's status
5313 			 * while holding peer_ref_mutex lock
5314 			 */
5315 			delete_vdev = vdev->delete.pending;
5316 			/*
5317 			 * Now that there are no references to the peer, we can
5318 			 * release the peer reference lock.
5319 			 */
5320 			qdf_spin_unlock_bh(&soc->peer_ref_mutex);
5321 			/*
5322 			 * Check if the parent vdev was waiting for its peers
5323 			 * to be deleted, in order for it to be deleted too.
5324 			 */
5325 			if (delete_vdev)
5326 				dp_delete_pending_vdev(pdev, vdev, vdev_id);
5327 		} else {
5328 			qdf_spin_unlock_bh(&soc->peer_ref_mutex);
5329 		}
5330 		dp_reset_and_release_peer_mem(soc, pdev, peer, vdev_id);
5331 
5332 	} else {
5333 		qdf_spin_unlock_bh(&soc->peer_ref_mutex);
5334 	}
5335 }
5336 
5337 /*
5338  * dp_peer_detach_wifi3() – Detach txrx peer
5339  * @peer_handle: Datapath peer handle
5340  * @bitmap: bitmap indicating special handling of request.
5341  *
5342  */
5343 static void dp_peer_delete_wifi3(void *peer_handle, uint32_t bitmap)
5344 {
5345 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
5346 
5347 	/* redirect the peer's rx delivery function to point to a
5348 	 * discard func
5349 	 */
5350 
5351 	peer->rx_opt_proc = dp_rx_discard;
5352 
5353 	/* Do not make ctrl_peer to NULL for connected sta peers.
5354 	 * We need ctrl_peer to release the reference during dp
5355 	 * peer free. This reference was held for
5356 	 * obj_mgr peer during the creation of dp peer.
5357 	 */
5358 	if (!(peer->vdev && (peer->vdev->opmode != wlan_op_mode_sta) &&
5359 	      !peer->bss_peer))
5360 		peer->ctrl_peer = NULL;
5361 
5362 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
5363 		FL("peer %pK (%pM)"),  peer, peer->mac_addr.raw);
5364 
5365 	dp_local_peer_id_free(peer->vdev->pdev, peer);
5366 	qdf_spinlock_destroy(&peer->peer_info_lock);
5367 
5368 	/*
5369 	 * Remove the reference added during peer_attach.
5370 	 * The peer will still be left allocated until the
5371 	 * PEER_UNMAP message arrives to remove the other
5372 	 * reference, added by the PEER_MAP message.
5373 	 */
5374 	dp_peer_unref_delete(peer_handle);
5375 }
5376 
5377 /*
5378  * dp_get_vdev_mac_addr_wifi3() – Detach txrx peer
5379  * @peer_handle:		Datapath peer handle
5380  *
5381  */
5382 static uint8 *dp_get_vdev_mac_addr_wifi3(struct cdp_vdev *pvdev)
5383 {
5384 	struct dp_vdev *vdev = (struct dp_vdev *)pvdev;
5385 	return vdev->mac_addr.raw;
5386 }
5387 
5388 /*
5389  * dp_vdev_set_wds() - Enable per packet stats
5390  * @vdev_handle: DP VDEV handle
5391  * @val: value
5392  *
5393  * Return: none
5394  */
5395 static int dp_vdev_set_wds(void *vdev_handle, uint32_t val)
5396 {
5397 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
5398 
5399 	vdev->wds_enabled = val;
5400 	return 0;
5401 }
5402 
5403 /*
5404  * dp_get_vdev_from_vdev_id_wifi3() – Detach txrx peer
5405  * @peer_handle:		Datapath peer handle
5406  *
5407  */
5408 static struct cdp_vdev *dp_get_vdev_from_vdev_id_wifi3(struct cdp_pdev *dev,
5409 						uint8_t vdev_id)
5410 {
5411 	struct dp_pdev *pdev = (struct dp_pdev *)dev;
5412 	struct dp_vdev *vdev = NULL;
5413 
5414 	if (qdf_unlikely(!pdev))
5415 		return NULL;
5416 
5417 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
5418 	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
5419 		if (vdev->vdev_id == vdev_id)
5420 			break;
5421 	}
5422 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
5423 
5424 	return (struct cdp_vdev *)vdev;
5425 }
5426 
5427 /*
5428  * dp_get_mon_vdev_from_pdev_wifi3() - Get vdev handle of monitor mode
5429  * @dev: PDEV handle
5430  *
5431  * Return: VDEV handle of monitor mode
5432  */
5433 
5434 static struct cdp_vdev *dp_get_mon_vdev_from_pdev_wifi3(struct cdp_pdev *dev)
5435 {
5436 	struct dp_pdev *pdev = (struct dp_pdev *)dev;
5437 
5438 	if (qdf_unlikely(!pdev))
5439 		return NULL;
5440 
5441 	return (struct cdp_vdev *)pdev->monitor_vdev;
5442 }
5443 
5444 static int dp_get_opmode(struct cdp_vdev *vdev_handle)
5445 {
5446 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
5447 
5448 	return vdev->opmode;
5449 }
5450 
5451 static
5452 void dp_get_os_rx_handles_from_vdev_wifi3(struct cdp_vdev *pvdev,
5453 					  ol_txrx_rx_fp *stack_fn_p,
5454 					  ol_osif_vdev_handle *osif_vdev_p)
5455 {
5456 	struct dp_vdev *vdev = dp_get_dp_vdev_from_cdp_vdev(pvdev);
5457 
5458 	qdf_assert(vdev);
5459 	*stack_fn_p = vdev->osif_rx_stack;
5460 	*osif_vdev_p = vdev->osif_vdev;
5461 }
5462 
5463 static struct cdp_cfg *dp_get_ctrl_pdev_from_vdev_wifi3(struct cdp_vdev *pvdev)
5464 {
5465 	struct dp_vdev *vdev = (struct dp_vdev *)pvdev;
5466 	struct dp_pdev *pdev = vdev->pdev;
5467 
5468 	return (struct cdp_cfg *)pdev->wlan_cfg_ctx;
5469 }
5470 
5471 /**
5472  * dp_monitor_mode_ring_config() - Send the tlv config to fw for monitor buffer
5473  *                                 ring based on target
5474  * @soc: soc handle
5475  * @mac_for_pdev: pdev_id
5476  * @pdev: physical device handle
5477  * @ring_num: mac id
5478  * @htt_tlv_filter: tlv filter
5479  *
5480  * Return: zero on success, non-zero on failure
5481  */
5482 static inline
5483 QDF_STATUS dp_monitor_mode_ring_config(struct dp_soc *soc, uint8_t mac_for_pdev,
5484 				       struct dp_pdev *pdev, uint8_t ring_num,
5485 				       struct htt_rx_ring_tlv_filter htt_tlv_filter)
5486 {
5487 	QDF_STATUS status;
5488 
5489 	if (soc->wlan_cfg_ctx->rxdma1_enable)
5490 		status = htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
5491 					     pdev->rxdma_mon_buf_ring[ring_num]
5492 					     .hal_srng,
5493 					     RXDMA_MONITOR_BUF, RX_BUFFER_SIZE,
5494 					     &htt_tlv_filter);
5495 	else
5496 		status = htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
5497 					     pdev->rx_mac_buf_ring[ring_num]
5498 					     .hal_srng,
5499 					     RXDMA_BUF, RX_BUFFER_SIZE,
5500 					     &htt_tlv_filter);
5501 
5502 	return status;
5503 }
5504 
5505 /**
5506  * dp_reset_monitor_mode() - Disable monitor mode
5507  * @pdev_handle: Datapath PDEV handle
5508  *
5509  * Return: 0 on success, not 0 on failure
5510  */
5511 static QDF_STATUS dp_reset_monitor_mode(struct cdp_pdev *pdev_handle)
5512 {
5513 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
5514 	struct htt_rx_ring_tlv_filter htt_tlv_filter;
5515 	struct dp_soc *soc = pdev->soc;
5516 	uint8_t pdev_id;
5517 	int mac_id;
5518 	QDF_STATUS status = QDF_STATUS_SUCCESS;
5519 
5520 	pdev_id = pdev->pdev_id;
5521 	soc = pdev->soc;
5522 
5523 	qdf_spin_lock_bh(&pdev->mon_lock);
5524 
5525 	qdf_mem_zero(&(htt_tlv_filter), sizeof(htt_tlv_filter));
5526 
5527 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
5528 		int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, pdev_id);
5529 
5530 		status = dp_monitor_mode_ring_config(soc, mac_for_pdev,
5531 						     pdev, mac_id,
5532 						     htt_tlv_filter);
5533 
5534 		if (status != QDF_STATUS_SUCCESS) {
5535 			dp_err("Failed to send tlv filter for monitor mode rings");
5536 			return status;
5537 		}
5538 
5539 		htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
5540 			    pdev->rxdma_mon_status_ring[mac_id].hal_srng,
5541 			    RXDMA_MONITOR_STATUS, RX_BUFFER_SIZE,
5542 			    &htt_tlv_filter);
5543 	}
5544 
5545 	pdev->monitor_vdev = NULL;
5546 	pdev->mcopy_mode = 0;
5547 	pdev->monitor_configured = false;
5548 
5549 	qdf_spin_unlock_bh(&pdev->mon_lock);
5550 
5551 	return QDF_STATUS_SUCCESS;
5552 }
5553 
5554 /**
5555  * dp_set_nac() - set peer_nac
5556  * @peer_handle: Datapath PEER handle
5557  *
5558  * Return: void
5559  */
5560 static void dp_set_nac(struct cdp_peer *peer_handle)
5561 {
5562 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
5563 
5564 	peer->nac = 1;
5565 }
5566 
5567 /**
5568  * dp_get_tx_pending() - read pending tx
5569  * @pdev_handle: Datapath PDEV handle
5570  *
5571  * Return: outstanding tx
5572  */
5573 static int dp_get_tx_pending(struct cdp_pdev *pdev_handle)
5574 {
5575 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
5576 
5577 	return qdf_atomic_read(&pdev->num_tx_outstanding);
5578 }
5579 
5580 /**
5581  * dp_get_peer_mac_from_peer_id() - get peer mac
5582  * @pdev_handle: Datapath PDEV handle
5583  * @peer_id: Peer ID
5584  * @peer_mac: MAC addr of PEER
5585  *
5586  * Return: void
5587  */
5588 static void dp_get_peer_mac_from_peer_id(struct cdp_pdev *pdev_handle,
5589 	uint32_t peer_id, uint8_t *peer_mac)
5590 {
5591 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
5592 	struct dp_peer *peer;
5593 
5594 	if (pdev && peer_mac) {
5595 		peer = dp_peer_find_by_id(pdev->soc, (uint16_t)peer_id);
5596 		if (peer) {
5597 			qdf_mem_copy(peer_mac, peer->mac_addr.raw,
5598 				     DP_MAC_ADDR_LEN);
5599 			dp_peer_unref_del_find_by_id(peer);
5600 		}
5601 	}
5602 }
5603 
5604 /**
5605  * dp_pdev_configure_monitor_rings() - configure monitor rings
5606  * @vdev_handle: Datapath VDEV handle
5607  *
5608  * Return: void
5609  */
5610 static QDF_STATUS dp_pdev_configure_monitor_rings(struct dp_pdev *pdev)
5611 {
5612 	struct htt_rx_ring_tlv_filter htt_tlv_filter;
5613 	struct dp_soc *soc;
5614 	uint8_t pdev_id;
5615 	int mac_id;
5616 	QDF_STATUS status = QDF_STATUS_SUCCESS;
5617 
5618 	pdev_id = pdev->pdev_id;
5619 	soc = pdev->soc;
5620 
5621 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
5622 		"MODE[%x] FP[%02x|%02x|%02x] MO[%02x|%02x|%02x]",
5623 		pdev->mon_filter_mode, pdev->fp_mgmt_filter,
5624 		pdev->fp_ctrl_filter, pdev->fp_data_filter,
5625 		pdev->mo_mgmt_filter, pdev->mo_ctrl_filter,
5626 		pdev->mo_data_filter);
5627 
5628 	qdf_mem_zero(&(htt_tlv_filter), sizeof(htt_tlv_filter));
5629 
5630 	htt_tlv_filter.mpdu_start = 1;
5631 	htt_tlv_filter.msdu_start = 1;
5632 	htt_tlv_filter.packet = 1;
5633 	htt_tlv_filter.msdu_end = 1;
5634 	htt_tlv_filter.mpdu_end = 1;
5635 	htt_tlv_filter.packet_header = 1;
5636 	htt_tlv_filter.attention = 1;
5637 	htt_tlv_filter.ppdu_start = 0;
5638 	htt_tlv_filter.ppdu_end = 0;
5639 	htt_tlv_filter.ppdu_end_user_stats = 0;
5640 	htt_tlv_filter.ppdu_end_user_stats_ext = 0;
5641 	htt_tlv_filter.ppdu_end_status_done = 0;
5642 	htt_tlv_filter.header_per_msdu = 1;
5643 	htt_tlv_filter.enable_fp =
5644 		(pdev->mon_filter_mode & MON_FILTER_PASS) ? 1 : 0;
5645 	htt_tlv_filter.enable_md = 0;
5646 	htt_tlv_filter.enable_mo =
5647 		(pdev->mon_filter_mode & MON_FILTER_OTHER) ? 1 : 0;
5648 	htt_tlv_filter.fp_mgmt_filter = pdev->fp_mgmt_filter;
5649 	htt_tlv_filter.fp_ctrl_filter = pdev->fp_ctrl_filter;
5650 	if (pdev->mcopy_mode)
5651 		htt_tlv_filter.fp_data_filter = 0;
5652 	else
5653 		htt_tlv_filter.fp_data_filter = pdev->fp_data_filter;
5654 	htt_tlv_filter.mo_mgmt_filter = pdev->mo_mgmt_filter;
5655 	htt_tlv_filter.mo_ctrl_filter = pdev->mo_ctrl_filter;
5656 	htt_tlv_filter.mo_data_filter = pdev->mo_data_filter;
5657 
5658 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
5659 		int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, pdev_id);
5660 
5661 		status = dp_monitor_mode_ring_config(soc, mac_for_pdev,
5662 						     pdev, mac_id,
5663 						     htt_tlv_filter);
5664 
5665 		if (status != QDF_STATUS_SUCCESS) {
5666 			dp_err("Failed to send tlv filter for monitor mode rings");
5667 			return status;
5668 		}
5669 	}
5670 
5671 	qdf_mem_zero(&(htt_tlv_filter), sizeof(htt_tlv_filter));
5672 
5673 	htt_tlv_filter.mpdu_start = 1;
5674 	htt_tlv_filter.msdu_start = 0;
5675 	htt_tlv_filter.packet = 0;
5676 	htt_tlv_filter.msdu_end = 0;
5677 	htt_tlv_filter.mpdu_end = 0;
5678 	htt_tlv_filter.attention = 0;
5679 	htt_tlv_filter.ppdu_start = 1;
5680 	htt_tlv_filter.ppdu_end = 1;
5681 	htt_tlv_filter.ppdu_end_user_stats = 1;
5682 	htt_tlv_filter.ppdu_end_user_stats_ext = 1;
5683 	htt_tlv_filter.ppdu_end_status_done = 1;
5684 	htt_tlv_filter.enable_fp = 1;
5685 	htt_tlv_filter.enable_md = 0;
5686 	htt_tlv_filter.enable_mo = 1;
5687 	if (pdev->mcopy_mode) {
5688 		htt_tlv_filter.packet_header = 1;
5689 	}
5690 	htt_tlv_filter.fp_mgmt_filter = FILTER_MGMT_ALL;
5691 	htt_tlv_filter.fp_ctrl_filter = FILTER_CTRL_ALL;
5692 	htt_tlv_filter.fp_data_filter = FILTER_DATA_ALL;
5693 	htt_tlv_filter.mo_mgmt_filter = FILTER_MGMT_ALL;
5694 	htt_tlv_filter.mo_ctrl_filter = FILTER_CTRL_ALL;
5695 	htt_tlv_filter.mo_data_filter = FILTER_DATA_ALL;
5696 
5697 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
5698 		int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id,
5699 						pdev->pdev_id);
5700 
5701 		htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
5702 			pdev->rxdma_mon_status_ring[mac_id].hal_srng,
5703 			RXDMA_MONITOR_STATUS, RX_BUFFER_SIZE, &htt_tlv_filter);
5704 	}
5705 
5706 	return status;
5707 }
5708 
5709 /**
5710  * dp_vdev_set_monitor_mode() - Set DP VDEV to monitor mode
5711  * @vdev_handle: Datapath VDEV handle
5712  * @smart_monitor: Flag to denote if its smart monitor mode
5713  *
5714  * Return: 0 on success, not 0 on failure
5715  */
5716 static QDF_STATUS dp_vdev_set_monitor_mode(struct cdp_vdev *vdev_handle,
5717 					   uint8_t smart_monitor)
5718 {
5719 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
5720 	struct dp_pdev *pdev;
5721 
5722 	qdf_assert(vdev);
5723 
5724 	pdev = vdev->pdev;
5725 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_WARN,
5726 		  "pdev=%pK, pdev_id=%d, soc=%pK vdev=%pK\n",
5727 		  pdev, pdev->pdev_id, pdev->soc, vdev);
5728 
5729 	/*Check if current pdev's monitor_vdev exists */
5730 	if (pdev->monitor_configured) {
5731 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
5732 			  "monitor vap already created vdev=%pK\n", vdev);
5733 		qdf_assert(vdev);
5734 		return QDF_STATUS_E_RESOURCES;
5735 	}
5736 
5737 	pdev->monitor_vdev = vdev;
5738 	pdev->monitor_configured = true;
5739 
5740 	/* If smart monitor mode, do not configure monitor ring */
5741 	if (smart_monitor)
5742 		return QDF_STATUS_SUCCESS;
5743 
5744 	return dp_pdev_configure_monitor_rings(pdev);
5745 }
5746 
5747 /**
5748  * dp_pdev_set_advance_monitor_filter() - Set DP PDEV monitor filter
5749  * @pdev_handle: Datapath PDEV handle
5750  * @filter_val: Flag to select Filter for monitor mode
5751  * Return: 0 on success, not 0 on failure
5752  */
5753 static QDF_STATUS
5754 dp_pdev_set_advance_monitor_filter(struct cdp_pdev *pdev_handle,
5755 				   struct cdp_monitor_filter *filter_val)
5756 {
5757 	/* Many monitor VAPs can exists in a system but only one can be up at
5758 	 * anytime
5759 	 */
5760 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
5761 	struct dp_vdev *vdev = pdev->monitor_vdev;
5762 	struct htt_rx_ring_tlv_filter htt_tlv_filter;
5763 	struct dp_soc *soc;
5764 	uint8_t pdev_id;
5765 	int mac_id;
5766 	QDF_STATUS status = QDF_STATUS_SUCCESS;
5767 
5768 	pdev_id = pdev->pdev_id;
5769 	soc = pdev->soc;
5770 
5771 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_WARN,
5772 		"pdev=%pK, pdev_id=%d, soc=%pK vdev=%pK",
5773 		pdev, pdev_id, soc, vdev);
5774 
5775 	/*Check if current pdev's monitor_vdev exists */
5776 	if (!pdev->monitor_vdev) {
5777 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
5778 			"vdev=%pK", vdev);
5779 		qdf_assert(vdev);
5780 	}
5781 
5782 	/* update filter mode, type in pdev structure */
5783 	pdev->mon_filter_mode = filter_val->mode;
5784 	pdev->fp_mgmt_filter = filter_val->fp_mgmt;
5785 	pdev->fp_ctrl_filter = filter_val->fp_ctrl;
5786 	pdev->fp_data_filter = filter_val->fp_data;
5787 	pdev->mo_mgmt_filter = filter_val->mo_mgmt;
5788 	pdev->mo_ctrl_filter = filter_val->mo_ctrl;
5789 	pdev->mo_data_filter = filter_val->mo_data;
5790 
5791 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
5792 		"MODE[%x] FP[%02x|%02x|%02x] MO[%02x|%02x|%02x]",
5793 		pdev->mon_filter_mode, pdev->fp_mgmt_filter,
5794 		pdev->fp_ctrl_filter, pdev->fp_data_filter,
5795 		pdev->mo_mgmt_filter, pdev->mo_ctrl_filter,
5796 		pdev->mo_data_filter);
5797 
5798 	qdf_mem_zero(&(htt_tlv_filter), sizeof(htt_tlv_filter));
5799 
5800 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
5801 		int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, pdev_id);
5802 
5803 		status = dp_monitor_mode_ring_config(soc, mac_for_pdev,
5804 						     pdev, mac_id,
5805 						     htt_tlv_filter);
5806 
5807 		if (status != QDF_STATUS_SUCCESS) {
5808 			dp_err("Failed to send tlv filter for monitor mode rings");
5809 			return status;
5810 		}
5811 
5812 		htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
5813 			pdev->rxdma_mon_status_ring[mac_id].hal_srng,
5814 			RXDMA_MONITOR_STATUS, RX_BUFFER_SIZE, &htt_tlv_filter);
5815 	}
5816 
5817 	htt_tlv_filter.mpdu_start = 1;
5818 	htt_tlv_filter.msdu_start = 1;
5819 	htt_tlv_filter.packet = 1;
5820 	htt_tlv_filter.msdu_end = 1;
5821 	htt_tlv_filter.mpdu_end = 1;
5822 	htt_tlv_filter.packet_header = 1;
5823 	htt_tlv_filter.attention = 1;
5824 	htt_tlv_filter.ppdu_start = 0;
5825 	htt_tlv_filter.ppdu_end = 0;
5826 	htt_tlv_filter.ppdu_end_user_stats = 0;
5827 	htt_tlv_filter.ppdu_end_user_stats_ext = 0;
5828 	htt_tlv_filter.ppdu_end_status_done = 0;
5829 	htt_tlv_filter.header_per_msdu = 1;
5830 	htt_tlv_filter.enable_fp =
5831 		(pdev->mon_filter_mode & MON_FILTER_PASS) ? 1 : 0;
5832 	htt_tlv_filter.enable_md = 0;
5833 	htt_tlv_filter.enable_mo =
5834 		(pdev->mon_filter_mode & MON_FILTER_OTHER) ? 1 : 0;
5835 	htt_tlv_filter.fp_mgmt_filter = pdev->fp_mgmt_filter;
5836 	htt_tlv_filter.fp_ctrl_filter = pdev->fp_ctrl_filter;
5837 	if (pdev->mcopy_mode)
5838 		htt_tlv_filter.fp_data_filter = 0;
5839 	else
5840 		htt_tlv_filter.fp_data_filter = pdev->fp_data_filter;
5841 	htt_tlv_filter.mo_mgmt_filter = pdev->mo_mgmt_filter;
5842 	htt_tlv_filter.mo_ctrl_filter = pdev->mo_ctrl_filter;
5843 	htt_tlv_filter.mo_data_filter = pdev->mo_data_filter;
5844 
5845 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
5846 		int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, pdev_id);
5847 
5848 		status = dp_monitor_mode_ring_config(soc, mac_for_pdev,
5849 						     pdev, mac_id,
5850 						     htt_tlv_filter);
5851 
5852 		if (status != QDF_STATUS_SUCCESS) {
5853 			dp_err("Failed to send tlv filter for monitor mode rings");
5854 			return status;
5855 		}
5856 	}
5857 
5858 	qdf_mem_zero(&(htt_tlv_filter), sizeof(htt_tlv_filter));
5859 
5860 	htt_tlv_filter.mpdu_start = 1;
5861 	htt_tlv_filter.msdu_start = 0;
5862 	htt_tlv_filter.packet = 0;
5863 	htt_tlv_filter.msdu_end = 0;
5864 	htt_tlv_filter.mpdu_end = 0;
5865 	htt_tlv_filter.attention = 0;
5866 	htt_tlv_filter.ppdu_start = 1;
5867 	htt_tlv_filter.ppdu_end = 1;
5868 	htt_tlv_filter.ppdu_end_user_stats = 1;
5869 	htt_tlv_filter.ppdu_end_user_stats_ext = 1;
5870 	htt_tlv_filter.ppdu_end_status_done = 1;
5871 	htt_tlv_filter.enable_fp = 1;
5872 	htt_tlv_filter.enable_md = 0;
5873 	htt_tlv_filter.enable_mo = 1;
5874 	if (pdev->mcopy_mode) {
5875 		htt_tlv_filter.packet_header = 1;
5876 	}
5877 	htt_tlv_filter.fp_mgmt_filter = FILTER_MGMT_ALL;
5878 	htt_tlv_filter.fp_ctrl_filter = FILTER_CTRL_ALL;
5879 	htt_tlv_filter.fp_data_filter = FILTER_DATA_ALL;
5880 	htt_tlv_filter.mo_mgmt_filter = FILTER_MGMT_ALL;
5881 	htt_tlv_filter.mo_ctrl_filter = FILTER_CTRL_ALL;
5882 	htt_tlv_filter.mo_data_filter = FILTER_DATA_ALL;
5883 
5884 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
5885 		int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id,
5886 						pdev->pdev_id);
5887 
5888 		htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
5889 			pdev->rxdma_mon_status_ring[mac_id].hal_srng,
5890 			RXDMA_MONITOR_STATUS, RX_BUFFER_SIZE, &htt_tlv_filter);
5891 	}
5892 
5893 	return QDF_STATUS_SUCCESS;
5894 }
5895 
5896 /**
5897  * dp_get_pdev_id_frm_pdev() - get pdev_id
5898  * @pdev_handle: Datapath PDEV handle
5899  *
5900  * Return: pdev_id
5901  */
5902 static
5903 uint8_t dp_get_pdev_id_frm_pdev(struct cdp_pdev *pdev_handle)
5904 {
5905 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
5906 
5907 	return pdev->pdev_id;
5908 }
5909 
5910 /**
5911  * dp_pdev_set_chan_noise_floor() - set channel noise floor
5912  * @pdev_handle: Datapath PDEV handle
5913  * @chan_noise_floor: Channel Noise Floor
5914  *
5915  * Return: void
5916  */
5917 static
5918 void dp_pdev_set_chan_noise_floor(struct cdp_pdev *pdev_handle,
5919 				  int16_t chan_noise_floor)
5920 {
5921 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
5922 
5923 	pdev->chan_noise_floor = chan_noise_floor;
5924 }
5925 
5926 /**
5927  * dp_vdev_get_filter_ucast_data() - get DP VDEV monitor ucast filter
5928  * @vdev_handle: Datapath VDEV handle
5929  * Return: true on ucast filter flag set
5930  */
5931 static bool dp_vdev_get_filter_ucast_data(struct cdp_vdev *vdev_handle)
5932 {
5933 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
5934 	struct dp_pdev *pdev;
5935 
5936 	pdev = vdev->pdev;
5937 
5938 	if ((pdev->fp_data_filter & FILTER_DATA_UCAST) ||
5939 	    (pdev->mo_data_filter & FILTER_DATA_UCAST))
5940 		return true;
5941 
5942 	return false;
5943 }
5944 
5945 /**
5946  * dp_vdev_get_filter_mcast_data() - get DP VDEV monitor mcast filter
5947  * @vdev_handle: Datapath VDEV handle
5948  * Return: true on mcast filter flag set
5949  */
5950 static bool dp_vdev_get_filter_mcast_data(struct cdp_vdev *vdev_handle)
5951 {
5952 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
5953 	struct dp_pdev *pdev;
5954 
5955 	pdev = vdev->pdev;
5956 
5957 	if ((pdev->fp_data_filter & FILTER_DATA_MCAST) ||
5958 	    (pdev->mo_data_filter & FILTER_DATA_MCAST))
5959 		return true;
5960 
5961 	return false;
5962 }
5963 
5964 /**
5965  * dp_vdev_get_filter_non_data() - get DP VDEV monitor non_data filter
5966  * @vdev_handle: Datapath VDEV handle
5967  * Return: true on non data filter flag set
5968  */
5969 static bool dp_vdev_get_filter_non_data(struct cdp_vdev *vdev_handle)
5970 {
5971 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
5972 	struct dp_pdev *pdev;
5973 
5974 	pdev = vdev->pdev;
5975 
5976 	if ((pdev->fp_mgmt_filter & FILTER_MGMT_ALL) ||
5977 	    (pdev->mo_mgmt_filter & FILTER_MGMT_ALL)) {
5978 		if ((pdev->fp_ctrl_filter & FILTER_CTRL_ALL) ||
5979 		    (pdev->mo_ctrl_filter & FILTER_CTRL_ALL)) {
5980 			return true;
5981 		}
5982 	}
5983 
5984 	return false;
5985 }
5986 
5987 #ifdef MESH_MODE_SUPPORT
5988 void dp_peer_set_mesh_mode(struct cdp_vdev *vdev_hdl, uint32_t val)
5989 {
5990 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_hdl;
5991 
5992 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
5993 		FL("val %d"), val);
5994 	vdev->mesh_vdev = val;
5995 }
5996 
5997 /*
5998  * dp_peer_set_mesh_rx_filter() - to set the mesh rx filter
5999  * @vdev_hdl: virtual device object
6000  * @val: value to be set
6001  *
6002  * Return: void
6003  */
6004 void dp_peer_set_mesh_rx_filter(struct cdp_vdev *vdev_hdl, uint32_t val)
6005 {
6006 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_hdl;
6007 
6008 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
6009 		FL("val %d"), val);
6010 	vdev->mesh_rx_filter = val;
6011 }
6012 #endif
6013 
6014 /*
6015  * dp_aggregate_pdev_ctrl_frames_stats()- function to agreegate peer stats
6016  * Current scope is bar received count
6017  *
6018  * @pdev_handle: DP_PDEV handle
6019  *
6020  * Return: void
6021  */
6022 #define STATS_PROC_TIMEOUT        (HZ/1000)
6023 
6024 static void
6025 dp_aggregate_pdev_ctrl_frames_stats(struct dp_pdev *pdev)
6026 {
6027 	struct dp_vdev *vdev;
6028 	struct dp_peer *peer;
6029 	uint32_t waitcnt;
6030 
6031 	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
6032 		TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
6033 			if (!peer) {
6034 				QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
6035 					FL("DP Invalid Peer refernce"));
6036 				return;
6037 			}
6038 
6039 			if (peer->delete_in_progress) {
6040 				QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
6041 					FL("DP Peer deletion in progress"));
6042 				continue;
6043 			}
6044 			qdf_atomic_inc(&peer->ref_cnt);
6045 			waitcnt = 0;
6046 			dp_peer_rxtid_stats(peer, dp_rx_bar_stats_cb, pdev);
6047 			while (!(qdf_atomic_read(&(pdev->stats_cmd_complete)))
6048 				&& waitcnt < 10) {
6049 				schedule_timeout_interruptible(
6050 						STATS_PROC_TIMEOUT);
6051 				waitcnt++;
6052 			}
6053 			qdf_atomic_set(&(pdev->stats_cmd_complete), 0);
6054 			dp_peer_unref_delete(peer);
6055 		}
6056 	}
6057 }
6058 
6059 /**
6060  * dp_rx_bar_stats_cb(): BAR received stats callback
6061  * @soc: SOC handle
6062  * @cb_ctxt: Call back context
6063  * @reo_status: Reo status
6064  *
6065  * return: void
6066  */
6067 void dp_rx_bar_stats_cb(struct dp_soc *soc, void *cb_ctxt,
6068 	union hal_reo_status *reo_status)
6069 {
6070 	struct dp_pdev *pdev = (struct dp_pdev *)cb_ctxt;
6071 	struct hal_reo_queue_status *queue_status = &(reo_status->queue_status);
6072 
6073 	if (!qdf_atomic_read(&soc->cmn_init_done))
6074 		return;
6075 
6076 	if (queue_status->header.status != HAL_REO_CMD_SUCCESS) {
6077 		DP_TRACE_STATS(FATAL, "REO stats failure %d \n",
6078 			queue_status->header.status);
6079 		qdf_atomic_set(&(pdev->stats_cmd_complete), 1);
6080 		return;
6081 	}
6082 
6083 	pdev->stats.rx.bar_recv_cnt += queue_status->bar_rcvd_cnt;
6084 	qdf_atomic_set(&(pdev->stats_cmd_complete), 1);
6085 
6086 }
6087 
6088 /**
6089  * dp_aggregate_vdev_stats(): Consolidate stats at VDEV level
6090  * @vdev: DP VDEV handle
6091  *
6092  * return: void
6093  */
6094 void dp_aggregate_vdev_stats(struct dp_vdev *vdev,
6095 			     struct cdp_vdev_stats *vdev_stats)
6096 {
6097 	struct dp_peer *peer = NULL;
6098 	struct dp_soc *soc = NULL;
6099 
6100 	if (!vdev || !vdev->pdev)
6101 		return;
6102 
6103 	soc = vdev->pdev->soc;
6104 
6105 	qdf_mem_copy(vdev_stats, &vdev->stats, sizeof(vdev->stats));
6106 
6107 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem)
6108 		dp_update_vdev_stats(vdev_stats, peer);
6109 
6110 #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
6111 	dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, vdev->pdev->soc,
6112 			     vdev_stats, vdev->vdev_id,
6113 			     UPDATE_VDEV_STATS, vdev->pdev->pdev_id);
6114 #endif
6115 }
6116 
6117 /**
6118  * dp_aggregate_pdev_stats(): Consolidate stats at PDEV level
6119  * @pdev: DP PDEV handle
6120  *
6121  * return: void
6122  */
6123 static inline void dp_aggregate_pdev_stats(struct dp_pdev *pdev)
6124 {
6125 	struct dp_vdev *vdev = NULL;
6126 	struct dp_soc *soc;
6127 	struct cdp_vdev_stats *vdev_stats =
6128 			qdf_mem_malloc(sizeof(struct cdp_vdev_stats));
6129 
6130 	if (!vdev_stats) {
6131 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
6132 			  "DP alloc failure - unable to get alloc vdev stats");
6133 		return;
6134 	}
6135 
6136 	qdf_mem_zero(&pdev->stats.tx, sizeof(pdev->stats.tx));
6137 	qdf_mem_zero(&pdev->stats.rx, sizeof(pdev->stats.rx));
6138 	qdf_mem_zero(&pdev->stats.tx_i, sizeof(pdev->stats.tx_i));
6139 
6140 	if (pdev->mcopy_mode)
6141 		DP_UPDATE_STATS(pdev, pdev->invalid_peer);
6142 
6143 	soc = pdev->soc;
6144 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
6145 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
6146 	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
6147 
6148 		dp_aggregate_vdev_stats(vdev, vdev_stats);
6149 		dp_update_pdev_stats(pdev, vdev_stats);
6150 		dp_update_pdev_ingress_stats(pdev, vdev);
6151 	}
6152 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
6153 	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
6154 	qdf_mem_free(vdev_stats);
6155 
6156 #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
6157 	dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, pdev->soc, &pdev->stats,
6158 			     pdev->pdev_id, UPDATE_PDEV_STATS, pdev->pdev_id);
6159 #endif
6160 }
6161 
6162 /**
6163  * dp_vdev_getstats() - get vdev packet level stats
6164  * @vdev_handle: Datapath VDEV handle
6165  * @stats: cdp network device stats structure
6166  *
6167  * Return: void
6168  */
6169 static void dp_vdev_getstats(void *vdev_handle,
6170 		struct cdp_dev_stats *stats)
6171 {
6172 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
6173 	struct dp_pdev *pdev;
6174 	struct dp_soc *soc;
6175 	struct cdp_vdev_stats *vdev_stats;
6176 
6177 	if (!vdev)
6178 		return;
6179 
6180 	pdev = vdev->pdev;
6181 	if (!pdev)
6182 		return;
6183 
6184 	soc = pdev->soc;
6185 
6186 	vdev_stats = qdf_mem_malloc(sizeof(struct cdp_vdev_stats));
6187 
6188 	if (!vdev_stats) {
6189 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
6190 			  "DP alloc failure - unable to get alloc vdev stats");
6191 		return;
6192 	}
6193 
6194 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
6195 	dp_aggregate_vdev_stats(vdev, vdev_stats);
6196 	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
6197 
6198 	stats->tx_packets = vdev_stats->tx_i.rcvd.num;
6199 	stats->tx_bytes = vdev_stats->tx_i.rcvd.bytes;
6200 
6201 	stats->tx_errors = vdev_stats->tx.tx_failed +
6202 		vdev_stats->tx_i.dropped.dropped_pkt.num;
6203 	stats->tx_dropped = stats->tx_errors;
6204 
6205 	stats->rx_packets = vdev_stats->rx.unicast.num +
6206 		vdev_stats->rx.multicast.num +
6207 		vdev_stats->rx.bcast.num;
6208 	stats->rx_bytes = vdev_stats->rx.unicast.bytes +
6209 		vdev_stats->rx.multicast.bytes +
6210 		vdev_stats->rx.bcast.bytes;
6211 
6212 }
6213 
6214 
6215 /**
6216  * dp_pdev_getstats() - get pdev packet level stats
6217  * @pdev_handle: Datapath PDEV handle
6218  * @stats: cdp network device stats structure
6219  *
6220  * Return: void
6221  */
6222 static void dp_pdev_getstats(void *pdev_handle,
6223 		struct cdp_dev_stats *stats)
6224 {
6225 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
6226 
6227 	dp_aggregate_pdev_stats(pdev);
6228 
6229 	stats->tx_packets = pdev->stats.tx_i.rcvd.num;
6230 	stats->tx_bytes = pdev->stats.tx_i.rcvd.bytes;
6231 
6232 	stats->tx_errors = pdev->stats.tx.tx_failed +
6233 		pdev->stats.tx_i.dropped.dropped_pkt.num;
6234 	stats->tx_dropped = stats->tx_errors;
6235 
6236 	stats->rx_packets = pdev->stats.rx.unicast.num +
6237 		pdev->stats.rx.multicast.num +
6238 		pdev->stats.rx.bcast.num;
6239 	stats->rx_bytes = pdev->stats.rx.unicast.bytes +
6240 		pdev->stats.rx.multicast.bytes +
6241 		pdev->stats.rx.bcast.bytes;
6242 }
6243 
6244 /**
6245  * dp_get_device_stats() - get interface level packet stats
6246  * @handle: device handle
6247  * @stats: cdp network device stats structure
6248  * @type: device type pdev/vdev
6249  *
6250  * Return: void
6251  */
6252 static void dp_get_device_stats(void *handle,
6253 		struct cdp_dev_stats *stats, uint8_t type)
6254 {
6255 	switch (type) {
6256 	case UPDATE_VDEV_STATS:
6257 		dp_vdev_getstats(handle, stats);
6258 		break;
6259 	case UPDATE_PDEV_STATS:
6260 		dp_pdev_getstats(handle, stats);
6261 		break;
6262 	default:
6263 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
6264 			"apstats cannot be updated for this input "
6265 			"type %d", type);
6266 		break;
6267 	}
6268 
6269 }
6270 
6271 
6272 /**
6273  * dp_print_pdev_tx_stats(): Print Pdev level TX stats
6274  * @pdev: DP_PDEV Handle
6275  *
6276  * Return:void
6277  */
6278 static inline void
6279 dp_print_pdev_tx_stats(struct dp_pdev *pdev)
6280 {
6281 	uint8_t index = 0;
6282 
6283 	DP_PRINT_STATS("PDEV Tx Stats:\n");
6284 	DP_PRINT_STATS("Received From Stack:");
6285 	DP_PRINT_STATS("	Packets = %d",
6286 			pdev->stats.tx_i.rcvd.num);
6287 	DP_PRINT_STATS("	Bytes = %llu",
6288 			pdev->stats.tx_i.rcvd.bytes);
6289 	DP_PRINT_STATS("Processed:");
6290 	DP_PRINT_STATS("	Packets = %d",
6291 			pdev->stats.tx_i.processed.num);
6292 	DP_PRINT_STATS("	Bytes = %llu",
6293 			pdev->stats.tx_i.processed.bytes);
6294 	DP_PRINT_STATS("Total Completions:");
6295 	DP_PRINT_STATS("	Packets = %u",
6296 			pdev->stats.tx.comp_pkt.num);
6297 	DP_PRINT_STATS("	Bytes = %llu",
6298 			pdev->stats.tx.comp_pkt.bytes);
6299 	DP_PRINT_STATS("Successful Completions:");
6300 	DP_PRINT_STATS("	Packets = %u",
6301 			pdev->stats.tx.tx_success.num);
6302 	DP_PRINT_STATS("	Bytes = %llu",
6303 			pdev->stats.tx.tx_success.bytes);
6304 	DP_PRINT_STATS("Dropped:");
6305 	DP_PRINT_STATS("	Total = %d",
6306 			pdev->stats.tx_i.dropped.dropped_pkt.num);
6307 	DP_PRINT_STATS("	Dma_map_error = %d",
6308 			pdev->stats.tx_i.dropped.dma_error);
6309 	DP_PRINT_STATS("	Ring Full = %d",
6310 			pdev->stats.tx_i.dropped.ring_full);
6311 	DP_PRINT_STATS("	Descriptor Not available = %d",
6312 			pdev->stats.tx_i.dropped.desc_na.num);
6313 	DP_PRINT_STATS("	HW enqueue failed= %d",
6314 			pdev->stats.tx_i.dropped.enqueue_fail);
6315 	DP_PRINT_STATS("	Resources Full = %d",
6316 			pdev->stats.tx_i.dropped.res_full);
6317 	DP_PRINT_STATS("	FW removed Pkts = %u",
6318 		       pdev->stats.tx.dropped.fw_rem.num);
6319 	DP_PRINT_STATS("	FW removed bytes= %llu",
6320 		       pdev->stats.tx.dropped.fw_rem.bytes);
6321 	DP_PRINT_STATS("	FW removed transmitted = %d",
6322 			pdev->stats.tx.dropped.fw_rem_tx);
6323 	DP_PRINT_STATS("	FW removed untransmitted = %d",
6324 			pdev->stats.tx.dropped.fw_rem_notx);
6325 	DP_PRINT_STATS("	FW removed untransmitted fw_reason1 = %d",
6326 			pdev->stats.tx.dropped.fw_reason1);
6327 	DP_PRINT_STATS("	FW removed untransmitted fw_reason2 = %d",
6328 			pdev->stats.tx.dropped.fw_reason2);
6329 	DP_PRINT_STATS("	FW removed untransmitted fw_reason3 = %d",
6330 			pdev->stats.tx.dropped.fw_reason3);
6331 	DP_PRINT_STATS("	Aged Out from msdu/mpdu queues = %d",
6332 			pdev->stats.tx.dropped.age_out);
6333 	DP_PRINT_STATS("	headroom insufficient = %d",
6334 			pdev->stats.tx_i.dropped.headroom_insufficient);
6335 	DP_PRINT_STATS("	Multicast:");
6336 	DP_PRINT_STATS("	Packets: %u",
6337 		       pdev->stats.tx.mcast.num);
6338 	DP_PRINT_STATS("	Bytes: %llu",
6339 		       pdev->stats.tx.mcast.bytes);
6340 	DP_PRINT_STATS("Scatter Gather:");
6341 	DP_PRINT_STATS("	Packets = %d",
6342 			pdev->stats.tx_i.sg.sg_pkt.num);
6343 	DP_PRINT_STATS("	Bytes = %llu",
6344 			pdev->stats.tx_i.sg.sg_pkt.bytes);
6345 	DP_PRINT_STATS("	Dropped By Host = %d",
6346 			pdev->stats.tx_i.sg.dropped_host.num);
6347 	DP_PRINT_STATS("	Dropped By Target = %d",
6348 			pdev->stats.tx_i.sg.dropped_target);
6349 	DP_PRINT_STATS("TSO:");
6350 	DP_PRINT_STATS("	Number of Segments = %d",
6351 			pdev->stats.tx_i.tso.num_seg);
6352 	DP_PRINT_STATS("	Packets = %d",
6353 			pdev->stats.tx_i.tso.tso_pkt.num);
6354 	DP_PRINT_STATS("	Bytes = %llu",
6355 			pdev->stats.tx_i.tso.tso_pkt.bytes);
6356 	DP_PRINT_STATS("	Dropped By Host = %d",
6357 			pdev->stats.tx_i.tso.dropped_host.num);
6358 	DP_PRINT_STATS("Mcast Enhancement:");
6359 	DP_PRINT_STATS("	Packets = %d",
6360 			pdev->stats.tx_i.mcast_en.mcast_pkt.num);
6361 	DP_PRINT_STATS("	Bytes = %llu",
6362 			pdev->stats.tx_i.mcast_en.mcast_pkt.bytes);
6363 	DP_PRINT_STATS("	Dropped: Map Errors = %d",
6364 			pdev->stats.tx_i.mcast_en.dropped_map_error);
6365 	DP_PRINT_STATS("	Dropped: Self Mac = %d",
6366 			pdev->stats.tx_i.mcast_en.dropped_self_mac);
6367 	DP_PRINT_STATS("	Dropped: Send Fail = %d",
6368 			pdev->stats.tx_i.mcast_en.dropped_send_fail);
6369 	DP_PRINT_STATS("	Unicast sent = %d",
6370 			pdev->stats.tx_i.mcast_en.ucast);
6371 	DP_PRINT_STATS("Raw:");
6372 	DP_PRINT_STATS("	Packets = %d",
6373 			pdev->stats.tx_i.raw.raw_pkt.num);
6374 	DP_PRINT_STATS("	Bytes = %llu",
6375 			pdev->stats.tx_i.raw.raw_pkt.bytes);
6376 	DP_PRINT_STATS("	DMA map error = %d",
6377 			pdev->stats.tx_i.raw.dma_map_error);
6378 	DP_PRINT_STATS("Reinjected:");
6379 	DP_PRINT_STATS("	Packets = %d",
6380 			pdev->stats.tx_i.reinject_pkts.num);
6381 	DP_PRINT_STATS("	Bytes = %llu\n",
6382 			pdev->stats.tx_i.reinject_pkts.bytes);
6383 	DP_PRINT_STATS("Inspected:");
6384 	DP_PRINT_STATS("	Packets = %d",
6385 			pdev->stats.tx_i.inspect_pkts.num);
6386 	DP_PRINT_STATS("	Bytes = %llu",
6387 			pdev->stats.tx_i.inspect_pkts.bytes);
6388 	DP_PRINT_STATS("Nawds Multicast:");
6389 	DP_PRINT_STATS("	Packets = %d",
6390 			pdev->stats.tx_i.nawds_mcast.num);
6391 	DP_PRINT_STATS("	Bytes = %llu",
6392 			pdev->stats.tx_i.nawds_mcast.bytes);
6393 	DP_PRINT_STATS("CCE Classified:");
6394 	DP_PRINT_STATS("	CCE Classified Packets: %u",
6395 			pdev->stats.tx_i.cce_classified);
6396 	DP_PRINT_STATS("	RAW CCE Classified Packets: %u",
6397 			pdev->stats.tx_i.cce_classified_raw);
6398 	DP_PRINT_STATS("Mesh stats:");
6399 	DP_PRINT_STATS("	frames to firmware: %u",
6400 			pdev->stats.tx_i.mesh.exception_fw);
6401 	DP_PRINT_STATS("	completions from fw: %u",
6402 			pdev->stats.tx_i.mesh.completion_fw);
6403 	DP_PRINT_STATS("PPDU stats counter");
6404 	for (index = 0; index < CDP_PPDU_STATS_MAX_TAG; index++) {
6405 		DP_PRINT_STATS("	Tag[%d] = %llu", index,
6406 				pdev->stats.ppdu_stats_counter[index]);
6407 	}
6408 
6409 }
6410 
6411 /**
6412  * dp_print_pdev_rx_stats(): Print Pdev level RX stats
6413  * @pdev: DP_PDEV Handle
6414  *
6415  * Return: void
6416  */
6417 static inline void
6418 dp_print_pdev_rx_stats(struct dp_pdev *pdev)
6419 {
6420 	DP_PRINT_STATS("PDEV Rx Stats:\n");
6421 	DP_PRINT_STATS("Received From HW (Per Rx Ring):");
6422 	DP_PRINT_STATS("	Packets = %d %d %d %d",
6423 			pdev->stats.rx.rcvd_reo[0].num,
6424 			pdev->stats.rx.rcvd_reo[1].num,
6425 			pdev->stats.rx.rcvd_reo[2].num,
6426 			pdev->stats.rx.rcvd_reo[3].num);
6427 	DP_PRINT_STATS("	Bytes = %llu %llu %llu %llu",
6428 			pdev->stats.rx.rcvd_reo[0].bytes,
6429 			pdev->stats.rx.rcvd_reo[1].bytes,
6430 			pdev->stats.rx.rcvd_reo[2].bytes,
6431 			pdev->stats.rx.rcvd_reo[3].bytes);
6432 	DP_PRINT_STATS("Replenished:");
6433 	DP_PRINT_STATS("	Packets = %d",
6434 			pdev->stats.replenish.pkts.num);
6435 	DP_PRINT_STATS("	Bytes = %llu",
6436 			pdev->stats.replenish.pkts.bytes);
6437 	DP_PRINT_STATS("	Buffers Added To Freelist = %d",
6438 			pdev->stats.buf_freelist);
6439 	DP_PRINT_STATS("	Low threshold intr = %d",
6440 			pdev->stats.replenish.low_thresh_intrs);
6441 	DP_PRINT_STATS("Dropped:");
6442 	DP_PRINT_STATS("	msdu_not_done = %d",
6443 			pdev->stats.dropped.msdu_not_done);
6444 	DP_PRINT_STATS("        mon_rx_drop = %d",
6445 			pdev->stats.dropped.mon_rx_drop);
6446 	DP_PRINT_STATS("        mec_drop = %d",
6447 		       pdev->stats.rx.mec_drop.num);
6448 	DP_PRINT_STATS("	Bytes = %llu",
6449 		       pdev->stats.rx.mec_drop.bytes);
6450 	DP_PRINT_STATS("Sent To Stack:");
6451 	DP_PRINT_STATS("	Packets = %d",
6452 			pdev->stats.rx.to_stack.num);
6453 	DP_PRINT_STATS("	Bytes = %llu",
6454 			pdev->stats.rx.to_stack.bytes);
6455 	DP_PRINT_STATS("Multicast/Broadcast:");
6456 	DP_PRINT_STATS("	Packets = %d",
6457 			pdev->stats.rx.multicast.num);
6458 	DP_PRINT_STATS("	Bytes = %llu",
6459 			pdev->stats.rx.multicast.bytes);
6460 	DP_PRINT_STATS("Errors:");
6461 	DP_PRINT_STATS("	Rxdma Ring Un-inititalized = %d",
6462 			pdev->stats.replenish.rxdma_err);
6463 	DP_PRINT_STATS("	Desc Alloc Failed: = %d",
6464 			pdev->stats.err.desc_alloc_fail);
6465 	DP_PRINT_STATS("	IP checksum error = %d",
6466 		       pdev->stats.err.ip_csum_err);
6467 	DP_PRINT_STATS("	TCP/UDP checksum error = %d",
6468 		       pdev->stats.err.tcp_udp_csum_err);
6469 
6470 	/* Get bar_recv_cnt */
6471 	dp_aggregate_pdev_ctrl_frames_stats(pdev);
6472 	DP_PRINT_STATS("BAR Received Count: = %d",
6473 			pdev->stats.rx.bar_recv_cnt);
6474 
6475 }
6476 
6477 /**
6478  * dp_print_pdev_rx_mon_stats(): Print Pdev level RX monitor stats
6479  * @pdev: DP_PDEV Handle
6480  *
6481  * Return: void
6482  */
6483 static inline void
6484 dp_print_pdev_rx_mon_stats(struct dp_pdev *pdev)
6485 {
6486 	struct cdp_pdev_mon_stats *rx_mon_stats;
6487 
6488 	rx_mon_stats = &pdev->rx_mon_stats;
6489 
6490 	DP_PRINT_STATS("PDEV Rx Monitor Stats:\n");
6491 
6492 	dp_rx_mon_print_dbg_ppdu_stats(rx_mon_stats);
6493 
6494 	DP_PRINT_STATS("status_ppdu_done_cnt = %d",
6495 		       rx_mon_stats->status_ppdu_done);
6496 	DP_PRINT_STATS("dest_ppdu_done_cnt = %d",
6497 		       rx_mon_stats->dest_ppdu_done);
6498 	DP_PRINT_STATS("dest_mpdu_done_cnt = %d",
6499 		       rx_mon_stats->dest_mpdu_done);
6500 	DP_PRINT_STATS("dest_mpdu_drop_cnt = %d",
6501 		       rx_mon_stats->dest_mpdu_drop);
6502 	DP_PRINT_STATS("dup_mon_linkdesc_cnt = %d",
6503 		       rx_mon_stats->dup_mon_linkdesc_cnt);
6504 	DP_PRINT_STATS("dup_mon_buf_cnt = %d",
6505 		       rx_mon_stats->dup_mon_buf_cnt);
6506 }
6507 
6508 /**
6509  * dp_print_soc_tx_stats(): Print SOC level  stats
6510  * @soc DP_SOC Handle
6511  *
6512  * Return: void
6513  */
6514 static inline void
6515 dp_print_soc_tx_stats(struct dp_soc *soc)
6516 {
6517 	uint8_t desc_pool_id;
6518 	soc->stats.tx.desc_in_use = 0;
6519 
6520 	DP_PRINT_STATS("SOC Tx Stats:\n");
6521 
6522 	for (desc_pool_id = 0;
6523 	     desc_pool_id < wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
6524 	     desc_pool_id++)
6525 		soc->stats.tx.desc_in_use +=
6526 			soc->tx_desc[desc_pool_id].num_allocated;
6527 
6528 	DP_PRINT_STATS("Tx Descriptors In Use = %d",
6529 			soc->stats.tx.desc_in_use);
6530 	DP_PRINT_STATS("Tx Invalid peer:");
6531 	DP_PRINT_STATS("	Packets = %d",
6532 			soc->stats.tx.tx_invalid_peer.num);
6533 	DP_PRINT_STATS("	Bytes = %llu",
6534 			soc->stats.tx.tx_invalid_peer.bytes);
6535 	DP_PRINT_STATS("Packets dropped due to TCL ring full = %d %d %d",
6536 			soc->stats.tx.tcl_ring_full[0],
6537 			soc->stats.tx.tcl_ring_full[1],
6538 			soc->stats.tx.tcl_ring_full[2]);
6539 
6540 }
6541 /**
6542  * dp_print_soc_rx_stats: Print SOC level Rx stats
6543  * @soc: DP_SOC Handle
6544  *
6545  * Return:void
6546  */
6547 static inline void
6548 dp_print_soc_rx_stats(struct dp_soc *soc)
6549 {
6550 	uint32_t i;
6551 	char reo_error[DP_REO_ERR_LENGTH];
6552 	char rxdma_error[DP_RXDMA_ERR_LENGTH];
6553 	uint8_t index = 0;
6554 
6555 	DP_PRINT_STATS("No of AST Entries = %d", soc->num_ast_entries);
6556 	DP_PRINT_STATS("SOC Rx Stats:\n");
6557 	DP_PRINT_STATS("Fragmented packets: %u",
6558 		       soc->stats.rx.rx_frags);
6559 	DP_PRINT_STATS("Reo reinjected packets: %u",
6560 		       soc->stats.rx.reo_reinject);
6561 	DP_PRINT_STATS("Errors:\n");
6562 	DP_PRINT_STATS("Rx Decrypt Errors = %d",
6563 			(soc->stats.rx.err.rxdma_error[HAL_RXDMA_ERR_DECRYPT] +
6564 			soc->stats.rx.err.rxdma_error[HAL_RXDMA_ERR_TKIP_MIC]));
6565 	DP_PRINT_STATS("Invalid RBM = %d",
6566 			soc->stats.rx.err.invalid_rbm);
6567 	DP_PRINT_STATS("Invalid Vdev = %d",
6568 			soc->stats.rx.err.invalid_vdev);
6569 	DP_PRINT_STATS("Invalid sa_idx or da_idx = %d",
6570 		       soc->stats.rx.err.invalid_sa_da_idx);
6571 	DP_PRINT_STATS("Invalid Pdev = %d",
6572 			soc->stats.rx.err.invalid_pdev);
6573 	DP_PRINT_STATS("Invalid Peer = %d",
6574 			soc->stats.rx.err.rx_invalid_peer.num);
6575 	DP_PRINT_STATS("HAL Ring Access Fail = %d",
6576 			soc->stats.rx.err.hal_ring_access_fail);
6577 	DP_PRINT_STATS("RX frags: %d", soc->stats.rx.rx_frags);
6578 	DP_PRINT_STATS("RX frag wait: %d", soc->stats.rx.rx_frag_wait);
6579 	DP_PRINT_STATS("RX frag err: %d", soc->stats.rx.rx_frag_err);
6580 	DP_PRINT_STATS("RX HP out_of_sync: %d", soc->stats.rx.hp_oos);
6581 	DP_PRINT_STATS("RX DUP DESC: %d",
6582 		       soc->stats.rx.err.hal_reo_dest_dup);
6583 	DP_PRINT_STATS("RX REL DUP DESC: %d",
6584 		       soc->stats.rx.err.hal_wbm_rel_dup);
6585 
6586 	for (i = 0; i < HAL_RXDMA_ERR_MAX; i++) {
6587 		index += qdf_snprint(&rxdma_error[index],
6588 				DP_RXDMA_ERR_LENGTH - index,
6589 				" %d", soc->stats.rx.err.rxdma_error[i]);
6590 	}
6591 	DP_PRINT_STATS("RXDMA Error (0-31):%s",
6592 			rxdma_error);
6593 
6594 	index = 0;
6595 	for (i = 0; i < HAL_REO_ERR_MAX; i++) {
6596 		index += qdf_snprint(&reo_error[index],
6597 				DP_REO_ERR_LENGTH - index,
6598 				" %d", soc->stats.rx.err.reo_error[i]);
6599 	}
6600 	DP_PRINT_STATS("REO Error(0-14):%s",
6601 			reo_error);
6602 }
6603 
6604 /**
6605  * dp_srng_get_str_from_ring_type() - Return string name for a ring
6606  * @ring_type: Ring
6607  *
6608  * Return: char const pointer
6609  */
6610 static inline const
6611 char *dp_srng_get_str_from_hal_ring_type(enum hal_ring_type ring_type)
6612 {
6613 	switch (ring_type) {
6614 	case REO_DST:
6615 		return "Reo_dst";
6616 	case REO_EXCEPTION:
6617 		return "Reo_exception";
6618 	case REO_CMD:
6619 		return "Reo_cmd";
6620 	case REO_REINJECT:
6621 		return "Reo_reinject";
6622 	case REO_STATUS:
6623 		return "Reo_status";
6624 	case WBM2SW_RELEASE:
6625 		return "wbm2sw_release";
6626 	case TCL_DATA:
6627 		return "tcl_data";
6628 	case TCL_CMD:
6629 		return "tcl_cmd";
6630 	case TCL_STATUS:
6631 		return "tcl_status";
6632 	case SW2WBM_RELEASE:
6633 		return "sw2wbm_release";
6634 	case RXDMA_BUF:
6635 		return "Rxdma_buf";
6636 	case RXDMA_DST:
6637 		return "Rxdma_dst";
6638 	case RXDMA_MONITOR_BUF:
6639 		return "Rxdma_monitor_buf";
6640 	case RXDMA_MONITOR_DESC:
6641 		return "Rxdma_monitor_desc";
6642 	case RXDMA_MONITOR_STATUS:
6643 		return "Rxdma_monitor_status";
6644 	default:
6645 		dp_err("Invalid ring type");
6646 		break;
6647 	}
6648 	return "Invalid";
6649 }
6650 
6651 /**
6652  * dp_print_ring_stat_from_hal(): Print hal level ring stats
6653  * @soc: DP_SOC handle
6654  * @srng: DP_SRNG handle
6655  * @ring_name: SRNG name
6656  * @ring_type: srng src/dst ring
6657  *
6658  * Return: void
6659  */
6660 static void
6661 dp_print_ring_stat_from_hal(struct dp_soc *soc,  struct dp_srng *srng,
6662 			    enum hal_ring_type ring_type)
6663 {
6664 	uint32_t tailp;
6665 	uint32_t headp;
6666 	int32_t hw_headp = -1;
6667 	int32_t hw_tailp = -1;
6668 	const char *ring_name;
6669 	struct hal_soc *hal_soc;
6670 
6671 	if (soc && srng && srng->hal_srng) {
6672 		hal_soc = (struct hal_soc *)soc->hal_soc;
6673 		ring_name = dp_srng_get_str_from_hal_ring_type(ring_type);
6674 
6675 		hal_get_sw_hptp(soc->hal_soc, srng->hal_srng, &tailp, &headp);
6676 
6677 		DP_PRINT_STATS("%s:SW:Head pointer = %d Tail Pointer = %d\n",
6678 			       ring_name, headp, tailp);
6679 
6680 		hal_get_hw_hptp(hal_soc, srng->hal_srng, &hw_headp,
6681 				&hw_tailp, ring_type);
6682 
6683 		DP_PRINT_STATS("%s:HW:Head pointer = %d Tail Pointer = %d\n",
6684 			       ring_name, hw_headp, hw_tailp);
6685 	}
6686 
6687 }
6688 
6689 /**
6690  * dp_print_mon_ring_stats_from_hal() - Print stat for monitor rings based
6691  *					on target
6692  * @pdev: physical device handle
6693  * @mac_id: mac id
6694  *
6695  * Return: void
6696  */
6697 static inline
6698 void dp_print_mon_ring_stat_from_hal(struct dp_pdev *pdev, uint8_t mac_id)
6699 {
6700 	if (pdev->soc->wlan_cfg_ctx->rxdma1_enable) {
6701 		dp_print_ring_stat_from_hal(pdev->soc,
6702 					    &pdev->rxdma_mon_buf_ring[mac_id],
6703 					    RXDMA_MONITOR_BUF);
6704 		dp_print_ring_stat_from_hal(pdev->soc,
6705 					    &pdev->rxdma_mon_dst_ring[mac_id],
6706 					    RXDMA_MONITOR_DST);
6707 		dp_print_ring_stat_from_hal(pdev->soc,
6708 					    &pdev->rxdma_mon_desc_ring[mac_id],
6709 					    RXDMA_MONITOR_DESC);
6710 	}
6711 
6712 	dp_print_ring_stat_from_hal(pdev->soc,
6713 				    &pdev->rxdma_mon_status_ring[mac_id],
6714 				    RXDMA_MONITOR_STATUS);
6715 }
6716 
6717 /**
6718  * dp_print_ring_stats(): Print tail and head pointer
6719  * @pdev: DP_PDEV handle
6720  *
6721  * Return:void
6722  */
6723 static inline void
6724 dp_print_ring_stats(struct dp_pdev *pdev)
6725 {
6726 	uint32_t i;
6727 	int mac_id;
6728 
6729 	dp_print_ring_stat_from_hal(pdev->soc,
6730 				    &pdev->soc->reo_exception_ring,
6731 				    REO_EXCEPTION);
6732 	dp_print_ring_stat_from_hal(pdev->soc,
6733 				    &pdev->soc->reo_reinject_ring,
6734 				    REO_REINJECT);
6735 	dp_print_ring_stat_from_hal(pdev->soc,
6736 				    &pdev->soc->reo_cmd_ring,
6737 				    REO_CMD);
6738 	dp_print_ring_stat_from_hal(pdev->soc,
6739 				    &pdev->soc->reo_status_ring,
6740 				    REO_STATUS);
6741 	dp_print_ring_stat_from_hal(pdev->soc,
6742 				    &pdev->soc->rx_rel_ring,
6743 				    WBM2SW_RELEASE);
6744 	dp_print_ring_stat_from_hal(pdev->soc,
6745 				    &pdev->soc->tcl_cmd_ring,
6746 				    TCL_CMD);
6747 	dp_print_ring_stat_from_hal(pdev->soc,
6748 				    &pdev->soc->tcl_status_ring,
6749 				    TCL_STATUS);
6750 	dp_print_ring_stat_from_hal(pdev->soc,
6751 				    &pdev->soc->wbm_desc_rel_ring,
6752 				    SW2WBM_RELEASE);
6753 	for (i = 0; i < MAX_REO_DEST_RINGS; i++)
6754 		dp_print_ring_stat_from_hal(pdev->soc,
6755 					    &pdev->soc->reo_dest_ring[i],
6756 					    REO_DST);
6757 
6758 	for (i = 0; i < pdev->soc->num_tcl_data_rings; i++)
6759 		dp_print_ring_stat_from_hal(pdev->soc,
6760 					    &pdev->soc->tcl_data_ring[i],
6761 					    TCL_DATA);
6762 	for (i = 0; i < MAX_TCL_DATA_RINGS; i++)
6763 		dp_print_ring_stat_from_hal(pdev->soc,
6764 					    &pdev->soc->tx_comp_ring[i],
6765 					    WBM2SW_RELEASE);
6766 
6767 	dp_print_ring_stat_from_hal(pdev->soc,
6768 				    &pdev->rx_refill_buf_ring,
6769 				    RXDMA_BUF);
6770 
6771 	dp_print_ring_stat_from_hal(pdev->soc,
6772 				    &pdev->rx_refill_buf_ring2,
6773 				    RXDMA_BUF);
6774 
6775 	for (i = 0; i < MAX_RX_MAC_RINGS; i++)
6776 		dp_print_ring_stat_from_hal(pdev->soc,
6777 					    &pdev->rx_mac_buf_ring[i],
6778 					    RXDMA_BUF);
6779 
6780 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++)
6781 		dp_print_mon_ring_stat_from_hal(pdev, mac_id);
6782 
6783 	for (i = 0; i < NUM_RXDMA_RINGS_PER_PDEV; i++)
6784 		dp_print_ring_stat_from_hal(pdev->soc,
6785 					    &pdev->rxdma_err_dst_ring[i],
6786 					    RXDMA_DST);
6787 
6788 }
6789 
6790 /**
6791  * dp_txrx_host_stats_clr(): Reinitialize the txrx stats
6792  * @vdev: DP_VDEV handle
6793  *
6794  * Return:void
6795  */
6796 static inline void
6797 dp_txrx_host_stats_clr(struct dp_vdev *vdev)
6798 {
6799 	struct dp_peer *peer = NULL;
6800 
6801 	if (!vdev || !vdev->pdev)
6802 		return;
6803 
6804 	DP_STATS_CLR(vdev->pdev);
6805 	DP_STATS_CLR(vdev->pdev->soc);
6806 	DP_STATS_CLR(vdev);
6807 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
6808 		if (!peer)
6809 			return;
6810 		DP_STATS_CLR(peer);
6811 
6812 #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
6813 		dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, vdev->pdev->soc,
6814 				     &peer->stats,  peer->peer_ids[0],
6815 				     UPDATE_PEER_STATS, vdev->pdev->pdev_id);
6816 #endif
6817 	}
6818 
6819 #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
6820 	dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, vdev->pdev->soc,
6821 			     &vdev->stats,  vdev->vdev_id,
6822 			     UPDATE_VDEV_STATS, vdev->pdev->pdev_id);
6823 #endif
6824 }
6825 
6826 /**
6827  * dp_print_common_rates_info(): Print common rate for tx or rx
6828  * @pkt_type_array: rate type array contains rate info
6829  *
6830  * Return:void
6831  */
6832 static inline void
6833 dp_print_common_rates_info(struct cdp_pkt_type *pkt_type_array)
6834 {
6835 	uint8_t mcs, pkt_type;
6836 
6837 	for (pkt_type = 0; pkt_type < DOT11_MAX; pkt_type++) {
6838 		for (mcs = 0; mcs < MAX_MCS; mcs++) {
6839 			if (!dp_rate_string[pkt_type][mcs].valid)
6840 				continue;
6841 
6842 			DP_PRINT_STATS("	%s = %d",
6843 				       dp_rate_string[pkt_type][mcs].mcs_type,
6844 				       pkt_type_array[pkt_type].mcs_count[mcs]);
6845 		}
6846 
6847 		DP_PRINT_STATS("\n");
6848 	}
6849 }
6850 
6851 /**
6852  * dp_print_rx_rates(): Print Rx rate stats
6853  * @vdev: DP_VDEV handle
6854  *
6855  * Return:void
6856  */
6857 static inline void
6858 dp_print_rx_rates(struct dp_vdev *vdev)
6859 {
6860 	struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;
6861 	uint8_t i;
6862 	uint8_t index = 0;
6863 	char nss[DP_NSS_LENGTH];
6864 
6865 	DP_PRINT_STATS("Rx Rate Info:\n");
6866 	dp_print_common_rates_info(pdev->stats.rx.pkt_type);
6867 
6868 
6869 	index = 0;
6870 	for (i = 0; i < SS_COUNT; i++) {
6871 		index += qdf_snprint(&nss[index], DP_NSS_LENGTH - index,
6872 				" %d", pdev->stats.rx.nss[i]);
6873 	}
6874 	DP_PRINT_STATS("NSS(1-8) = %s",
6875 			nss);
6876 
6877 	DP_PRINT_STATS("SGI ="
6878 			" 0.8us %d,"
6879 			" 0.4us %d,"
6880 			" 1.6us %d,"
6881 			" 3.2us %d,",
6882 			pdev->stats.rx.sgi_count[0],
6883 			pdev->stats.rx.sgi_count[1],
6884 			pdev->stats.rx.sgi_count[2],
6885 			pdev->stats.rx.sgi_count[3]);
6886 	DP_PRINT_STATS("BW Counts = 20MHZ %d, 40MHZ %d, 80MHZ %d, 160MHZ %d",
6887 			pdev->stats.rx.bw[0], pdev->stats.rx.bw[1],
6888 			pdev->stats.rx.bw[2], pdev->stats.rx.bw[3]);
6889 	DP_PRINT_STATS("Reception Type ="
6890 			" SU: %d,"
6891 			" MU_MIMO:%d,"
6892 			" MU_OFDMA:%d,"
6893 			" MU_OFDMA_MIMO:%d\n",
6894 			pdev->stats.rx.reception_type[0],
6895 			pdev->stats.rx.reception_type[1],
6896 			pdev->stats.rx.reception_type[2],
6897 			pdev->stats.rx.reception_type[3]);
6898 	DP_PRINT_STATS("Aggregation:\n");
6899 	DP_PRINT_STATS("Number of Msdu's Part of Ampdus = %d",
6900 			pdev->stats.rx.ampdu_cnt);
6901 	DP_PRINT_STATS("Number of Msdu's With No Mpdu Level Aggregation : %d",
6902 			pdev->stats.rx.non_ampdu_cnt);
6903 	DP_PRINT_STATS("Number of Msdu's Part of Amsdu: %d",
6904 			pdev->stats.rx.amsdu_cnt);
6905 	DP_PRINT_STATS("Number of Msdu's With No Msdu Level Aggregation: %d",
6906 			pdev->stats.rx.non_amsdu_cnt);
6907 }
6908 
6909 /**
6910  * dp_print_tx_rates(): Print tx rates
6911  * @vdev: DP_VDEV handle
6912  *
6913  * Return:void
6914  */
6915 static inline void
6916 dp_print_tx_rates(struct dp_vdev *vdev)
6917 {
6918 	struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;
6919 	uint8_t index;
6920 	char nss[DP_NSS_LENGTH];
6921 	int nss_index;
6922 
6923 	DP_PRINT_STATS("Tx Rate Info:\n");
6924 	dp_print_common_rates_info(pdev->stats.tx.pkt_type);
6925 
6926 	DP_PRINT_STATS("SGI ="
6927 			" 0.8us %d"
6928 			" 0.4us %d"
6929 			" 1.6us %d"
6930 			" 3.2us %d",
6931 			pdev->stats.tx.sgi_count[0],
6932 			pdev->stats.tx.sgi_count[1],
6933 			pdev->stats.tx.sgi_count[2],
6934 			pdev->stats.tx.sgi_count[3]);
6935 
6936 	DP_PRINT_STATS("BW Counts = 20MHZ %d, 40MHZ %d, 80MHZ %d, 160MHZ %d",
6937 			pdev->stats.tx.bw[0], pdev->stats.tx.bw[1],
6938 			pdev->stats.tx.bw[2], pdev->stats.tx.bw[3]);
6939 
6940 	index = 0;
6941 	for (nss_index = 0; nss_index < SS_COUNT; nss_index++) {
6942 		index += qdf_snprint(&nss[index], DP_NSS_LENGTH - index,
6943 				" %d", pdev->stats.tx.nss[nss_index]);
6944 	}
6945 
6946 	DP_PRINT_STATS("NSS(1-8) = %s", nss);
6947 	DP_PRINT_STATS("OFDMA = %d", pdev->stats.tx.ofdma);
6948 	DP_PRINT_STATS("STBC = %d", pdev->stats.tx.stbc);
6949 	DP_PRINT_STATS("LDPC = %d", pdev->stats.tx.ldpc);
6950 	DP_PRINT_STATS("Retries = %d", pdev->stats.tx.retries);
6951 	DP_PRINT_STATS("Last ack rssi = %d\n", pdev->stats.tx.last_ack_rssi);
6952 
6953 	DP_PRINT_STATS("Aggregation:\n");
6954 	DP_PRINT_STATS("Number of Msdu's Part of Amsdu = %d",
6955 			pdev->stats.tx.amsdu_cnt);
6956 	DP_PRINT_STATS("Number of Msdu's With No Msdu Level Aggregation = %d",
6957 			pdev->stats.tx.non_amsdu_cnt);
6958 }
6959 
6960 /**
6961  * dp_print_peer_stats():print peer stats
6962  * @peer: DP_PEER handle
6963  *
6964  * return void
6965  */
6966 static inline void dp_print_peer_stats(struct dp_peer *peer)
6967 {
6968 	uint8_t i;
6969 	uint32_t index;
6970 	uint32_t j;
6971 	char nss[DP_NSS_LENGTH];
6972 	char mu_group_id[DP_MU_GROUP_LENGTH];
6973 
6974 	DP_PRINT_STATS("Node Tx Stats:\n");
6975 	DP_PRINT_STATS("Total Packet Completions = %d",
6976 			peer->stats.tx.comp_pkt.num);
6977 	DP_PRINT_STATS("Total Bytes Completions = %llu",
6978 			peer->stats.tx.comp_pkt.bytes);
6979 	DP_PRINT_STATS("Success Packets = %d",
6980 			peer->stats.tx.tx_success.num);
6981 	DP_PRINT_STATS("Success Bytes = %llu",
6982 			peer->stats.tx.tx_success.bytes);
6983 	DP_PRINT_STATS("Unicast Success Packets = %d",
6984 			peer->stats.tx.ucast.num);
6985 	DP_PRINT_STATS("Unicast Success Bytes = %llu",
6986 			peer->stats.tx.ucast.bytes);
6987 	DP_PRINT_STATS("Multicast Success Packets = %d",
6988 			peer->stats.tx.mcast.num);
6989 	DP_PRINT_STATS("Multicast Success Bytes = %llu",
6990 			peer->stats.tx.mcast.bytes);
6991 	DP_PRINT_STATS("Broadcast Success Packets = %d",
6992 			peer->stats.tx.bcast.num);
6993 	DP_PRINT_STATS("Broadcast Success Bytes = %llu",
6994 			peer->stats.tx.bcast.bytes);
6995 	DP_PRINT_STATS("Packets Failed = %d",
6996 			peer->stats.tx.tx_failed);
6997 	DP_PRINT_STATS("Packets In OFDMA = %d",
6998 			peer->stats.tx.ofdma);
6999 	DP_PRINT_STATS("Packets In STBC = %d",
7000 			peer->stats.tx.stbc);
7001 	DP_PRINT_STATS("Packets In LDPC = %d",
7002 			peer->stats.tx.ldpc);
7003 	DP_PRINT_STATS("Packet Retries = %d",
7004 			peer->stats.tx.retries);
7005 	DP_PRINT_STATS("MSDU's Part of AMSDU = %d",
7006 			peer->stats.tx.amsdu_cnt);
7007 	DP_PRINT_STATS("Last Packet RSSI = %d",
7008 			peer->stats.tx.last_ack_rssi);
7009 	DP_PRINT_STATS("Dropped At FW: Removed Pkts = %u",
7010 		       peer->stats.tx.dropped.fw_rem.num);
7011 	DP_PRINT_STATS("Dropped At FW: Removed bytes = %llu",
7012 		       peer->stats.tx.dropped.fw_rem.bytes);
7013 	DP_PRINT_STATS("Dropped At FW: Removed transmitted = %d",
7014 			peer->stats.tx.dropped.fw_rem_tx);
7015 	DP_PRINT_STATS("Dropped At FW: Removed Untransmitted = %d",
7016 			peer->stats.tx.dropped.fw_rem_notx);
7017 	DP_PRINT_STATS("Dropped : Age Out = %d",
7018 			peer->stats.tx.dropped.age_out);
7019 	DP_PRINT_STATS("NAWDS : ");
7020 	DP_PRINT_STATS("	Nawds multicast Drop Tx Packet = %d",
7021 			peer->stats.tx.nawds_mcast_drop);
7022 	DP_PRINT_STATS("	Nawds multicast  Tx Packet Count = %d",
7023 			peer->stats.tx.nawds_mcast.num);
7024 	DP_PRINT_STATS("	Nawds multicast  Tx Packet Bytes = %llu",
7025 			peer->stats.tx.nawds_mcast.bytes);
7026 
7027 	DP_PRINT_STATS("Rate Info:");
7028 	dp_print_common_rates_info(peer->stats.tx.pkt_type);
7029 
7030 
7031 	DP_PRINT_STATS("SGI = "
7032 			" 0.8us %d"
7033 			" 0.4us %d"
7034 			" 1.6us %d"
7035 			" 3.2us %d",
7036 			peer->stats.tx.sgi_count[0],
7037 			peer->stats.tx.sgi_count[1],
7038 			peer->stats.tx.sgi_count[2],
7039 			peer->stats.tx.sgi_count[3]);
7040 	DP_PRINT_STATS("Excess Retries per AC ");
7041 	DP_PRINT_STATS("	 Best effort = %d",
7042 			peer->stats.tx.excess_retries_per_ac[0]);
7043 	DP_PRINT_STATS("	 Background= %d",
7044 			peer->stats.tx.excess_retries_per_ac[1]);
7045 	DP_PRINT_STATS("	 Video = %d",
7046 			peer->stats.tx.excess_retries_per_ac[2]);
7047 	DP_PRINT_STATS("	 Voice = %d",
7048 			peer->stats.tx.excess_retries_per_ac[3]);
7049 	DP_PRINT_STATS("BW Counts = 20MHZ %d 40MHZ %d 80MHZ %d 160MHZ %d\n",
7050 			peer->stats.tx.bw[0], peer->stats.tx.bw[1],
7051 			peer->stats.tx.bw[2], peer->stats.tx.bw[3]);
7052 
7053 	index = 0;
7054 	for (i = 0; i < SS_COUNT; i++) {
7055 		index += qdf_snprint(&nss[index], DP_NSS_LENGTH - index,
7056 				" %d", peer->stats.tx.nss[i]);
7057 	}
7058 	DP_PRINT_STATS("NSS(1-8) = %s", nss);
7059 
7060 	DP_PRINT_STATS("Transmit Type :");
7061 	DP_PRINT_STATS("SU %d, MU_MIMO %d, MU_OFDMA %d, MU_MIMO_OFDMA %d",
7062 		       peer->stats.tx.transmit_type[0],
7063 		       peer->stats.tx.transmit_type[1],
7064 		       peer->stats.tx.transmit_type[2],
7065 		       peer->stats.tx.transmit_type[3]);
7066 
7067 	for (i = 0; i < MAX_MU_GROUP_ID;) {
7068 		index = 0;
7069 		for (j = 0; j < DP_MU_GROUP_SHOW && i < MAX_MU_GROUP_ID;
7070 		     j++) {
7071 			index += qdf_snprint(&mu_group_id[index],
7072 					     DP_MU_GROUP_LENGTH - index,
7073 					     " %d",
7074 					     peer->stats.tx.mu_group_id[i]);
7075 			i++;
7076 		}
7077 
7078 		DP_PRINT_STATS("User position list for GID %02d->%d: [%s]",
7079 			       i - DP_MU_GROUP_SHOW, i - 1, mu_group_id);
7080 	}
7081 
7082 	DP_PRINT_STATS("Last Packet RU index [%d], Size [%d]",
7083 		       peer->stats.tx.ru_start, peer->stats.tx.ru_tones);
7084 	DP_PRINT_STATS("RU Locations RU[26 52 106 242 484 996]:");
7085 	DP_PRINT_STATS("RU_26: %d", peer->stats.tx.ru_loc[0]);
7086 	DP_PRINT_STATS("RU 52: %d", peer->stats.tx.ru_loc[1]);
7087 	DP_PRINT_STATS("RU 106: %d", peer->stats.tx.ru_loc[2]);
7088 	DP_PRINT_STATS("RU 242: %d", peer->stats.tx.ru_loc[3]);
7089 	DP_PRINT_STATS("RU 484: %d", peer->stats.tx.ru_loc[4]);
7090 	DP_PRINT_STATS("RU 996: %d", peer->stats.tx.ru_loc[5]);
7091 
7092 	DP_PRINT_STATS("Aggregation:");
7093 	DP_PRINT_STATS("	Number of Msdu's Part of Amsdu = %d",
7094 			peer->stats.tx.amsdu_cnt);
7095 	DP_PRINT_STATS("	Number of Msdu's With No Msdu Level Aggregation = %d\n",
7096 			peer->stats.tx.non_amsdu_cnt);
7097 
7098 	DP_PRINT_STATS("Bytes and Packets transmitted  in last one sec:");
7099 	DP_PRINT_STATS("	Bytes transmitted in last sec: %d",
7100 		       peer->stats.tx.tx_byte_rate);
7101 	DP_PRINT_STATS("	Data transmitted in last sec: %d",
7102 		       peer->stats.tx.tx_data_rate);
7103 
7104 	DP_PRINT_STATS("Node Rx Stats:");
7105 	DP_PRINT_STATS("Packets Sent To Stack = %d",
7106 			peer->stats.rx.to_stack.num);
7107 	DP_PRINT_STATS("Bytes Sent To Stack = %llu",
7108 			peer->stats.rx.to_stack.bytes);
7109 	for (i = 0; i <  CDP_MAX_RX_RINGS; i++) {
7110 		DP_PRINT_STATS("Ring Id = %d", i);
7111 		DP_PRINT_STATS("	Packets Received = %d",
7112 				peer->stats.rx.rcvd_reo[i].num);
7113 		DP_PRINT_STATS("	Bytes Received = %llu",
7114 				peer->stats.rx.rcvd_reo[i].bytes);
7115 	}
7116 	DP_PRINT_STATS("Multicast Packets Received = %d",
7117 			peer->stats.rx.multicast.num);
7118 	DP_PRINT_STATS("Multicast Bytes Received = %llu",
7119 			peer->stats.rx.multicast.bytes);
7120 	DP_PRINT_STATS("Broadcast Packets Received = %d",
7121 			peer->stats.rx.bcast.num);
7122 	DP_PRINT_STATS("Broadcast Bytes Received = %llu",
7123 			peer->stats.rx.bcast.bytes);
7124 	DP_PRINT_STATS("Intra BSS Packets Received = %d",
7125 			peer->stats.rx.intra_bss.pkts.num);
7126 	DP_PRINT_STATS("Intra BSS Bytes Received = %llu",
7127 			peer->stats.rx.intra_bss.pkts.bytes);
7128 	DP_PRINT_STATS("Raw Packets Received = %d",
7129 			peer->stats.rx.raw.num);
7130 	DP_PRINT_STATS("Raw Bytes Received = %llu",
7131 			peer->stats.rx.raw.bytes);
7132 	DP_PRINT_STATS("Errors: MIC Errors = %d",
7133 			peer->stats.rx.err.mic_err);
7134 	DP_PRINT_STATS("Erros: Decryption Errors = %d",
7135 			peer->stats.rx.err.decrypt_err);
7136 	DP_PRINT_STATS("Msdu's Received As Part of Ampdu = %d",
7137 			peer->stats.rx.non_ampdu_cnt);
7138 	DP_PRINT_STATS("Msdu's Recived As Ampdu = %d",
7139 			peer->stats.rx.ampdu_cnt);
7140 	DP_PRINT_STATS("Msdu's Received Not Part of Amsdu's = %d",
7141 			peer->stats.rx.non_amsdu_cnt);
7142 	DP_PRINT_STATS("MSDUs Received As Part of Amsdu = %d",
7143 			peer->stats.rx.amsdu_cnt);
7144 	DP_PRINT_STATS("NAWDS : ");
7145 	DP_PRINT_STATS("	Nawds multicast Drop Rx Packet = %d",
7146 			peer->stats.rx.nawds_mcast_drop);
7147 	DP_PRINT_STATS("SGI ="
7148 			" 0.8us %d"
7149 			" 0.4us %d"
7150 			" 1.6us %d"
7151 			" 3.2us %d",
7152 			peer->stats.rx.sgi_count[0],
7153 			peer->stats.rx.sgi_count[1],
7154 			peer->stats.rx.sgi_count[2],
7155 			peer->stats.rx.sgi_count[3]);
7156 	DP_PRINT_STATS("BW Counts = 20MHZ %d 40MHZ %d 80MHZ %d 160MHZ %d",
7157 			peer->stats.rx.bw[0], peer->stats.rx.bw[1],
7158 			peer->stats.rx.bw[2], peer->stats.rx.bw[3]);
7159 	DP_PRINT_STATS("Reception Type ="
7160 			" SU %d,"
7161 			" MU_MIMO %d,"
7162 			" MU_OFDMA %d,"
7163 			" MU_OFDMA_MIMO %d",
7164 			peer->stats.rx.reception_type[0],
7165 			peer->stats.rx.reception_type[1],
7166 			peer->stats.rx.reception_type[2],
7167 			peer->stats.rx.reception_type[3]);
7168 
7169 	dp_print_common_rates_info(peer->stats.rx.pkt_type);
7170 
7171 	index = 0;
7172 	for (i = 0; i < SS_COUNT; i++) {
7173 		index += qdf_snprint(&nss[index], DP_NSS_LENGTH - index,
7174 				" %d", peer->stats.rx.nss[i]);
7175 	}
7176 	DP_PRINT_STATS("NSS(1-8) = %s",
7177 			nss);
7178 
7179 	DP_PRINT_STATS("Aggregation:");
7180 	DP_PRINT_STATS("	Msdu's Part of Ampdu = %d",
7181 			peer->stats.rx.ampdu_cnt);
7182 	DP_PRINT_STATS("	Msdu's With No Mpdu Level Aggregation = %d",
7183 			peer->stats.rx.non_ampdu_cnt);
7184 	DP_PRINT_STATS("	Msdu's Part of Amsdu = %d",
7185 			peer->stats.rx.amsdu_cnt);
7186 	DP_PRINT_STATS("	Msdu's With No Msdu Level Aggregation = %d",
7187 			peer->stats.rx.non_amsdu_cnt);
7188 
7189 	DP_PRINT_STATS("Bytes and Packets received in last one sec:");
7190 	DP_PRINT_STATS("	Bytes received in last sec: %d",
7191 		       peer->stats.rx.rx_byte_rate);
7192 	DP_PRINT_STATS("	Data received in last sec: %d",
7193 		       peer->stats.rx.rx_data_rate);
7194 }
7195 
7196 /*
7197  * dp_get_host_peer_stats()- function to print peer stats
7198  * @pdev_handle: DP_PDEV handle
7199  * @mac_addr: mac address of the peer
7200  *
7201  * Return: void
7202  */
7203 static void
7204 dp_get_host_peer_stats(struct cdp_pdev *pdev_handle, char *mac_addr)
7205 {
7206 	struct dp_peer *peer;
7207 	uint8_t local_id;
7208 
7209 	if (!mac_addr) {
7210 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
7211 			  "Invalid MAC address\n");
7212 		return;
7213 	}
7214 
7215 	peer = (struct dp_peer *)dp_find_peer_by_addr(pdev_handle, mac_addr,
7216 			&local_id);
7217 
7218 	if (!peer) {
7219 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
7220 			  "%s: Invalid peer\n", __func__);
7221 		return;
7222 	}
7223 
7224 	/* Making sure the peer is for the specific pdev */
7225 	if ((struct dp_pdev *)pdev_handle != peer->vdev->pdev) {
7226 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
7227 			  "%s: Peer is not for this pdev\n", __func__);
7228 		return;
7229 	}
7230 
7231 	dp_print_peer_stats(peer);
7232 	dp_peer_rxtid_stats(peer, dp_rx_tid_stats_cb, NULL);
7233 }
7234 
7235 /**
7236  * dp_print_soc_cfg_params()- Dump soc wlan config parameters
7237  * @soc_handle: Soc handle
7238  *
7239  * Return: void
7240  */
7241 static void
7242 dp_print_soc_cfg_params(struct dp_soc *soc)
7243 {
7244 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
7245 	uint8_t index = 0, i = 0;
7246 	char ring_mask[DP_MAX_INT_CONTEXTS_STRING_LENGTH];
7247 	int num_of_int_contexts;
7248 
7249 	if (!soc) {
7250 		dp_err("Context is null");
7251 		return;
7252 	}
7253 
7254 	soc_cfg_ctx = soc->wlan_cfg_ctx;
7255 
7256 	if (!soc_cfg_ctx) {
7257 		dp_err("Context is null");
7258 		return;
7259 	}
7260 
7261 	num_of_int_contexts =
7262 			wlan_cfg_get_num_contexts(soc_cfg_ctx);
7263 
7264 	DP_TRACE_STATS(DEBUG, "No. of interrupt contexts: %u",
7265 		       soc_cfg_ctx->num_int_ctxts);
7266 	DP_TRACE_STATS(DEBUG, "Max clients: %u",
7267 		       soc_cfg_ctx->max_clients);
7268 	DP_TRACE_STATS(DEBUG, "Max alloc size: %u ",
7269 		       soc_cfg_ctx->max_alloc_size);
7270 	DP_TRACE_STATS(DEBUG, "Per pdev tx ring: %u ",
7271 		       soc_cfg_ctx->per_pdev_tx_ring);
7272 	DP_TRACE_STATS(DEBUG, "Num tcl data rings: %u ",
7273 		       soc_cfg_ctx->num_tcl_data_rings);
7274 	DP_TRACE_STATS(DEBUG, "Per pdev rx ring: %u ",
7275 		       soc_cfg_ctx->per_pdev_rx_ring);
7276 	DP_TRACE_STATS(DEBUG, "Per pdev lmac ring: %u ",
7277 		       soc_cfg_ctx->per_pdev_lmac_ring);
7278 	DP_TRACE_STATS(DEBUG, "Num of reo dest rings: %u ",
7279 		       soc_cfg_ctx->num_reo_dest_rings);
7280 	DP_TRACE_STATS(DEBUG, "Num tx desc pool: %u ",
7281 		       soc_cfg_ctx->num_tx_desc_pool);
7282 	DP_TRACE_STATS(DEBUG, "Num tx ext desc pool: %u ",
7283 		       soc_cfg_ctx->num_tx_ext_desc_pool);
7284 	DP_TRACE_STATS(DEBUG, "Num tx desc: %u ",
7285 		       soc_cfg_ctx->num_tx_desc);
7286 	DP_TRACE_STATS(DEBUG, "Num tx ext desc: %u ",
7287 		       soc_cfg_ctx->num_tx_ext_desc);
7288 	DP_TRACE_STATS(DEBUG, "Htt packet type: %u ",
7289 		       soc_cfg_ctx->htt_packet_type);
7290 	DP_TRACE_STATS(DEBUG, "Max peer_ids: %u ",
7291 		       soc_cfg_ctx->max_peer_id);
7292 	DP_TRACE_STATS(DEBUG, "Tx ring size: %u ",
7293 		       soc_cfg_ctx->tx_ring_size);
7294 	DP_TRACE_STATS(DEBUG, "Tx comp ring size: %u ",
7295 		       soc_cfg_ctx->tx_comp_ring_size);
7296 	DP_TRACE_STATS(DEBUG, "Tx comp ring size nss: %u ",
7297 		       soc_cfg_ctx->tx_comp_ring_size_nss);
7298 	DP_TRACE_STATS(DEBUG, "Int batch threshold tx: %u ",
7299 		       soc_cfg_ctx->int_batch_threshold_tx);
7300 	DP_TRACE_STATS(DEBUG, "Int timer threshold tx: %u ",
7301 		       soc_cfg_ctx->int_timer_threshold_tx);
7302 	DP_TRACE_STATS(DEBUG, "Int batch threshold rx: %u ",
7303 		       soc_cfg_ctx->int_batch_threshold_rx);
7304 	DP_TRACE_STATS(DEBUG, "Int timer threshold rx: %u ",
7305 		       soc_cfg_ctx->int_timer_threshold_rx);
7306 	DP_TRACE_STATS(DEBUG, "Int batch threshold other: %u ",
7307 		       soc_cfg_ctx->int_batch_threshold_other);
7308 	DP_TRACE_STATS(DEBUG, "Int timer threshold other: %u ",
7309 		       soc_cfg_ctx->int_timer_threshold_other);
7310 
7311 	for (i = 0; i < num_of_int_contexts; i++) {
7312 		index += qdf_snprint(&ring_mask[index],
7313 				     DP_MAX_INT_CONTEXTS_STRING_LENGTH - index,
7314 				     " %d",
7315 				     soc_cfg_ctx->int_tx_ring_mask[i]);
7316 	}
7317 
7318 	DP_TRACE_STATS(DEBUG, "Tx ring mask (0-%d):%s",
7319 		       num_of_int_contexts, ring_mask);
7320 
7321 	index = 0;
7322 	for (i = 0; i < num_of_int_contexts; i++) {
7323 		index += qdf_snprint(&ring_mask[index],
7324 				     DP_MAX_INT_CONTEXTS_STRING_LENGTH - index,
7325 				     " %d",
7326 				     soc_cfg_ctx->int_rx_ring_mask[i]);
7327 	}
7328 
7329 	DP_TRACE_STATS(DEBUG, "Rx ring mask (0-%d):%s",
7330 		       num_of_int_contexts, ring_mask);
7331 
7332 	index = 0;
7333 	for (i = 0; i < num_of_int_contexts; i++) {
7334 		index += qdf_snprint(&ring_mask[index],
7335 				     DP_MAX_INT_CONTEXTS_STRING_LENGTH - index,
7336 				     " %d",
7337 				     soc_cfg_ctx->int_rx_mon_ring_mask[i]);
7338 	}
7339 
7340 	DP_TRACE_STATS(DEBUG, "Rx mon ring mask (0-%d):%s",
7341 		       num_of_int_contexts, ring_mask);
7342 
7343 	index = 0;
7344 	for (i = 0; i < num_of_int_contexts; i++) {
7345 		index += qdf_snprint(&ring_mask[index],
7346 				     DP_MAX_INT_CONTEXTS_STRING_LENGTH - index,
7347 				     " %d",
7348 				     soc_cfg_ctx->int_rx_err_ring_mask[i]);
7349 	}
7350 
7351 	DP_TRACE_STATS(DEBUG, "Rx err ring mask (0-%d):%s",
7352 		       num_of_int_contexts, ring_mask);
7353 
7354 	index = 0;
7355 	for (i = 0; i < num_of_int_contexts; i++) {
7356 		index += qdf_snprint(&ring_mask[index],
7357 				     DP_MAX_INT_CONTEXTS_STRING_LENGTH - index,
7358 				     " %d",
7359 				     soc_cfg_ctx->int_rx_wbm_rel_ring_mask[i]);
7360 	}
7361 
7362 	DP_TRACE_STATS(DEBUG, "Rx wbm rel ring mask (0-%d):%s",
7363 		       num_of_int_contexts, ring_mask);
7364 
7365 	index = 0;
7366 	for (i = 0; i < num_of_int_contexts; i++) {
7367 		index += qdf_snprint(&ring_mask[index],
7368 				     DP_MAX_INT_CONTEXTS_STRING_LENGTH - index,
7369 				     " %d",
7370 				     soc_cfg_ctx->int_reo_status_ring_mask[i]);
7371 	}
7372 
7373 	DP_TRACE_STATS(DEBUG, "Reo ring mask (0-%d):%s",
7374 		       num_of_int_contexts, ring_mask);
7375 
7376 	index = 0;
7377 	for (i = 0; i < num_of_int_contexts; i++) {
7378 		index += qdf_snprint(&ring_mask[index],
7379 				     DP_MAX_INT_CONTEXTS_STRING_LENGTH - index,
7380 				     " %d",
7381 				     soc_cfg_ctx->int_rxdma2host_ring_mask[i]);
7382 	}
7383 
7384 	DP_TRACE_STATS(DEBUG, "Rxdma2host ring mask (0-%d):%s",
7385 		       num_of_int_contexts, ring_mask);
7386 
7387 	index = 0;
7388 	for (i = 0; i < num_of_int_contexts; i++) {
7389 		index += qdf_snprint(&ring_mask[index],
7390 				     DP_MAX_INT_CONTEXTS_STRING_LENGTH - index,
7391 				     " %d",
7392 				     soc_cfg_ctx->int_host2rxdma_ring_mask[i]);
7393 	}
7394 
7395 	DP_TRACE_STATS(DEBUG, "Host2rxdma ring mask (0-%d):%s",
7396 		       num_of_int_contexts, ring_mask);
7397 
7398 	DP_TRACE_STATS(DEBUG, "Rx hash: %u ",
7399 		       soc_cfg_ctx->rx_hash);
7400 	DP_TRACE_STATS(DEBUG, "Tso enabled: %u ",
7401 		       soc_cfg_ctx->tso_enabled);
7402 	DP_TRACE_STATS(DEBUG, "Lro enabled: %u ",
7403 		       soc_cfg_ctx->lro_enabled);
7404 	DP_TRACE_STATS(DEBUG, "Sg enabled: %u ",
7405 		       soc_cfg_ctx->sg_enabled);
7406 	DP_TRACE_STATS(DEBUG, "Gro enabled: %u ",
7407 		       soc_cfg_ctx->gro_enabled);
7408 	DP_TRACE_STATS(DEBUG, "rawmode enabled: %u ",
7409 		       soc_cfg_ctx->rawmode_enabled);
7410 	DP_TRACE_STATS(DEBUG, "peer flow ctrl enabled: %u ",
7411 		       soc_cfg_ctx->peer_flow_ctrl_enabled);
7412 	DP_TRACE_STATS(DEBUG, "napi enabled: %u ",
7413 		       soc_cfg_ctx->napi_enabled);
7414 	DP_TRACE_STATS(DEBUG, "Tcp Udp checksum offload: %u ",
7415 		       soc_cfg_ctx->tcp_udp_checksumoffload);
7416 	DP_TRACE_STATS(DEBUG, "Defrag timeout check: %u ",
7417 		       soc_cfg_ctx->defrag_timeout_check);
7418 	DP_TRACE_STATS(DEBUG, "Rx defrag min timeout: %u ",
7419 		       soc_cfg_ctx->rx_defrag_min_timeout);
7420 	DP_TRACE_STATS(DEBUG, "WBM release ring: %u ",
7421 		       soc_cfg_ctx->wbm_release_ring);
7422 	DP_TRACE_STATS(DEBUG, "TCL CMD ring: %u ",
7423 		       soc_cfg_ctx->tcl_cmd_ring);
7424 	DP_TRACE_STATS(DEBUG, "TCL Status ring: %u ",
7425 		       soc_cfg_ctx->tcl_status_ring);
7426 	DP_TRACE_STATS(DEBUG, "REO Reinject ring: %u ",
7427 		       soc_cfg_ctx->reo_reinject_ring);
7428 	DP_TRACE_STATS(DEBUG, "RX release ring: %u ",
7429 		       soc_cfg_ctx->rx_release_ring);
7430 	DP_TRACE_STATS(DEBUG, "REO Exception ring: %u ",
7431 		       soc_cfg_ctx->reo_exception_ring);
7432 	DP_TRACE_STATS(DEBUG, "REO CMD ring: %u ",
7433 		       soc_cfg_ctx->reo_cmd_ring);
7434 	DP_TRACE_STATS(DEBUG, "REO STATUS ring: %u ",
7435 		       soc_cfg_ctx->reo_status_ring);
7436 	DP_TRACE_STATS(DEBUG, "RXDMA refill ring: %u ",
7437 		       soc_cfg_ctx->rxdma_refill_ring);
7438 	DP_TRACE_STATS(DEBUG, "RXDMA err dst ring: %u ",
7439 		       soc_cfg_ctx->rxdma_err_dst_ring);
7440 }
7441 
7442 /**
7443  * dp_print_vdev_cfg_params() - Print the pdev cfg parameters
7444  * @pdev_handle: DP pdev handle
7445  *
7446  * Return - void
7447  */
7448 static void
7449 dp_print_pdev_cfg_params(struct dp_pdev *pdev)
7450 {
7451 	struct wlan_cfg_dp_pdev_ctxt *pdev_cfg_ctx;
7452 
7453 	if (!pdev) {
7454 		dp_err("Context is null");
7455 		return;
7456 	}
7457 
7458 	pdev_cfg_ctx = pdev->wlan_cfg_ctx;
7459 
7460 	if (!pdev_cfg_ctx) {
7461 		dp_err("Context is null");
7462 		return;
7463 	}
7464 
7465 	DP_TRACE_STATS(DEBUG, "Rx dma buf ring size: %d ",
7466 		       pdev_cfg_ctx->rx_dma_buf_ring_size);
7467 	DP_TRACE_STATS(DEBUG, "DMA Mon buf ring size: %d ",
7468 		       pdev_cfg_ctx->dma_mon_buf_ring_size);
7469 	DP_TRACE_STATS(DEBUG, "DMA Mon dest ring size: %d ",
7470 		       pdev_cfg_ctx->dma_mon_dest_ring_size);
7471 	DP_TRACE_STATS(DEBUG, "DMA Mon status ring size: %d ",
7472 		       pdev_cfg_ctx->dma_mon_status_ring_size);
7473 	DP_TRACE_STATS(DEBUG, "Rxdma monitor desc ring: %d",
7474 		       pdev_cfg_ctx->rxdma_monitor_desc_ring);
7475 	DP_TRACE_STATS(DEBUG, "Num mac rings: %d ",
7476 		       pdev_cfg_ctx->num_mac_rings);
7477 }
7478 
7479 /**
7480  * dp_txrx_stats_help() - Helper function for Txrx_Stats
7481  *
7482  * Return: None
7483  */
7484 static void dp_txrx_stats_help(void)
7485 {
7486 	dp_info("Command: iwpriv wlan0 txrx_stats <stats_option> <mac_id>");
7487 	dp_info("stats_option:");
7488 	dp_info("  1 -- HTT Tx Statistics");
7489 	dp_info("  2 -- HTT Rx Statistics");
7490 	dp_info("  3 -- HTT Tx HW Queue Statistics");
7491 	dp_info("  4 -- HTT Tx HW Sched Statistics");
7492 	dp_info("  5 -- HTT Error Statistics");
7493 	dp_info("  6 -- HTT TQM Statistics");
7494 	dp_info("  7 -- HTT TQM CMDQ Statistics");
7495 	dp_info("  8 -- HTT TX_DE_CMN Statistics");
7496 	dp_info("  9 -- HTT Tx Rate Statistics");
7497 	dp_info(" 10 -- HTT Rx Rate Statistics");
7498 	dp_info(" 11 -- HTT Peer Statistics");
7499 	dp_info(" 12 -- HTT Tx SelfGen Statistics");
7500 	dp_info(" 13 -- HTT Tx MU HWQ Statistics");
7501 	dp_info(" 14 -- HTT RING_IF_INFO Statistics");
7502 	dp_info(" 15 -- HTT SRNG Statistics");
7503 	dp_info(" 16 -- HTT SFM Info Statistics");
7504 	dp_info(" 17 -- HTT PDEV_TX_MU_MIMO_SCHED INFO Statistics");
7505 	dp_info(" 18 -- HTT Peer List Details");
7506 	dp_info(" 20 -- Clear Host Statistics");
7507 	dp_info(" 21 -- Host Rx Rate Statistics");
7508 	dp_info(" 22 -- Host Tx Rate Statistics");
7509 	dp_info(" 23 -- Host Tx Statistics");
7510 	dp_info(" 24 -- Host Rx Statistics");
7511 	dp_info(" 25 -- Host AST Statistics");
7512 	dp_info(" 26 -- Host SRNG PTR Statistics");
7513 	dp_info(" 27 -- Host Mon Statistics");
7514 	dp_info(" 28 -- Host REO Queue Statistics");
7515 	dp_info(" 29 -- Host Soc cfg param Statistics");
7516 	dp_info(" 30 -- Host pdev cfg param Statistics");
7517 }
7518 
7519 /**
7520  * dp_print_host_stats()- Function to print the stats aggregated at host
7521  * @vdev_handle: DP_VDEV handle
7522  * @type: host stats type
7523  *
7524  * Return: 0 on success, print error message in case of failure
7525  */
7526 static int
7527 dp_print_host_stats(struct cdp_vdev *vdev_handle,
7528 		    struct cdp_txrx_stats_req *req)
7529 {
7530 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
7531 	struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;
7532 	enum cdp_host_txrx_stats type =
7533 			dp_stats_mapping_table[req->stats][STATS_HOST];
7534 
7535 	dp_aggregate_pdev_stats(pdev);
7536 
7537 	switch (type) {
7538 	case TXRX_CLEAR_STATS:
7539 		dp_txrx_host_stats_clr(vdev);
7540 		break;
7541 	case TXRX_RX_RATE_STATS:
7542 		dp_print_rx_rates(vdev);
7543 		break;
7544 	case TXRX_TX_RATE_STATS:
7545 		dp_print_tx_rates(vdev);
7546 		break;
7547 	case TXRX_TX_HOST_STATS:
7548 		dp_print_pdev_tx_stats(pdev);
7549 		dp_print_soc_tx_stats(pdev->soc);
7550 		break;
7551 	case TXRX_RX_HOST_STATS:
7552 		dp_print_pdev_rx_stats(pdev);
7553 		dp_print_soc_rx_stats(pdev->soc);
7554 		break;
7555 	case TXRX_AST_STATS:
7556 		dp_print_ast_stats(pdev->soc);
7557 		dp_print_peer_table(vdev);
7558 		break;
7559 	case TXRX_SRNG_PTR_STATS:
7560 		dp_print_ring_stats(pdev);
7561 		break;
7562 	case TXRX_RX_MON_STATS:
7563 		dp_print_pdev_rx_mon_stats(pdev);
7564 		break;
7565 	case TXRX_REO_QUEUE_STATS:
7566 		dp_get_host_peer_stats((struct cdp_pdev *)pdev, req->peer_addr);
7567 		break;
7568 	case TXRX_SOC_CFG_PARAMS:
7569 		dp_print_soc_cfg_params(pdev->soc);
7570 		break;
7571 	case TXRX_PDEV_CFG_PARAMS:
7572 		dp_print_pdev_cfg_params(pdev);
7573 		break;
7574 	default:
7575 		dp_info("Wrong Input For TxRx Host Stats");
7576 		dp_txrx_stats_help();
7577 		break;
7578 	}
7579 	return 0;
7580 }
7581 
7582 /*
7583  * dp_ppdu_ring_reset()- Reset PPDU Stats ring
7584  * @pdev: DP_PDEV handle
7585  *
7586  * Return: void
7587  */
7588 static void
7589 dp_ppdu_ring_reset(struct dp_pdev *pdev)
7590 {
7591 	struct htt_rx_ring_tlv_filter htt_tlv_filter;
7592 	int mac_id;
7593 
7594 	qdf_mem_zero(&(htt_tlv_filter), sizeof(htt_tlv_filter));
7595 
7596 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
7597 		int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id,
7598 							pdev->pdev_id);
7599 
7600 		htt_h2t_rx_ring_cfg(pdev->soc->htt_handle, mac_for_pdev,
7601 			pdev->rxdma_mon_status_ring[mac_id].hal_srng,
7602 			RXDMA_MONITOR_STATUS, RX_BUFFER_SIZE, &htt_tlv_filter);
7603 	}
7604 }
7605 
7606 /*
7607  * dp_ppdu_ring_cfg()- Configure PPDU Stats ring
7608  * @pdev: DP_PDEV handle
7609  *
7610  * Return: void
7611  */
7612 static void
7613 dp_ppdu_ring_cfg(struct dp_pdev *pdev)
7614 {
7615 	struct htt_rx_ring_tlv_filter htt_tlv_filter = {0};
7616 	int mac_id;
7617 
7618 	htt_tlv_filter.mpdu_start = 1;
7619 	htt_tlv_filter.msdu_start = 0;
7620 	htt_tlv_filter.packet = 0;
7621 	htt_tlv_filter.msdu_end = 0;
7622 	htt_tlv_filter.mpdu_end = 0;
7623 	htt_tlv_filter.attention = 0;
7624 	htt_tlv_filter.ppdu_start = 1;
7625 	htt_tlv_filter.ppdu_end = 1;
7626 	htt_tlv_filter.ppdu_end_user_stats = 1;
7627 	htt_tlv_filter.ppdu_end_user_stats_ext = 1;
7628 	htt_tlv_filter.ppdu_end_status_done = 1;
7629 	htt_tlv_filter.enable_fp = 1;
7630 	htt_tlv_filter.enable_md = 0;
7631 	if (pdev->neighbour_peers_added &&
7632 	    pdev->soc->hw_nac_monitor_support) {
7633 		htt_tlv_filter.enable_md = 1;
7634 		htt_tlv_filter.packet_header = 1;
7635 	}
7636 	if (pdev->mcopy_mode) {
7637 		htt_tlv_filter.packet_header = 1;
7638 		htt_tlv_filter.enable_mo = 1;
7639 	}
7640 	htt_tlv_filter.fp_mgmt_filter = FILTER_MGMT_ALL;
7641 	htt_tlv_filter.fp_ctrl_filter = FILTER_CTRL_ALL;
7642 	htt_tlv_filter.fp_data_filter = FILTER_DATA_ALL;
7643 	htt_tlv_filter.mo_mgmt_filter = FILTER_MGMT_ALL;
7644 	htt_tlv_filter.mo_ctrl_filter = FILTER_CTRL_ALL;
7645 	htt_tlv_filter.mo_data_filter = FILTER_DATA_ALL;
7646 	if (pdev->neighbour_peers_added &&
7647 	    pdev->soc->hw_nac_monitor_support)
7648 		htt_tlv_filter.md_data_filter = FILTER_DATA_ALL;
7649 
7650 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
7651 		int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id,
7652 						pdev->pdev_id);
7653 
7654 		htt_h2t_rx_ring_cfg(pdev->soc->htt_handle, mac_for_pdev,
7655 			pdev->rxdma_mon_status_ring[mac_id].hal_srng,
7656 			RXDMA_MONITOR_STATUS, RX_BUFFER_SIZE, &htt_tlv_filter);
7657 	}
7658 }
7659 
7660 /*
7661  * is_ppdu_txrx_capture_enabled() - API to check both pktlog and debug_sniffer
7662  *                              modes are enabled or not.
7663  * @dp_pdev: dp pdev handle.
7664  *
7665  * Return: bool
7666  */
7667 static inline bool is_ppdu_txrx_capture_enabled(struct dp_pdev *pdev)
7668 {
7669 	if (!pdev->pktlog_ppdu_stats && !pdev->tx_sniffer_enable &&
7670 	    !pdev->mcopy_mode)
7671 		return true;
7672 	else
7673 		return false;
7674 }
7675 
7676 /*
7677  *dp_set_bpr_enable() - API to enable/disable bpr feature
7678  *@pdev_handle: DP_PDEV handle.
7679  *@val: Provided value.
7680  *
7681  *Return: 0 for success. nonzero for failure.
7682  */
7683 static QDF_STATUS
7684 dp_set_bpr_enable(struct cdp_pdev *pdev_handle, int val)
7685 {
7686 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
7687 
7688 	switch (val) {
7689 	case CDP_BPR_DISABLE:
7690 		pdev->bpr_enable = CDP_BPR_DISABLE;
7691 		if (!pdev->pktlog_ppdu_stats && !pdev->enhanced_stats_en &&
7692 		    !pdev->tx_sniffer_enable && !pdev->mcopy_mode) {
7693 			dp_h2t_cfg_stats_msg_send(pdev, 0, pdev->pdev_id);
7694 		} else if (pdev->enhanced_stats_en &&
7695 			   !pdev->tx_sniffer_enable && !pdev->mcopy_mode &&
7696 			   !pdev->pktlog_ppdu_stats) {
7697 			dp_h2t_cfg_stats_msg_send(pdev,
7698 						  DP_PPDU_STATS_CFG_ENH_STATS,
7699 						  pdev->pdev_id);
7700 		}
7701 		break;
7702 	case CDP_BPR_ENABLE:
7703 		pdev->bpr_enable = CDP_BPR_ENABLE;
7704 		if (!pdev->enhanced_stats_en && !pdev->tx_sniffer_enable &&
7705 		    !pdev->mcopy_mode && !pdev->pktlog_ppdu_stats) {
7706 			dp_h2t_cfg_stats_msg_send(pdev,
7707 						  DP_PPDU_STATS_CFG_BPR,
7708 						  pdev->pdev_id);
7709 		} else if (pdev->enhanced_stats_en &&
7710 			   !pdev->tx_sniffer_enable && !pdev->mcopy_mode &&
7711 			   !pdev->pktlog_ppdu_stats) {
7712 			dp_h2t_cfg_stats_msg_send(pdev,
7713 						  DP_PPDU_STATS_CFG_BPR_ENH,
7714 						  pdev->pdev_id);
7715 		} else if (pdev->pktlog_ppdu_stats) {
7716 			dp_h2t_cfg_stats_msg_send(pdev,
7717 						  DP_PPDU_STATS_CFG_BPR_PKTLOG,
7718 						  pdev->pdev_id);
7719 		}
7720 		break;
7721 	default:
7722 		break;
7723 	}
7724 
7725 	return QDF_STATUS_SUCCESS;
7726 }
7727 
7728 /*
7729  * dp_config_debug_sniffer()- API to enable/disable debug sniffer
7730  * @pdev_handle: DP_PDEV handle
7731  * @val: user provided value
7732  *
7733  * Return: 0 for success. nonzero for failure.
7734  */
7735 static QDF_STATUS
7736 dp_config_debug_sniffer(struct cdp_pdev *pdev_handle, int val)
7737 {
7738 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
7739 	QDF_STATUS status = QDF_STATUS_SUCCESS;
7740 
7741 	if (pdev->mcopy_mode)
7742 		dp_reset_monitor_mode(pdev_handle);
7743 
7744 	switch (val) {
7745 	case 0:
7746 		pdev->tx_sniffer_enable = 0;
7747 		pdev->mcopy_mode = 0;
7748 		pdev->monitor_configured = false;
7749 
7750 		if (!pdev->pktlog_ppdu_stats && !pdev->enhanced_stats_en &&
7751 		    !pdev->bpr_enable) {
7752 			dp_h2t_cfg_stats_msg_send(pdev, 0, pdev->pdev_id);
7753 			dp_ppdu_ring_reset(pdev);
7754 		} else if (pdev->enhanced_stats_en && !pdev->bpr_enable) {
7755 			dp_h2t_cfg_stats_msg_send(pdev,
7756 				DP_PPDU_STATS_CFG_ENH_STATS, pdev->pdev_id);
7757 		} else if (!pdev->enhanced_stats_en && pdev->bpr_enable) {
7758 			dp_h2t_cfg_stats_msg_send(pdev,
7759 						  DP_PPDU_STATS_CFG_BPR_ENH,
7760 						  pdev->pdev_id);
7761 		} else {
7762 			dp_h2t_cfg_stats_msg_send(pdev,
7763 						  DP_PPDU_STATS_CFG_BPR,
7764 						  pdev->pdev_id);
7765 		}
7766 		break;
7767 
7768 	case 1:
7769 		pdev->tx_sniffer_enable = 1;
7770 		pdev->mcopy_mode = 0;
7771 		pdev->monitor_configured = false;
7772 
7773 		if (!pdev->pktlog_ppdu_stats)
7774 			dp_h2t_cfg_stats_msg_send(pdev,
7775 				DP_PPDU_STATS_CFG_SNIFFER, pdev->pdev_id);
7776 		break;
7777 	case 2:
7778 		if (pdev->monitor_vdev) {
7779 			status = QDF_STATUS_E_RESOURCES;
7780 			break;
7781 		}
7782 
7783 		pdev->mcopy_mode = 1;
7784 		dp_pdev_configure_monitor_rings(pdev);
7785 		pdev->monitor_configured = true;
7786 		pdev->tx_sniffer_enable = 0;
7787 
7788 		if (!pdev->pktlog_ppdu_stats)
7789 			dp_h2t_cfg_stats_msg_send(pdev,
7790 				DP_PPDU_STATS_CFG_SNIFFER, pdev->pdev_id);
7791 		break;
7792 	default:
7793 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
7794 			"Invalid value");
7795 		break;
7796 	}
7797 	return status;
7798 }
7799 
7800 /*
7801  * dp_enable_enhanced_stats()- API to enable enhanced statistcs
7802  * @pdev_handle: DP_PDEV handle
7803  *
7804  * Return: void
7805  */
7806 static void
7807 dp_enable_enhanced_stats(struct cdp_pdev *pdev_handle)
7808 {
7809 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
7810 
7811 	if (pdev->enhanced_stats_en == 0)
7812 		dp_cal_client_timer_start(pdev->cal_client_ctx);
7813 
7814 	pdev->enhanced_stats_en = 1;
7815 
7816 	if (!pdev->mcopy_mode && !pdev->neighbour_peers_added &&
7817 	    !pdev->monitor_vdev)
7818 		dp_ppdu_ring_cfg(pdev);
7819 
7820 	if (is_ppdu_txrx_capture_enabled(pdev) && !pdev->bpr_enable) {
7821 		dp_h2t_cfg_stats_msg_send(pdev, DP_PPDU_STATS_CFG_ENH_STATS, pdev->pdev_id);
7822 	} else if (is_ppdu_txrx_capture_enabled(pdev) && pdev->bpr_enable) {
7823 		dp_h2t_cfg_stats_msg_send(pdev,
7824 					  DP_PPDU_STATS_CFG_BPR_ENH,
7825 					  pdev->pdev_id);
7826 	}
7827 }
7828 
7829 /*
7830  * dp_disable_enhanced_stats()- API to disable enhanced statistcs
7831  * @pdev_handle: DP_PDEV handle
7832  *
7833  * Return: void
7834  */
7835 static void
7836 dp_disable_enhanced_stats(struct cdp_pdev *pdev_handle)
7837 {
7838 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
7839 
7840 	if (pdev->enhanced_stats_en == 1)
7841 		dp_cal_client_timer_stop(pdev->cal_client_ctx);
7842 
7843 	pdev->enhanced_stats_en = 0;
7844 
7845 	if (is_ppdu_txrx_capture_enabled(pdev) && !pdev->bpr_enable) {
7846 		dp_h2t_cfg_stats_msg_send(pdev, 0, pdev->pdev_id);
7847 	} else if (is_ppdu_txrx_capture_enabled(pdev) && pdev->bpr_enable) {
7848 		dp_h2t_cfg_stats_msg_send(pdev,
7849 					  DP_PPDU_STATS_CFG_BPR,
7850 					  pdev->pdev_id);
7851 	}
7852 
7853 	if (!pdev->mcopy_mode && !pdev->neighbour_peers_added &&
7854 	    !pdev->monitor_vdev)
7855 		dp_ppdu_ring_reset(pdev);
7856 }
7857 
7858 /*
7859  * dp_get_fw_peer_stats()- function to print peer stats
7860  * @pdev_handle: DP_PDEV handle
7861  * @mac_addr: mac address of the peer
7862  * @cap: Type of htt stats requested
7863  * @is_wait: if set, wait on completion from firmware response
7864  *
7865  * Currently Supporting only MAC ID based requests Only
7866  *	1: HTT_PEER_STATS_REQ_MODE_NO_QUERY
7867  *	2: HTT_PEER_STATS_REQ_MODE_QUERY_TQM
7868  *	3: HTT_PEER_STATS_REQ_MODE_FLUSH_TQM
7869  *
7870  * Return: void
7871  */
7872 static void
7873 dp_get_fw_peer_stats(struct cdp_pdev *pdev_handle, uint8_t *mac_addr,
7874 		uint32_t cap, uint32_t is_wait)
7875 {
7876 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
7877 	int i;
7878 	uint32_t config_param0 = 0;
7879 	uint32_t config_param1 = 0;
7880 	uint32_t config_param2 = 0;
7881 	uint32_t config_param3 = 0;
7882 
7883 	HTT_DBG_EXT_STATS_PEER_INFO_IS_MAC_ADDR_SET(config_param0, 1);
7884 	config_param0 |= (1 << (cap + 1));
7885 
7886 	for (i = 0; i < HTT_PEER_STATS_MAX_TLV; i++) {
7887 		config_param1 |= (1 << i);
7888 	}
7889 
7890 	config_param2 |= (mac_addr[0] & 0x000000ff);
7891 	config_param2 |= ((mac_addr[1] << 8) & 0x0000ff00);
7892 	config_param2 |= ((mac_addr[2] << 16) & 0x00ff0000);
7893 	config_param2 |= ((mac_addr[3] << 24) & 0xff000000);
7894 
7895 	config_param3 |= (mac_addr[4] & 0x000000ff);
7896 	config_param3 |= ((mac_addr[5] << 8) & 0x0000ff00);
7897 
7898 	if (is_wait) {
7899 		qdf_event_reset(&pdev->fw_peer_stats_event);
7900 		dp_h2t_ext_stats_msg_send(pdev, HTT_DBG_EXT_STATS_PEER_INFO,
7901 					  config_param0, config_param1,
7902 					  config_param2, config_param3,
7903 					  0, 1, 0);
7904 		qdf_wait_single_event(&pdev->fw_peer_stats_event,
7905 				      DP_FW_PEER_STATS_CMP_TIMEOUT_MSEC);
7906 	} else {
7907 		dp_h2t_ext_stats_msg_send(pdev, HTT_DBG_EXT_STATS_PEER_INFO,
7908 					  config_param0, config_param1,
7909 					  config_param2, config_param3,
7910 					  0, 0, 0);
7911 	}
7912 
7913 }
7914 
7915 /* This struct definition will be removed from here
7916  * once it get added in FW headers*/
7917 struct httstats_cmd_req {
7918     uint32_t    config_param0;
7919     uint32_t    config_param1;
7920     uint32_t    config_param2;
7921     uint32_t    config_param3;
7922     int cookie;
7923     u_int8_t    stats_id;
7924 };
7925 
7926 /*
7927  * dp_get_htt_stats: function to process the httstas request
7928  * @pdev_handle: DP pdev handle
7929  * @data: pointer to request data
7930  * @data_len: length for request data
7931  *
7932  * return: void
7933  */
7934 static void
7935 dp_get_htt_stats(struct cdp_pdev *pdev_handle, void *data, uint32_t data_len)
7936 {
7937 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
7938 	struct httstats_cmd_req *req = (struct httstats_cmd_req *)data;
7939 
7940 	QDF_ASSERT(data_len == sizeof(struct httstats_cmd_req));
7941 	dp_h2t_ext_stats_msg_send(pdev, req->stats_id,
7942 				req->config_param0, req->config_param1,
7943 				req->config_param2, req->config_param3,
7944 				req->cookie, 0, 0);
7945 }
7946 
7947 /*
7948  * dp_set_pdev_param: function to set parameters in pdev
7949  * @pdev_handle: DP pdev handle
7950  * @param: parameter type to be set
7951  * @val: value of parameter to be set
7952  *
7953  * Return: 0 for success. nonzero for failure.
7954  */
7955 static QDF_STATUS dp_set_pdev_param(struct cdp_pdev *pdev_handle,
7956 				    enum cdp_pdev_param_type param,
7957 				    uint8_t val)
7958 {
7959 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
7960 	switch (param) {
7961 	case CDP_CONFIG_DEBUG_SNIFFER:
7962 		return dp_config_debug_sniffer(pdev_handle, val);
7963 	case CDP_CONFIG_BPR_ENABLE:
7964 		return dp_set_bpr_enable(pdev_handle, val);
7965 	case CDP_CONFIG_PRIMARY_RADIO:
7966 		pdev->is_primary = val;
7967 		break;
7968 	default:
7969 		return QDF_STATUS_E_INVAL;
7970 	}
7971 	return QDF_STATUS_SUCCESS;
7972 }
7973 
7974 /*
7975  * dp_get_vdev_param: function to get parameters from vdev
7976  * @param: parameter type to get value
7977  *
7978  * return: void
7979  */
7980 static uint32_t dp_get_vdev_param(struct cdp_vdev *vdev_handle,
7981 				  enum cdp_vdev_param_type param)
7982 {
7983 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
7984 	uint32_t val;
7985 
7986 	switch (param) {
7987 	case CDP_ENABLE_WDS:
7988 		val = vdev->wds_enabled;
7989 		break;
7990 	case CDP_ENABLE_MEC:
7991 		val = vdev->mec_enabled;
7992 		break;
7993 	case CDP_ENABLE_DA_WAR:
7994 		val = vdev->pdev->soc->da_war_enabled;
7995 		break;
7996 	default:
7997 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
7998 			  "param value %d is wrong\n",
7999 			  param);
8000 		val = -1;
8001 		break;
8002 	}
8003 
8004 	return val;
8005 }
8006 
8007 /*
8008  * dp_set_vdev_param: function to set parameters in vdev
8009  * @param: parameter type to be set
8010  * @val: value of parameter to be set
8011  *
8012  * return: void
8013  */
8014 static void dp_set_vdev_param(struct cdp_vdev *vdev_handle,
8015 		enum cdp_vdev_param_type param, uint32_t val)
8016 {
8017 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
8018 	switch (param) {
8019 	case CDP_ENABLE_WDS:
8020 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
8021 			  "wds_enable %d for vdev(%p) id(%d)\n",
8022 			  val, vdev, vdev->vdev_id);
8023 		vdev->wds_enabled = val;
8024 		break;
8025 	case CDP_ENABLE_MEC:
8026 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
8027 			  "mec_enable %d for vdev(%p) id(%d)\n",
8028 			  val, vdev, vdev->vdev_id);
8029 		vdev->mec_enabled = val;
8030 		break;
8031 	case CDP_ENABLE_DA_WAR:
8032 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
8033 			  "da_war_enable %d for vdev(%p) id(%d)\n",
8034 			  val, vdev, vdev->vdev_id);
8035 		vdev->pdev->soc->da_war_enabled = val;
8036 		dp_wds_flush_ast_table_wifi3(((struct cdp_soc_t *)
8037 					     vdev->pdev->soc));
8038 		break;
8039 	case CDP_ENABLE_NAWDS:
8040 		vdev->nawds_enabled = val;
8041 		break;
8042 	case CDP_ENABLE_MCAST_EN:
8043 		vdev->mcast_enhancement_en = val;
8044 		break;
8045 	case CDP_ENABLE_PROXYSTA:
8046 		vdev->proxysta_vdev = val;
8047 		break;
8048 	case CDP_UPDATE_TDLS_FLAGS:
8049 		vdev->tdls_link_connected = val;
8050 		break;
8051 	case CDP_CFG_WDS_AGING_TIMER:
8052 		if (val == 0)
8053 			qdf_timer_stop(&vdev->pdev->soc->ast_aging_timer);
8054 		else if (val != vdev->wds_aging_timer_val)
8055 			qdf_timer_mod(&vdev->pdev->soc->ast_aging_timer, val);
8056 
8057 		vdev->wds_aging_timer_val = val;
8058 		break;
8059 	case CDP_ENABLE_AP_BRIDGE:
8060 		if (wlan_op_mode_sta != vdev->opmode)
8061 			vdev->ap_bridge_enabled = val;
8062 		else
8063 			vdev->ap_bridge_enabled = false;
8064 		break;
8065 	case CDP_ENABLE_CIPHER:
8066 		vdev->sec_type = val;
8067 		break;
8068 	case CDP_ENABLE_QWRAP_ISOLATION:
8069 		vdev->isolation_vdev = val;
8070 		break;
8071 	default:
8072 		break;
8073 	}
8074 
8075 	dp_tx_vdev_update_search_flags(vdev);
8076 }
8077 
8078 /**
8079  * dp_peer_set_nawds: set nawds bit in peer
8080  * @peer_handle: pointer to peer
8081  * @value: enable/disable nawds
8082  *
8083  * return: void
8084  */
8085 static void dp_peer_set_nawds(struct cdp_peer *peer_handle, uint8_t value)
8086 {
8087 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
8088 	peer->nawds_enabled = value;
8089 }
8090 
8091 /*
8092  * dp_set_vdev_dscp_tid_map_wifi3(): Update Map ID selected for particular vdev
8093  * @vdev_handle: DP_VDEV handle
8094  * @map_id:ID of map that needs to be updated
8095  *
8096  * Return: void
8097  */
8098 static void dp_set_vdev_dscp_tid_map_wifi3(struct cdp_vdev *vdev_handle,
8099 		uint8_t map_id)
8100 {
8101 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
8102 	vdev->dscp_tid_map_id = map_id;
8103 	return;
8104 }
8105 
8106 #ifdef DP_RATETABLE_SUPPORT
8107 static int dp_txrx_get_ratekbps(int preamb, int mcs,
8108 				int htflag, int gintval)
8109 {
8110 	return dp_getrateindex((uint32_t)gintval, (uint16_t)mcs, 1,
8111 			       (uint8_t)preamb, 1);
8112 }
8113 #else
8114 static int dp_txrx_get_ratekbps(int preamb, int mcs,
8115 				int htflag, int gintval)
8116 {
8117 	return 0;
8118 }
8119 #endif
8120 
8121 /* dp_txrx_get_pdev_stats - Returns cdp_pdev_stats
8122  * @peer_handle: DP pdev handle
8123  *
8124  * return : cdp_pdev_stats pointer
8125  */
8126 static struct cdp_pdev_stats*
8127 dp_txrx_get_pdev_stats(struct cdp_pdev *pdev_handle)
8128 {
8129 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
8130 
8131 	dp_aggregate_pdev_stats(pdev);
8132 
8133 	return &pdev->stats;
8134 }
8135 
8136 /* dp_txrx_get_peer_stats - will return cdp_peer_stats
8137  * @peer_handle: DP_PEER handle
8138  *
8139  * return : cdp_peer_stats pointer
8140  */
8141 static struct cdp_peer_stats*
8142 		dp_txrx_get_peer_stats(struct cdp_peer *peer_handle)
8143 {
8144 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
8145 
8146 	qdf_assert(peer);
8147 
8148 	return &peer->stats;
8149 }
8150 
8151 /* dp_txrx_reset_peer_stats - reset cdp_peer_stats for particular peer
8152  * @peer_handle: DP_PEER handle
8153  *
8154  * return : void
8155  */
8156 static void dp_txrx_reset_peer_stats(struct cdp_peer *peer_handle)
8157 {
8158 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
8159 
8160 	qdf_assert(peer);
8161 
8162 	qdf_mem_zero(&peer->stats, sizeof(peer->stats));
8163 }
8164 
8165 /* dp_txrx_get_vdev_stats - Update buffer with cdp_vdev_stats
8166  * @vdev_handle: DP_VDEV handle
8167  * @buf: buffer for vdev stats
8168  *
8169  * return : int
8170  */
8171 static int  dp_txrx_get_vdev_stats(struct cdp_vdev *vdev_handle, void *buf,
8172 				   bool is_aggregate)
8173 {
8174 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
8175 	struct cdp_vdev_stats *vdev_stats;
8176 	struct dp_pdev *pdev;
8177 	struct dp_soc *soc;
8178 
8179 	if (!vdev)
8180 		return 1;
8181 
8182 	pdev = vdev->pdev;
8183 	if (!pdev)
8184 		return 1;
8185 
8186 	soc = pdev->soc;
8187 	vdev_stats = (struct cdp_vdev_stats *)buf;
8188 
8189 	if (is_aggregate) {
8190 		qdf_spin_lock_bh(&soc->peer_ref_mutex);
8191 		dp_aggregate_vdev_stats(vdev, buf);
8192 		qdf_spin_unlock_bh(&soc->peer_ref_mutex);
8193 	} else {
8194 		qdf_mem_copy(vdev_stats, &vdev->stats, sizeof(vdev->stats));
8195 	}
8196 
8197 	return 0;
8198 }
8199 
8200 /*
8201  * dp_get_total_per(): get total per
8202  * @pdev_handle: DP_PDEV handle
8203  *
8204  * Return: % error rate using retries per packet and success packets
8205  */
8206 static int dp_get_total_per(struct cdp_pdev *pdev_handle)
8207 {
8208 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
8209 
8210 	dp_aggregate_pdev_stats(pdev);
8211 	if ((pdev->stats.tx.tx_success.num + pdev->stats.tx.retries) == 0)
8212 		return 0;
8213 	return ((pdev->stats.tx.retries * 100) /
8214 		((pdev->stats.tx.tx_success.num) + (pdev->stats.tx.retries)));
8215 }
8216 
8217 /*
8218  * dp_txrx_stats_publish(): publish pdev stats into a buffer
8219  * @pdev_handle: DP_PDEV handle
8220  * @buf: to hold pdev_stats
8221  *
8222  * Return: int
8223  */
8224 static int
8225 dp_txrx_stats_publish(struct cdp_pdev *pdev_handle, void *buf)
8226 {
8227 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
8228 	struct cdp_pdev_stats *buffer = (struct cdp_pdev_stats *) buf;
8229 	struct cdp_txrx_stats_req req = {0,};
8230 
8231 	dp_aggregate_pdev_stats(pdev);
8232 	req.stats = (enum cdp_stats)HTT_DBG_EXT_STATS_PDEV_TX;
8233 	req.cookie_val = 1;
8234 	dp_h2t_ext_stats_msg_send(pdev, req.stats, req.param0,
8235 				req.param1, req.param2, req.param3, 0,
8236 				req.cookie_val, 0);
8237 
8238 	msleep(DP_MAX_SLEEP_TIME);
8239 
8240 	req.stats = (enum cdp_stats)HTT_DBG_EXT_STATS_PDEV_RX;
8241 	req.cookie_val = 1;
8242 	dp_h2t_ext_stats_msg_send(pdev, req.stats, req.param0,
8243 				req.param1, req.param2, req.param3, 0,
8244 				req.cookie_val, 0);
8245 
8246 	msleep(DP_MAX_SLEEP_TIME);
8247 	qdf_mem_copy(buffer, &pdev->stats, sizeof(pdev->stats));
8248 
8249 	return TXRX_STATS_LEVEL;
8250 }
8251 
8252 /**
8253  * dp_set_pdev_dscp_tid_map_wifi3(): update dscp tid map in pdev
8254  * @pdev: DP_PDEV handle
8255  * @map_id: ID of map that needs to be updated
8256  * @tos: index value in map
8257  * @tid: tid value passed by the user
8258  *
8259  * Return: void
8260  */
8261 static void dp_set_pdev_dscp_tid_map_wifi3(struct cdp_pdev *pdev_handle,
8262 		uint8_t map_id, uint8_t tos, uint8_t tid)
8263 {
8264 	uint8_t dscp;
8265 	struct dp_pdev *pdev = (struct dp_pdev *) pdev_handle;
8266 	struct dp_soc *soc = pdev->soc;
8267 
8268 	if (!soc)
8269 		return;
8270 
8271 	dscp = (tos >> DP_IP_DSCP_SHIFT) & DP_IP_DSCP_MASK;
8272 	pdev->dscp_tid_map[map_id][dscp] = tid;
8273 
8274 	if (map_id < soc->num_hw_dscp_tid_map)
8275 		hal_tx_update_dscp_tid(soc->hal_soc, tid,
8276 				       map_id, dscp);
8277 	return;
8278 }
8279 
8280 /**
8281  * dp_hmmc_tid_override_en_wifi3(): Function to enable hmmc tid override.
8282  * @pdev_handle: pdev handle
8283  * @val: hmmc-dscp flag value
8284  *
8285  * Return: void
8286  */
8287 static void dp_hmmc_tid_override_en_wifi3(struct cdp_pdev *pdev_handle,
8288 					  bool val)
8289 {
8290 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
8291 
8292 	pdev->hmmc_tid_override_en = val;
8293 }
8294 
8295 /**
8296  * dp_set_hmmc_tid_val_wifi3(): Function to set hmmc tid value.
8297  * @pdev_handle: pdev handle
8298  * @tid: tid value
8299  *
8300  * Return: void
8301  */
8302 static void dp_set_hmmc_tid_val_wifi3(struct cdp_pdev *pdev_handle,
8303 				      uint8_t tid)
8304 {
8305 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
8306 
8307 	pdev->hmmc_tid = tid;
8308 }
8309 
8310 /**
8311  * dp_fw_stats_process(): Process TxRX FW stats request
8312  * @vdev_handle: DP VDEV handle
8313  * @req: stats request
8314  *
8315  * return: int
8316  */
8317 static int dp_fw_stats_process(struct cdp_vdev *vdev_handle,
8318 		struct cdp_txrx_stats_req *req)
8319 {
8320 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
8321 	struct dp_pdev *pdev = NULL;
8322 	uint32_t stats = req->stats;
8323 	uint8_t mac_id = req->mac_id;
8324 
8325 	if (!vdev) {
8326 		DP_TRACE(NONE, "VDEV not found");
8327 		return 1;
8328 	}
8329 	pdev = vdev->pdev;
8330 
8331 	/*
8332 	 * For HTT_DBG_EXT_STATS_RESET command, FW need to config
8333 	 * from param0 to param3 according to below rule:
8334 	 *
8335 	 * PARAM:
8336 	 *   - config_param0 : start_offset (stats type)
8337 	 *   - config_param1 : stats bmask from start offset
8338 	 *   - config_param2 : stats bmask from start offset + 32
8339 	 *   - config_param3 : stats bmask from start offset + 64
8340 	 */
8341 	if (req->stats == CDP_TXRX_STATS_0) {
8342 		req->param0 = HTT_DBG_EXT_STATS_PDEV_TX;
8343 		req->param1 = 0xFFFFFFFF;
8344 		req->param2 = 0xFFFFFFFF;
8345 		req->param3 = 0xFFFFFFFF;
8346 	} else if (req->stats == (uint8_t)HTT_DBG_EXT_STATS_PDEV_TX_MU) {
8347 		req->param0 = HTT_DBG_EXT_STATS_SET_VDEV_MASK(vdev->vdev_id);
8348 	}
8349 
8350 	return dp_h2t_ext_stats_msg_send(pdev, stats, req->param0,
8351 				req->param1, req->param2, req->param3,
8352 				0, 0, mac_id);
8353 }
8354 
8355 /**
8356  * dp_txrx_stats_request - function to map to firmware and host stats
8357  * @vdev: virtual handle
8358  * @req: stats request
8359  *
8360  * Return: QDF_STATUS
8361  */
8362 static
8363 QDF_STATUS dp_txrx_stats_request(struct cdp_vdev *vdev,
8364 				 struct cdp_txrx_stats_req *req)
8365 {
8366 	int host_stats;
8367 	int fw_stats;
8368 	enum cdp_stats stats;
8369 	int num_stats;
8370 
8371 	if (!vdev || !req) {
8372 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
8373 				"Invalid vdev/req instance");
8374 		return QDF_STATUS_E_INVAL;
8375 	}
8376 
8377 	stats = req->stats;
8378 	if (stats >= CDP_TXRX_MAX_STATS)
8379 		return QDF_STATUS_E_INVAL;
8380 
8381 	/*
8382 	 * DP_CURR_FW_STATS_AVAIL: no of FW stats currently available
8383 	 *			has to be updated if new FW HTT stats added
8384 	 */
8385 	if (stats > CDP_TXRX_STATS_HTT_MAX)
8386 		stats = stats + DP_CURR_FW_STATS_AVAIL - DP_HTT_DBG_EXT_STATS_MAX;
8387 
8388 	num_stats  = QDF_ARRAY_SIZE(dp_stats_mapping_table);
8389 
8390 	if (stats >= num_stats) {
8391 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
8392 			  "%s: Invalid stats option: %d", __func__, stats);
8393 		return QDF_STATUS_E_INVAL;
8394 	}
8395 
8396 	req->stats = stats;
8397 	fw_stats = dp_stats_mapping_table[stats][STATS_FW];
8398 	host_stats = dp_stats_mapping_table[stats][STATS_HOST];
8399 
8400 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
8401 		 "stats: %u fw_stats_type: %d host_stats: %d",
8402 		  stats, fw_stats, host_stats);
8403 
8404 	if (fw_stats != TXRX_FW_STATS_INVALID) {
8405 		/* update request with FW stats type */
8406 		req->stats = fw_stats;
8407 		return dp_fw_stats_process(vdev, req);
8408 	}
8409 
8410 	if ((host_stats != TXRX_HOST_STATS_INVALID) &&
8411 			(host_stats <= TXRX_HOST_STATS_MAX))
8412 		return dp_print_host_stats(vdev, req);
8413 	else
8414 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
8415 				"Wrong Input for TxRx Stats");
8416 
8417 	return QDF_STATUS_SUCCESS;
8418 }
8419 
8420 /*
8421  * dp_print_napi_stats(): NAPI stats
8422  * @soc - soc handle
8423  */
8424 static void dp_print_napi_stats(struct dp_soc *soc)
8425 {
8426 	hif_print_napi_stats(soc->hif_handle);
8427 }
8428 
8429 /*
8430  * dp_print_per_ring_stats(): Packet count per ring
8431  * @soc - soc handle
8432  */
8433 static void dp_print_per_ring_stats(struct dp_soc *soc)
8434 {
8435 	uint8_t ring;
8436 	uint16_t core;
8437 	uint64_t total_packets;
8438 
8439 	DP_TRACE_STATS(INFO_HIGH, "Reo packets per ring:");
8440 	for (ring = 0; ring < MAX_REO_DEST_RINGS; ring++) {
8441 		total_packets = 0;
8442 		DP_TRACE_STATS(INFO_HIGH,
8443 			       "Packets on ring %u:", ring);
8444 		for (core = 0; core < NR_CPUS; core++) {
8445 			DP_TRACE_STATS(INFO_HIGH,
8446 				       "Packets arriving on core %u: %llu",
8447 				       core,
8448 				       soc->stats.rx.ring_packets[core][ring]);
8449 			total_packets += soc->stats.rx.ring_packets[core][ring];
8450 		}
8451 		DP_TRACE_STATS(INFO_HIGH,
8452 			       "Total packets on ring %u: %llu",
8453 			       ring, total_packets);
8454 	}
8455 }
8456 
8457 /*
8458  * dp_txrx_path_stats() - Function to display dump stats
8459  * @soc - soc handle
8460  *
8461  * return: none
8462  */
8463 static void dp_txrx_path_stats(struct dp_soc *soc)
8464 {
8465 	uint8_t error_code;
8466 	uint8_t loop_pdev;
8467 	struct dp_pdev *pdev;
8468 	uint8_t i;
8469 
8470 	if (!soc) {
8471 		DP_TRACE(ERROR, "%s: Invalid access",
8472 			 __func__);
8473 		return;
8474 	}
8475 
8476 	for (loop_pdev = 0; loop_pdev < soc->pdev_count; loop_pdev++) {
8477 
8478 		pdev = soc->pdev_list[loop_pdev];
8479 		dp_aggregate_pdev_stats(pdev);
8480 		DP_TRACE_STATS(INFO_HIGH, "Tx path Statistics:");
8481 		DP_TRACE_STATS(INFO_HIGH, "from stack: %u msdus (%llu bytes)",
8482 			       pdev->stats.tx_i.rcvd.num,
8483 			       pdev->stats.tx_i.rcvd.bytes);
8484 		DP_TRACE_STATS(INFO_HIGH,
8485 			       "processed from host: %u msdus (%llu bytes)",
8486 			       pdev->stats.tx_i.processed.num,
8487 			       pdev->stats.tx_i.processed.bytes);
8488 		DP_TRACE_STATS(INFO_HIGH,
8489 			       "successfully transmitted: %u msdus (%llu bytes)",
8490 			       pdev->stats.tx.tx_success.num,
8491 			       pdev->stats.tx.tx_success.bytes);
8492 
8493 		DP_TRACE_STATS(INFO_HIGH, "Dropped in host:");
8494 		DP_TRACE_STATS(INFO_HIGH, "Total packets dropped: %u,",
8495 			       pdev->stats.tx_i.dropped.dropped_pkt.num);
8496 		DP_TRACE_STATS(INFO_HIGH, "Descriptor not available: %u",
8497 			       pdev->stats.tx_i.dropped.desc_na.num);
8498 		DP_TRACE_STATS(INFO_HIGH, "Ring full: %u",
8499 			       pdev->stats.tx_i.dropped.ring_full);
8500 		DP_TRACE_STATS(INFO_HIGH, "Enqueue fail: %u",
8501 			       pdev->stats.tx_i.dropped.enqueue_fail);
8502 		DP_TRACE_STATS(INFO_HIGH, "DMA Error: %u",
8503 			       pdev->stats.tx_i.dropped.dma_error);
8504 
8505 		DP_TRACE_STATS(INFO_HIGH, "Dropped in hardware:");
8506 		DP_TRACE_STATS(INFO_HIGH, "total packets dropped: %u",
8507 			       pdev->stats.tx.tx_failed);
8508 		DP_TRACE_STATS(INFO_HIGH, "mpdu age out: %u",
8509 			       pdev->stats.tx.dropped.age_out);
8510 		DP_TRACE_STATS(INFO_HIGH, "firmware removed packets: %u",
8511 			       pdev->stats.tx.dropped.fw_rem.num);
8512 		DP_TRACE_STATS(INFO_HIGH, "firmware removed bytes: %llu",
8513 			       pdev->stats.tx.dropped.fw_rem.bytes);
8514 		DP_TRACE_STATS(INFO_HIGH, "firmware removed tx: %u",
8515 			       pdev->stats.tx.dropped.fw_rem_tx);
8516 		DP_TRACE_STATS(INFO_HIGH, "firmware removed notx %u",
8517 			       pdev->stats.tx.dropped.fw_rem_notx);
8518 		DP_TRACE_STATS(INFO_HIGH, "Invalid peer on tx path: %u",
8519 			       pdev->soc->stats.tx.tx_invalid_peer.num);
8520 
8521 		DP_TRACE_STATS(INFO_HIGH, "Tx packets sent per interrupt:");
8522 		DP_TRACE_STATS(INFO_HIGH, "Single Packet: %u",
8523 			       pdev->stats.tx_comp_histogram.pkts_1);
8524 		DP_TRACE_STATS(INFO_HIGH, "2-20 Packets:  %u",
8525 			       pdev->stats.tx_comp_histogram.pkts_2_20);
8526 		DP_TRACE_STATS(INFO_HIGH, "21-40 Packets: %u",
8527 			       pdev->stats.tx_comp_histogram.pkts_21_40);
8528 		DP_TRACE_STATS(INFO_HIGH, "41-60 Packets: %u",
8529 			       pdev->stats.tx_comp_histogram.pkts_41_60);
8530 		DP_TRACE_STATS(INFO_HIGH, "61-80 Packets: %u",
8531 			       pdev->stats.tx_comp_histogram.pkts_61_80);
8532 		DP_TRACE_STATS(INFO_HIGH, "81-100 Packets: %u",
8533 			       pdev->stats.tx_comp_histogram.pkts_81_100);
8534 		DP_TRACE_STATS(INFO_HIGH, "101-200 Packets: %u",
8535 			       pdev->stats.tx_comp_histogram.pkts_101_200);
8536 		DP_TRACE_STATS(INFO_HIGH, "   201+ Packets: %u",
8537 			       pdev->stats.tx_comp_histogram.pkts_201_plus);
8538 
8539 		DP_TRACE_STATS(INFO_HIGH, "Rx path statistics");
8540 
8541 		DP_TRACE_STATS(INFO_HIGH,
8542 			       "delivered %u msdus ( %llu bytes),",
8543 			       pdev->stats.rx.to_stack.num,
8544 			       pdev->stats.rx.to_stack.bytes);
8545 		for (i = 0; i <  CDP_MAX_RX_RINGS; i++)
8546 			DP_TRACE_STATS(INFO_HIGH,
8547 				       "received on reo[%d] %u msdus( %llu bytes),",
8548 				       i, pdev->stats.rx.rcvd_reo[i].num,
8549 				       pdev->stats.rx.rcvd_reo[i].bytes);
8550 		DP_TRACE_STATS(INFO_HIGH,
8551 			       "intra-bss packets %u msdus ( %llu bytes),",
8552 			       pdev->stats.rx.intra_bss.pkts.num,
8553 			       pdev->stats.rx.intra_bss.pkts.bytes);
8554 		DP_TRACE_STATS(INFO_HIGH,
8555 			       "intra-bss fails %u msdus ( %llu bytes),",
8556 			       pdev->stats.rx.intra_bss.fail.num,
8557 			       pdev->stats.rx.intra_bss.fail.bytes);
8558 		DP_TRACE_STATS(INFO_HIGH,
8559 			       "raw packets %u msdus ( %llu bytes),",
8560 			       pdev->stats.rx.raw.num,
8561 			       pdev->stats.rx.raw.bytes);
8562 		DP_TRACE_STATS(INFO_HIGH, "dropped: error %u msdus",
8563 			       pdev->stats.rx.err.mic_err);
8564 		DP_TRACE_STATS(INFO_HIGH, "Invalid peer on rx path: %u",
8565 			       pdev->soc->stats.rx.err.rx_invalid_peer.num);
8566 		DP_TRACE_STATS(INFO_HIGH, "sw_peer_id invalid %u",
8567 			       pdev->soc->stats.rx.err.rx_invalid_peer_id.num);
8568 
8569 
8570 		DP_TRACE_STATS(INFO_HIGH, "Reo Statistics");
8571 		DP_TRACE_STATS(INFO_HIGH, "rbm error: %u msdus",
8572 			       pdev->soc->stats.rx.err.invalid_rbm);
8573 		DP_TRACE_STATS(INFO_HIGH, "hal ring access fail: %u msdus",
8574 			       pdev->soc->stats.rx.err.hal_ring_access_fail);
8575 
8576 		for (error_code = 0; error_code < HAL_REO_ERR_MAX;
8577 				error_code++) {
8578 			if (!pdev->soc->stats.rx.err.reo_error[error_code])
8579 				continue;
8580 			DP_TRACE_STATS(INFO_HIGH,
8581 				       "Reo error number (%u): %u msdus",
8582 				       error_code,
8583 				       pdev->soc->stats.rx.err
8584 				       .reo_error[error_code]);
8585 		}
8586 
8587 		for (error_code = 0; error_code < HAL_RXDMA_ERR_MAX;
8588 				error_code++) {
8589 			if (!pdev->soc->stats.rx.err.rxdma_error[error_code])
8590 				continue;
8591 			DP_TRACE_STATS(INFO_HIGH,
8592 				       "Rxdma error number (%u): %u msdus",
8593 				       error_code,
8594 				       pdev->soc->stats.rx.err
8595 				       .rxdma_error[error_code]);
8596 		}
8597 
8598 		DP_TRACE_STATS(INFO_HIGH, "Rx packets reaped per interrupt:");
8599 		DP_TRACE_STATS(INFO_HIGH, "Single Packet: %u",
8600 			       pdev->stats.rx_ind_histogram.pkts_1);
8601 		DP_TRACE_STATS(INFO_HIGH, "2-20 Packets:  %u",
8602 			       pdev->stats.rx_ind_histogram.pkts_2_20);
8603 		DP_TRACE_STATS(INFO_HIGH, "21-40 Packets: %u",
8604 			       pdev->stats.rx_ind_histogram.pkts_21_40);
8605 		DP_TRACE_STATS(INFO_HIGH, "41-60 Packets: %u",
8606 			       pdev->stats.rx_ind_histogram.pkts_41_60);
8607 		DP_TRACE_STATS(INFO_HIGH, "61-80 Packets: %u",
8608 			       pdev->stats.rx_ind_histogram.pkts_61_80);
8609 		DP_TRACE_STATS(INFO_HIGH, "81-100 Packets: %u",
8610 			       pdev->stats.rx_ind_histogram.pkts_81_100);
8611 		DP_TRACE_STATS(INFO_HIGH, "101-200 Packets: %u",
8612 			       pdev->stats.rx_ind_histogram.pkts_101_200);
8613 		DP_TRACE_STATS(INFO_HIGH, "   201+ Packets: %u",
8614 			       pdev->stats.rx_ind_histogram.pkts_201_plus);
8615 
8616 		DP_TRACE_STATS(INFO_HIGH, "%s: tso_enable: %u lro_enable: %u rx_hash: %u napi_enable: %u",
8617 			       __func__,
8618 			       pdev->soc->wlan_cfg_ctx
8619 			       ->tso_enabled,
8620 			       pdev->soc->wlan_cfg_ctx
8621 			       ->lro_enabled,
8622 			       pdev->soc->wlan_cfg_ctx
8623 			       ->rx_hash,
8624 			       pdev->soc->wlan_cfg_ctx
8625 			       ->napi_enabled);
8626 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
8627 		DP_TRACE_STATS(INFO_HIGH, "%s: Tx flow stop queue: %u tx flow start queue offset: %u",
8628 			       __func__,
8629 			       pdev->soc->wlan_cfg_ctx
8630 			       ->tx_flow_stop_queue_threshold,
8631 			       pdev->soc->wlan_cfg_ctx
8632 			       ->tx_flow_start_queue_offset);
8633 #endif
8634 	}
8635 }
8636 
8637 /*
8638  * dp_txrx_dump_stats() -  Dump statistics
8639  * @value - Statistics option
8640  */
8641 static QDF_STATUS dp_txrx_dump_stats(void *psoc, uint16_t value,
8642 				     enum qdf_stats_verbosity_level level)
8643 {
8644 	struct dp_soc *soc =
8645 		(struct dp_soc *)psoc;
8646 	QDF_STATUS status = QDF_STATUS_SUCCESS;
8647 
8648 	if (!soc) {
8649 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
8650 			"%s: soc is NULL", __func__);
8651 		return QDF_STATUS_E_INVAL;
8652 	}
8653 
8654 	switch (value) {
8655 	case CDP_TXRX_PATH_STATS:
8656 		dp_txrx_path_stats(soc);
8657 		break;
8658 
8659 	case CDP_RX_RING_STATS:
8660 		dp_print_per_ring_stats(soc);
8661 		break;
8662 
8663 	case CDP_TXRX_TSO_STATS:
8664 		/* TODO: NOT IMPLEMENTED */
8665 		break;
8666 
8667 	case CDP_DUMP_TX_FLOW_POOL_INFO:
8668 		cdp_dump_flow_pool_info((struct cdp_soc_t *)soc);
8669 		break;
8670 
8671 	case CDP_DP_NAPI_STATS:
8672 		dp_print_napi_stats(soc);
8673 		break;
8674 
8675 	case CDP_TXRX_DESC_STATS:
8676 		/* TODO: NOT IMPLEMENTED */
8677 		break;
8678 
8679 	default:
8680 		status = QDF_STATUS_E_INVAL;
8681 		break;
8682 	}
8683 
8684 	return status;
8685 
8686 }
8687 
8688 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
8689 /**
8690  * dp_update_flow_control_parameters() - API to store datapath
8691  *                            config parameters
8692  * @soc: soc handle
8693  * @cfg: ini parameter handle
8694  *
8695  * Return: void
8696  */
8697 static inline
8698 void dp_update_flow_control_parameters(struct dp_soc *soc,
8699 				struct cdp_config_params *params)
8700 {
8701 	soc->wlan_cfg_ctx->tx_flow_stop_queue_threshold =
8702 					params->tx_flow_stop_queue_threshold;
8703 	soc->wlan_cfg_ctx->tx_flow_start_queue_offset =
8704 					params->tx_flow_start_queue_offset;
8705 }
8706 #else
8707 static inline
8708 void dp_update_flow_control_parameters(struct dp_soc *soc,
8709 				struct cdp_config_params *params)
8710 {
8711 }
8712 #endif
8713 
8714 /**
8715  * dp_update_config_parameters() - API to store datapath
8716  *                            config parameters
8717  * @soc: soc handle
8718  * @cfg: ini parameter handle
8719  *
8720  * Return: status
8721  */
8722 static
8723 QDF_STATUS dp_update_config_parameters(struct cdp_soc *psoc,
8724 				struct cdp_config_params *params)
8725 {
8726 	struct dp_soc *soc = (struct dp_soc *)psoc;
8727 
8728 	if (!(soc)) {
8729 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
8730 				"%s: Invalid handle", __func__);
8731 		return QDF_STATUS_E_INVAL;
8732 	}
8733 
8734 	soc->wlan_cfg_ctx->tso_enabled = params->tso_enable;
8735 	soc->wlan_cfg_ctx->lro_enabled = params->lro_enable;
8736 	soc->wlan_cfg_ctx->rx_hash = params->flow_steering_enable;
8737 	soc->wlan_cfg_ctx->tcp_udp_checksumoffload =
8738 				params->tcp_udp_checksumoffload;
8739 	soc->wlan_cfg_ctx->napi_enabled = params->napi_enable;
8740 	soc->wlan_cfg_ctx->ipa_enabled = params->ipa_enable;
8741 	soc->wlan_cfg_ctx->gro_enabled = params->gro_enable;
8742 
8743 	dp_update_flow_control_parameters(soc, params);
8744 
8745 	return QDF_STATUS_SUCCESS;
8746 }
8747 
8748 /**
8749  * dp_txrx_set_wds_rx_policy() - API to store datapath
8750  *                            config parameters
8751  * @vdev_handle - datapath vdev handle
8752  * @cfg: ini parameter handle
8753  *
8754  * Return: status
8755  */
8756 #ifdef WDS_VENDOR_EXTENSION
8757 void
8758 dp_txrx_set_wds_rx_policy(
8759 		struct cdp_vdev *vdev_handle,
8760 		u_int32_t val)
8761 {
8762 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
8763 	struct dp_peer *peer;
8764 	if (vdev->opmode == wlan_op_mode_ap) {
8765 		/* for ap, set it on bss_peer */
8766 		TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
8767 			if (peer->bss_peer) {
8768 				peer->wds_ecm.wds_rx_filter = 1;
8769 				peer->wds_ecm.wds_rx_ucast_4addr = (val & WDS_POLICY_RX_UCAST_4ADDR) ? 1:0;
8770 				peer->wds_ecm.wds_rx_mcast_4addr = (val & WDS_POLICY_RX_MCAST_4ADDR) ? 1:0;
8771 				break;
8772 			}
8773 		}
8774 	} else if (vdev->opmode == wlan_op_mode_sta) {
8775 		peer = TAILQ_FIRST(&vdev->peer_list);
8776 		peer->wds_ecm.wds_rx_filter = 1;
8777 		peer->wds_ecm.wds_rx_ucast_4addr = (val & WDS_POLICY_RX_UCAST_4ADDR) ? 1:0;
8778 		peer->wds_ecm.wds_rx_mcast_4addr = (val & WDS_POLICY_RX_MCAST_4ADDR) ? 1:0;
8779 	}
8780 }
8781 
8782 /**
8783  * dp_txrx_peer_wds_tx_policy_update() - API to set tx wds policy
8784  *
8785  * @peer_handle - datapath peer handle
8786  * @wds_tx_ucast: policy for unicast transmission
8787  * @wds_tx_mcast: policy for multicast transmission
8788  *
8789  * Return: void
8790  */
8791 void
8792 dp_txrx_peer_wds_tx_policy_update(struct cdp_peer *peer_handle,
8793 		int wds_tx_ucast, int wds_tx_mcast)
8794 {
8795 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
8796 	if (wds_tx_ucast || wds_tx_mcast) {
8797 		peer->wds_enabled = 1;
8798 		peer->wds_ecm.wds_tx_ucast_4addr = wds_tx_ucast;
8799 		peer->wds_ecm.wds_tx_mcast_4addr = wds_tx_mcast;
8800 	} else {
8801 		peer->wds_enabled = 0;
8802 		peer->wds_ecm.wds_tx_ucast_4addr = 0;
8803 		peer->wds_ecm.wds_tx_mcast_4addr = 0;
8804 	}
8805 
8806 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
8807 			FL("Policy Update set to :\
8808 				peer->wds_enabled %d\
8809 				peer->wds_ecm.wds_tx_ucast_4addr %d\
8810 				peer->wds_ecm.wds_tx_mcast_4addr %d"),
8811 				peer->wds_enabled, peer->wds_ecm.wds_tx_ucast_4addr,
8812 				peer->wds_ecm.wds_tx_mcast_4addr);
8813 	return;
8814 }
8815 #endif
8816 
8817 static struct cdp_wds_ops dp_ops_wds = {
8818 	.vdev_set_wds = dp_vdev_set_wds,
8819 #ifdef WDS_VENDOR_EXTENSION
8820 	.txrx_set_wds_rx_policy = dp_txrx_set_wds_rx_policy,
8821 	.txrx_wds_peer_tx_policy_update = dp_txrx_peer_wds_tx_policy_update,
8822 #endif
8823 };
8824 
8825 /*
8826  * dp_txrx_data_tx_cb_set(): set the callback for non standard tx
8827  * @vdev_handle - datapath vdev handle
8828  * @callback - callback function
8829  * @ctxt: callback context
8830  *
8831  */
8832 static void
8833 dp_txrx_data_tx_cb_set(struct cdp_vdev *vdev_handle,
8834 		       ol_txrx_data_tx_cb callback, void *ctxt)
8835 {
8836 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
8837 
8838 	vdev->tx_non_std_data_callback.func = callback;
8839 	vdev->tx_non_std_data_callback.ctxt = ctxt;
8840 }
8841 
8842 /**
8843  * dp_pdev_get_dp_txrx_handle() - get dp handle from pdev
8844  * @pdev_hdl: datapath pdev handle
8845  *
8846  * Return: opaque pointer to dp txrx handle
8847  */
8848 static void *dp_pdev_get_dp_txrx_handle(struct cdp_pdev *pdev_hdl)
8849 {
8850 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_hdl;
8851 
8852 	return pdev->dp_txrx_handle;
8853 }
8854 
8855 /**
8856  * dp_pdev_set_dp_txrx_handle() - set dp handle in pdev
8857  * @pdev_hdl: datapath pdev handle
8858  * @dp_txrx_hdl: opaque pointer for dp_txrx_handle
8859  *
8860  * Return: void
8861  */
8862 static void
8863 dp_pdev_set_dp_txrx_handle(struct cdp_pdev *pdev_hdl, void *dp_txrx_hdl)
8864 {
8865 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_hdl;
8866 
8867 	pdev->dp_txrx_handle = dp_txrx_hdl;
8868 }
8869 
8870 /**
8871  * dp_soc_get_dp_txrx_handle() - get context for external-dp from dp soc
8872  * @soc_handle: datapath soc handle
8873  *
8874  * Return: opaque pointer to external dp (non-core DP)
8875  */
8876 static void *dp_soc_get_dp_txrx_handle(struct cdp_soc *soc_handle)
8877 {
8878 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
8879 
8880 	return soc->external_txrx_handle;
8881 }
8882 
8883 /**
8884  * dp_soc_set_dp_txrx_handle() - set external dp handle in soc
8885  * @soc_handle: datapath soc handle
8886  * @txrx_handle: opaque pointer to external dp (non-core DP)
8887  *
8888  * Return: void
8889  */
8890 static void
8891 dp_soc_set_dp_txrx_handle(struct cdp_soc *soc_handle, void *txrx_handle)
8892 {
8893 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
8894 
8895 	soc->external_txrx_handle = txrx_handle;
8896 }
8897 
8898 /**
8899  * dp_get_cfg_capabilities() - get dp capabilities
8900  * @soc_handle: datapath soc handle
8901  * @dp_caps: enum for dp capabilities
8902  *
8903  * Return: bool to determine if dp caps is enabled
8904  */
8905 static bool
8906 dp_get_cfg_capabilities(struct cdp_soc_t *soc_handle,
8907 			enum cdp_capabilities dp_caps)
8908 {
8909 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
8910 
8911 	return wlan_cfg_get_dp_caps(soc->wlan_cfg_ctx, dp_caps);
8912 }
8913 
8914 #ifdef FEATURE_AST
8915 static void dp_peer_teardown_wifi3(struct cdp_vdev *vdev_hdl, void *peer_hdl)
8916 {
8917 	struct dp_vdev *vdev = (struct dp_vdev *) vdev_hdl;
8918 	struct dp_peer *peer = (struct dp_peer *) peer_hdl;
8919 	struct dp_soc *soc = (struct dp_soc *) vdev->pdev->soc;
8920 
8921 	/*
8922 	 * For BSS peer, new peer is not created on alloc_node if the
8923 	 * peer with same address already exists , instead refcnt is
8924 	 * increased for existing peer. Correspondingly in delete path,
8925 	 * only refcnt is decreased; and peer is only deleted , when all
8926 	 * references are deleted. So delete_in_progress should not be set
8927 	 * for bss_peer, unless only 2 reference remains (peer map reference
8928 	 * and peer hash table reference).
8929 	 */
8930 	if (peer->bss_peer && (qdf_atomic_read(&peer->ref_cnt) > 2)) {
8931 		return;
8932 	}
8933 
8934 	qdf_spin_lock_bh(&soc->ast_lock);
8935 	peer->delete_in_progress = true;
8936 	dp_peer_delete_ast_entries(soc, peer);
8937 	qdf_spin_unlock_bh(&soc->ast_lock);
8938 }
8939 #endif
8940 
8941 #ifdef ATH_SUPPORT_NAC_RSSI
8942 /**
8943  * dp_vdev_get_neighbour_rssi(): Store RSSI for configured NAC
8944  * @vdev_hdl: DP vdev handle
8945  * @rssi: rssi value
8946  *
8947  * Return: 0 for success. nonzero for failure.
8948  */
8949 static QDF_STATUS  dp_vdev_get_neighbour_rssi(struct cdp_vdev *vdev_hdl,
8950 					      char *mac_addr,
8951 					      uint8_t *rssi)
8952 {
8953 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_hdl;
8954 	struct dp_pdev *pdev = vdev->pdev;
8955 	struct dp_neighbour_peer *peer = NULL;
8956 	QDF_STATUS status = QDF_STATUS_E_FAILURE;
8957 
8958 	*rssi = 0;
8959 	qdf_spin_lock_bh(&pdev->neighbour_peer_mutex);
8960 	TAILQ_FOREACH(peer, &pdev->neighbour_peers_list,
8961 		      neighbour_peer_list_elem) {
8962 		if (qdf_mem_cmp(&peer->neighbour_peers_macaddr.raw[0],
8963 				mac_addr, DP_MAC_ADDR_LEN) == 0) {
8964 			*rssi = peer->rssi;
8965 			status = QDF_STATUS_SUCCESS;
8966 			break;
8967 		}
8968 	}
8969 	qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex);
8970 	return status;
8971 }
8972 
8973 static QDF_STATUS dp_config_for_nac_rssi(struct cdp_vdev *vdev_handle,
8974 		enum cdp_nac_param_cmd cmd, char *bssid, char *client_macaddr,
8975 		uint8_t chan_num)
8976 {
8977 
8978 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
8979 	struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;
8980 	struct dp_soc *soc = (struct dp_soc *) vdev->pdev->soc;
8981 
8982 	pdev->nac_rssi_filtering = 1;
8983 	/* Store address of NAC (neighbour peer) which will be checked
8984 	 * against TA of received packets.
8985 	 */
8986 
8987 	if (cmd == CDP_NAC_PARAM_ADD) {
8988 		dp_update_filter_neighbour_peers(vdev_handle, DP_NAC_PARAM_ADD,
8989 						 client_macaddr);
8990 	} else if (cmd == CDP_NAC_PARAM_DEL) {
8991 		dp_update_filter_neighbour_peers(vdev_handle,
8992 						 DP_NAC_PARAM_DEL,
8993 						 client_macaddr);
8994 	}
8995 
8996 	if (soc->cdp_soc.ol_ops->config_bssid_in_fw_for_nac_rssi)
8997 		soc->cdp_soc.ol_ops->config_bssid_in_fw_for_nac_rssi
8998 			((void *)vdev->pdev->ctrl_pdev,
8999 			 vdev->vdev_id, cmd, bssid);
9000 
9001 	return QDF_STATUS_SUCCESS;
9002 }
9003 #endif
9004 
9005 /**
9006  * dp_enable_peer_based_pktlog() - Set Flag for peer based filtering
9007  * for pktlog
9008  * @txrx_pdev_handle: cdp_pdev handle
9009  * @enb_dsb: Enable or disable peer based filtering
9010  *
9011  * Return: QDF_STATUS
9012  */
9013 static int
9014 dp_enable_peer_based_pktlog(
9015 	struct cdp_pdev *txrx_pdev_handle,
9016 	char *mac_addr, uint8_t enb_dsb)
9017 {
9018 	struct dp_peer *peer;
9019 	uint8_t local_id;
9020 	struct dp_pdev *pdev = (struct dp_pdev *)txrx_pdev_handle;
9021 
9022 	peer = (struct dp_peer *)dp_find_peer_by_addr(txrx_pdev_handle,
9023 			mac_addr, &local_id);
9024 
9025 	if (!peer) {
9026 		dp_err("Invalid Peer");
9027 		return QDF_STATUS_E_FAILURE;
9028 	}
9029 
9030 	peer->peer_based_pktlog_filter = enb_dsb;
9031 	pdev->dp_peer_based_pktlog = enb_dsb;
9032 
9033 	return QDF_STATUS_SUCCESS;
9034 }
9035 
9036 static QDF_STATUS dp_peer_map_attach_wifi3(struct cdp_soc_t  *soc_hdl,
9037 					   uint32_t max_peers,
9038 					   uint32_t max_ast_index,
9039 					   bool peer_map_unmap_v2)
9040 {
9041 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
9042 
9043 	soc->max_peers = max_peers;
9044 
9045 	qdf_print ("%s max_peers %u, max_ast_index: %u\n",
9046 		   __func__, max_peers, max_ast_index);
9047 	wlan_cfg_set_max_ast_idx(soc->wlan_cfg_ctx, max_ast_index);
9048 
9049 	if (dp_peer_find_attach(soc))
9050 		return QDF_STATUS_E_FAILURE;
9051 
9052 	soc->is_peer_map_unmap_v2 = peer_map_unmap_v2;
9053 
9054 	return QDF_STATUS_SUCCESS;
9055 }
9056 
9057 /**
9058  * dp_pdev_set_ctrl_pdev() - set ctrl pdev handle in dp pdev
9059  * @dp_pdev: dp pdev handle
9060  * @ctrl_pdev: UMAC ctrl pdev handle
9061  *
9062  * Return: void
9063  */
9064 static void dp_pdev_set_ctrl_pdev(struct cdp_pdev *dp_pdev,
9065 				  struct cdp_ctrl_objmgr_pdev *ctrl_pdev)
9066 {
9067 	struct dp_pdev *pdev = (struct dp_pdev *)dp_pdev;
9068 
9069 	pdev->ctrl_pdev = ctrl_pdev;
9070 }
9071 
9072 /*
9073  * dp_get_cfg() - get dp cfg
9074  * @soc: cdp soc handle
9075  * @cfg: cfg enum
9076  *
9077  * Return: cfg value
9078  */
9079 static uint32_t dp_get_cfg(void *soc, enum cdp_dp_cfg cfg)
9080 {
9081 	struct dp_soc *dpsoc = (struct dp_soc *)soc;
9082 	uint32_t value = 0;
9083 
9084 	switch (cfg) {
9085 	case cfg_dp_enable_data_stall:
9086 		value = dpsoc->wlan_cfg_ctx->enable_data_stall_detection;
9087 		break;
9088 	case cfg_dp_enable_ip_tcp_udp_checksum_offload:
9089 		value = dpsoc->wlan_cfg_ctx->tcp_udp_checksumoffload;
9090 		break;
9091 	case cfg_dp_tso_enable:
9092 		value = dpsoc->wlan_cfg_ctx->tso_enabled;
9093 		break;
9094 	case cfg_dp_lro_enable:
9095 		value = dpsoc->wlan_cfg_ctx->lro_enabled;
9096 		break;
9097 	case cfg_dp_gro_enable:
9098 		value = dpsoc->wlan_cfg_ctx->gro_enabled;
9099 		break;
9100 	case cfg_dp_tx_flow_start_queue_offset:
9101 		value = dpsoc->wlan_cfg_ctx->tx_flow_start_queue_offset;
9102 		break;
9103 	case cfg_dp_tx_flow_stop_queue_threshold:
9104 		value = dpsoc->wlan_cfg_ctx->tx_flow_stop_queue_threshold;
9105 		break;
9106 	case cfg_dp_disable_intra_bss_fwd:
9107 		value = dpsoc->wlan_cfg_ctx->disable_intra_bss_fwd;
9108 		break;
9109 	default:
9110 		value =  0;
9111 	}
9112 
9113 	return value;
9114 }
9115 
9116 static struct cdp_cmn_ops dp_ops_cmn = {
9117 	.txrx_soc_attach_target = dp_soc_attach_target_wifi3,
9118 	.txrx_vdev_attach = dp_vdev_attach_wifi3,
9119 	.txrx_vdev_detach = dp_vdev_detach_wifi3,
9120 	.txrx_pdev_attach = dp_pdev_attach_wifi3,
9121 	.txrx_pdev_detach = dp_pdev_detach_wifi3,
9122 	.txrx_pdev_deinit = dp_pdev_deinit_wifi3,
9123 	.txrx_peer_create = dp_peer_create_wifi3,
9124 	.txrx_peer_setup = dp_peer_setup_wifi3,
9125 #ifdef FEATURE_AST
9126 	.txrx_peer_teardown = dp_peer_teardown_wifi3,
9127 #else
9128 	.txrx_peer_teardown = NULL,
9129 #endif
9130 	.txrx_peer_add_ast = dp_peer_add_ast_wifi3,
9131 	.txrx_peer_update_ast = dp_peer_update_ast_wifi3,
9132 	.txrx_peer_get_ast_info_by_soc = dp_peer_get_ast_info_by_soc_wifi3,
9133 	.txrx_peer_get_ast_info_by_pdev =
9134 		dp_peer_get_ast_info_by_pdevid_wifi3,
9135 	.txrx_peer_ast_delete_by_soc =
9136 		dp_peer_ast_entry_del_by_soc,
9137 	.txrx_peer_ast_delete_by_pdev =
9138 		dp_peer_ast_entry_del_by_pdev,
9139 	.txrx_peer_delete = dp_peer_delete_wifi3,
9140 	.txrx_vdev_register = dp_vdev_register_wifi3,
9141 	.txrx_vdev_flush_peers = dp_vdev_flush_peers,
9142 	.txrx_soc_detach = dp_soc_detach_wifi3,
9143 	.txrx_soc_deinit = dp_soc_deinit_wifi3,
9144 	.txrx_soc_init = dp_soc_init_wifi3,
9145 	.txrx_tso_soc_attach = dp_tso_soc_attach,
9146 	.txrx_tso_soc_detach = dp_tso_soc_detach,
9147 	.txrx_get_vdev_mac_addr = dp_get_vdev_mac_addr_wifi3,
9148 	.txrx_get_vdev_from_vdev_id = dp_get_vdev_from_vdev_id_wifi3,
9149 	.txrx_get_mon_vdev_from_pdev = dp_get_mon_vdev_from_pdev_wifi3,
9150 	.txrx_get_ctrl_pdev_from_vdev = dp_get_ctrl_pdev_from_vdev_wifi3,
9151 	.txrx_ath_getstats = dp_get_device_stats,
9152 	.addba_requestprocess = dp_addba_requestprocess_wifi3,
9153 	.addba_responsesetup = dp_addba_responsesetup_wifi3,
9154 	.addba_resp_tx_completion = dp_addba_resp_tx_completion_wifi3,
9155 	.delba_process = dp_delba_process_wifi3,
9156 	.set_addba_response = dp_set_addba_response,
9157 	.get_peer_mac_addr_frm_id = dp_get_peer_mac_addr_frm_id,
9158 	.flush_cache_rx_queue = NULL,
9159 	/* TODO: get API's for dscp-tid need to be added*/
9160 	.set_vdev_dscp_tid_map = dp_set_vdev_dscp_tid_map_wifi3,
9161 	.set_pdev_dscp_tid_map = dp_set_pdev_dscp_tid_map_wifi3,
9162 	.hmmc_tid_override_en = dp_hmmc_tid_override_en_wifi3,
9163 	.set_hmmc_tid_val = dp_set_hmmc_tid_val_wifi3,
9164 	.txrx_get_total_per = dp_get_total_per,
9165 	.txrx_stats_request = dp_txrx_stats_request,
9166 	.txrx_set_monitor_mode = dp_vdev_set_monitor_mode,
9167 	.txrx_get_pdev_id_frm_pdev = dp_get_pdev_id_frm_pdev,
9168 	.txrx_get_vow_config_frm_pdev = NULL,
9169 	.txrx_pdev_set_chan_noise_floor = dp_pdev_set_chan_noise_floor,
9170 	.txrx_set_nac = dp_set_nac,
9171 	.txrx_get_tx_pending = dp_get_tx_pending,
9172 	.txrx_set_pdev_tx_capture = dp_config_debug_sniffer,
9173 	.txrx_get_peer_mac_from_peer_id = dp_get_peer_mac_from_peer_id,
9174 	.display_stats = dp_txrx_dump_stats,
9175 	.txrx_soc_set_nss_cfg = dp_soc_set_nss_cfg_wifi3,
9176 	.txrx_soc_get_nss_cfg = dp_soc_get_nss_cfg_wifi3,
9177 	.txrx_intr_attach = dp_soc_interrupt_attach_wrapper,
9178 	.txrx_intr_detach = dp_soc_interrupt_detach,
9179 	.set_pn_check = dp_set_pn_check_wifi3,
9180 	.update_config_parameters = dp_update_config_parameters,
9181 	/* TODO: Add other functions */
9182 	.txrx_data_tx_cb_set = dp_txrx_data_tx_cb_set,
9183 	.get_dp_txrx_handle = dp_pdev_get_dp_txrx_handle,
9184 	.set_dp_txrx_handle = dp_pdev_set_dp_txrx_handle,
9185 	.get_soc_dp_txrx_handle = dp_soc_get_dp_txrx_handle,
9186 	.set_soc_dp_txrx_handle = dp_soc_set_dp_txrx_handle,
9187 	.txrx_set_ba_aging_timeout = dp_set_ba_aging_timeout,
9188 	.txrx_get_ba_aging_timeout = dp_get_ba_aging_timeout,
9189 	.tx_send = dp_tx_send,
9190 	.txrx_peer_reset_ast = dp_wds_reset_ast_wifi3,
9191 	.txrx_peer_reset_ast_table = dp_wds_reset_ast_table_wifi3,
9192 	.txrx_peer_flush_ast_table = dp_wds_flush_ast_table_wifi3,
9193 	.txrx_peer_map_attach = dp_peer_map_attach_wifi3,
9194 	.txrx_pdev_set_ctrl_pdev = dp_pdev_set_ctrl_pdev,
9195 	.txrx_get_os_rx_handles_from_vdev =
9196 					dp_get_os_rx_handles_from_vdev_wifi3,
9197 	.delba_tx_completion = dp_delba_tx_completion_wifi3,
9198 	.get_dp_capabilities = dp_get_cfg_capabilities,
9199 	.txrx_get_cfg = dp_get_cfg,
9200 };
9201 
9202 static struct cdp_ctrl_ops dp_ops_ctrl = {
9203 	.txrx_peer_authorize = dp_peer_authorize,
9204 	.txrx_set_vdev_rx_decap_type = dp_set_vdev_rx_decap_type,
9205 	.txrx_set_tx_encap_type = dp_set_vdev_tx_encap_type,
9206 #ifdef MESH_MODE_SUPPORT
9207 	.txrx_set_mesh_mode  = dp_peer_set_mesh_mode,
9208 	.txrx_set_mesh_rx_filter = dp_peer_set_mesh_rx_filter,
9209 #endif
9210 	.txrx_set_vdev_param = dp_set_vdev_param,
9211 	.txrx_peer_set_nawds = dp_peer_set_nawds,
9212 	.txrx_set_pdev_reo_dest = dp_set_pdev_reo_dest,
9213 	.txrx_get_pdev_reo_dest = dp_get_pdev_reo_dest,
9214 	.txrx_set_filter_neighbour_peers = dp_set_filter_neighbour_peers,
9215 	.txrx_update_filter_neighbour_peers =
9216 		dp_update_filter_neighbour_peers,
9217 	.txrx_get_sec_type = dp_get_sec_type,
9218 	/* TODO: Add other functions */
9219 	.txrx_wdi_event_sub = dp_wdi_event_sub,
9220 	.txrx_wdi_event_unsub = dp_wdi_event_unsub,
9221 #ifdef WDI_EVENT_ENABLE
9222 	.txrx_get_pldev = dp_get_pldev,
9223 #endif
9224 	.txrx_set_pdev_param = dp_set_pdev_param,
9225 #ifdef ATH_SUPPORT_NAC_RSSI
9226 	.txrx_vdev_config_for_nac_rssi = dp_config_for_nac_rssi,
9227 	.txrx_vdev_get_neighbour_rssi = dp_vdev_get_neighbour_rssi,
9228 #endif
9229 	.set_key = dp_set_michael_key,
9230 	.txrx_get_vdev_param = dp_get_vdev_param,
9231 	.enable_peer_based_pktlog = dp_enable_peer_based_pktlog,
9232 };
9233 
9234 static struct cdp_me_ops dp_ops_me = {
9235 #ifdef ATH_SUPPORT_IQUE
9236 	.tx_me_alloc_descriptor = dp_tx_me_alloc_descriptor,
9237 	.tx_me_free_descriptor = dp_tx_me_free_descriptor,
9238 	.tx_me_convert_ucast = dp_tx_me_send_convert_ucast,
9239 #endif
9240 };
9241 
9242 static struct cdp_mon_ops dp_ops_mon = {
9243 	.txrx_monitor_set_filter_ucast_data = NULL,
9244 	.txrx_monitor_set_filter_mcast_data = NULL,
9245 	.txrx_monitor_set_filter_non_data = NULL,
9246 	.txrx_monitor_get_filter_ucast_data = dp_vdev_get_filter_ucast_data,
9247 	.txrx_monitor_get_filter_mcast_data = dp_vdev_get_filter_mcast_data,
9248 	.txrx_monitor_get_filter_non_data = dp_vdev_get_filter_non_data,
9249 	.txrx_reset_monitor_mode = dp_reset_monitor_mode,
9250 	/* Added support for HK advance filter */
9251 	.txrx_set_advance_monitor_filter = dp_pdev_set_advance_monitor_filter,
9252 };
9253 
9254 static struct cdp_host_stats_ops dp_ops_host_stats = {
9255 	.txrx_per_peer_stats = dp_get_host_peer_stats,
9256 	.get_fw_peer_stats = dp_get_fw_peer_stats,
9257 	.get_htt_stats = dp_get_htt_stats,
9258 	.txrx_enable_enhanced_stats = dp_enable_enhanced_stats,
9259 	.txrx_disable_enhanced_stats = dp_disable_enhanced_stats,
9260 	.txrx_stats_publish = dp_txrx_stats_publish,
9261 	.txrx_get_vdev_stats  = dp_txrx_get_vdev_stats,
9262 	.txrx_get_peer_stats = dp_txrx_get_peer_stats,
9263 	.txrx_reset_peer_stats = dp_txrx_reset_peer_stats,
9264 	.txrx_get_pdev_stats = dp_txrx_get_pdev_stats,
9265 	.txrx_get_ratekbps = dp_txrx_get_ratekbps,
9266 	/* TODO */
9267 };
9268 
9269 static struct cdp_raw_ops dp_ops_raw = {
9270 	/* TODO */
9271 };
9272 
9273 #ifdef CONFIG_WIN
9274 static struct cdp_pflow_ops dp_ops_pflow = {
9275 	/* TODO */
9276 };
9277 #endif /* CONFIG_WIN */
9278 
9279 #ifdef FEATURE_RUNTIME_PM
9280 /**
9281  * dp_runtime_suspend() - ensure DP is ready to runtime suspend
9282  * @opaque_pdev: DP pdev context
9283  *
9284  * DP is ready to runtime suspend if there are no pending TX packets.
9285  *
9286  * Return: QDF_STATUS
9287  */
9288 static QDF_STATUS dp_runtime_suspend(struct cdp_pdev *opaque_pdev)
9289 {
9290 	struct dp_pdev *pdev = (struct dp_pdev *)opaque_pdev;
9291 	struct dp_soc *soc = pdev->soc;
9292 
9293 	/* Abort if there are any pending TX packets */
9294 	if (dp_get_tx_pending(opaque_pdev) > 0) {
9295 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
9296 			  FL("Abort suspend due to pending TX packets"));
9297 		return QDF_STATUS_E_AGAIN;
9298 	}
9299 
9300 	if (soc->intr_mode == DP_INTR_POLL)
9301 		qdf_timer_stop(&soc->int_timer);
9302 
9303 	return QDF_STATUS_SUCCESS;
9304 }
9305 
9306 /**
9307  * dp_runtime_resume() - ensure DP is ready to runtime resume
9308  * @opaque_pdev: DP pdev context
9309  *
9310  * Resume DP for runtime PM.
9311  *
9312  * Return: QDF_STATUS
9313  */
9314 static QDF_STATUS dp_runtime_resume(struct cdp_pdev *opaque_pdev)
9315 {
9316 	struct dp_pdev *pdev = (struct dp_pdev *)opaque_pdev;
9317 	struct dp_soc *soc = pdev->soc;
9318 	void *hal_srng;
9319 	int i;
9320 
9321 	if (soc->intr_mode == DP_INTR_POLL)
9322 		qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
9323 
9324 	for (i = 0; i < MAX_TCL_DATA_RINGS; i++) {
9325 		hal_srng = soc->tcl_data_ring[i].hal_srng;
9326 		if (hal_srng) {
9327 			/* We actually only need to acquire the lock */
9328 			hal_srng_access_start(soc->hal_soc, hal_srng);
9329 			/* Update SRC ring head pointer for HW to send
9330 			   all pending packets */
9331 			hal_srng_access_end(soc->hal_soc, hal_srng);
9332 		}
9333 	}
9334 
9335 	return QDF_STATUS_SUCCESS;
9336 }
9337 #endif /* FEATURE_RUNTIME_PM */
9338 
9339 #ifndef CONFIG_WIN
9340 static struct cdp_misc_ops dp_ops_misc = {
9341 #ifdef FEATURE_WLAN_TDLS
9342 	.tx_non_std = dp_tx_non_std,
9343 #endif /* FEATURE_WLAN_TDLS */
9344 	.get_opmode = dp_get_opmode,
9345 #ifdef FEATURE_RUNTIME_PM
9346 	.runtime_suspend = dp_runtime_suspend,
9347 	.runtime_resume = dp_runtime_resume,
9348 #endif /* FEATURE_RUNTIME_PM */
9349 	.pkt_log_init = dp_pkt_log_init,
9350 	.pkt_log_con_service = dp_pkt_log_con_service,
9351 	.get_num_rx_contexts = dp_get_num_rx_contexts,
9352 };
9353 
9354 static struct cdp_flowctl_ops dp_ops_flowctl = {
9355 	/* WIFI 3.0 DP implement as required. */
9356 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
9357 	.flow_pool_map_handler = dp_tx_flow_pool_map,
9358 	.flow_pool_unmap_handler = dp_tx_flow_pool_unmap,
9359 	.register_pause_cb = dp_txrx_register_pause_cb,
9360 	.dump_flow_pool_info = dp_tx_dump_flow_pool_info,
9361 	.tx_desc_thresh_reached = dp_tx_desc_thresh_reached,
9362 #endif /* QCA_LL_TX_FLOW_CONTROL_V2 */
9363 };
9364 
9365 static struct cdp_lflowctl_ops dp_ops_l_flowctl = {
9366 	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
9367 };
9368 
9369 #ifdef IPA_OFFLOAD
9370 static struct cdp_ipa_ops dp_ops_ipa = {
9371 	.ipa_get_resource = dp_ipa_get_resource,
9372 	.ipa_set_doorbell_paddr = dp_ipa_set_doorbell_paddr,
9373 	.ipa_op_response = dp_ipa_op_response,
9374 	.ipa_register_op_cb = dp_ipa_register_op_cb,
9375 	.ipa_get_stat = dp_ipa_get_stat,
9376 	.ipa_tx_data_frame = dp_tx_send_ipa_data_frame,
9377 	.ipa_enable_autonomy = dp_ipa_enable_autonomy,
9378 	.ipa_disable_autonomy = dp_ipa_disable_autonomy,
9379 	.ipa_setup = dp_ipa_setup,
9380 	.ipa_cleanup = dp_ipa_cleanup,
9381 	.ipa_setup_iface = dp_ipa_setup_iface,
9382 	.ipa_cleanup_iface = dp_ipa_cleanup_iface,
9383 	.ipa_enable_pipes = dp_ipa_enable_pipes,
9384 	.ipa_disable_pipes = dp_ipa_disable_pipes,
9385 	.ipa_set_perf_level = dp_ipa_set_perf_level
9386 };
9387 #endif
9388 
9389 static QDF_STATUS dp_bus_suspend(struct cdp_pdev *opaque_pdev)
9390 {
9391 	struct dp_pdev *pdev = (struct dp_pdev *)opaque_pdev;
9392 	struct dp_soc *soc = pdev->soc;
9393 	int timeout = SUSPEND_DRAIN_WAIT;
9394 	int drain_wait_delay = 50; /* 50 ms */
9395 
9396 	/* Abort if there are any pending TX packets */
9397 	while (dp_get_tx_pending(opaque_pdev) > 0) {
9398 		qdf_sleep(drain_wait_delay);
9399 		if (timeout <= 0) {
9400 			dp_err("TX frames are pending, abort suspend");
9401 			return QDF_STATUS_E_TIMEOUT;
9402 		}
9403 		timeout = timeout - drain_wait_delay;
9404 	}
9405 
9406 	if (soc->intr_mode == DP_INTR_POLL)
9407 		qdf_timer_stop(&soc->int_timer);
9408 
9409 	return QDF_STATUS_SUCCESS;
9410 }
9411 
9412 static QDF_STATUS dp_bus_resume(struct cdp_pdev *opaque_pdev)
9413 {
9414 	struct dp_pdev *pdev = (struct dp_pdev *)opaque_pdev;
9415 	struct dp_soc *soc = pdev->soc;
9416 
9417 	if (soc->intr_mode == DP_INTR_POLL)
9418 		qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
9419 
9420 	return QDF_STATUS_SUCCESS;
9421 }
9422 
9423 static struct cdp_bus_ops dp_ops_bus = {
9424 	.bus_suspend = dp_bus_suspend,
9425 	.bus_resume = dp_bus_resume
9426 };
9427 
9428 static struct cdp_ocb_ops dp_ops_ocb = {
9429 	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
9430 };
9431 
9432 
9433 static struct cdp_throttle_ops dp_ops_throttle = {
9434 	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
9435 };
9436 
9437 static struct cdp_mob_stats_ops dp_ops_mob_stats = {
9438 	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
9439 };
9440 
9441 static struct cdp_cfg_ops dp_ops_cfg = {
9442 	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
9443 };
9444 
9445 /*
9446  * dp_peer_get_ref_find_by_addr - get peer with addr by ref count inc
9447  * @dev: physical device instance
9448  * @peer_mac_addr: peer mac address
9449  * @local_id: local id for the peer
9450  * @debug_id: to track enum peer access
9451  *
9452  * Return: peer instance pointer
9453  */
9454 static inline void *
9455 dp_peer_get_ref_find_by_addr(struct cdp_pdev *dev, uint8_t *peer_mac_addr,
9456 			     uint8_t *local_id,
9457 			     enum peer_debug_id_type debug_id)
9458 {
9459 	struct dp_pdev *pdev = (struct dp_pdev *)dev;
9460 	struct dp_peer *peer;
9461 
9462 	peer = dp_peer_find_hash_find(pdev->soc, peer_mac_addr, 0, DP_VDEV_ALL);
9463 
9464 	if (!peer)
9465 		return NULL;
9466 
9467 	*local_id = peer->local_id;
9468 	DP_TRACE(INFO, "%s: peer %pK id %d", __func__, peer, *local_id);
9469 
9470 	return peer;
9471 }
9472 
9473 /*
9474  * dp_peer_release_ref - release peer ref count
9475  * @peer: peer handle
9476  * @debug_id: to track enum peer access
9477  *
9478  * Return: None
9479  */
9480 static inline
9481 void dp_peer_release_ref(void *peer, enum peer_debug_id_type debug_id)
9482 {
9483 	dp_peer_unref_delete(peer);
9484 }
9485 
9486 static struct cdp_peer_ops dp_ops_peer = {
9487 	.register_peer = dp_register_peer,
9488 	.clear_peer = dp_clear_peer,
9489 	.find_peer_by_addr = dp_find_peer_by_addr,
9490 	.find_peer_by_addr_and_vdev = dp_find_peer_by_addr_and_vdev,
9491 	.peer_get_ref_by_addr = dp_peer_get_ref_find_by_addr,
9492 	.peer_release_ref = dp_peer_release_ref,
9493 	.local_peer_id = dp_local_peer_id,
9494 	.peer_find_by_local_id = dp_peer_find_by_local_id,
9495 	.peer_state_update = dp_peer_state_update,
9496 	.get_vdevid = dp_get_vdevid,
9497 	.get_vdev_by_sta_id = dp_get_vdev_by_sta_id,
9498 	.peer_get_peer_mac_addr = dp_peer_get_peer_mac_addr,
9499 	.get_vdev_for_peer = dp_get_vdev_for_peer,
9500 	.get_peer_state = dp_get_peer_state,
9501 };
9502 #endif
9503 
9504 static struct cdp_ops dp_txrx_ops = {
9505 	.cmn_drv_ops = &dp_ops_cmn,
9506 	.ctrl_ops = &dp_ops_ctrl,
9507 	.me_ops = &dp_ops_me,
9508 	.mon_ops = &dp_ops_mon,
9509 	.host_stats_ops = &dp_ops_host_stats,
9510 	.wds_ops = &dp_ops_wds,
9511 	.raw_ops = &dp_ops_raw,
9512 #ifdef CONFIG_WIN
9513 	.pflow_ops = &dp_ops_pflow,
9514 #endif /* CONFIG_WIN */
9515 #ifndef CONFIG_WIN
9516 	.misc_ops = &dp_ops_misc,
9517 	.cfg_ops = &dp_ops_cfg,
9518 	.flowctl_ops = &dp_ops_flowctl,
9519 	.l_flowctl_ops = &dp_ops_l_flowctl,
9520 #ifdef IPA_OFFLOAD
9521 	.ipa_ops = &dp_ops_ipa,
9522 #endif
9523 	.bus_ops = &dp_ops_bus,
9524 	.ocb_ops = &dp_ops_ocb,
9525 	.peer_ops = &dp_ops_peer,
9526 	.throttle_ops = &dp_ops_throttle,
9527 	.mob_stats_ops = &dp_ops_mob_stats,
9528 #endif
9529 };
9530 
9531 /*
9532  * dp_soc_set_txrx_ring_map()
9533  * @dp_soc: DP handler for soc
9534  *
9535  * Return: Void
9536  */
9537 static void dp_soc_set_txrx_ring_map(struct dp_soc *soc)
9538 {
9539 	uint32_t i;
9540 	for (i = 0; i < WLAN_CFG_INT_NUM_CONTEXTS; i++) {
9541 		soc->tx_ring_map[i] = dp_cpu_ring_map[DP_NSS_DEFAULT_MAP][i];
9542 	}
9543 }
9544 
9545 #ifdef QCA_WIFI_QCA8074
9546 
9547 #ifndef QCA_MEM_ATTACH_ON_WIFI3
9548 
9549 /**
9550  * dp_soc_attach_wifi3() - Attach txrx SOC
9551  * @ctrl_psoc: Opaque SOC handle from control plane
9552  * @htc_handle: Opaque HTC handle
9553  * @hif_handle: Opaque HIF handle
9554  * @qdf_osdev: QDF device
9555  * @ol_ops: Offload Operations
9556  * @device_id: Device ID
9557  *
9558  * Return: DP SOC handle on success, NULL on failure
9559  */
9560 void *dp_soc_attach_wifi3(void *ctrl_psoc, void *hif_handle,
9561 			  HTC_HANDLE htc_handle, qdf_device_t qdf_osdev,
9562 			  struct ol_if_ops *ol_ops, uint16_t device_id)
9563 {
9564 	struct dp_soc *dp_soc =  NULL;
9565 
9566 	dp_soc = dp_soc_attach(ctrl_psoc, htc_handle, qdf_osdev,
9567 			       ol_ops, device_id);
9568 	if (!dp_soc)
9569 		return NULL;
9570 
9571 	if (!dp_soc_init(dp_soc, htc_handle, hif_handle))
9572 		return NULL;
9573 
9574 	return (void *)dp_soc;
9575 }
9576 #else
9577 
9578 /**
9579  * dp_soc_attach_wifi3() - Attach txrx SOC
9580  * @ctrl_psoc: Opaque SOC handle from control plane
9581  * @htc_handle: Opaque HTC handle
9582  * @hif_handle: Opaque HIF handle
9583  * @qdf_osdev: QDF device
9584  * @ol_ops: Offload Operations
9585  * @device_id: Device ID
9586  *
9587  * Return: DP SOC handle on success, NULL on failure
9588  */
9589 void *dp_soc_attach_wifi3(void *ctrl_psoc, void *hif_handle,
9590 			  HTC_HANDLE htc_handle, qdf_device_t qdf_osdev,
9591 			  struct ol_if_ops *ol_ops, uint16_t device_id)
9592 {
9593 	struct dp_soc *dp_soc = NULL;
9594 
9595 	dp_soc = dp_soc_attach(ctrl_psoc, htc_handle, qdf_osdev,
9596 			       ol_ops, device_id);
9597 	return (void *)dp_soc;
9598 }
9599 
9600 #endif
9601 
9602 /**
9603  * dp_soc_attach() - Attach txrx SOC
9604  * @ctrl_psoc: Opaque SOC handle from control plane
9605  * @htc_handle: Opaque HTC handle
9606  * @qdf_osdev: QDF device
9607  * @ol_ops: Offload Operations
9608  * @device_id: Device ID
9609  *
9610  * Return: DP SOC handle on success, NULL on failure
9611  */
9612 static struct dp_soc *
9613 dp_soc_attach(void *ctrl_psoc, HTC_HANDLE htc_handle, qdf_device_t qdf_osdev,
9614 	      struct ol_if_ops *ol_ops, uint16_t device_id)
9615 {
9616 	int int_ctx;
9617 	struct dp_soc *soc =  NULL;
9618 	struct htt_soc *htt_soc = NULL;
9619 
9620 	soc = qdf_mem_malloc(sizeof(*soc));
9621 
9622 	if (!soc) {
9623 		dp_err("DP SOC memory allocation failed");
9624 		goto fail0;
9625 	}
9626 
9627 	int_ctx = 0;
9628 	soc->device_id = device_id;
9629 	soc->cdp_soc.ops = &dp_txrx_ops;
9630 	soc->cdp_soc.ol_ops = ol_ops;
9631 	soc->ctrl_psoc = ctrl_psoc;
9632 	soc->osdev = qdf_osdev;
9633 	soc->num_hw_dscp_tid_map = HAL_MAX_HW_DSCP_TID_MAPS;
9634 
9635 	soc->wlan_cfg_ctx = wlan_cfg_soc_attach(soc->ctrl_psoc);
9636 	if (!soc->wlan_cfg_ctx) {
9637 		dp_err("wlan_cfg_ctx failed\n");
9638 		goto fail1;
9639 	}
9640 	htt_soc = qdf_mem_malloc(sizeof(*htt_soc));
9641 	if (!htt_soc) {
9642 		dp_err("HTT attach failed");
9643 		goto fail1;
9644 	}
9645 	soc->htt_handle = htt_soc;
9646 	htt_soc->dp_soc = soc;
9647 	htt_soc->htc_soc = htc_handle;
9648 
9649 	if (htt_soc_htc_prealloc(htt_soc) != QDF_STATUS_SUCCESS)
9650 		goto fail2;
9651 
9652 	return (void *)soc;
9653 fail2:
9654 	qdf_mem_free(htt_soc);
9655 fail1:
9656 	qdf_mem_free(soc);
9657 fail0:
9658 	return NULL;
9659 }
9660 
9661 /**
9662  * dp_soc_init() - Initialize txrx SOC
9663  * @dp_soc: Opaque DP SOC handle
9664  * @htc_handle: Opaque HTC handle
9665  * @hif_handle: Opaque HIF handle
9666  *
9667  * Return: DP SOC handle on success, NULL on failure
9668  */
9669 void *dp_soc_init(void *dpsoc, HTC_HANDLE htc_handle, void *hif_handle)
9670 {
9671 	int target_type;
9672 	struct dp_soc *soc = (struct dp_soc *)dpsoc;
9673 	struct htt_soc *htt_soc = (struct htt_soc *)soc->htt_handle;
9674 
9675 	htt_soc->htc_soc = htc_handle;
9676 	soc->hif_handle = hif_handle;
9677 
9678 	soc->hal_soc = hif_get_hal_handle(soc->hif_handle);
9679 	if (!soc->hal_soc)
9680 		return NULL;
9681 
9682 	htt_soc_initialize(soc->htt_handle, soc->ctrl_psoc, htt_soc->htc_soc,
9683 			   soc->hal_soc, soc->osdev);
9684 	target_type = hal_get_target_type(soc->hal_soc);
9685 	switch (target_type) {
9686 	case TARGET_TYPE_QCA6290:
9687 		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
9688 					       REO_DST_RING_SIZE_QCA6290);
9689 		soc->ast_override_support = 1;
9690 		break;
9691 #ifdef QCA_WIFI_QCA6390
9692 	case TARGET_TYPE_QCA6390:
9693 		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
9694 					       REO_DST_RING_SIZE_QCA6290);
9695 		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, true);
9696 		soc->ast_override_support = 1;
9697 		if (con_mode_monitor == QDF_GLOBAL_MONITOR_MODE) {
9698 			int int_ctx;
9699 
9700 			for (int_ctx = 0; int_ctx < WLAN_CFG_INT_NUM_CONTEXTS; int_ctx++) {
9701 				soc->wlan_cfg_ctx->int_rx_ring_mask[int_ctx] = 0;
9702 				soc->wlan_cfg_ctx->int_rxdma2host_ring_mask[int_ctx] = 0;
9703 			}
9704 		}
9705 		soc->wlan_cfg_ctx->rxdma1_enable = 0;
9706 		break;
9707 #endif
9708 	case TARGET_TYPE_QCA8074:
9709 		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
9710 					       REO_DST_RING_SIZE_QCA8074);
9711 		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, true);
9712 		break;
9713 	case TARGET_TYPE_QCA8074V2:
9714 	case TARGET_TYPE_QCA6018:
9715 		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
9716 					       REO_DST_RING_SIZE_QCA8074);
9717 		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, false);
9718 		soc->hw_nac_monitor_support = 1;
9719 		soc->ast_override_support = 1;
9720 		soc->per_tid_basize_max_tid = 8;
9721 		soc->num_hw_dscp_tid_map = HAL_MAX_HW_DSCP_TID_V2_MAPS;
9722 		break;
9723 	default:
9724 		qdf_print("%s: Unknown tgt type %d\n", __func__, target_type);
9725 		qdf_assert_always(0);
9726 		break;
9727 	}
9728 
9729 	wlan_cfg_set_rx_hash(soc->wlan_cfg_ctx,
9730 			     cfg_get(soc->ctrl_psoc, CFG_DP_RX_HASH));
9731 	soc->cce_disable = false;
9732 
9733 	if (soc->cdp_soc.ol_ops->get_dp_cfg_param) {
9734 		int ret = soc->cdp_soc.ol_ops->get_dp_cfg_param(soc->ctrl_psoc,
9735 				CDP_CFG_MAX_PEER_ID);
9736 
9737 		if (ret != -EINVAL) {
9738 			wlan_cfg_set_max_peer_id(soc->wlan_cfg_ctx, ret);
9739 		}
9740 
9741 		ret = soc->cdp_soc.ol_ops->get_dp_cfg_param(soc->ctrl_psoc,
9742 				CDP_CFG_CCE_DISABLE);
9743 		if (ret == 1)
9744 			soc->cce_disable = true;
9745 	}
9746 
9747 	qdf_spinlock_create(&soc->peer_ref_mutex);
9748 	qdf_spinlock_create(&soc->ast_lock);
9749 
9750 	qdf_spinlock_create(&soc->reo_desc_freelist_lock);
9751 	qdf_list_create(&soc->reo_desc_freelist, REO_DESC_FREELIST_SIZE);
9752 
9753 	/* fill the tx/rx cpu ring map*/
9754 	dp_soc_set_txrx_ring_map(soc);
9755 
9756 	qdf_spinlock_create(&soc->htt_stats.lock);
9757 	/* initialize work queue for stats processing */
9758 	qdf_create_work(0, &soc->htt_stats.work, htt_t2h_stats_handler, soc);
9759 
9760 	return soc;
9761 
9762 }
9763 
9764 /**
9765  * dp_soc_init_wifi3() - Initialize txrx SOC
9766  * @dp_soc: Opaque DP SOC handle
9767  * @ctrl_psoc: Opaque SOC handle from control plane(Unused)
9768  * @hif_handle: Opaque HIF handle
9769  * @htc_handle: Opaque HTC handle
9770  * @qdf_osdev: QDF device (Unused)
9771  * @ol_ops: Offload Operations (Unused)
9772  * @device_id: Device ID (Unused)
9773  *
9774  * Return: DP SOC handle on success, NULL on failure
9775  */
9776 void *dp_soc_init_wifi3(void *dpsoc, void *ctrl_psoc, void *hif_handle,
9777 			HTC_HANDLE htc_handle, qdf_device_t qdf_osdev,
9778 			struct ol_if_ops *ol_ops, uint16_t device_id)
9779 {
9780 	return dp_soc_init(dpsoc, htc_handle, hif_handle);
9781 }
9782 
9783 #endif
9784 
9785 /*
9786  * dp_get_pdev_for_mac_id() -  Return pdev for mac_id
9787  *
9788  * @soc: handle to DP soc
9789  * @mac_id: MAC id
9790  *
9791  * Return: Return pdev corresponding to MAC
9792  */
9793 void *dp_get_pdev_for_mac_id(struct dp_soc *soc, uint32_t mac_id)
9794 {
9795 	if (wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx))
9796 		return soc->pdev_list[mac_id];
9797 
9798 	/* Typically for MCL as there only 1 PDEV*/
9799 	return soc->pdev_list[0];
9800 }
9801 
9802 /*
9803  * dp_is_hw_dbs_enable() - Procedure to check if DBS is supported
9804  * @soc:		DP SoC context
9805  * @max_mac_rings:	No of MAC rings
9806  *
9807  * Return: None
9808  */
9809 static
9810 void dp_is_hw_dbs_enable(struct dp_soc *soc,
9811 				int *max_mac_rings)
9812 {
9813 	bool dbs_enable = false;
9814 	if (soc->cdp_soc.ol_ops->is_hw_dbs_2x2_capable)
9815 		dbs_enable = soc->cdp_soc.ol_ops->
9816 		is_hw_dbs_2x2_capable(soc->ctrl_psoc);
9817 
9818 	*max_mac_rings = (dbs_enable)?(*max_mac_rings):1;
9819 }
9820 
9821 /*
9822 * dp_is_soc_reinit() - Check if soc reinit is true
9823 * @soc: DP SoC context
9824 *
9825 * Return: true or false
9826 */
9827 bool dp_is_soc_reinit(struct dp_soc *soc)
9828 {
9829 	return soc->dp_soc_reinit;
9830 }
9831 
9832 /*
9833 * dp_set_pktlog_wifi3() - attach txrx vdev
9834 * @pdev: Datapath PDEV handle
9835 * @event: which event's notifications are being subscribed to
9836 * @enable: WDI event subscribe or not. (True or False)
9837 *
9838 * Return: Success, NULL on failure
9839 */
9840 #ifdef WDI_EVENT_ENABLE
9841 int dp_set_pktlog_wifi3(struct dp_pdev *pdev, uint32_t event,
9842 	bool enable)
9843 {
9844 	struct dp_soc *soc = NULL;
9845 	struct htt_rx_ring_tlv_filter htt_tlv_filter = {0};
9846 	int max_mac_rings = wlan_cfg_get_num_mac_rings
9847 					(pdev->wlan_cfg_ctx);
9848 	uint8_t mac_id = 0;
9849 
9850 	soc = pdev->soc;
9851 	dp_is_hw_dbs_enable(soc, &max_mac_rings);
9852 
9853 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
9854 			FL("Max_mac_rings %d "),
9855 			max_mac_rings);
9856 
9857 	if (enable) {
9858 		switch (event) {
9859 		case WDI_EVENT_RX_DESC:
9860 			if (pdev->monitor_vdev) {
9861 				/* Nothing needs to be done if monitor mode is
9862 				 * enabled
9863 				 */
9864 				return 0;
9865 			}
9866 			if (pdev->rx_pktlog_mode != DP_RX_PKTLOG_FULL) {
9867 				pdev->rx_pktlog_mode = DP_RX_PKTLOG_FULL;
9868 				htt_tlv_filter.mpdu_start = 1;
9869 				htt_tlv_filter.msdu_start = 1;
9870 				htt_tlv_filter.msdu_end = 1;
9871 				htt_tlv_filter.mpdu_end = 1;
9872 				htt_tlv_filter.packet_header = 1;
9873 				htt_tlv_filter.attention = 1;
9874 				htt_tlv_filter.ppdu_start = 1;
9875 				htt_tlv_filter.ppdu_end = 1;
9876 				htt_tlv_filter.ppdu_end_user_stats = 1;
9877 				htt_tlv_filter.ppdu_end_user_stats_ext = 1;
9878 				htt_tlv_filter.ppdu_end_status_done = 1;
9879 				htt_tlv_filter.enable_fp = 1;
9880 				htt_tlv_filter.fp_mgmt_filter = FILTER_MGMT_ALL;
9881 				htt_tlv_filter.fp_ctrl_filter = FILTER_CTRL_ALL;
9882 				htt_tlv_filter.fp_data_filter = FILTER_DATA_ALL;
9883 				htt_tlv_filter.mo_mgmt_filter = FILTER_MGMT_ALL;
9884 				htt_tlv_filter.mo_ctrl_filter = FILTER_CTRL_ALL;
9885 				htt_tlv_filter.mo_data_filter = FILTER_DATA_ALL;
9886 
9887 				for (mac_id = 0; mac_id < max_mac_rings;
9888 								mac_id++) {
9889 					int mac_for_pdev =
9890 						dp_get_mac_id_for_pdev(mac_id,
9891 								pdev->pdev_id);
9892 
9893 					htt_h2t_rx_ring_cfg(soc->htt_handle,
9894 					 mac_for_pdev,
9895 					 pdev->rxdma_mon_status_ring[mac_id]
9896 					 .hal_srng,
9897 					 RXDMA_MONITOR_STATUS,
9898 					 RX_BUFFER_SIZE,
9899 					 &htt_tlv_filter);
9900 
9901 				}
9902 
9903 				if (soc->reap_timer_init)
9904 					qdf_timer_mod(&soc->mon_reap_timer,
9905 					DP_INTR_POLL_TIMER_MS);
9906 			}
9907 			break;
9908 
9909 		case WDI_EVENT_LITE_RX:
9910 			if (pdev->monitor_vdev) {
9911 				/* Nothing needs to be done if monitor mode is
9912 				 * enabled
9913 				 */
9914 				return 0;
9915 			}
9916 
9917 			if (pdev->rx_pktlog_mode != DP_RX_PKTLOG_LITE) {
9918 				pdev->rx_pktlog_mode = DP_RX_PKTLOG_LITE;
9919 
9920 				htt_tlv_filter.ppdu_start = 1;
9921 				htt_tlv_filter.ppdu_end = 1;
9922 				htt_tlv_filter.ppdu_end_user_stats = 1;
9923 				htt_tlv_filter.ppdu_end_user_stats_ext = 1;
9924 				htt_tlv_filter.ppdu_end_status_done = 1;
9925 				htt_tlv_filter.mpdu_start = 1;
9926 				htt_tlv_filter.enable_fp = 1;
9927 				htt_tlv_filter.fp_mgmt_filter = FILTER_MGMT_ALL;
9928 				htt_tlv_filter.fp_ctrl_filter = FILTER_CTRL_ALL;
9929 				htt_tlv_filter.fp_data_filter = FILTER_DATA_ALL;
9930 				htt_tlv_filter.mo_mgmt_filter = FILTER_MGMT_ALL;
9931 				htt_tlv_filter.mo_ctrl_filter = FILTER_CTRL_ALL;
9932 				htt_tlv_filter.mo_data_filter = FILTER_DATA_ALL;
9933 
9934 				for (mac_id = 0; mac_id < max_mac_rings;
9935 								mac_id++) {
9936 					int mac_for_pdev =
9937 						dp_get_mac_id_for_pdev(mac_id,
9938 								pdev->pdev_id);
9939 
9940 					htt_h2t_rx_ring_cfg(soc->htt_handle,
9941 					mac_for_pdev,
9942 					pdev->rxdma_mon_status_ring[mac_id]
9943 					.hal_srng,
9944 					RXDMA_MONITOR_STATUS,
9945 					RX_BUFFER_SIZE_PKTLOG_LITE,
9946 					&htt_tlv_filter);
9947 				}
9948 
9949 				if (soc->reap_timer_init)
9950 					qdf_timer_mod(&soc->mon_reap_timer,
9951 					DP_INTR_POLL_TIMER_MS);
9952 			}
9953 			break;
9954 
9955 		case WDI_EVENT_LITE_T2H:
9956 			if (pdev->monitor_vdev) {
9957 				/* Nothing needs to be done if monitor mode is
9958 				 * enabled
9959 				 */
9960 				return 0;
9961 			}
9962 
9963 			for (mac_id = 0; mac_id < max_mac_rings; mac_id++) {
9964 				int mac_for_pdev = dp_get_mac_id_for_pdev(
9965 							mac_id,	pdev->pdev_id);
9966 
9967 				pdev->pktlog_ppdu_stats = true;
9968 				dp_h2t_cfg_stats_msg_send(pdev,
9969 					DP_PPDU_TXLITE_STATS_BITMASK_CFG,
9970 					mac_for_pdev);
9971 			}
9972 			break;
9973 
9974 		default:
9975 			/* Nothing needs to be done for other pktlog types */
9976 			break;
9977 		}
9978 	} else {
9979 		switch (event) {
9980 		case WDI_EVENT_RX_DESC:
9981 		case WDI_EVENT_LITE_RX:
9982 			if (pdev->monitor_vdev) {
9983 				/* Nothing needs to be done if monitor mode is
9984 				 * enabled
9985 				 */
9986 				return 0;
9987 			}
9988 			if (pdev->rx_pktlog_mode != DP_RX_PKTLOG_DISABLED) {
9989 				pdev->rx_pktlog_mode = DP_RX_PKTLOG_DISABLED;
9990 
9991 				for (mac_id = 0; mac_id < max_mac_rings;
9992 								mac_id++) {
9993 					int mac_for_pdev =
9994 						dp_get_mac_id_for_pdev(mac_id,
9995 								pdev->pdev_id);
9996 
9997 					htt_h2t_rx_ring_cfg(soc->htt_handle,
9998 					  mac_for_pdev,
9999 					  pdev->rxdma_mon_status_ring[mac_id]
10000 					  .hal_srng,
10001 					  RXDMA_MONITOR_STATUS,
10002 					  RX_BUFFER_SIZE,
10003 					  &htt_tlv_filter);
10004 				}
10005 
10006 				if (soc->reap_timer_init)
10007 					qdf_timer_stop(&soc->mon_reap_timer);
10008 			}
10009 			break;
10010 		case WDI_EVENT_LITE_T2H:
10011 			if (pdev->monitor_vdev) {
10012 				/* Nothing needs to be done if monitor mode is
10013 				 * enabled
10014 				 */
10015 				return 0;
10016 			}
10017 			/* To disable HTT_H2T_MSG_TYPE_PPDU_STATS_CFG in FW
10018 			 * passing value 0. Once these macros will define in htt
10019 			 * header file will use proper macros
10020 			*/
10021 			for (mac_id = 0; mac_id < max_mac_rings; mac_id++) {
10022 				int mac_for_pdev =
10023 						dp_get_mac_id_for_pdev(mac_id,
10024 								pdev->pdev_id);
10025 
10026 				pdev->pktlog_ppdu_stats = false;
10027 				if (!pdev->enhanced_stats_en && !pdev->tx_sniffer_enable && !pdev->mcopy_mode) {
10028 					dp_h2t_cfg_stats_msg_send(pdev, 0,
10029 								mac_for_pdev);
10030 				} else if (pdev->tx_sniffer_enable || pdev->mcopy_mode) {
10031 					dp_h2t_cfg_stats_msg_send(pdev, DP_PPDU_STATS_CFG_SNIFFER,
10032 								mac_for_pdev);
10033 				} else if (pdev->enhanced_stats_en) {
10034 					dp_h2t_cfg_stats_msg_send(pdev, DP_PPDU_STATS_CFG_ENH_STATS,
10035 								mac_for_pdev);
10036 				}
10037 			}
10038 
10039 			break;
10040 		default:
10041 			/* Nothing needs to be done for other pktlog types */
10042 			break;
10043 		}
10044 	}
10045 	return 0;
10046 }
10047 #endif
10048