xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/dp_main.c (revision 6e2fed8f5f149f3d59b75fb27d2829d1de560487)
1 /*
2  * Copyright (c) 2016-2019 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include <qdf_types.h>
20 #include <qdf_lock.h>
21 #include <qdf_net_types.h>
22 #include <qdf_lro.h>
23 #include <qdf_module.h>
24 #include <hal_hw_headers.h>
25 #include <hal_api.h>
26 #include <hif.h>
27 #include <htt.h>
28 #include <wdi_event.h>
29 #include <queue.h>
30 #include "dp_htt.h"
31 #include "dp_types.h"
32 #include "dp_internal.h"
33 #include "dp_tx.h"
34 #include "dp_tx_desc.h"
35 #include "dp_rx.h"
36 #include <cdp_txrx_handle.h>
37 #include <wlan_cfg.h>
38 #include "cdp_txrx_cmn_struct.h"
39 #include "cdp_txrx_stats_struct.h"
40 #include "cdp_txrx_cmn_reg.h"
41 #include <qdf_util.h>
42 #include "dp_peer.h"
43 #include "dp_rx_mon.h"
44 #include "htt_stats.h"
45 #include "qdf_mem.h"   /* qdf_mem_malloc,free */
46 #include "cfg_ucfg_api.h"
47 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
48 #include "cdp_txrx_flow_ctrl_v2.h"
49 #else
50 static inline void
51 cdp_dump_flow_pool_info(struct cdp_soc_t *soc)
52 {
53 	return;
54 }
55 #endif
56 #include "dp_ipa.h"
57 #include "dp_cal_client_api.h"
58 #ifdef CONFIG_MCL
59 extern int con_mode_monitor;
60 #ifndef REMOVE_PKT_LOG
61 #include <pktlog_ac_api.h>
62 #include <pktlog_ac.h>
63 #endif
64 #endif
65 void *dp_soc_init(void *dpsoc, HTC_HANDLE htc_handle, void *hif_handle);
66 static void dp_pdev_detach(struct cdp_pdev *txrx_pdev, int force);
67 static struct dp_soc *
68 dp_soc_attach(void *ctrl_psoc, HTC_HANDLE htc_handle, qdf_device_t qdf_osdev,
69 	      struct ol_if_ops *ol_ops, uint16_t device_id);
70 static void dp_pktlogmod_exit(struct dp_pdev *handle);
71 static void *dp_peer_create_wifi3(struct cdp_vdev *vdev_handle,
72 				uint8_t *peer_mac_addr,
73 				struct cdp_ctrl_objmgr_peer *ctrl_peer);
74 static void dp_peer_delete_wifi3(void *peer_handle, uint32_t bitmap);
75 static void dp_ppdu_ring_reset(struct dp_pdev *pdev);
76 static void dp_ppdu_ring_cfg(struct dp_pdev *pdev);
77 
78 #define DP_INTR_POLL_TIMER_MS	10
79 /* Generic AST entry aging timer value */
80 #define DP_AST_AGING_TIMER_DEFAULT_MS	1000
81 /* WDS AST entry aging timer value */
82 #define DP_WDS_AST_AGING_TIMER_DEFAULT_MS	120000
83 #define DP_WDS_AST_AGING_TIMER_CNT \
84 ((DP_WDS_AST_AGING_TIMER_DEFAULT_MS / DP_AST_AGING_TIMER_DEFAULT_MS) - 1)
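/*
 * Worked example (values from the defines above): the generic AST aging
 * timer fires every DP_AST_AGING_TIMER_DEFAULT_MS, so
 * DP_WDS_AST_AGING_TIMER_CNT = (120000 / 1000) - 1 = 119; a WDS AST entry
 * thus ages out after roughly 120 one-second ticks of the generic timer.
 */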
85 #define DP_MCS_LENGTH (6*MAX_MCS)
86 #define DP_NSS_LENGTH (6*SS_COUNT)
87 #define DP_RXDMA_ERR_LENGTH (6*HAL_RXDMA_ERR_MAX)
88 #define DP_MAX_INT_CONTEXTS_STRING_LENGTH (6 * WLAN_CFG_INT_NUM_CONTEXTS)
89 #define DP_REO_ERR_LENGTH (6*HAL_REO_ERR_MAX)
90 #define DP_MAX_MCS_STRING_LEN 30
91 #define DP_CURR_FW_STATS_AVAIL 19
92 #define DP_HTT_DBG_EXT_STATS_MAX 256
93 #define DP_MAX_SLEEP_TIME 100
94 #ifndef QCA_WIFI_3_0_EMU
95 #define SUSPEND_DRAIN_WAIT 500
96 #else
97 #define SUSPEND_DRAIN_WAIT 3000
98 #endif
99 
100 #ifdef IPA_OFFLOAD
101 /* Exclude IPA rings from the interrupt context */
102 #define TX_RING_MASK_VAL	0xb
103 #define RX_RING_MASK_VAL	0x7
104 #else
105 #define TX_RING_MASK_VAL	0xF
106 #define RX_RING_MASK_VAL	0xF
107 #endif
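/*
 * Mask breakdown (assumed ring assignment, for illustration): with IPA
 * offload, TX_RING_MASK_VAL 0xb (binary 1011) leaves bit 2 clear and
 * RX_RING_MASK_VAL 0x7 (binary 0111) leaves bit 3 clear, i.e. one TX
 * completion ring and one REO destination ring are kept out of the host
 * interrupt masks so the IPA uC can service them instead.
 */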
108 
109 #define STR_MAXLEN	64
110 
111 #define DP_PPDU_STATS_CFG_ALL 0xFFFF
112 
113 /* PPDU stats mask sent to FW to enable enhanced stats */
114 #define DP_PPDU_STATS_CFG_ENH_STATS 0xE67
115 /* PPDU stats mask sent to FW to support debug sniffer feature */
116 #define DP_PPDU_STATS_CFG_SNIFFER 0x2FFF
117 /* PPDU stats mask sent to FW to support BPR feature*/
118 #define DP_PPDU_STATS_CFG_BPR 0x2000
119 /* PPDU stats mask sent to FW to support BPR and enhanced stats feature */
120 #define DP_PPDU_STATS_CFG_BPR_ENH (DP_PPDU_STATS_CFG_BPR | \
121 				   DP_PPDU_STATS_CFG_ENH_STATS)
122 /* PPDU stats mask sent to FW to support BPR and pcktlog stats feature */
123 #define DP_PPDU_STATS_CFG_BPR_PKTLOG (DP_PPDU_STATS_CFG_BPR | \
124 				      DP_PPDU_TXLITE_STATS_BITMASK_CFG)
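/*
 * Example mask composition (values from the defines above):
 *
 *	DP_PPDU_STATS_CFG_BPR_ENH = 0x2000 | 0xE67 = 0x2E67
 *
 * Each set bit requests one PPDU stats TLV type from the FW, so compound
 * features are expressed by OR-ing the individual masks.
 */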
125 
126 #define RNG_ERR		"SRNG setup failed for"
127 /**
128  * default_dscp_tid_map - Default DSCP-TID mapping
129  *
130  * DSCP        TID
131  * 000000      0
132  * 001000      1
133  * 010000      2
134  * 011000      3
135  * 100000      4
136  * 101000      5
137  * 110000      6
138  * 111000      7
139  */
140 static uint8_t default_dscp_tid_map[DSCP_TID_MAP_MAX] = {
141 	0, 0, 0, 0, 0, 0, 0, 0,
142 	1, 1, 1, 1, 1, 1, 1, 1,
143 	2, 2, 2, 2, 2, 2, 2, 2,
144 	3, 3, 3, 3, 3, 3, 3, 3,
145 	4, 4, 4, 4, 4, 4, 4, 4,
146 	5, 5, 5, 5, 5, 5, 5, 5,
147 	6, 6, 6, 6, 6, 6, 6, 6,
148 	7, 7, 7, 7, 7, 7, 7, 7,
149 };
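/*
 * Worked example (illustrative only): the table is indexed by the full
 * 6-bit DSCP value and only the top three bits select the TID, so DSCP 46
 * (EF, binary 101110) falls in the 101xxx block:
 *
 *	uint8_t tid = default_dscp_tid_map[46];	 // tid == 5
 */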
150 
151 /*
152  * struct dp_rate_debug
153  *
154  * @mcs_type: print string for a given mcs
155  * @valid: whether this MCS index is valid for the given preamble type
156  */
157 struct dp_rate_debug {
158 	char mcs_type[DP_MAX_MCS_STRING_LEN];
159 	uint8_t valid;
160 };
161 
162 #define MCS_VALID 1
163 #define MCS_INVALID 0
164 
165 static const struct dp_rate_debug dp_rate_string[DOT11_MAX][MAX_MCS] = {
166 
167 	{
168 		{"OFDM 48 Mbps", MCS_VALID},
169 		{"OFDM 24 Mbps", MCS_VALID},
170 		{"OFDM 12 Mbps", MCS_VALID},
171 		{"OFDM 6 Mbps ", MCS_VALID},
172 		{"OFDM 54 Mbps", MCS_VALID},
173 		{"OFDM 36 Mbps", MCS_VALID},
174 		{"OFDM 18 Mbps", MCS_VALID},
175 		{"OFDM 9 Mbps ", MCS_VALID},
176 		{"INVALID ", MCS_INVALID},
177 		{"INVALID ", MCS_INVALID},
178 		{"INVALID ", MCS_INVALID},
179 		{"INVALID ", MCS_INVALID},
180 		{"INVALID ", MCS_VALID},
181 	},
182 	{
183 		{"CCK 11 Mbps Long  ", MCS_VALID},
184 		{"CCK 5.5 Mbps Long ", MCS_VALID},
185 		{"CCK 2 Mbps Long   ", MCS_VALID},
186 		{"CCK 1 Mbps Long   ", MCS_VALID},
187 		{"CCK 11 Mbps Short ", MCS_VALID},
188 		{"CCK 5.5 Mbps Short", MCS_VALID},
189 		{"CCK 2 Mbps Short  ", MCS_VALID},
190 		{"INVALID ", MCS_INVALID},
191 		{"INVALID ", MCS_INVALID},
192 		{"INVALID ", MCS_INVALID},
193 		{"INVALID ", MCS_INVALID},
194 		{"INVALID ", MCS_INVALID},
195 		{"INVALID ", MCS_VALID},
196 	},
197 	{
198 		{"HT MCS 0 (BPSK 1/2)  ", MCS_VALID},
199 		{"HT MCS 1 (QPSK 1/2)  ", MCS_VALID},
200 		{"HT MCS 2 (QPSK 3/4)  ", MCS_VALID},
201 		{"HT MCS 3 (16-QAM 1/2)", MCS_VALID},
202 		{"HT MCS 4 (16-QAM 3/4)", MCS_VALID},
203 		{"HT MCS 5 (64-QAM 2/3)", MCS_VALID},
204 		{"HT MCS 6 (64-QAM 3/4)", MCS_VALID},
205 		{"HT MCS 7 (64-QAM 5/6)", MCS_VALID},
206 		{"INVALID ", MCS_INVALID},
207 		{"INVALID ", MCS_INVALID},
208 		{"INVALID ", MCS_INVALID},
209 		{"INVALID ", MCS_INVALID},
210 		{"INVALID ", MCS_VALID},
211 	},
212 	{
213 		{"VHT MCS 0 (BPSK 1/2)     ", MCS_VALID},
214 		{"VHT MCS 1 (QPSK 1/2)     ", MCS_VALID},
215 		{"VHT MCS 2 (QPSK 3/4)     ", MCS_VALID},
216 		{"VHT MCS 3 (16-QAM 1/2)   ", MCS_VALID},
217 		{"VHT MCS 4 (16-QAM 3/4)   ", MCS_VALID},
218 		{"VHT MCS 5 (64-QAM 2/3)   ", MCS_VALID},
219 		{"VHT MCS 6 (64-QAM 3/4)   ", MCS_VALID},
220 		{"VHT MCS 7 (64-QAM 5/6)   ", MCS_VALID},
221 		{"VHT MCS 8 (256-QAM 3/4)  ", MCS_VALID},
222 		{"VHT MCS 9 (256-QAM 5/6)  ", MCS_VALID},
223 		{"VHT MCS 10 (1024-QAM 3/4)", MCS_VALID},
224 		{"VHT MCS 11 (1024-QAM 5/6)", MCS_VALID},
225 		{"INVALID ", MCS_VALID},
226 	},
227 	{
228 		{"HE MCS 0 (BPSK 1/2)     ", MCS_VALID},
229 		{"HE MCS 1 (QPSK 1/2)     ", MCS_VALID},
230 		{"HE MCS 2 (QPSK 3/4)     ", MCS_VALID},
231 		{"HE MCS 3 (16-QAM 1/2)   ", MCS_VALID},
232 		{"HE MCS 4 (16-QAM 3/4)   ", MCS_VALID},
233 		{"HE MCS 5 (64-QAM 2/3)   ", MCS_VALID},
234 		{"HE MCS 6 (64-QAM 3/4)   ", MCS_VALID},
235 		{"HE MCS 7 (64-QAM 5/6)   ", MCS_VALID},
236 		{"HE MCS 8 (256-QAM 3/4)  ", MCS_VALID},
237 		{"HE MCS 9 (256-QAM 5/6)  ", MCS_VALID},
238 		{"HE MCS 10 (1024-QAM 3/4)", MCS_VALID},
239 		{"HE MCS 11 (1024-QAM 5/6)", MCS_VALID},
240 		{"INVALID ", MCS_VALID},
241 	}
242 };
243 
244 /**
245  * enum dp_cpu_ring_map_types - dp tx cpu ring map
246  * @DP_NSS_DEFAULT_MAP: Default mode with no radio offloaded to NSS
247  * @DP_NSS_FIRST_RADIO_OFFLOADED_MAP: Only First Radio is offloaded
248  * @DP_NSS_SECOND_RADIO_OFFLOADED_MAP: Only second radio is offloaded
249  * @DP_NSS_DBDC_OFFLOADED_MAP: Both radios are offloaded
250  * @DP_NSS_DBTC_OFFLOADED_MAP: All three radios are offloaded
251  * @DP_NSS_CPU_RING_MAP_MAX: Max cpu ring map val
252  */
253 enum dp_cpu_ring_map_types {
254 	DP_NSS_DEFAULT_MAP,
255 	DP_NSS_FIRST_RADIO_OFFLOADED_MAP,
256 	DP_NSS_SECOND_RADIO_OFFLOADED_MAP,
257 	DP_NSS_DBDC_OFFLOADED_MAP,
258 	DP_NSS_DBTC_OFFLOADED_MAP,
259 	DP_NSS_CPU_RING_MAP_MAX
260 };
261 
262 /**
263  * @brief Cpu to tx ring map
264  */
265 #ifdef CONFIG_WIN
266 static uint8_t
267 dp_cpu_ring_map[DP_NSS_CPU_RING_MAP_MAX][WLAN_CFG_INT_NUM_CONTEXTS] = {
268 	{0x0, 0x1, 0x2, 0x0, 0x0, 0x1, 0x2, 0x0, 0x0, 0x1, 0x2},
269 	{0x1, 0x2, 0x1, 0x2, 0x1, 0x2, 0x1, 0x2, 0x1, 0x2, 0x1},
270 	{0x0, 0x2, 0x0, 0x2, 0x0, 0x2, 0x0, 0x2, 0x0, 0x2, 0x0},
271 	{0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2},
272 	{0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3}
273 };
274 #else
275 static uint8_t
276 dp_cpu_ring_map[DP_NSS_CPU_RING_MAP_MAX][WLAN_CFG_INT_NUM_CONTEXTS] = {
277 	{0x0, 0x1, 0x2, 0x0, 0x0, 0x1, 0x2},
278 	{0x1, 0x2, 0x1, 0x2, 0x1, 0x2, 0x1},
279 	{0x0, 0x2, 0x0, 0x2, 0x0, 0x2, 0x0},
280 	{0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2},
281 	{0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3}
282 };
283 #endif
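/*
 * Usage sketch (illustrative only): the map is indexed by the NSS offload
 * mode and the interrupt/CPU context id, yielding the TCL ring that host
 * TX from that context should use, e.g.:
 *
 *	uint8_t tx_ring =
 *		dp_cpu_ring_map[DP_NSS_DBDC_OFFLOADED_MAP][ctx_id];
 *	// 0x2 for every context: host TX stays off the offloaded rings
 */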
284 
285 /**
286  * @brief Select the type of statistics
287  */
288 enum dp_stats_type {
289 	STATS_FW = 0,
290 	STATS_HOST = 1,
291 	STATS_TYPE_MAX = 2,
292 };
293 
294 /**
295  * @brief General Firmware statistics options
296  *
297  */
298 enum dp_fw_stats {
299 	TXRX_FW_STATS_INVALID	= -1,
300 };
301 
302 /**
303  * dp_stats_mapping_table - Firmware and Host statistics
304  * currently supported
305  */
306 const int dp_stats_mapping_table[][STATS_TYPE_MAX] = {
307 	{HTT_DBG_EXT_STATS_RESET, TXRX_HOST_STATS_INVALID},
308 	{HTT_DBG_EXT_STATS_PDEV_TX, TXRX_HOST_STATS_INVALID},
309 	{HTT_DBG_EXT_STATS_PDEV_RX, TXRX_HOST_STATS_INVALID},
310 	{HTT_DBG_EXT_STATS_PDEV_TX_HWQ, TXRX_HOST_STATS_INVALID},
311 	{HTT_DBG_EXT_STATS_PDEV_TX_SCHED, TXRX_HOST_STATS_INVALID},
312 	{HTT_DBG_EXT_STATS_PDEV_ERROR, TXRX_HOST_STATS_INVALID},
313 	{HTT_DBG_EXT_STATS_PDEV_TQM, TXRX_HOST_STATS_INVALID},
314 	{HTT_DBG_EXT_STATS_TQM_CMDQ, TXRX_HOST_STATS_INVALID},
315 	{HTT_DBG_EXT_STATS_TX_DE_INFO, TXRX_HOST_STATS_INVALID},
316 	{HTT_DBG_EXT_STATS_PDEV_TX_RATE, TXRX_HOST_STATS_INVALID},
317 	{HTT_DBG_EXT_STATS_PDEV_RX_RATE, TXRX_HOST_STATS_INVALID},
318 	{TXRX_FW_STATS_INVALID, TXRX_HOST_STATS_INVALID},
319 	{HTT_DBG_EXT_STATS_TX_SELFGEN_INFO, TXRX_HOST_STATS_INVALID},
320 	{HTT_DBG_EXT_STATS_TX_MU_HWQ, TXRX_HOST_STATS_INVALID},
321 	{HTT_DBG_EXT_STATS_RING_IF_INFO, TXRX_HOST_STATS_INVALID},
322 	{HTT_DBG_EXT_STATS_SRNG_INFO, TXRX_HOST_STATS_INVALID},
323 	{HTT_DBG_EXT_STATS_SFM_INFO, TXRX_HOST_STATS_INVALID},
324 	{HTT_DBG_EXT_STATS_PDEV_TX_MU, TXRX_HOST_STATS_INVALID},
325 	{HTT_DBG_EXT_STATS_ACTIVE_PEERS_LIST, TXRX_HOST_STATS_INVALID},
326 	/* Last ENUM for HTT FW STATS */
327 	{DP_HTT_DBG_EXT_STATS_MAX, TXRX_HOST_STATS_INVALID},
328 	{TXRX_FW_STATS_INVALID, TXRX_CLEAR_STATS},
329 	{TXRX_FW_STATS_INVALID, TXRX_RX_RATE_STATS},
330 	{TXRX_FW_STATS_INVALID, TXRX_TX_RATE_STATS},
331 	{TXRX_FW_STATS_INVALID, TXRX_TX_HOST_STATS},
332 	{TXRX_FW_STATS_INVALID, TXRX_RX_HOST_STATS},
333 	{TXRX_FW_STATS_INVALID, TXRX_AST_STATS},
334 	{TXRX_FW_STATS_INVALID, TXRX_SRNG_PTR_STATS},
335 	{TXRX_FW_STATS_INVALID, TXRX_RX_MON_STATS},
336 	{TXRX_FW_STATS_INVALID, TXRX_REO_QUEUE_STATS},
337 	{TXRX_FW_STATS_INVALID, TXRX_SOC_CFG_PARAMS},
338 	{TXRX_FW_STATS_INVALID, TXRX_PDEV_CFG_PARAMS},
339 };
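/*
 * Usage sketch (illustrative only): a generic stats request id is used to
 * index this table once per stats type:
 *
 *	int fw_stat   = dp_stats_mapping_table[stats_id][STATS_FW];
 *	int host_stat = dp_stats_mapping_table[stats_id][STATS_HOST];
 *
 * TXRX_FW_STATS_INVALID / TXRX_HOST_STATS_INVALID mark requests that have
 * no counterpart on that side.
 */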
340 
341 /* MCL specific functions */
342 #ifdef CONFIG_MCL
343 /**
344  * dp_soc_get_mon_mask_for_interrupt_mode() - get mon mode mask for intr mode
345  * @soc: pointer to dp_soc handle
346  * @intr_ctx_num: interrupt context number for which mon mask is needed
347  *
348  * For MCL, monitor mode rings are processed in a timer context (polled).
349  * This function returns 0 because, in interrupt mode (softirq-based RX),
350  * we do not want to process monitor mode rings in a softirq.
351  *
352  * So, when packet log is enabled for SAP/STA/P2P modes, regular interrupt
353  * processing will not service monitor mode rings; they are handled in a
354  * separate timer context instead.
355  *
356  * Return: 0
357  */
358 static inline
359 uint32_t dp_soc_get_mon_mask_for_interrupt_mode(struct dp_soc *soc, int intr_ctx_num)
360 {
361 	return 0;
362 }
363 
364 /*
365  * dp_service_mon_rings() - timer handler to reap monitor rings;
366  * required because ppdu-end interrupts are not delivered in this mode
367  * @arg: SoC Handle
368  *
369  * Return: none
370  *
371  */
372 static void dp_service_mon_rings(void *arg)
373 {
374 	struct dp_soc *soc = (struct dp_soc *)arg;
375 	int ring = 0, work_done, mac_id;
376 	struct dp_pdev *pdev = NULL;
377 
378 	for  (ring = 0 ; ring < MAX_PDEV_CNT; ring++) {
379 		pdev = soc->pdev_list[ring];
380 		if (!pdev)
381 			continue;
382 		for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
383 			int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id,
384 								pdev->pdev_id);
385 			work_done = dp_mon_process(soc, mac_for_pdev,
386 						   QCA_NAPI_BUDGET);
387 
388 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
389 				  FL("Reaped %d descs from Monitor rings"),
390 				  work_done);
391 		}
392 	}
393 
394 	qdf_timer_mod(&soc->mon_reap_timer, DP_INTR_POLL_TIMER_MS);
395 }
396 
397 #ifndef REMOVE_PKT_LOG
398 /**
399  * dp_pkt_log_init() - API to initialize packet log
400  * @ppdev: physical device handle
401  * @scn: HIF context
402  *
403  * Return: none
404  */
405 void dp_pkt_log_init(struct cdp_pdev *ppdev, void *scn)
406 {
407 	struct dp_pdev *handle = (struct dp_pdev *)ppdev;
408 
409 	if (handle->pkt_log_init) {
410 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
411 			  "%s: Packet log already initialized", __func__);
412 		return;
413 	}
414 
415 	pktlog_sethandle(&handle->pl_dev, scn);
416 	pktlog_set_callback_regtype(PKTLOG_LITE_CALLBACK_REGISTRATION);
417 
418 	if (pktlogmod_init(scn)) {
419 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
420 			  "%s: pktlogmod_init failed", __func__);
421 		handle->pkt_log_init = false;
422 	} else {
423 		handle->pkt_log_init = true;
424 	}
425 }
426 
427 /**
428  * dp_pkt_log_con_service() - connect packet log service
429  * @ppdev: physical device handle
430  * @scn: device context
431  *
432  * Return: none
433  */
434 static void dp_pkt_log_con_service(struct cdp_pdev *ppdev, void *scn)
435 {
436 	struct dp_pdev *pdev = (struct dp_pdev *)ppdev;
437 
438 	dp_pkt_log_init((struct cdp_pdev *)pdev, scn);
439 	pktlog_htc_attach();
440 }
441 
442 /**
443  * dp_get_num_rx_contexts() - get number of RX contexts
444  * @soc_hdl: cdp opaque soc handle
445  *
446  * Return: number of RX contexts
447  */
448 static int dp_get_num_rx_contexts(struct cdp_soc_t *soc_hdl)
449 {
450 	int i;
451 	int num_rx_contexts = 0;
452 
453 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
454 
455 	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++)
456 		if (wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, i))
457 			num_rx_contexts++;
458 
459 	return num_rx_contexts;
460 }
461 
462 /**
463  * dp_pktlogmod_exit() - API to cleanup pktlog info
464  * @handle: Pdev handle
465  *
466  * Return: none
467  */
468 static void dp_pktlogmod_exit(struct dp_pdev *handle)
469 {
470 	void *scn = (void *)handle->soc->hif_handle;
471 
472 	if (!scn) {
473 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
474 			  "%s: Invalid hif(scn) handle", __func__);
475 		return;
476 	}
477 
478 	pktlogmod_exit(scn);
479 	handle->pkt_log_init = false;
480 }
481 #endif
482 #else
483 static void dp_pktlogmod_exit(struct dp_pdev *handle) { }
484 
485 /**
486  * dp_soc_get_mon_mask_for_interrupt_mode() - get mon mode mask for intr mode
487  * @soc: pointer to dp_soc handle
488  * @intr_ctx_num: interrupt context number for which mon mask is needed
489  *
490  * Return: mon mask value
491  */
492 static inline
493 uint32_t dp_soc_get_mon_mask_for_interrupt_mode(struct dp_soc *soc, int intr_ctx_num)
494 {
495 	return wlan_cfg_get_rx_mon_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
496 }
497 #endif
498 
499 /**
500  * dp_get_dp_vdev_from_cdp_vdev() - get dp_vdev from cdp_vdev by type-casting
501  * @cdp_opaque_vdev: pointer to cdp_vdev
502  *
503  * Return: pointer to dp_vdev
504  */
505 static
506 struct dp_vdev *dp_get_dp_vdev_from_cdp_vdev(struct cdp_vdev *cdp_opaque_vdev)
507 {
508 	return (struct dp_vdev *)cdp_opaque_vdev;
509 }
510 
511 
512 static int dp_peer_add_ast_wifi3(struct cdp_soc_t *soc_hdl,
513 					struct cdp_peer *peer_hdl,
514 					uint8_t *mac_addr,
515 					enum cdp_txrx_ast_entry_type type,
516 					uint32_t flags)
517 {
518 
519 	return dp_peer_add_ast((struct dp_soc *)soc_hdl,
520 				(struct dp_peer *)peer_hdl,
521 				mac_addr,
522 				type,
523 				flags);
524 }
525 
526 static int dp_peer_update_ast_wifi3(struct cdp_soc_t *soc_hdl,
527 						struct cdp_peer *peer_hdl,
528 						uint8_t *wds_macaddr,
529 						uint32_t flags)
530 {
531 	int status = -1;
532 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
533 	struct dp_ast_entry  *ast_entry = NULL;
534 	struct dp_peer *peer = (struct dp_peer *)peer_hdl;
535 
536 	qdf_spin_lock_bh(&soc->ast_lock);
537 	ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, wds_macaddr,
538 						    peer->vdev->pdev->pdev_id);
539 
540 	if (ast_entry) {
541 		status = dp_peer_update_ast(soc,
542 					    peer,
543 					    ast_entry, flags);
544 	}
545 
546 	qdf_spin_unlock_bh(&soc->ast_lock);
547 
548 	return status;
549 }
550 
551 /*
552  * dp_wds_reset_ast_wifi3() - Reset the is_active param for an AST entry
553  * @soc_hdl:		Datapath SOC handle
554  * @wds_macaddr:	WDS entry MAC Address
 * @vdev_handle:	opaque vdev handle
555  * Return: None
556  */
557 static void dp_wds_reset_ast_wifi3(struct cdp_soc_t *soc_hdl,
558 				   uint8_t *wds_macaddr, void *vdev_handle)
559 {
560 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
561 	struct dp_ast_entry *ast_entry = NULL;
562 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
563 
564 	qdf_spin_lock_bh(&soc->ast_lock);
565 	ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, wds_macaddr,
566 						    vdev->pdev->pdev_id);
567 
568 	if (ast_entry) {
569 		if ((ast_entry->type != CDP_TXRX_AST_TYPE_STATIC) &&
570 			(ast_entry->type != CDP_TXRX_AST_TYPE_SELF) &&
571 			(ast_entry->type != CDP_TXRX_AST_TYPE_STA_BSS)) {
572 			ast_entry->is_active = TRUE;
573 		}
574 	}
575 
576 	qdf_spin_unlock_bh(&soc->ast_lock);
577 }
578 
579 /*
580  * dp_wds_reset_ast_table_wifi3() - Reset the is_active param for all AST entries
581  * @soc_hdl:		Datapath SOC handle
 * @vdev_hdl:		opaque vdev handle (unused)
582  *
583  * Return: None
584  */
585 static void dp_wds_reset_ast_table_wifi3(struct cdp_soc_t  *soc_hdl,
586 					 void *vdev_hdl)
587 {
588 	struct dp_soc *soc = (struct dp_soc *) soc_hdl;
589 	struct dp_pdev *pdev;
590 	struct dp_vdev *vdev;
591 	struct dp_peer *peer;
592 	struct dp_ast_entry *ase, *temp_ase;
593 	int i;
594 
595 	qdf_spin_lock_bh(&soc->ast_lock);
596 
597 	for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) {
598 		pdev = soc->pdev_list[i];
599 		qdf_spin_lock_bh(&pdev->vdev_list_lock);
600 		DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
601 			DP_VDEV_ITERATE_PEER_LIST(vdev, peer) {
602 				DP_PEER_ITERATE_ASE_LIST(peer, ase, temp_ase) {
603 					if ((ase->type ==
604 						CDP_TXRX_AST_TYPE_STATIC) ||
605 						(ase->type ==
606 						CDP_TXRX_AST_TYPE_SELF) ||
607 						(ase->type ==
608 						CDP_TXRX_AST_TYPE_STA_BSS))
609 						continue;
610 					ase->is_active = TRUE;
611 				}
612 			}
613 		}
614 		qdf_spin_unlock_bh(&pdev->vdev_list_lock);
615 	}
616 
617 	qdf_spin_unlock_bh(&soc->ast_lock);
618 }
619 
620 /*
621  * dp_wds_flush_ast_table_wifi3() - Delete all WDS and HMWDS AST entries
622  * @soc_hdl:		Datapath SOC handle
623  *
624  * Return: None
625  */
626 static void dp_wds_flush_ast_table_wifi3(struct cdp_soc_t  *soc_hdl)
627 {
628 	struct dp_soc *soc = (struct dp_soc *) soc_hdl;
629 	struct dp_pdev *pdev;
630 	struct dp_vdev *vdev;
631 	struct dp_peer *peer;
632 	struct dp_ast_entry *ase, *temp_ase;
633 	int i;
634 
635 	qdf_spin_lock_bh(&soc->ast_lock);
636 
637 	for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) {
638 		pdev = soc->pdev_list[i];
639 		qdf_spin_lock_bh(&pdev->vdev_list_lock);
640 		DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
641 			DP_VDEV_ITERATE_PEER_LIST(vdev, peer) {
642 				DP_PEER_ITERATE_ASE_LIST(peer, ase, temp_ase) {
643 					if ((ase->type ==
644 						CDP_TXRX_AST_TYPE_STATIC) ||
645 						(ase->type ==
646 						 CDP_TXRX_AST_TYPE_SELF) ||
647 						(ase->type ==
648 						 CDP_TXRX_AST_TYPE_STA_BSS))
649 						continue;
650 					dp_peer_del_ast(soc, ase);
651 				}
652 			}
653 		}
654 		qdf_spin_unlock_bh(&pdev->vdev_list_lock);
655 	}
656 
657 	qdf_spin_unlock_bh(&soc->ast_lock);
658 }
659 
660 /**
661  * dp_peer_get_ast_info_by_soc_wifi3() - search the soc AST hash table
662  *                                       and return ast entry information
663  *                                       of first ast entry found in the
664  *                                       table with given mac address
665  *
666  * @soc_hdl : data path soc handle
667  * @ast_mac_addr : AST entry mac address
668  * @ast_entry_info : ast entry information
669  *
670  * return : true if ast entry found with ast_mac_addr
671  *          false if ast entry not found
672  */
673 static bool dp_peer_get_ast_info_by_soc_wifi3
674 	(struct cdp_soc_t *soc_hdl,
675 	 uint8_t *ast_mac_addr,
676 	 struct cdp_ast_entry_info *ast_entry_info)
677 {
678 	struct dp_ast_entry *ast_entry;
679 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
680 
681 	qdf_spin_lock_bh(&soc->ast_lock);
682 
683 	ast_entry = dp_peer_ast_hash_find_soc(soc, ast_mac_addr);
684 
685 	if (ast_entry && !ast_entry->delete_in_progress) {
686 		ast_entry_info->type = ast_entry->type;
687 		ast_entry_info->pdev_id = ast_entry->pdev_id;
688 		ast_entry_info->vdev_id = ast_entry->vdev_id;
689 		ast_entry_info->peer_id = ast_entry->peer->peer_ids[0];
690 		qdf_mem_copy(&ast_entry_info->peer_mac_addr[0],
691 			     &ast_entry->peer->mac_addr.raw[0],
692 			     DP_MAC_ADDR_LEN);
693 		qdf_spin_unlock_bh(&soc->ast_lock);
694 		return true;
695 	}
696 
697 	qdf_spin_unlock_bh(&soc->ast_lock);
698 	return false;
699 }
700 
701 /**
702  * dp_peer_get_ast_info_by_pdevid_wifi3() - search the soc AST hash table
703  *                                          and return ast entry information
704  *                                          if mac address and pdev_id matches
705  *
706  * @soc_hdl : data path soc handle
707  * @ast_mac_addr : AST entry mac address
708  * @pdev_id : pdev_id
709  * @ast_entry_info : ast entry information
710  *
711  * return : true if ast entry found with ast_mac_addr
712  *          false if ast entry not found
713  */
714 static bool dp_peer_get_ast_info_by_pdevid_wifi3
715 		(struct cdp_soc_t *soc_hdl,
716 		 uint8_t *ast_mac_addr,
717 		 uint8_t pdev_id,
718 		 struct cdp_ast_entry_info *ast_entry_info)
719 {
720 	struct dp_ast_entry *ast_entry;
721 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
722 
723 	qdf_spin_lock_bh(&soc->ast_lock);
724 
725 	ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, ast_mac_addr, pdev_id);
726 
727 	if (ast_entry && !ast_entry->delete_in_progress) {
728 		ast_entry_info->type = ast_entry->type;
729 		ast_entry_info->pdev_id = ast_entry->pdev_id;
730 		ast_entry_info->vdev_id = ast_entry->vdev_id;
731 		ast_entry_info->peer_id = ast_entry->peer->peer_ids[0];
732 		qdf_mem_copy(&ast_entry_info->peer_mac_addr[0],
733 			     &ast_entry->peer->mac_addr.raw[0],
734 			     DP_MAC_ADDR_LEN);
735 		qdf_spin_unlock_bh(&soc->ast_lock);
736 		return true;
737 	}
738 
739 	qdf_spin_unlock_bh(&soc->ast_lock);
740 	return false;
741 }
742 
743 /**
744  * dp_peer_ast_entry_del_by_soc() - delete the ast entry from soc AST hash table
745  *                            with given mac address
746  *
747  * @soc_handle : data path soc handle
748  * @mac_addr : AST entry mac address
749  * @callback : callback function to be called on ast delete response from FW
750  * @cookie : argument to be passed to callback
751  *
752  * return : QDF_STATUS_SUCCESS if ast entry found with mac_addr and delete
753  *          is sent
754  *          QDF_STATUS_E_INVAL if ast entry not found
755  */
756 static QDF_STATUS dp_peer_ast_entry_del_by_soc(struct cdp_soc_t *soc_handle,
757 					       uint8_t *mac_addr,
758 					       txrx_ast_free_cb callback,
759 					       void *cookie)
760 
761 {
762 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
763 	struct dp_ast_entry *ast_entry;
764 	txrx_ast_free_cb cb = NULL;
765 	void *arg = NULL;
766 
767 	qdf_spin_lock_bh(&soc->ast_lock);
768 	ast_entry = dp_peer_ast_hash_find_soc(soc, mac_addr);
769 	if (!ast_entry) {
770 		qdf_spin_unlock_bh(&soc->ast_lock);
771 		return -QDF_STATUS_E_INVAL;
772 	}
773 
774 	if (ast_entry->callback) {
775 		cb = ast_entry->callback;
776 		arg = ast_entry->cookie;
777 	}
778 
779 	ast_entry->callback = callback;
780 	ast_entry->cookie = cookie;
781 
782 	/*
783 	 * If delete_in_progress is set, an AST delete was already sent to
784 	 * the target and the host is waiting for a response; do not send
785 	 * the delete again.
786 	 */
787 	if (!ast_entry->delete_in_progress)
788 		dp_peer_del_ast(soc, ast_entry);
789 
790 	qdf_spin_unlock_bh(&soc->ast_lock);
791 	if (cb) {
792 		cb(soc->ctrl_psoc,
793 		   soc,
794 		   arg,
795 		   CDP_TXRX_AST_DELETE_IN_PROGRESS);
796 	}
797 	return QDF_STATUS_SUCCESS;
798 }
799 
800 /**
801  * dp_peer_ast_entry_del_by_pdev() - delete the ast entry from soc AST hash
802  *                                   table if mac address and pdev_id matches
803  *
804  * @soc_handle : data path soc handle
805  * @mac_addr : AST entry mac address
806  * @pdev_id : pdev id
807  * @callback : callback function to be called on ast delete response from FW
808  * @cookie : argument to be passed to callback
809  *
810  * return : QDF_STATUS_SUCCESS if ast entry found with mac_addr and delete
811  *          is sent
812  *          QDF_STATUS_E_INVAL if ast entry not found
813  */
814 
815 static QDF_STATUS dp_peer_ast_entry_del_by_pdev(struct cdp_soc_t *soc_handle,
816 						uint8_t *mac_addr,
817 						uint8_t pdev_id,
818 						txrx_ast_free_cb callback,
819 						void *cookie)
820 
821 {
822 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
823 	struct dp_ast_entry *ast_entry;
824 	txrx_ast_free_cb cb = NULL;
825 	void *arg = NULL;
826 
827 	qdf_spin_lock_bh(&soc->ast_lock);
828 	ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, mac_addr, pdev_id);
829 
830 	if (!ast_entry) {
831 		qdf_spin_unlock_bh(&soc->ast_lock);
832 		return -QDF_STATUS_E_INVAL;
833 	}
834 
835 	if (ast_entry->callback) {
836 		cb = ast_entry->callback;
837 		arg = ast_entry->cookie;
838 	}
839 
840 	ast_entry->callback = callback;
841 	ast_entry->cookie = cookie;
842 
843 	/*
844 	 * If delete_in_progress is set, an AST delete was already sent to
845 	 * the target and the host is waiting for a response; do not send
846 	 * the delete again.
847 	 */
848 	if (!ast_entry->delete_in_progress)
849 		dp_peer_del_ast(soc, ast_entry);
850 
851 	qdf_spin_unlock_bh(&soc->ast_lock);
852 
853 	if (cb) {
854 		cb(soc->ctrl_psoc,
855 		   soc,
856 		   arg,
857 		   CDP_TXRX_AST_DELETE_IN_PROGRESS);
858 	}
859 	return QDF_STATUS_SUCCESS;
860 }
861 
862 /**
863  * dp_srng_find_ring_in_mask() - find which ext_group a ring belongs to
864  * @ring_num: ring num of the ring being queried
865  * @grp_mask: the grp_mask array for the ring type in question.
866  *
867  * The grp_mask array is indexed by group number and the bit fields correspond
868  * to ring numbers.  We are finding which interrupt group a ring belongs to.
869  *
870  * Return: the index of the grp_mask entry that contains the ring number,
871  * or -QDF_STATUS_E_NOENT if no entry is found
872  */
873 static int dp_srng_find_ring_in_mask(int ring_num, int *grp_mask)
874 {
875 	int ext_group_num;
876 	int mask = 1 << ring_num;
877 
878 	for (ext_group_num = 0; ext_group_num < WLAN_CFG_INT_NUM_CONTEXTS;
879 	     ext_group_num++) {
880 		if (mask & grp_mask[ext_group_num])
881 			return ext_group_num;
882 	}
883 
884 	return -QDF_STATUS_E_NOENT;
885 }
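/*
 * Worked example (illustrative only): for ring_num 1 the search mask is
 * 1 << 1 = 0x2; with grp_mask[] = { 0x1, 0x6, 0x0, ... } the loop matches
 * at index 1, so ring 1 is serviced by ext_group (interrupt context) 1.
 */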
886 
887 static int dp_srng_calculate_msi_group(struct dp_soc *soc,
888 				       enum hal_ring_type ring_type,
889 				       int ring_num)
890 {
891 	int *grp_mask;
892 
893 	switch (ring_type) {
894 	case WBM2SW_RELEASE:
895 		/* dp_tx_comp_handler - soc->tx_comp_ring */
896 		if (ring_num < 3)
897 			grp_mask = &soc->wlan_cfg_ctx->int_tx_ring_mask[0];
898 
899 		/* dp_rx_wbm_err_process - soc->rx_rel_ring */
900 		else if (ring_num == 3) {
901 			/* sw treats this as a separate ring type */
902 			grp_mask = &soc->wlan_cfg_ctx->
903 				int_rx_wbm_rel_ring_mask[0];
904 			ring_num = 0;
905 		} else {
906 			qdf_assert(0);
907 			return -QDF_STATUS_E_NOENT;
908 		}
909 	break;
910 
911 	case REO_EXCEPTION:
912 		/* dp_rx_err_process - &soc->reo_exception_ring */
913 		grp_mask = &soc->wlan_cfg_ctx->int_rx_err_ring_mask[0];
914 	break;
915 
916 	case REO_DST:
917 		/* dp_rx_process - soc->reo_dest_ring */
918 		grp_mask = &soc->wlan_cfg_ctx->int_rx_ring_mask[0];
919 	break;
920 
921 	case REO_STATUS:
922 		/* dp_reo_status_ring_handler - soc->reo_status_ring */
923 		grp_mask = &soc->wlan_cfg_ctx->int_reo_status_ring_mask[0];
924 	break;
925 
926 	/* dp_rx_mon_status_srng_process - pdev->rxdma_mon_status_ring*/
927 	case RXDMA_MONITOR_STATUS:
928 	/* dp_rx_mon_dest_process - pdev->rxdma_mon_dst_ring */
929 	case RXDMA_MONITOR_DST:
930 		/* dp_mon_process */
931 		grp_mask = &soc->wlan_cfg_ctx->int_rx_mon_ring_mask[0];
932 	break;
933 	case RXDMA_DST:
934 		/* dp_rxdma_err_process */
935 		grp_mask = &soc->wlan_cfg_ctx->int_rxdma2host_ring_mask[0];
936 	break;
937 
938 	case RXDMA_BUF:
939 		grp_mask = &soc->wlan_cfg_ctx->int_host2rxdma_ring_mask[0];
940 	break;
941 
942 	case RXDMA_MONITOR_BUF:
943 		/* TODO: support low_thresh interrupt */
944 		return -QDF_STATUS_E_NOENT;
945 	break;
946 
947 	case TCL_DATA:
948 	case TCL_CMD:
949 	case REO_CMD:
950 	case SW2WBM_RELEASE:
951 	case WBM_IDLE_LINK:
952 		/* normally empty SW_TO_HW rings */
953 		return -QDF_STATUS_E_NOENT;
954 	break;
955 
956 	case TCL_STATUS:
957 	case REO_REINJECT:
958 		/* misc unused rings */
959 		return -QDF_STATUS_E_NOENT;
960 	break;
961 
962 	case CE_SRC:
963 	case CE_DST:
964 	case CE_DST_STATUS:
965 		/* CE_rings - currently handled by hif */
966 	default:
967 		return -QDF_STATUS_E_NOENT;
968 	break;
969 	}
970 
971 	return dp_srng_find_ring_in_mask(ring_num, grp_mask);
972 }
973 
974 static void dp_srng_msi_setup(struct dp_soc *soc, struct hal_srng_params
975 			      *ring_params, int ring_type, int ring_num)
976 {
977 	int msi_group_number;
978 	int msi_data_count;
979 	int ret;
980 	uint32_t msi_data_start, msi_irq_start, addr_low, addr_high;
981 
982 	ret = pld_get_user_msi_assignment(soc->osdev->dev, "DP",
983 					    &msi_data_count, &msi_data_start,
984 					    &msi_irq_start);
985 
986 	if (ret)
987 		return;
988 
989 	msi_group_number = dp_srng_calculate_msi_group(soc, ring_type,
990 						       ring_num);
991 	if (msi_group_number < 0) {
992 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW,
993 			FL("ring not part of an ext_group; ring_type: %d, ring_num %d"),
994 			ring_type, ring_num);
995 		ring_params->msi_addr = 0;
996 		ring_params->msi_data = 0;
997 		return;
998 	}
999 
1000 	if (msi_group_number > msi_data_count) {
1001 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_WARN,
1002 			FL("2 msi_groups will share an msi; msi_group_num %d"),
1003 			msi_group_number);
1004 
1005 		QDF_ASSERT(0);
1006 	}
1007 
1008 	pld_get_msi_address(soc->osdev->dev, &addr_low, &addr_high);
1009 
1010 	ring_params->msi_addr = addr_low;
1011 	ring_params->msi_addr |= (qdf_dma_addr_t)(((uint64_t)addr_high) << 32);
1012 	ring_params->msi_data = (msi_group_number % msi_data_count)
1013 		+ msi_data_start;
1014 	ring_params->flags |= HAL_SRNG_MSI_INTR;
1015 }
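/*
 * Worked example (illustrative only): with msi_data_count = 3,
 * msi_data_start = 1 and msi_group_number = 4:
 *
 *	msi_data = (4 % 3) + 1 = 2
 *
 * so MSI vectors wrap around when there are more interrupt groups than
 * available MSI vectors, which is exactly the sharing case warned about
 * above.
 */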
1016 
1017 /**
1018  * dp_print_ast_stats() - Dump AST table contents
1019  * @soc: Datapath soc handle
1020  *
1021  * return void
1022  */
1023 #ifdef FEATURE_AST
1024 void dp_print_ast_stats(struct dp_soc *soc)
1025 {
1026 	uint8_t i;
1027 	uint8_t num_entries = 0;
1028 	struct dp_vdev *vdev;
1029 	struct dp_pdev *pdev;
1030 	struct dp_peer *peer;
1031 	struct dp_ast_entry *ase, *tmp_ase;
1032 	char type[CDP_TXRX_AST_TYPE_MAX][10] = {
1033 			"NONE", "STATIC", "SELF", "WDS", "MEC", "HMWDS", "BSS",
1034 			"DA", "HMWDS_SEC"};
1035 
1036 	DP_PRINT_STATS("AST Stats:");
1037 	DP_PRINT_STATS("	Entries Added   = %d", soc->stats.ast.added);
1038 	DP_PRINT_STATS("	Entries Deleted = %d", soc->stats.ast.deleted);
1039 	DP_PRINT_STATS("	Entries Agedout = %d", soc->stats.ast.aged_out);
1040 	DP_PRINT_STATS("AST Table:");
1041 
1042 	qdf_spin_lock_bh(&soc->ast_lock);
1043 	for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) {
1044 		pdev = soc->pdev_list[i];
1045 		qdf_spin_lock_bh(&pdev->vdev_list_lock);
1046 		DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
1047 			DP_VDEV_ITERATE_PEER_LIST(vdev, peer) {
1048 				DP_PEER_ITERATE_ASE_LIST(peer, ase, tmp_ase) {
1049 					DP_PRINT_STATS("%6d mac_addr = %pM"
1050 							" peer_mac_addr = %pM"
1051 							" peer_id = %u"
1052 							" type = %s"
1053 							" next_hop = %d"
1054 							" is_active = %d"
1055 							" is_bss = %d"
1056 							" ast_idx = %d"
1057 							" ast_hash = %d"
1058 							" delete_in_progress = %d"
1059 							" pdev_id = %d"
1060 							" vdev_id = %d",
1061 							++num_entries,
1062 							ase->mac_addr.raw,
1063 							ase->peer->mac_addr.raw,
1064 							ase->peer->peer_ids[0],
1065 							type[ase->type],
1066 							ase->next_hop,
1067 							ase->is_active,
1068 							ase->is_bss,
1069 							ase->ast_idx,
1070 							ase->ast_hash_value,
1071 							ase->delete_in_progress,
1072 							ase->pdev_id,
1073 							ase->vdev_id);
1074 				}
1075 			}
1076 		}
1077 		qdf_spin_unlock_bh(&pdev->vdev_list_lock);
1078 	}
1079 	qdf_spin_unlock_bh(&soc->ast_lock);
1080 }
1081 #else
1082 void dp_print_ast_stats(struct dp_soc *soc)
1083 {
1084 	DP_PRINT_STATS("AST Stats not available. Enable FEATURE_AST");
1085 	return;
1086 }
1087 #endif
1088 
1089 /**
1090  *  dp_print_peer_table() - Dump all Peer stats
1091  * @vdev: Datapath Vdev handle
1092  *
1093  * return void
1094  */
1095 static void dp_print_peer_table(struct dp_vdev *vdev)
1096 {
1097 	struct dp_peer *peer = NULL;
1098 
1099 	DP_PRINT_STATS("Dumping Peer Table  Stats:");
1100 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
1105 		DP_PRINT_STATS("    peer_mac_addr = %pM"
1106 			       " nawds_enabled = %d"
1107 			       " bss_peer = %d"
1108 			       " wapi = %d"
1109 			       " wds_enabled = %d"
1110 			       " delete in progress = %d"
1111 			       " peer id = %d",
1112 			       peer->mac_addr.raw,
1113 			       peer->nawds_enabled,
1114 			       peer->bss_peer,
1115 			       peer->wapi,
1116 			       peer->wds_enabled,
1117 			       peer->delete_in_progress,
1118 			       peer->peer_ids[0]);
1119 	}
1120 }
1121 
1122 /*
1123  * dp_srng_setup() - Internal function to setup SRNG rings used by data path
1124  */
1125 static int dp_srng_setup(struct dp_soc *soc, struct dp_srng *srng,
1126 	int ring_type, int ring_num, int mac_id, uint32_t num_entries)
1127 {
1128 	void *hal_soc = soc->hal_soc;
1129 	uint32_t entry_size = hal_srng_get_entrysize(hal_soc, ring_type);
1130 	/* TODO: See if we should get align size from hal */
1131 	uint32_t ring_base_align = 8;
1132 	struct hal_srng_params ring_params;
1133 	uint32_t max_entries = hal_srng_max_entries(hal_soc, ring_type);
1134 
1135 	/* TODO: Currently hal layer takes care of endianness related settings.
1136 	 * See if these settings need to passed from DP layer
1137 	 */
1138 	ring_params.flags = 0;
1139 
1140 	num_entries = (num_entries > max_entries) ? max_entries : num_entries;
1141 	srng->hal_srng = NULL;
1142 	srng->alloc_size = (num_entries * entry_size) + ring_base_align - 1;
1143 	srng->num_entries = num_entries;
1144 
1145 	if (!soc->dp_soc_reinit) {
1146 		srng->base_vaddr_unaligned =
1147 			qdf_mem_alloc_consistent(soc->osdev,
1148 						 soc->osdev->dev,
1149 						 srng->alloc_size,
1150 						 &srng->base_paddr_unaligned);
1151 	}
1152 
1153 	if (!srng->base_vaddr_unaligned) {
1154 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1155 			FL("alloc failed - ring_type: %d, ring_num %d"),
1156 			ring_type, ring_num);
1157 		return QDF_STATUS_E_NOMEM;
1158 	}
1159 
1160 	ring_params.ring_base_vaddr = srng->base_vaddr_unaligned +
1161 		((unsigned long)srng->base_vaddr_unaligned % ring_base_align);
1162 	ring_params.ring_base_paddr = srng->base_paddr_unaligned +
1163 		((unsigned long)(ring_params.ring_base_vaddr) -
1164 		(unsigned long)srng->base_vaddr_unaligned);
1165 	ring_params.num_entries = num_entries;
1166 
1167 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW,
1168 		  FL("Ring type: %d, num:%d vaddr %pK paddr %pK entries %u"),
1169 		  ring_type, ring_num, (void *)ring_params.ring_base_vaddr,
1170 		  (void *)ring_params.ring_base_paddr, ring_params.num_entries);
1171 
1172 	if (soc->intr_mode == DP_INTR_MSI) {
1173 		dp_srng_msi_setup(soc, &ring_params, ring_type, ring_num);
1174 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
1175 			  FL("Using MSI for ring_type: %d, ring_num %d"),
1176 			  ring_type, ring_num);
1177 
1178 	} else {
1179 		ring_params.msi_data = 0;
1180 		ring_params.msi_addr = 0;
1181 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
1182 			  FL("Skipping MSI for ring_type: %d, ring_num %d"),
1183 			  ring_type, ring_num);
1184 	}
1185 
1186 	/*
1187 	 * Setup interrupt timer and batch counter thresholds for
1188 	 * interrupt mitigation based on ring type
1189 	 */
1190 	if (ring_type == REO_DST) {
1191 		ring_params.intr_timer_thres_us =
1192 			wlan_cfg_get_int_timer_threshold_rx(soc->wlan_cfg_ctx);
1193 		ring_params.intr_batch_cntr_thres_entries =
1194 			wlan_cfg_get_int_batch_threshold_rx(soc->wlan_cfg_ctx);
1195 	} else if (ring_type == WBM2SW_RELEASE && (ring_num < 3)) {
1196 		ring_params.intr_timer_thres_us =
1197 			wlan_cfg_get_int_timer_threshold_tx(soc->wlan_cfg_ctx);
1198 		ring_params.intr_batch_cntr_thres_entries =
1199 			wlan_cfg_get_int_batch_threshold_tx(soc->wlan_cfg_ctx);
1200 	} else {
1201 		ring_params.intr_timer_thres_us =
1202 			wlan_cfg_get_int_timer_threshold_other(soc->wlan_cfg_ctx);
1203 		ring_params.intr_batch_cntr_thres_entries =
1204 			wlan_cfg_get_int_batch_threshold_other(soc->wlan_cfg_ctx);
1205 	}
1206 
1207 	/* Enable low threshold interrupts for rx buffer rings (regular and
1208 	 * monitor buffer rings).
1209 	 * TODO: See if this is required for any other ring
1210 	 */
1211 	if ((ring_type == RXDMA_BUF) || (ring_type == RXDMA_MONITOR_BUF) ||
1212 		(ring_type == RXDMA_MONITOR_STATUS)) {
1213 		/* TODO: Setting low threshold to 1/8th of ring size
1214 		 * see if this needs to be configurable
1215 		 */
1216 		ring_params.low_threshold = num_entries >> 3;
1217 		ring_params.flags |= HAL_SRNG_LOW_THRES_INTR_ENABLE;
1218 		ring_params.intr_timer_thres_us =
1219 			wlan_cfg_get_int_timer_threshold_rx(soc->wlan_cfg_ctx);
1220 		ring_params.intr_batch_cntr_thres_entries = 0;
1221 	}
1222 
1223 	srng->hal_srng = hal_srng_setup(hal_soc, ring_type, ring_num,
1224 		mac_id, &ring_params);
1225 
1226 	if (!srng->hal_srng) {
1227 		qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
1228 				srng->alloc_size,
1229 				srng->base_vaddr_unaligned,
1230 				srng->base_paddr_unaligned, 0);
		/* setup failed; report failure rather than silent success */
		return QDF_STATUS_E_FAILURE;
1231 	}
1232 
1233 	return 0;
1234 }
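/*
 * Sizing sketch (illustrative only): the requested depth is clamped to the
 * HAL maximum and the allocation carries ring_base_align - 1 bytes of
 * slack, e.g. for entry_size = 8, num_entries = 1024, ring_base_align = 8:
 *
 *	alloc_size = (1024 * 8) + 8 - 1 = 8199 bytes
 *
 * which guarantees an 8-byte-aligned ring base fits inside the buffer.
 */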
1235 
1236 /*
1237  * dp_srng_deinit() - Internal function to deinit SRNG rings used by data path
1238  * @soc: DP SOC handle
1239  * @srng: source ring structure
1240  * @ring_type: type of ring
1241  * @ring_num: ring number
1242  *
1243  * Return: None
1244  */
1245 static void dp_srng_deinit(struct dp_soc *soc, struct dp_srng *srng,
1246 			   int ring_type, int ring_num)
1247 {
1248 	if (!srng->hal_srng) {
1249 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1250 			  FL("Ring type: %d, num:%d not setup"),
1251 			  ring_type, ring_num);
1252 		return;
1253 	}
1254 
1255 	hal_srng_cleanup(soc->hal_soc, srng->hal_srng);
1256 	srng->hal_srng = NULL;
1257 }
1258 
1259 /**
1260  * dp_srng_cleanup - Internal function to cleanup SRNG rings used by data path
1261  * Any buffers allocated and attached to ring entries are expected to be freed
1262  * before calling this function.
1263  */
1264 static void dp_srng_cleanup(struct dp_soc *soc, struct dp_srng *srng,
1265 	int ring_type, int ring_num)
1266 {
1267 	if (!soc->dp_soc_reinit) {
1268 		if (!srng->hal_srng && (srng->alloc_size == 0)) {
1269 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1270 				  FL("Ring type: %d, num:%d not setup"),
1271 				  ring_type, ring_num);
1272 			return;
1273 		}
1274 
1275 		if (srng->hal_srng) {
1276 			hal_srng_cleanup(soc->hal_soc, srng->hal_srng);
1277 			srng->hal_srng = NULL;
1278 		}
1279 	}
1280 
1281 	if (srng->alloc_size) {
1282 		qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
1283 					srng->alloc_size,
1284 					srng->base_vaddr_unaligned,
1285 					srng->base_paddr_unaligned, 0);
1286 		srng->alloc_size = 0;
1287 	}
1288 }
1289 
1290 /* TODO: Need this interface from HIF */
1291 void *hif_get_hal_handle(void *hif_handle);
1292 
1293 /*
1294  * dp_service_srngs() - Top level interrupt handler for DP Ring interrupts
1295  * @dp_ctx: DP interrupt context (struct dp_intr) handle
1296  * @budget: Number of frames/descriptors that can be processed in one shot
1297  *
1298  * Return: remaining budget/quota for the soc device
1299  */
1300 static uint32_t dp_service_srngs(void *dp_ctx, uint32_t dp_budget)
1301 {
1302 	struct dp_intr *int_ctx = (struct dp_intr *)dp_ctx;
1303 	struct dp_soc *soc = int_ctx->soc;
1304 	int ring = 0;
1305 	uint32_t work_done  = 0;
1306 	int budget = dp_budget;
1307 	uint8_t tx_mask = int_ctx->tx_ring_mask;
1308 	uint8_t rx_mask = int_ctx->rx_ring_mask;
1309 	uint8_t rx_err_mask = int_ctx->rx_err_ring_mask;
1310 	uint8_t rx_wbm_rel_mask = int_ctx->rx_wbm_rel_ring_mask;
1311 	uint8_t reo_status_mask = int_ctx->reo_status_ring_mask;
1312 	uint32_t remaining_quota = dp_budget;
1313 	struct dp_pdev *pdev = NULL;
1314 	int mac_id;
1315 
1316 	/* Process Tx completion interrupts first to return back buffers */
1317 	while (tx_mask) {
1318 		if (tx_mask & 0x1) {
1319 			work_done = dp_tx_comp_handler(soc,
1320 					soc->tx_comp_ring[ring].hal_srng,
1321 					remaining_quota);
1322 
1323 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
1324 				  "tx mask 0x%x ring %d, budget %d, work_done %d",
1325 				  tx_mask, ring, budget, work_done);
1326 
1327 			budget -= work_done;
1328 			if (budget <= 0)
1329 				goto budget_done;
1330 
1331 			remaining_quota = budget;
1332 		}
1333 		tx_mask = tx_mask >> 1;
1334 		ring++;
1335 	}
1336 
1337 
1338 	/* Process REO Exception ring interrupt */
1339 	if (rx_err_mask) {
1340 		work_done = dp_rx_err_process(soc,
1341 				soc->reo_exception_ring.hal_srng,
1342 				remaining_quota);
1343 
1344 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
1345 			"REO Exception Ring: work_done %d budget %d",
1346 			work_done, budget);
1347 
1348 		budget -=  work_done;
1349 		if (budget <= 0) {
1350 			goto budget_done;
1351 		}
1352 		remaining_quota = budget;
1353 	}
1354 
1355 	/* Process Rx WBM release ring interrupt */
1356 	if (rx_wbm_rel_mask) {
1357 		work_done = dp_rx_wbm_err_process(soc,
1358 				soc->rx_rel_ring.hal_srng, remaining_quota);
1359 
1360 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
1361 			"WBM Release Ring: work_done %d budget %d",
1362 			work_done, budget);
1363 
1364 		budget -=  work_done;
1365 		if (budget <= 0) {
1366 			goto budget_done;
1367 		}
1368 		remaining_quota = budget;
1369 	}
1370 
1371 	/* Process Rx interrupts */
1372 	if (rx_mask) {
1373 		for (ring = 0; ring < soc->num_reo_dest_rings; ring++) {
1374 			if (rx_mask & (1 << ring)) {
1375 				work_done = dp_rx_process(int_ctx,
1376 					    soc->reo_dest_ring[ring].hal_srng,
1377 					    ring,
1378 					    remaining_quota);
1379 
1380 				QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
1381 					"rx mask 0x%x ring %d, work_done %d budget %d",
1382 					rx_mask, ring, work_done, budget);
1383 
1384 				budget -=  work_done;
1385 				if (budget <= 0)
1386 					goto budget_done;
1387 				remaining_quota = budget;
1388 			}
1389 		}
1390 	}
1391 
1392 	if (reo_status_mask)
1393 		dp_reo_status_ring_handler(soc);
1394 
1395 	/* Process LMAC interrupts */
1396 	for  (ring = 0 ; ring < MAX_PDEV_CNT; ring++) {
1397 		pdev = soc->pdev_list[ring];
1398 		if (pdev == NULL)
1399 			continue;
1400 		for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
1401 			int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id,
1402 								pdev->pdev_id);
1403 
1404 			if (int_ctx->rx_mon_ring_mask & (1 << mac_for_pdev)) {
1405 				work_done = dp_mon_process(soc, mac_for_pdev,
1406 						remaining_quota);
1407 				budget -= work_done;
1408 				if (budget <= 0)
1409 					goto budget_done;
1410 				remaining_quota = budget;
1411 			}
1412 
1413 			if (int_ctx->rxdma2host_ring_mask &
1414 					(1 << mac_for_pdev)) {
1415 				work_done = dp_rxdma_err_process(soc,
1416 							mac_for_pdev,
1417 							remaining_quota);
1418 				budget -=  work_done;
1419 				if (budget <= 0)
1420 					goto budget_done;
1421 				remaining_quota = budget;
1422 			}
1423 
1424 			if (int_ctx->host2rxdma_ring_mask &
1425 						(1 << mac_for_pdev)) {
1426 				union dp_rx_desc_list_elem_t *desc_list = NULL;
1427 				union dp_rx_desc_list_elem_t *tail = NULL;
1428 				struct dp_srng *rx_refill_buf_ring =
1429 					&pdev->rx_refill_buf_ring;
1430 
1431 				DP_STATS_INC(pdev, replenish.low_thresh_intrs,
1432 						1);
1433 				dp_rx_buffers_replenish(soc, mac_for_pdev,
1434 					rx_refill_buf_ring,
1435 					&soc->rx_desc_buf[mac_for_pdev], 0,
1436 					&desc_list, &tail);
1437 			}
1438 		}
1439 	}
1440 
1441 	qdf_lro_flush(int_ctx->lro_ctx);
1442 
1443 budget_done:
1444 	return dp_budget - budget;
1445 }
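/*
 * Budget accounting sketch (illustrative only): with dp_budget = 64, if TX
 * completion reaps 20 descriptors and the REO exception ring reaps 10,
 * then budget = 64 - 20 - 10 = 34 remains for the later rings, and the
 * function reports dp_budget - budget = 30 as work done to the NAPI layer.
 */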
1446 
1447 /* dp_interrupt_timer() - timer handler to poll DP interrupts
1448  *
1449  * @arg: SoC Handle
1450  *
1451  * Return: none
1452  *
1453  */
1454 static void dp_interrupt_timer(void *arg)
1455 {
1456 	struct dp_soc *soc = (struct dp_soc *) arg;
1457 	int i;
1458 
1459 	if (qdf_atomic_read(&soc->cmn_init_done)) {
1460 		for (i = 0;
1461 			i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++)
1462 			dp_service_srngs(&soc->intr_ctx[i], 0xffff);
1463 
1464 		qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
1465 	}
1466 }
1467 
1468 /*
1469  * dp_soc_attach_poll() - Register handlers for DP interrupts
1470  * @txrx_soc: DP SOC handle
1471  *
1472  * Host driver will register for "DP_NUM_INTERRUPT_CONTEXTS" number of NAPI
1473  * contexts. Each NAPI context will have a tx_ring_mask, rx_ring_mask, and
1474  * rx_mon_ring_mask to indicate the rings that are processed by the handler.
1475  *
1476  * Return: 0 for success, nonzero for failure.
1477  */
1478 static QDF_STATUS dp_soc_attach_poll(void *txrx_soc)
1479 {
1480 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
1481 	int i;
1482 
1483 	soc->intr_mode = DP_INTR_POLL;
1484 
1485 	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
1486 		soc->intr_ctx[i].dp_intr_id = i;
1487 		soc->intr_ctx[i].tx_ring_mask =
1488 			wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, i);
1489 		soc->intr_ctx[i].rx_ring_mask =
1490 			wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, i);
1491 		soc->intr_ctx[i].rx_mon_ring_mask =
1492 			wlan_cfg_get_rx_mon_ring_mask(soc->wlan_cfg_ctx, i);
1493 		soc->intr_ctx[i].rx_err_ring_mask =
1494 			wlan_cfg_get_rx_err_ring_mask(soc->wlan_cfg_ctx, i);
1495 		soc->intr_ctx[i].rx_wbm_rel_ring_mask =
1496 			wlan_cfg_get_rx_wbm_rel_ring_mask(soc->wlan_cfg_ctx, i);
1497 		soc->intr_ctx[i].reo_status_ring_mask =
1498 			wlan_cfg_get_reo_status_ring_mask(soc->wlan_cfg_ctx, i);
1499 		soc->intr_ctx[i].rxdma2host_ring_mask =
1500 			wlan_cfg_get_rxdma2host_ring_mask(soc->wlan_cfg_ctx, i);
1501 		soc->intr_ctx[i].soc = soc;
1502 		soc->intr_ctx[i].lro_ctx = qdf_lro_init();
1503 	}
1504 
1505 	qdf_timer_init(soc->osdev, &soc->int_timer,
1506 			dp_interrupt_timer, (void *)soc,
1507 			QDF_TIMER_TYPE_WAKE_APPS);
1508 
1509 	return QDF_STATUS_SUCCESS;
1510 }
1511 
1512 static QDF_STATUS dp_soc_interrupt_attach(void *txrx_soc);
1513 #if defined(CONFIG_MCL)
1514 /*
1515  * dp_soc_interrupt_attach_wrapper() - Register handlers for DP interrupts
1516  * @txrx_soc: DP SOC handle
1517  *
1518  * Call the appropriate attach function based on the mode of operation.
1519  * This is a WAR for enabling monitor mode.
1520  *
1521  * Return: 0 for success. nonzero for failure.
1522  */
1523 static QDF_STATUS dp_soc_interrupt_attach_wrapper(void *txrx_soc)
1524 {
1525 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
1526 
1527 	if (!(soc->wlan_cfg_ctx->napi_enabled) ||
1528 	     con_mode_monitor == QDF_GLOBAL_MONITOR_MODE) {
1529 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
1530 				  "%s: Poll mode", __func__);
1531 		return dp_soc_attach_poll(txrx_soc);
1532 	} else {
1533 
1534 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
1535 				  "%s: Interrupt mode", __func__);
1536 		return dp_soc_interrupt_attach(txrx_soc);
1537 	}
1538 }
1539 #else
1540 #if defined(DP_INTR_POLL_BASED) && DP_INTR_POLL_BASED
1541 static QDF_STATUS dp_soc_interrupt_attach_wrapper(void *txrx_soc)
1542 {
1543 	return dp_soc_attach_poll(txrx_soc);
1544 }
1545 #else
1546 static QDF_STATUS dp_soc_interrupt_attach_wrapper(void *txrx_soc)
1547 {
1548 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
1549 
1550 	if (hif_is_polled_mode_enabled(soc->hif_handle))
1551 		return dp_soc_attach_poll(txrx_soc);
1552 	else
1553 		return dp_soc_interrupt_attach(txrx_soc);
1554 }
1555 #endif
1556 #endif
1557 
1558 static void dp_soc_interrupt_map_calculate_integrated(struct dp_soc *soc,
1559 		int intr_ctx_num, int *irq_id_map, int *num_irq_r)
1560 {
1561 	int j;
1562 	int num_irq = 0;
1563 
1564 	int tx_mask =
1565 		wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
1566 	int rx_mask =
1567 		wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
1568 	int rx_mon_mask =
1569 		wlan_cfg_get_rx_mon_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
1570 	int rx_err_ring_mask = wlan_cfg_get_rx_err_ring_mask(
1571 					soc->wlan_cfg_ctx, intr_ctx_num);
1572 	int rx_wbm_rel_ring_mask = wlan_cfg_get_rx_wbm_rel_ring_mask(
1573 					soc->wlan_cfg_ctx, intr_ctx_num);
1574 	int reo_status_ring_mask = wlan_cfg_get_reo_status_ring_mask(
1575 					soc->wlan_cfg_ctx, intr_ctx_num);
1576 	int rxdma2host_ring_mask = wlan_cfg_get_rxdma2host_ring_mask(
1577 					soc->wlan_cfg_ctx, intr_ctx_num);
1578 	int host2rxdma_ring_mask = wlan_cfg_get_host2rxdma_ring_mask(
1579 					soc->wlan_cfg_ctx, intr_ctx_num);
1580 	int host2rxdma_mon_ring_mask = wlan_cfg_get_host2rxdma_mon_ring_mask(
1581 					soc->wlan_cfg_ctx, intr_ctx_num);
1582 
1583 	for (j = 0; j < HIF_MAX_GRP_IRQ; j++) {
1584 
1585 		if (tx_mask & (1 << j)) {
1586 			irq_id_map[num_irq++] =
1587 				(wbm2host_tx_completions_ring1 - j);
1588 		}
1589 
1590 		if (rx_mask & (1 << j)) {
1591 			irq_id_map[num_irq++] =
1592 				(reo2host_destination_ring1 - j);
1593 		}
1594 
1595 		if (rxdma2host_ring_mask & (1 << j)) {
1596 			irq_id_map[num_irq++] =
1597 				rxdma2host_destination_ring_mac1 -
1598 				wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, j);
1599 		}
1600 
1601 		if (host2rxdma_ring_mask & (1 << j)) {
1602 			irq_id_map[num_irq++] =
1603 				host2rxdma_host_buf_ring_mac1 -
1604 				wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, j);
1605 		}
1606 
1607 		if (host2rxdma_mon_ring_mask & (1 << j)) {
1608 			irq_id_map[num_irq++] =
1609 				host2rxdma_monitor_ring1 -
1610 				wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, j);
1611 		}
1612 
1613 		if (rx_mon_mask & (1 << j)) {
1614 			irq_id_map[num_irq++] =
1615 				ppdu_end_interrupts_mac1 -
1616 				wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, j);
1617 			irq_id_map[num_irq++] =
1618 				rxdma2host_monitor_status_ring_mac1 -
1619 				wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, j);
1620 		}
1621 
1622 		if (rx_wbm_rel_ring_mask & (1 << j))
1623 			irq_id_map[num_irq++] = wbm2host_rx_release;
1624 
1625 		if (rx_err_ring_mask & (1 << j))
1626 			irq_id_map[num_irq++] = reo2host_exception;
1627 
1628 		if (reo_status_ring_mask & (1 << j))
1629 			irq_id_map[num_irq++] = reo2host_status;
1630 
1631 	}
1632 	*num_irq_r = num_irq;
1633 }
1634 
1635 static void dp_soc_interrupt_map_calculate_msi(struct dp_soc *soc,
1636 		int intr_ctx_num, int *irq_id_map, int *num_irq_r,
1637 		int msi_vector_count, int msi_vector_start)
1638 {
1639 	int tx_mask = wlan_cfg_get_tx_ring_mask(
1640 					soc->wlan_cfg_ctx, intr_ctx_num);
1641 	int rx_mask = wlan_cfg_get_rx_ring_mask(
1642 					soc->wlan_cfg_ctx, intr_ctx_num);
1643 	int rx_mon_mask = wlan_cfg_get_rx_mon_ring_mask(
1644 					soc->wlan_cfg_ctx, intr_ctx_num);
1645 	int rx_err_ring_mask = wlan_cfg_get_rx_err_ring_mask(
1646 					soc->wlan_cfg_ctx, intr_ctx_num);
1647 	int rx_wbm_rel_ring_mask = wlan_cfg_get_rx_wbm_rel_ring_mask(
1648 					soc->wlan_cfg_ctx, intr_ctx_num);
1649 	int reo_status_ring_mask = wlan_cfg_get_reo_status_ring_mask(
1650 					soc->wlan_cfg_ctx, intr_ctx_num);
1651 	int rxdma2host_ring_mask = wlan_cfg_get_rxdma2host_ring_mask(
1652 					soc->wlan_cfg_ctx, intr_ctx_num);
1653 
1654 	unsigned int vector =
1655 		(intr_ctx_num % msi_vector_count) + msi_vector_start;
1656 	int num_irq = 0;
1657 
1658 	soc->intr_mode = DP_INTR_MSI;
1659 
1660 	if (tx_mask | rx_mask | rx_mon_mask | rx_err_ring_mask |
1661 	    rx_wbm_rel_ring_mask | reo_status_ring_mask | rxdma2host_ring_mask)
1662 		irq_id_map[num_irq++] =
1663 			pld_get_msi_irq(soc->osdev->dev, vector);
1664 
1665 	*num_irq_r = num_irq;
1666 }
1667 
1668 static void dp_soc_interrupt_map_calculate(struct dp_soc *soc, int intr_ctx_num,
1669 				    int *irq_id_map, int *num_irq)
1670 {
1671 	int msi_vector_count, ret;
1672 	uint32_t msi_base_data, msi_vector_start;
1673 
1674 	ret = pld_get_user_msi_assignment(soc->osdev->dev, "DP",
1675 					    &msi_vector_count,
1676 					    &msi_base_data,
1677 					    &msi_vector_start);
1678 	if (ret)
1679 		return dp_soc_interrupt_map_calculate_integrated(soc,
1680 				intr_ctx_num, irq_id_map, num_irq);
1681 
1682 	else
1683 		dp_soc_interrupt_map_calculate_msi(soc,
1684 				intr_ctx_num, irq_id_map, num_irq,
1685 				msi_vector_count, msi_vector_start);
1686 }
1687 
1688 /*
1689  * dp_soc_interrupt_attach() - Register handlers for DP interrupts
1690  * @txrx_soc: DP SOC handle
1691  *
1692  * Host driver will register for "DP_NUM_INTERRUPT_CONTEXTS" number of NAPI
1693  * contexts. Each NAPI context will have a tx_ring_mask, rx_ring_mask, and
1694  * rx_mon_ring_mask to indicate the rings that are processed by the handler.
1695  *
1696  * Return: 0 for success. nonzero for failure.
1697  */
1698 static QDF_STATUS dp_soc_interrupt_attach(void *txrx_soc)
1699 {
1700 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
1701 
1702 	int i = 0;
1703 	int num_irq = 0;
1704 
1705 	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
1706 		int ret = 0;
1707 
1708 		/* Map of IRQ ids registered with one interrupt context */
1709 		int irq_id_map[HIF_MAX_GRP_IRQ];
1710 
1711 		int tx_mask =
1712 			wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, i);
1713 		int rx_mask =
1714 			wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, i);
1715 		int rx_mon_mask =
1716 			dp_soc_get_mon_mask_for_interrupt_mode(soc, i);
1717 		int rx_err_ring_mask =
1718 			wlan_cfg_get_rx_err_ring_mask(soc->wlan_cfg_ctx, i);
1719 		int rx_wbm_rel_ring_mask =
1720 			wlan_cfg_get_rx_wbm_rel_ring_mask(soc->wlan_cfg_ctx, i);
1721 		int reo_status_ring_mask =
1722 			wlan_cfg_get_reo_status_ring_mask(soc->wlan_cfg_ctx, i);
1723 		int rxdma2host_ring_mask =
1724 			wlan_cfg_get_rxdma2host_ring_mask(soc->wlan_cfg_ctx, i);
1725 		int host2rxdma_ring_mask =
1726 			wlan_cfg_get_host2rxdma_ring_mask(soc->wlan_cfg_ctx, i);
1727 		int host2rxdma_mon_ring_mask =
1728 			wlan_cfg_get_host2rxdma_mon_ring_mask(
1729 				soc->wlan_cfg_ctx, i);
1730 
1731 		soc->intr_ctx[i].dp_intr_id = i;
1732 		soc->intr_ctx[i].tx_ring_mask = tx_mask;
1733 		soc->intr_ctx[i].rx_ring_mask = rx_mask;
1734 		soc->intr_ctx[i].rx_mon_ring_mask = rx_mon_mask;
1735 		soc->intr_ctx[i].rx_err_ring_mask = rx_err_ring_mask;
1736 		soc->intr_ctx[i].rxdma2host_ring_mask = rxdma2host_ring_mask;
1737 		soc->intr_ctx[i].host2rxdma_ring_mask = host2rxdma_ring_mask;
1738 		soc->intr_ctx[i].rx_wbm_rel_ring_mask = rx_wbm_rel_ring_mask;
1739 		soc->intr_ctx[i].reo_status_ring_mask = reo_status_ring_mask;
1740 		soc->intr_ctx[i].host2rxdma_mon_ring_mask =
1741 			 host2rxdma_mon_ring_mask;
1742 
1743 		soc->intr_ctx[i].soc = soc;
1744 
1745 		num_irq = 0;
1746 
1747 		dp_soc_interrupt_map_calculate(soc, i, &irq_id_map[0],
1748 					       &num_irq);
1749 
1750 		ret = hif_register_ext_group(soc->hif_handle,
1751 				num_irq, irq_id_map, dp_service_srngs,
1752 				&soc->intr_ctx[i], "dp_intr",
1753 				HIF_EXEC_NAPI_TYPE, QCA_NAPI_DEF_SCALE_BIN_SHIFT);
1754 
1755 		if (ret) {
1756 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1757 			FL("failed, ret = %d"), ret);
1758 
1759 			return QDF_STATUS_E_FAILURE;
1760 		}
1761 		soc->intr_ctx[i].lro_ctx = qdf_lro_init();
1762 	}
1763 
1764 	hif_configure_ext_group_interrupts(soc->hif_handle);
1765 
1766 	return QDF_STATUS_SUCCESS;
1767 }
1768 
1769 /*
1770  * dp_soc_interrupt_detach() - Deregister any allocations done for interrupts
1771  * @txrx_soc: DP SOC handle
1772  *
1773  * Return: void
1774  */
1775 static void dp_soc_interrupt_detach(void *txrx_soc)
1776 {
1777 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
1778 	int i;
1779 
1780 	if (soc->intr_mode == DP_INTR_POLL) {
1781 		qdf_timer_stop(&soc->int_timer);
1782 		qdf_timer_free(&soc->int_timer);
1783 	} else {
1784 		hif_deregister_exec_group(soc->hif_handle, "dp_intr");
1785 	}
1786 
1787 	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
1788 		soc->intr_ctx[i].tx_ring_mask = 0;
1789 		soc->intr_ctx[i].rx_ring_mask = 0;
1790 		soc->intr_ctx[i].rx_mon_ring_mask = 0;
1791 		soc->intr_ctx[i].rx_err_ring_mask = 0;
1792 		soc->intr_ctx[i].rx_wbm_rel_ring_mask = 0;
1793 		soc->intr_ctx[i].reo_status_ring_mask = 0;
1794 		soc->intr_ctx[i].rxdma2host_ring_mask = 0;
1795 		soc->intr_ctx[i].host2rxdma_ring_mask = 0;
1796 		soc->intr_ctx[i].host2rxdma_mon_ring_mask = 0;
1797 
1798 		qdf_lro_deinit(soc->intr_ctx[i].lro_ctx);
1799 	}
1800 }
1801 
1802 #define AVG_MAX_MPDUS_PER_TID 128
1803 #define AVG_TIDS_PER_CLIENT 2
1804 #define AVG_FLOWS_PER_TID 2
1805 #define AVG_MSDUS_PER_FLOW 128
1806 #define AVG_MSDUS_PER_MPDU 4
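
/*
 * Worked sizing example (hypothetical HAL values, for illustration only):
 * assuming max_clients = 64, hal_num_mpdus_per_link_desc() = 6 and
 * hal_num_msdus_per_link_desc() = 7, dp_hw_link_desc_pool_setup() below
 * computes:
 *
 *   num_mpdu_link_descs    = (64 * 2 * 128) / 6     = 2730
 *   num_tx_msdu_link_descs = (64 * 2 * 2 * 128) / 7 = 4681
 *   num_rx_msdu_link_descs = (64 * 2 * 128 * 4) / 6 = 10922
 *
 * These, plus num_mpdu_queue_descs, are summed into num_entries and then
 * rounded up to a power of two to obtain total_link_descs.
 */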
1807 
1808 /*
1809  * Allocate and setup link descriptor pool that will be used by HW for
1810  * various link and queue descriptors and managed by WBM
1811  */
1812 static int dp_hw_link_desc_pool_setup(struct dp_soc *soc)
1813 {
1814 	int link_desc_size = hal_get_link_desc_size(soc->hal_soc);
1815 	int link_desc_align = hal_get_link_desc_align(soc->hal_soc);
1816 	uint32_t max_clients = wlan_cfg_get_max_clients(soc->wlan_cfg_ctx);
1817 	uint32_t num_mpdus_per_link_desc =
1818 		hal_num_mpdus_per_link_desc(soc->hal_soc);
1819 	uint32_t num_msdus_per_link_desc =
1820 		hal_num_msdus_per_link_desc(soc->hal_soc);
1821 	uint32_t num_mpdu_links_per_queue_desc =
1822 		hal_num_mpdu_links_per_queue_desc(soc->hal_soc);
1823 	uint32_t max_alloc_size = wlan_cfg_max_alloc_size(soc->wlan_cfg_ctx);
1824 	uint32_t total_link_descs, total_mem_size;
1825 	uint32_t num_mpdu_link_descs, num_mpdu_queue_descs;
1826 	uint32_t num_tx_msdu_link_descs, num_rx_msdu_link_descs;
1827 	uint32_t num_link_desc_banks;
1828 	uint32_t last_bank_size = 0;
1829 	uint32_t entry_size, num_entries;
1830 	int i;
1831 	uint32_t desc_id = 0;
1832 	qdf_dma_addr_t *baseaddr = NULL;
1833 
1834 	/* Only Tx queue descriptors are allocated from the common link
1835 	 * descriptor pool. Rx queue descriptors (i.e. REO queue extension
1836 	 * descriptors) are not included here because they are expected to be
1837 	 * allocated contiguously with the REO queue descriptors.
1838 	 */
1839 	num_mpdu_link_descs = (max_clients * AVG_TIDS_PER_CLIENT *
1840 		AVG_MAX_MPDUS_PER_TID) / num_mpdus_per_link_desc;
1841 
1842 	num_mpdu_queue_descs = num_mpdu_link_descs /
1843 		num_mpdu_links_per_queue_desc;
1844 
1845 	num_tx_msdu_link_descs = (max_clients * AVG_TIDS_PER_CLIENT *
1846 		AVG_FLOWS_PER_TID * AVG_MSDUS_PER_FLOW) /
1847 		num_msdus_per_link_desc;
1848 
1849 	num_rx_msdu_link_descs = (max_clients * AVG_TIDS_PER_CLIENT *
1850 		AVG_MAX_MPDUS_PER_TID * AVG_MSDUS_PER_MPDU) / 6;
1851 
1852 	num_entries = num_mpdu_link_descs + num_mpdu_queue_descs +
1853 		num_tx_msdu_link_descs + num_rx_msdu_link_descs;
1854 
1855 	/* Round up to power of 2 */
1856 	total_link_descs = 1;
1857 	while (total_link_descs < num_entries)
1858 		total_link_descs <<= 1;
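	/* e.g. the ~18k entries from the sizing example above round up to 32768 */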
1859 
1860 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
1861 		FL("total_link_descs: %u, link_desc_size: %d"),
1862 		total_link_descs, link_desc_size);
1863 	total_mem_size =  total_link_descs * link_desc_size;
1864 
1865 	total_mem_size += link_desc_align;
1866 
1867 	if (total_mem_size <= max_alloc_size) {
1868 		num_link_desc_banks = 0;
1869 		last_bank_size = total_mem_size;
1870 	} else {
1871 		num_link_desc_banks = (total_mem_size) /
1872 			(max_alloc_size - link_desc_align);
1873 		last_bank_size = total_mem_size %
1874 			(max_alloc_size - link_desc_align);
1875 	}
1876 
1877 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
1878 		FL("total_mem_size: %d, num_link_desc_banks: %u"),
1879 		total_mem_size, num_link_desc_banks);
1880 
1881 	for (i = 0; i < num_link_desc_banks; i++) {
1882 		if (!soc->dp_soc_reinit) {
1883 			baseaddr = &soc->link_desc_banks[i].
1884 					base_paddr_unaligned;
1885 			soc->link_desc_banks[i].base_vaddr_unaligned =
1886 				qdf_mem_alloc_consistent(soc->osdev,
1887 							 soc->osdev->dev,
1888 							 max_alloc_size,
1889 							 baseaddr);
1890 		}
1891 		soc->link_desc_banks[i].size = max_alloc_size;
1892 
1893 		soc->link_desc_banks[i].base_vaddr = (void *)((unsigned long)(
1894 			soc->link_desc_banks[i].base_vaddr_unaligned) +
1895 			((unsigned long)(
1896 			soc->link_desc_banks[i].base_vaddr_unaligned) %
1897 			link_desc_align));
1898 
1899 		soc->link_desc_banks[i].base_paddr = (unsigned long)(
1900 			soc->link_desc_banks[i].base_paddr_unaligned) +
1901 			((unsigned long)(soc->link_desc_banks[i].base_vaddr) -
1902 			(unsigned long)(
1903 			soc->link_desc_banks[i].base_vaddr_unaligned));
1904 
1905 		if (!soc->link_desc_banks[i].base_vaddr_unaligned) {
1906 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1907 				FL("Link descriptor memory alloc failed"));
1908 			goto fail;
1909 		}
1910 	}
1911 
1912 	if (last_bank_size) {
1913 		/* Allocate the last bank in case the total memory required is
1914 		 * not an exact multiple of max_alloc_size
1915 		 */
1916 		if (!soc->dp_soc_reinit) {
1917 			baseaddr = &soc->link_desc_banks[i].
1918 					base_paddr_unaligned;
1919 			soc->link_desc_banks[i].base_vaddr_unaligned =
1920 				qdf_mem_alloc_consistent(soc->osdev,
1921 							 soc->osdev->dev,
1922 							 last_bank_size,
1923 							 baseaddr);
1924 		}
1925 		soc->link_desc_banks[i].size = last_bank_size;
1926 
1927 		soc->link_desc_banks[i].base_vaddr = (void *)((unsigned long)
1928 			(soc->link_desc_banks[i].base_vaddr_unaligned) +
1929 			((unsigned long)(
1930 			soc->link_desc_banks[i].base_vaddr_unaligned) %
1931 			link_desc_align));
1932 
1933 		soc->link_desc_banks[i].base_paddr =
1934 			(unsigned long)(
1935 			soc->link_desc_banks[i].base_paddr_unaligned) +
1936 			((unsigned long)(soc->link_desc_banks[i].base_vaddr) -
1937 			(unsigned long)(
1938 			soc->link_desc_banks[i].base_vaddr_unaligned));
1939 	}
1940 
1941 
1942 	/* Allocate and setup link descriptor idle list for HW internal use */
1943 	entry_size = hal_srng_get_entrysize(soc->hal_soc, WBM_IDLE_LINK);
1944 	total_mem_size = entry_size * total_link_descs;
1945 
1946 	if (total_mem_size <= max_alloc_size) {
1947 		void *desc;
1948 
1949 		if (dp_srng_setup(soc, &soc->wbm_idle_link_ring,
1950 			WBM_IDLE_LINK, 0, 0, total_link_descs)) {
1951 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1952 				FL("Link desc idle ring setup failed"));
1953 			goto fail;
1954 		}
1955 
1956 		hal_srng_access_start_unlocked(soc->hal_soc,
1957 			soc->wbm_idle_link_ring.hal_srng);
1958 
1959 		for (i = 0; i < MAX_LINK_DESC_BANKS &&
1960 			soc->link_desc_banks[i].base_paddr; i++) {
1961 			uint32_t num_entries = (soc->link_desc_banks[i].size -
1962 				((unsigned long)(
1963 				soc->link_desc_banks[i].base_vaddr) -
1964 				(unsigned long)(
1965 				soc->link_desc_banks[i].base_vaddr_unaligned)))
1966 				/ link_desc_size;
1967 			unsigned long paddr = (unsigned long)(
1968 				soc->link_desc_banks[i].base_paddr);
1969 
1970 			while (num_entries && (desc = hal_srng_src_get_next(
1971 				soc->hal_soc,
1972 				soc->wbm_idle_link_ring.hal_srng))) {
1973 				hal_set_link_desc_addr(desc,
1974 					LINK_DESC_COOKIE(desc_id, i), paddr);
1975 				num_entries--;
1976 				desc_id++;
1977 				paddr += link_desc_size;
1978 			}
1979 		}
1980 		hal_srng_access_end_unlocked(soc->hal_soc,
1981 			soc->wbm_idle_link_ring.hal_srng);
1982 	} else {
1983 		uint32_t num_scatter_bufs;
1984 		uint32_t num_entries_per_buf;
1985 		uint32_t rem_entries;
1986 		uint8_t *scatter_buf_ptr;
1987 		uint16_t scatter_buf_num;
1988 		uint32_t buf_size = 0;
1989 
1990 		soc->wbm_idle_scatter_buf_size =
1991 			hal_idle_list_scatter_buf_size(soc->hal_soc);
1992 		num_entries_per_buf = hal_idle_scatter_buf_num_entries(
1993 			soc->hal_soc, soc->wbm_idle_scatter_buf_size);
1994 		num_scatter_bufs = hal_idle_list_num_scatter_bufs(
1995 					soc->hal_soc, total_mem_size,
1996 					soc->wbm_idle_scatter_buf_size);
1997 
1998 		if (num_scatter_bufs > MAX_IDLE_SCATTER_BUFS) {
1999 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2000 					FL("scatter bufs size out of bounds"));
2001 			goto fail;
2002 		}
2003 
2004 		for (i = 0; i < num_scatter_bufs; i++) {
2005 			baseaddr = &soc->wbm_idle_scatter_buf_base_paddr[i];
2006 			if (!soc->dp_soc_reinit) {
2007 				buf_size = soc->wbm_idle_scatter_buf_size;
2008 				soc->wbm_idle_scatter_buf_base_vaddr[i] =
2009 					qdf_mem_alloc_consistent(soc->osdev,
2010 								 soc->osdev->
2011 								 dev,
2012 								 buf_size,
2013 								 baseaddr);
2014 			}
2015 			if (soc->wbm_idle_scatter_buf_base_vaddr[i] == NULL) {
2016 				QDF_TRACE(QDF_MODULE_ID_DP,
2017 					  QDF_TRACE_LEVEL_ERROR,
2018 					  FL("Scatter lst memory alloc fail"));
2019 				goto fail;
2020 			}
2021 		}
2022 
2023 		/* Populate idle list scatter buffers with link descriptor
2024 		 * pointers
2025 		 */
2026 		scatter_buf_num = 0;
2027 		scatter_buf_ptr = (uint8_t *)(
2028 			soc->wbm_idle_scatter_buf_base_vaddr[scatter_buf_num]);
2029 		rem_entries = num_entries_per_buf;
2030 
2031 		for (i = 0; i < MAX_LINK_DESC_BANKS &&
2032 			soc->link_desc_banks[i].base_paddr; i++) {
2033 			uint32_t num_link_descs =
2034 				(soc->link_desc_banks[i].size -
2035 				((unsigned long)(
2036 				soc->link_desc_banks[i].base_vaddr) -
2037 				(unsigned long)(
2038 				soc->link_desc_banks[i].base_vaddr_unaligned)))
2039 				/ link_desc_size;
2040 			unsigned long paddr = (unsigned long)(
2041 				soc->link_desc_banks[i].base_paddr);
2042 
2043 			while (num_link_descs) {
2044 				hal_set_link_desc_addr((void *)scatter_buf_ptr,
2045 					LINK_DESC_COOKIE(desc_id, i), paddr);
2046 				num_link_descs--;
2047 				desc_id++;
2048 				paddr += link_desc_size;
2049 				rem_entries--;
2050 				if (rem_entries) {
2051 					scatter_buf_ptr += entry_size;
2052 				} else {
2053 					rem_entries = num_entries_per_buf;
2054 					scatter_buf_num++;
2055 
2056 					if (scatter_buf_num >= num_scatter_bufs)
2057 						break;
2058 
2059 					scatter_buf_ptr = (uint8_t *)(
2060 						soc->wbm_idle_scatter_buf_base_vaddr[
2061 						scatter_buf_num]);
2062 				}
2063 			}
2064 		}
2065 		/* Setup link descriptor idle list in HW */
2066 		hal_setup_link_idle_list(soc->hal_soc,
2067 			soc->wbm_idle_scatter_buf_base_paddr,
2068 			soc->wbm_idle_scatter_buf_base_vaddr,
2069 			num_scatter_bufs, soc->wbm_idle_scatter_buf_size,
2070 			(uint32_t)(scatter_buf_ptr -
2071 			(uint8_t *)(soc->wbm_idle_scatter_buf_base_vaddr[
2072 			scatter_buf_num-1])), total_link_descs);
2073 	}
2074 	return 0;
2075 
2076 fail:
2077 	if (soc->wbm_idle_link_ring.hal_srng) {
2078 		dp_srng_cleanup(soc, &soc->wbm_idle_link_ring,
2079 				WBM_IDLE_LINK, 0);
2080 	}
2081 
2082 	for (i = 0; i < MAX_IDLE_SCATTER_BUFS; i++) {
2083 		if (soc->wbm_idle_scatter_buf_base_vaddr[i]) {
2084 			qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
2085 				soc->wbm_idle_scatter_buf_size,
2086 				soc->wbm_idle_scatter_buf_base_vaddr[i],
2087 				soc->wbm_idle_scatter_buf_base_paddr[i], 0);
2088 			soc->wbm_idle_scatter_buf_base_vaddr[i] = NULL;
2089 		}
2090 	}
2091 
2092 	for (i = 0; i < MAX_LINK_DESC_BANKS; i++) {
2093 		if (soc->link_desc_banks[i].base_vaddr_unaligned) {
2094 			qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
2095 				soc->link_desc_banks[i].size,
2096 				soc->link_desc_banks[i].base_vaddr_unaligned,
2097 				soc->link_desc_banks[i].base_paddr_unaligned,
2098 				0);
2099 			soc->link_desc_banks[i].base_vaddr_unaligned = NULL;
2100 		}
2101 	}
2102 	return QDF_STATUS_E_FAILURE;
2103 }
2104 
2105 /*
2106  * Free the link descriptor pool that was set up for HW use
2107  */
2108 static void dp_hw_link_desc_pool_cleanup(struct dp_soc *soc)
2109 {
2110 	int i;
2111 
2112 	if (soc->wbm_idle_link_ring.hal_srng) {
2113 		dp_srng_cleanup(soc, &soc->wbm_idle_link_ring,
2114 			WBM_IDLE_LINK, 0);
2115 	}
2116 
2117 	for (i = 0; i < MAX_IDLE_SCATTER_BUFS; i++) {
2118 		if (soc->wbm_idle_scatter_buf_base_vaddr[i]) {
2119 			qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
2120 				soc->wbm_idle_scatter_buf_size,
2121 				soc->wbm_idle_scatter_buf_base_vaddr[i],
2122 				soc->wbm_idle_scatter_buf_base_paddr[i], 0);
2123 			soc->wbm_idle_scatter_buf_base_vaddr[i] = NULL;
2124 		}
2125 	}
2126 
2127 	for (i = 0; i < MAX_LINK_DESC_BANKS; i++) {
2128 		if (soc->link_desc_banks[i].base_vaddr_unaligned) {
2129 			qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
2130 				soc->link_desc_banks[i].size,
2131 				soc->link_desc_banks[i].base_vaddr_unaligned,
2132 				soc->link_desc_banks[i].base_paddr_unaligned,
2133 				0);
2134 			soc->link_desc_banks[i].base_vaddr_unaligned = NULL;
2135 		}
2136 	}
2137 }
2138 
2139 #ifdef IPA_OFFLOAD
2140 #define REO_DST_RING_SIZE_QCA6290 1023
2141 #ifndef QCA_WIFI_QCA8074_VP
2142 #define REO_DST_RING_SIZE_QCA8074 1023
2143 #else
2144 #define REO_DST_RING_SIZE_QCA8074 8
2145 #endif /* QCA_WIFI_QCA8074_VP */
2146 
2147 #else
2148 
2149 #define REO_DST_RING_SIZE_QCA6290 1024
2150 #ifndef QCA_WIFI_QCA8074_VP
2151 #define REO_DST_RING_SIZE_QCA8074 2048
2152 #else
2153 #define REO_DST_RING_SIZE_QCA8074 8
2154 #endif /* QCA_WIFI_QCA8074_VP */
2155 #endif /* IPA_OFFLOAD */
2156 
2157 /*
2158  * dp_ast_aging_timer_fn() - Timer callback function for WDS aging
2159  * @soc: Datapath SOC handle
2160  *
2161  * This is a timer function used to age out stale AST nodes from the
2162  * AST table.
2163  */
2164 #ifdef FEATURE_WDS
2165 static void dp_ast_aging_timer_fn(void *soc_hdl)
2166 {
2167 	struct dp_soc *soc = (struct dp_soc *) soc_hdl;
2168 	struct dp_pdev *pdev;
2169 	struct dp_vdev *vdev;
2170 	struct dp_peer *peer;
2171 	struct dp_ast_entry *ase, *temp_ase;
2172 	int i;
2173 	bool check_wds_ase = false;
2174 
2175 	if (soc->wds_ast_aging_timer_cnt++ >= DP_WDS_AST_AGING_TIMER_CNT) {
2176 		soc->wds_ast_aging_timer_cnt = 0;
2177 		check_wds_ase = true;
2178 	}
2179 
2180 	 /* Peer list access lock */
2181 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
2182 
2183 	/* AST list access lock */
2184 	qdf_spin_lock_bh(&soc->ast_lock);
2185 
2186 	for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) {
2187 		pdev = soc->pdev_list[i];
2188 		qdf_spin_lock_bh(&pdev->vdev_list_lock);
2189 		DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
2190 			DP_VDEV_ITERATE_PEER_LIST(vdev, peer) {
2191 				DP_PEER_ITERATE_ASE_LIST(peer, ase, temp_ase) {
2192 					/*
2193 					 * Do not expire static ast entries
2194 					 * and HM WDS entries
2195 					 */
2196 					if (ase->type !=
2197 					    CDP_TXRX_AST_TYPE_WDS &&
2198 					    ase->type !=
2199 					    CDP_TXRX_AST_TYPE_MEC &&
2200 					    ase->type !=
2201 					    CDP_TXRX_AST_TYPE_DA)
2202 						continue;
2203 
2204 					/* Expire MEC entries every n sec.
2205 					 * An entry needs to be expired in
2206 					 * case the STA backbone is made the
2207 					 * AP backbone; in that case it needs
2208 					 * to be re-added as a WDS entry.
2209 					 */
2210 					if (ase->is_active && ase->type ==
2211 					    CDP_TXRX_AST_TYPE_MEC) {
2212 						ase->is_active = FALSE;
2213 						continue;
2214 					} else if (ase->is_active &&
2215 						   check_wds_ase) {
2216 						ase->is_active = FALSE;
2217 						continue;
2218 					}
2219 
2220 					if (ase->type ==
2221 					    CDP_TXRX_AST_TYPE_MEC) {
2222 						DP_STATS_INC(soc,
2223 							     ast.aged_out, 1);
2224 						dp_peer_del_ast(soc, ase);
2225 					} else if (check_wds_ase) {
2226 						DP_STATS_INC(soc,
2227 							     ast.aged_out, 1);
2228 						dp_peer_del_ast(soc, ase);
2229 					}
2230 				}
2231 			}
2232 		}
2233 		qdf_spin_unlock_bh(&pdev->vdev_list_lock);
2234 	}
2235 
2236 	qdf_spin_unlock_bh(&soc->ast_lock);
2237 	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
2238 
2239 	if (qdf_atomic_read(&soc->cmn_init_done))
2240 		qdf_timer_mod(&soc->ast_aging_timer,
2241 			      DP_AST_AGING_TIMER_DEFAULT_MS);
2242 }
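
/*
 * Aging cadence, worked through (summary derived from the constants used
 * above; the timer period is DP_AST_AGING_TIMER_DEFAULT_MS, i.e. 1 s by
 * default): MEC entries are examined on every tick, so an idle MEC entry
 * is marked inactive on one tick and deleted on the next, aging out in
 * roughly 1-2 s. WDS and DA entries are only examined when check_wds_ase
 * is set, once every DP_WDS_AST_AGING_TIMER_CNT + 1 ticks (120 s by
 * default), so an idle WDS entry survives roughly 120-240 s.
 */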
2243 
2244 
2245 /*
2246  * dp_soc_wds_attach() - Setup WDS timer and AST table
2247  * @soc:		Datapath SOC handle
2248  *
2249  * Return: None
2250  */
2251 static void dp_soc_wds_attach(struct dp_soc *soc)
2252 {
2253 	soc->wds_ast_aging_timer_cnt = 0;
2254 	qdf_timer_init(soc->osdev, &soc->ast_aging_timer,
2255 		       dp_ast_aging_timer_fn, (void *)soc,
2256 		       QDF_TIMER_TYPE_WAKE_APPS);
2257 
2258 	qdf_timer_mod(&soc->ast_aging_timer, DP_AST_AGING_TIMER_DEFAULT_MS);
2259 }
2260 
2261 /*
2262  * dp_soc_wds_detach() - Detach WDS data structures and timers
2263  * @txrx_soc: DP SOC handle
2264  *
2265  * Return: None
2266  */
2267 static void dp_soc_wds_detach(struct dp_soc *soc)
2268 {
2269 	qdf_timer_stop(&soc->ast_aging_timer);
2270 	qdf_timer_free(&soc->ast_aging_timer);
2271 }
2272 #else
2273 static void dp_soc_wds_attach(struct dp_soc *soc)
2274 {
2275 }
2276 
2277 static void dp_soc_wds_detach(struct dp_soc *soc)
2278 {
2279 }
2280 #endif
2281 
2282 /*
2283  * dp_soc_reset_cpu_ring_map() - Reset cpu ring map
2284  * @soc: Datapath soc handle
2285  *
2286  * This API resets the default cpu ring map
2287  */
2288 
2289 static void dp_soc_reset_cpu_ring_map(struct dp_soc *soc)
2290 {
2291 	uint8_t i;
2292 	int nss_config = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
2293 
2294 	for (i = 0; i < WLAN_CFG_INT_NUM_CONTEXTS; i++) {
2295 		switch (nss_config) {
2296 		case dp_nss_cfg_first_radio:
2297 			/*
2298 			 * Setting Tx ring map for one nss offloaded radio
2299 			 */
2300 			soc->tx_ring_map[i] = dp_cpu_ring_map[DP_NSS_FIRST_RADIO_OFFLOADED_MAP][i];
2301 			break;
2302 
2303 		case dp_nss_cfg_second_radio:
2304 			/*
2305 			 * Setting Tx ring map for the second nss offloaded radio
2306 			 */
2307 			soc->tx_ring_map[i] = dp_cpu_ring_map[DP_NSS_SECOND_RADIO_OFFLOADED_MAP][i];
2308 			break;
2309 
2310 		case dp_nss_cfg_dbdc:
2311 			/*
2312 			 * Setting Tx ring map for 2 nss offloaded radios
2313 			 */
2314 			soc->tx_ring_map[i] =
2315 				dp_cpu_ring_map[DP_NSS_DBDC_OFFLOADED_MAP][i];
2316 			break;
2317 
2318 		case dp_nss_cfg_dbtc:
2319 			/*
2320 			 * Setting Tx ring map for 3 nss offloaded radios
2321 			 */
2322 			soc->tx_ring_map[i] =
2323 				dp_cpu_ring_map[DP_NSS_DBTC_OFFLOADED_MAP][i];
2324 			break;
2325 
2326 		default:
2327 			dp_err("tx_ring_map failed due to invalid nss cfg");
2328 			break;
2329 		}
2330 	}
2331 }
2332 
2333 /*
2334  * dp_soc_ring_if_nss_offloaded() - find if ring is offloaded to NSS
2335  * @soc: DP soc handle
2336  * @ring_type: ring type
2337  * @ring_num: ring number
2338  *
2339  * Return: 1 if the ring is offloaded to NSS, else 0
2340  */
2341 static uint8_t dp_soc_ring_if_nss_offloaded(struct dp_soc *soc, enum hal_ring_type ring_type, int ring_num)
2342 {
2343 	uint8_t nss_config = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
2344 	uint8_t status = 0;
2345 
2346 	switch (ring_type) {
2347 	case WBM2SW_RELEASE:
2348 	case REO_DST:
2349 	case RXDMA_BUF:
2350 		status = ((nss_config) & (1 << ring_num));
2351 		break;
2352 	default:
2353 		break;
2354 	}
2355 
2356 	return status;
2357 }
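
/*
 * Example (illustrative only, assuming dp_nss_cfg_second_radio maps to
 * bit 1, as its use as a bitmask above suggests): with
 * nss_config = dp_nss_cfg_second_radio,
 * dp_soc_ring_if_nss_offloaded(soc, REO_DST, 1) evaluates 0x2 & (1 << 1)
 * and returns nonzero, i.e. REO destination ring 1 belongs to the NSS
 * offload while ring 0 is still serviced by the host.
 */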
2358 
2359 /*
2360  * dp_soc_reset_intr_mask() - reset interrupt mask
2361  * @soc: DP soc handle
2362  *
2363  * Return: void
2364  */
2365 static void dp_soc_reset_intr_mask(struct dp_soc *soc)
2366 {
2367 	uint8_t j;
2368 	int *grp_mask = NULL;
2369 	int group_number, mask, num_ring;
2370 
2371 	/* number of tx rings */
2372 	num_ring = wlan_cfg_num_tcl_data_rings(soc->wlan_cfg_ctx);
2373 
2374 	/*
2375 	 * group mask for tx completion  ring.
2376 	 */
2377 	grp_mask =  &soc->wlan_cfg_ctx->int_tx_ring_mask[0];
2378 
2379 	/* loop and reset the mask for only offloaded ring */
2380 	for (j = 0; j < num_ring; j++) {
2381 		if (!dp_soc_ring_if_nss_offloaded(soc, WBM2SW_RELEASE, j)) {
2382 			continue;
2383 		}
2384 
2385 		/*
2386 		 * Group number corresponding to tx offloaded ring.
2387 		 */
2388 		group_number = dp_srng_find_ring_in_mask(j, grp_mask);
2389 		if (group_number < 0) {
2390 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2391 					FL("ring not part of any group; ring_type: %d,ring_num %d"),
2392 					WBM2SW_RELEASE, j);
2393 			return;
2394 		}
2395 
2396 		/* reset the tx mask for offloaded ring */
2397 		mask = wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, group_number);
2398 		mask &= (~(1 << j));
2399 
2400 		/*
2401 		 * reset the interrupt mask for offloaded ring.
2402 		 */
2403 		wlan_cfg_set_tx_ring_mask(soc->wlan_cfg_ctx, group_number, mask);
2404 	}
2405 
2406 	/* number of rx rings */
2407 	num_ring = wlan_cfg_num_reo_dest_rings(soc->wlan_cfg_ctx);
2408 
2409 	/*
2410 	 * group mask for reo destination ring.
2411 	 */
2412 	grp_mask = &soc->wlan_cfg_ctx->int_rx_ring_mask[0];
2413 
2414 	/* loop and reset the mask for only offloaded ring */
2415 	for (j = 0; j < num_ring; j++) {
2416 		if (!dp_soc_ring_if_nss_offloaded(soc, REO_DST, j)) {
2417 			continue;
2418 		}
2419 
2420 		/*
2421 		 * Group number corresponding to rx offloaded ring.
2422 		 */
2423 		group_number = dp_srng_find_ring_in_mask(j, grp_mask);
2424 		if (group_number < 0) {
2425 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2426 					FL("ring not part of any group; ring_type: %d,ring_num %d"),
2427 					REO_DST, j);
2428 			return;
2429 		}
2430 
2431 		/* clear the rx ring mask bit for the offloaded ring */
2432 		mask =  wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, group_number);
2433 		mask &= (~(1 << j));
2434 
2435 		/*
2436 		 * set the interrupt mask to zero for rx offloaded radio.
2437 		 */
2438 		wlan_cfg_set_rx_ring_mask(soc->wlan_cfg_ctx, group_number, mask);
2439 	}
2440 
2441 	/*
2442 	 * group mask for Rx buffer refill ring
2443 	 */
2444 	grp_mask = &soc->wlan_cfg_ctx->int_host2rxdma_ring_mask[0];
2445 
2446 	/* loop and reset the mask for only offloaded ring */
2447 	for (j = 0; j < MAX_PDEV_CNT; j++) {
2448 		if (!dp_soc_ring_if_nss_offloaded(soc, RXDMA_BUF, j)) {
2449 			continue;
2450 		}
2451 
2452 		/*
2453 		 * Group number corresponding to rx offloaded ring.
2454 		 */
2455 		group_number = dp_srng_find_ring_in_mask(j, grp_mask);
2456 		if (group_number < 0) {
2457 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2458 					FL("ring not part of any group; ring_type: %d,ring_num %d"),
2459 					RXDMA_BUF, j);
2460 			return;
2461 		}
2462 
2463 		/* clear the refill ring mask bit for the offloaded ring */
2464 		mask =  wlan_cfg_get_host2rxdma_ring_mask(soc->wlan_cfg_ctx,
2465 				group_number);
2466 		mask &= (~(1 << j));
2467 
2468 		/*
2469 		 * set the interrupt mask to zero for rx offloaded radio.
2470 		 */
2471 		wlan_cfg_set_host2rxdma_ring_mask(soc->wlan_cfg_ctx,
2472 			group_number, mask);
2473 	}
2474 }
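
/*
 * Mask arithmetic example (illustrative only): if interrupt group 0
 * carries tx ring mask 0x7 (rings 0-2) and ring 1 is NSS offloaded, the
 * loops above compute mask = 0x7 & ~(1 << 1) = 0x5, leaving the host to
 * service rings 0 and 2 only.
 */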
2475 
2476 #ifdef IPA_OFFLOAD
2477 /**
2478  * dp_reo_remap_config() - configure reo remap register value based
2479  *                         on nss configuration.
2480  *		depending on the offload_radio value, the remap
2481  *		configuration below gets applied.
2482  *		0 - both Radios handled by host (remap rings 1, 2, 3 & 4)
2483  *		1 - 1st Radio handled by NSS (remap rings 2, 3 & 4)
2484  *		2 - 2nd Radio handled by NSS (remap rings 1, 2 & 4)
2485  *		3 - both Radios handled by NSS (remap not required)
2486  *		4 - IPA OFFLOAD enabled (remap rings 1,2 & 3)
2487  *
2488  * @remap1: output parameter indicates reo remap 1 register value
2489  * @remap2: output parameter indicates reo remap 2 register value
2490  * Return: bool type, true if remap is configured else false.
2491  */
2492 static bool dp_reo_remap_config(struct dp_soc *soc,
2493 				uint32_t *remap1,
2494 				uint32_t *remap2)
2495 {
2496 	*remap1 = ((0x1 << 0) | (0x2 << 3) | (0x3 << 6) | (0x1 << 9) |
2497 		(0x2 << 12) | (0x3 << 15) | (0x1 << 18) | (0x2 << 21)) << 8;
2498 
2499 	*remap2 = ((0x3 << 0) | (0x1 << 3) | (0x2 << 6) | (0x3 << 9) |
2500 		(0x1 << 12) | (0x2 << 15) | (0x3 << 18) | (0x1 << 21)) << 8;
2501 
2502 	dp_debug("remap1 %x remap2 %x", *remap1, *remap2);
2503 
2504 	return true;
2505 }
2506 #else
2507 static bool dp_reo_remap_config(struct dp_soc *soc,
2508 				uint32_t *remap1,
2509 				uint32_t *remap2)
2510 {
2511 	uint8_t offload_radio = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
2512 
2513 	switch (offload_radio) {
2514 	case dp_nss_cfg_default:
2515 		*remap1 = ((0x1 << 0) | (0x2 << 3) | (0x3 << 6) |
2516 			(0x4 << 9) | (0x1 << 12) | (0x2 << 15) |
2517 			(0x3 << 18) | (0x4 << 21)) << 8;
2518 
2519 		*remap2 = ((0x1 << 0) | (0x2 << 3) | (0x3 << 6) |
2520 			(0x4 << 9) | (0x1 << 12) | (0x2 << 15) |
2521 			(0x3 << 18) | (0x4 << 21)) << 8;
2522 		break;
2523 	case dp_nss_cfg_first_radio:
2524 		*remap1 = ((0x2 << 0) | (0x3 << 3) | (0x4 << 6) |
2525 			(0x2 << 9) | (0x3 << 12) | (0x4 << 15) |
2526 			(0x2 << 18) | (0x3 << 21)) << 8;
2527 
2528 		*remap2 = ((0x4 << 0) | (0x2 << 3) | (0x3 << 6) |
2529 			(0x4 << 9) | (0x2 << 12) | (0x3 << 15) |
2530 			(0x4 << 18) | (0x2 << 21)) << 8;
2531 		break;
2532 
2533 	case dp_nss_cfg_second_radio:
2534 		*remap1 = ((0x1 << 0) | (0x3 << 3) | (0x4 << 6) |
2535 			(0x1 << 9) | (0x3 << 12) | (0x4 << 15) |
2536 			(0x1 << 18) | (0x3 << 21)) << 8;
2537 
2538 		*remap2 = ((0x4 << 0) | (0x1 << 3) | (0x3 << 6) |
2539 			(0x4 << 9) | (0x1 << 12) | (0x3 << 15) |
2540 			(0x4 << 18) | (0x1 << 21)) << 8;
2541 		break;
2542 
2543 	case dp_nss_cfg_dbdc:
2544 	case dp_nss_cfg_dbtc:
2545 		/* return false if both or all are offloaded to NSS */
2546 		return false;
2547 	}
2548 
2549 	dp_debug("remap1 %x remap2 %x offload_radio %u",
2550 		 *remap1, *remap2, offload_radio);
2551 	return true;
2552 }
2553 #endif
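
/*
 * Register encoding note (an interpretation of the constants above, not a
 * statement from the source): each remap value packs eight 3-bit
 * destination-ring fields, field k at bit offset 3 * k, with the whole
 * pattern shifted left by 8, presumably to match the register's field
 * offset. In the dp_nss_cfg_default case, for instance, remap1 cycles its
 * fields through rings 1, 2, 3, 4, 1, 2, 3, 4, spreading rx across all
 * four REO destination rings.
 */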
2554 
2555 /*
2556  * dp_reo_frag_dst_set() - configure reo register to set the
2557  *                        fragment destination ring
2558  * @soc : Datapath soc
2559  * @frag_dst_ring : output parameter to set fragment destination ring
2560  *
2561  * Based on offload_radio, the fragment destination ring is selected from:
2562  * 0 - TCL
2563  * 1 - SW1
2564  * 2 - SW2
2565  * 3 - SW3
2566  * 4 - SW4
2567  * 5 - Release
2568  * 6 - FW
2569  * 7 - alternate select
2570  *
2571  * return: void
2572  */
2573 static void dp_reo_frag_dst_set(struct dp_soc *soc, uint8_t *frag_dst_ring)
2574 {
2575 	uint8_t offload_radio = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
2576 
2577 	switch (offload_radio) {
2578 	case dp_nss_cfg_default:
2579 		*frag_dst_ring = HAL_SRNG_REO_EXCEPTION;
2580 		break;
2581 	case dp_nss_cfg_dbdc:
2582 	case dp_nss_cfg_dbtc:
2583 		*frag_dst_ring = HAL_SRNG_REO_ALTERNATE_SELECT;
2584 		break;
2585 	default:
2586 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2587 				FL("dp_reo_frag_dst_set invalid offload radio config"));
2588 		break;
2589 	}
2590 }
2591 
2592 /*
2593  * dp_soc_cmn_setup() - Common SoC level initializion
2594  * @soc:		Datapath SOC handle
2595  *
2596  * This is an internal function used to setup common SOC data structures,
2597  * to be called from PDEV attach after receiving HW mode capabilities from FW
2598  */
2599 static int dp_soc_cmn_setup(struct dp_soc *soc)
2600 {
2601 	int i;
2602 	struct hal_reo_params reo_params;
2603 	int tx_ring_size;
2604 	int tx_comp_ring_size;
2605 	int reo_dst_ring_size;
2606 	uint32_t entries;
2607 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
2608 
2609 	if (qdf_atomic_read(&soc->cmn_init_done))
2610 		return 0;
2611 
2612 	if (dp_hw_link_desc_pool_setup(soc))
2613 		goto fail1;
2614 
2615 	soc_cfg_ctx = soc->wlan_cfg_ctx;
2616 	/* Setup SRNG rings */
2617 	/* Common rings */
2618 	if (dp_srng_setup(soc, &soc->wbm_desc_rel_ring, SW2WBM_RELEASE, 0, 0,
2619 		wlan_cfg_get_dp_soc_wbm_release_ring_size(soc_cfg_ctx))) {
2620 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2621 			FL("dp_srng_setup failed for wbm_desc_rel_ring"));
2622 		goto fail1;
2623 	}
2624 
2625 	soc->num_tcl_data_rings = 0;
2626 	/* Tx data rings */
2627 	if (!wlan_cfg_per_pdev_tx_ring(soc_cfg_ctx)) {
2628 		soc->num_tcl_data_rings =
2629 			wlan_cfg_num_tcl_data_rings(soc_cfg_ctx);
2630 		tx_comp_ring_size =
2631 			wlan_cfg_tx_comp_ring_size(soc_cfg_ctx);
2632 		tx_ring_size =
2633 			wlan_cfg_tx_ring_size(soc_cfg_ctx);
2634 		for (i = 0; i < soc->num_tcl_data_rings; i++) {
2635 			if (dp_srng_setup(soc, &soc->tcl_data_ring[i],
2636 				TCL_DATA, i, 0, tx_ring_size)) {
2637 				QDF_TRACE(QDF_MODULE_ID_DP,
2638 					QDF_TRACE_LEVEL_ERROR,
2639 					FL("dp_srng_setup failed for tcl_data_ring[%d]"), i);
2640 				goto fail1;
2641 			}
2642 			/*
2643 			 * TBD: Set IPA WBM ring size with ini IPA UC tx buffer
2644 			 * count
2645 			 */
2646 			if (dp_srng_setup(soc, &soc->tx_comp_ring[i],
2647 				WBM2SW_RELEASE, i, 0, tx_comp_ring_size)) {
2648 				QDF_TRACE(QDF_MODULE_ID_DP,
2649 					QDF_TRACE_LEVEL_ERROR,
2650 					FL("dp_srng_setup failed for tx_comp_ring[%d]"), i);
2651 				goto fail1;
2652 			}
2653 		}
2654 	} else {
2655 		/* This will be incremented during per pdev ring setup */
2656 		soc->num_tcl_data_rings = 0;
2657 	}
2658 
2659 	if (dp_tx_soc_attach(soc)) {
2660 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2661 				FL("dp_tx_soc_attach failed"));
2662 		goto fail1;
2663 	}
2664 
2665 	entries = wlan_cfg_get_dp_soc_tcl_cmd_ring_size(soc_cfg_ctx);
2666 	/* TCL command and status rings */
2667 	if (dp_srng_setup(soc, &soc->tcl_cmd_ring, TCL_CMD, 0, 0,
2668 			  entries)) {
2669 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2670 			FL("dp_srng_setup failed for tcl_cmd_ring"));
2671 		goto fail1;
2672 	}
2673 
2674 	entries = wlan_cfg_get_dp_soc_tcl_status_ring_size(soc_cfg_ctx);
2675 	if (dp_srng_setup(soc, &soc->tcl_status_ring, TCL_STATUS, 0, 0,
2676 			  entries)) {
2677 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2678 			FL("dp_srng_setup failed for tcl_status_ring"));
2679 		goto fail1;
2680 	}
2681 
2682 	reo_dst_ring_size = wlan_cfg_get_reo_dst_ring_size(soc->wlan_cfg_ctx);
2683 
2684 	/* TBD: call dp_tx_init to setup Tx SW descriptors and MSDU extension
2685 	 * descriptors
2686 	 */
2687 
2688 	/* Rx data rings */
2689 	if (!wlan_cfg_per_pdev_rx_ring(soc_cfg_ctx)) {
2690 		soc->num_reo_dest_rings =
2691 			wlan_cfg_num_reo_dest_rings(soc_cfg_ctx);
2692 		QDF_TRACE(QDF_MODULE_ID_DP,
2693 			QDF_TRACE_LEVEL_INFO,
2694 			FL("num_reo_dest_rings %d"), soc->num_reo_dest_rings);
2695 		for (i = 0; i < soc->num_reo_dest_rings; i++) {
2696 			if (dp_srng_setup(soc, &soc->reo_dest_ring[i], REO_DST,
2697 				i, 0, reo_dst_ring_size)) {
2698 				QDF_TRACE(QDF_MODULE_ID_DP,
2699 					  QDF_TRACE_LEVEL_ERROR,
2700 					  FL(RNG_ERR "reo_dest_ring [%d]"), i);
2701 				goto fail1;
2702 			}
2703 		}
2704 	} else {
2705 		/* This will be incremented during per pdev ring setup */
2706 		soc->num_reo_dest_rings = 0;
2707 	}
2708 
2709 	entries = wlan_cfg_get_dp_soc_rxdma_err_dst_ring_size(soc_cfg_ctx);
2710 	/* LMAC RxDMA to SW Rings configuration */
2711 	if (!wlan_cfg_per_pdev_lmac_ring(soc_cfg_ctx)) {
2712 		/* Only valid for MCL */
2713 		struct dp_pdev *pdev = soc->pdev_list[0];
2714 
2715 		for (i = 0; i < MAX_RX_MAC_RINGS; i++) {
2716 			if (dp_srng_setup(soc, &pdev->rxdma_err_dst_ring[i],
2717 					  RXDMA_DST, 0, i,
2718 					  entries)) {
2719 				QDF_TRACE(QDF_MODULE_ID_DP,
2720 					  QDF_TRACE_LEVEL_ERROR,
2721 					  FL(RNG_ERR "rxdma_err_dst_ring"));
2722 				goto fail1;
2723 			}
2724 		}
2725 	}
2726 	/* TBD: call dp_rx_init to setup Rx SW descriptors */
2727 
2728 	/* REO reinjection ring */
2729 	entries = wlan_cfg_get_dp_soc_reo_reinject_ring_size(soc_cfg_ctx);
2730 	if (dp_srng_setup(soc, &soc->reo_reinject_ring, REO_REINJECT, 0, 0,
2731 			  entries)) {
2732 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2733 			  FL("dp_srng_setup failed for reo_reinject_ring"));
2734 		goto fail1;
2735 	}
2736 
2737 
2738 	/* Rx release ring */
2739 	if (dp_srng_setup(soc, &soc->rx_rel_ring, WBM2SW_RELEASE, 3, 0,
2740 		wlan_cfg_get_dp_soc_rx_release_ring_size(soc_cfg_ctx))) {
2741 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2742 			  FL("dp_srng_setup failed for rx_rel_ring"));
2743 		goto fail1;
2744 	}
2745 
2746 
2747 	/* Rx exception ring */
2748 	entries = wlan_cfg_get_dp_soc_reo_exception_ring_size(soc_cfg_ctx);
2749 	if (dp_srng_setup(soc, &soc->reo_exception_ring,
2750 			  REO_EXCEPTION, 0, MAX_REO_DEST_RINGS, entries)) {
2751 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2752 			  FL("dp_srng_setup failed for reo_exception_ring"));
2753 		goto fail1;
2754 	}
2755 
2756 
2757 	/* REO command and status rings */
2758 	if (dp_srng_setup(soc, &soc->reo_cmd_ring, REO_CMD, 0, 0,
2759 		wlan_cfg_get_dp_soc_reo_cmd_ring_size(soc_cfg_ctx))) {
2760 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2761 			FL("dp_srng_setup failed for reo_cmd_ring"));
2762 		goto fail1;
2763 	}
2764 
2765 	hal_reo_init_cmd_ring(soc->hal_soc, soc->reo_cmd_ring.hal_srng);
2766 	TAILQ_INIT(&soc->rx.reo_cmd_list);
2767 	qdf_spinlock_create(&soc->rx.reo_cmd_lock);
2768 
2769 	if (dp_srng_setup(soc, &soc->reo_status_ring, REO_STATUS, 0, 0,
2770 		wlan_cfg_get_dp_soc_reo_status_ring_size(soc_cfg_ctx))) {
2771 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2772 			FL("dp_srng_setup failed for reo_status_ring"));
2773 		goto fail1;
2774 	}
2775 
2776 
2777 	/* Reset the cpu ring map if radio is NSS offloaded */
2778 	if (wlan_cfg_get_dp_soc_nss_cfg(soc_cfg_ctx)) {
2779 		dp_soc_reset_cpu_ring_map(soc);
2780 		dp_soc_reset_intr_mask(soc);
2781 	}
2782 
2783 	/* Setup HW REO */
2784 	qdf_mem_zero(&reo_params, sizeof(reo_params));
2785 
2786 	if (wlan_cfg_is_rx_hash_enabled(soc_cfg_ctx)) {
2787 
2788 		/*
2789 		 * Reo ring remap is not required if both radios
2790 		 * are offloaded to NSS
2791 		 */
2792 		if (!dp_reo_remap_config(soc,
2793 					&reo_params.remap1,
2794 					&reo_params.remap2))
2795 			goto out;
2796 
2797 		reo_params.rx_hash_enabled = true;
2798 	}
2799 
2800 	/* setup the global rx defrag waitlist */
2801 	TAILQ_INIT(&soc->rx.defrag.waitlist);
2802 	soc->rx.defrag.timeout_ms =
2803 		wlan_cfg_get_rx_defrag_min_timeout(soc_cfg_ctx);
2804 	soc->rx.flags.defrag_timeout_check =
2805 		wlan_cfg_get_defrag_timeout_check(soc_cfg_ctx);
2806 	qdf_spinlock_create(&soc->rx.defrag.defrag_lock);
2807 
2808 out:
2809 	/*
2810 	 * set the fragment destination ring
2811 	 */
2812 	dp_reo_frag_dst_set(soc, &reo_params.frag_dst_ring);
2813 
2814 	hal_reo_setup(soc->hal_soc, &reo_params);
2815 
2816 	qdf_atomic_set(&soc->cmn_init_done, 1);
2817 	qdf_nbuf_queue_init(&soc->htt_stats.msg);
2818 	return 0;
2819 fail1:
2820 	/*
2821 	 * Cleanup will be done as part of soc_detach, which will
2822 	 * be called on pdev attach failure
2823 	 */
2824 	return QDF_STATUS_E_FAILURE;
2825 }
2826 
2827 static void dp_pdev_detach_wifi3(struct cdp_pdev *txrx_pdev, int force);
2828 
2829 static QDF_STATUS dp_lro_hash_setup(struct dp_soc *soc, struct dp_pdev *pdev)
2830 {
2831 	struct cdp_lro_hash_config lro_hash;
2832 	QDF_STATUS status;
2833 
2834 	if (!wlan_cfg_is_lro_enabled(soc->wlan_cfg_ctx) &&
2835 	    !wlan_cfg_is_gro_enabled(soc->wlan_cfg_ctx) &&
2836 	    !wlan_cfg_is_rx_hash_enabled(soc->wlan_cfg_ctx)) {
2837 		dp_err("LRO, GRO and RX hash disabled");
2838 		return QDF_STATUS_E_FAILURE;
2839 	}
2840 
2841 	qdf_mem_zero(&lro_hash, sizeof(lro_hash));
2842 
2843 	if (wlan_cfg_is_lro_enabled(soc->wlan_cfg_ctx) ||
2844 	    wlan_cfg_is_gro_enabled(soc->wlan_cfg_ctx)) {
2845 		lro_hash.lro_enable = 1;
2846 		lro_hash.tcp_flag = QDF_TCPHDR_ACK;
2847 		lro_hash.tcp_flag_mask = QDF_TCPHDR_FIN | QDF_TCPHDR_SYN |
2848 			 QDF_TCPHDR_RST | QDF_TCPHDR_ACK | QDF_TCPHDR_URG |
2849 			 QDF_TCPHDR_ECE | QDF_TCPHDR_CWR;
2850 	}
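
	/*
	 * Note (interpretation, not from the source): tcp_flag and
	 * tcp_flag_mask form a match-under-mask pair, so a segment is a
	 * candidate for aggregation when (flags & tcp_flag_mask) == tcp_flag,
	 * i.e. ACK set and FIN/SYN/RST/URG/ECE/CWR all clear; PSH is not in
	 * the mask and is therefore ignored.
	 */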
2851 
2852 	qdf_get_random_bytes(lro_hash.toeplitz_hash_ipv4,
2853 		 (sizeof(lro_hash.toeplitz_hash_ipv4[0]) *
2854 		 LRO_IPV4_SEED_ARR_SZ));
2855 	qdf_get_random_bytes(lro_hash.toeplitz_hash_ipv6,
2856 		 (sizeof(lro_hash.toeplitz_hash_ipv6[0]) *
2857 		 LRO_IPV6_SEED_ARR_SZ));
2858 
2859 	qdf_assert(soc->cdp_soc.ol_ops->lro_hash_config);
2860 
2861 	if (!soc->cdp_soc.ol_ops->lro_hash_config) {
2862 		QDF_BUG(0);
2863 		dp_err("lro_hash_config not configured");
2864 		return QDF_STATUS_E_FAILURE;
2865 	}
2866 
2867 	status = soc->cdp_soc.ol_ops->lro_hash_config(pdev->ctrl_pdev,
2868 						      &lro_hash);
2869 	if (!QDF_IS_STATUS_SUCCESS(status)) {
2870 		dp_err("failed to send lro_hash_config to FW %u", status);
2871 		return status;
2872 	}
2873 
2874 	dp_info("LRO CMD config: lro_enable: 0x%x tcp_flag 0x%x tcp_flag_mask 0x%x",
2875 		lro_hash.lro_enable, lro_hash.tcp_flag,
2876 		lro_hash.tcp_flag_mask);
2877 
2878 	dp_info("toeplitz_hash_ipv4:");
2879 	qdf_trace_hex_dump(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
2880 			   (void *)lro_hash.toeplitz_hash_ipv4,
2881 			   (sizeof(lro_hash.toeplitz_hash_ipv4[0]) *
2882 			   LRO_IPV4_SEED_ARR_SZ));
2883 
2884 	dp_info("toeplitz_hash_ipv6:");
2885 	qdf_trace_hex_dump(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
2886 			   (void *)lro_hash.toeplitz_hash_ipv6,
2887 			   (sizeof(lro_hash.toeplitz_hash_ipv6[0]) *
2888 			   LRO_IPV6_SEED_ARR_SZ));
2889 
2890 	return status;
2891 }
2892 
2893 /*
2894 * dp_rxdma_ring_setup() - configure the RX DMA rings
2895 * @soc: data path SoC handle
2896 * @pdev: Physical device handle
2897 *
2898 * Return: 0 - success, > 0 - failure
2899 */
2900 #ifdef QCA_HOST2FW_RXBUF_RING
2901 static int dp_rxdma_ring_setup(struct dp_soc *soc,
2902 	 struct dp_pdev *pdev)
2903 {
2904 	struct wlan_cfg_dp_pdev_ctxt *pdev_cfg_ctx;
2905 	int max_mac_rings;
2906 	int i;
2907 
2908 	pdev_cfg_ctx = pdev->wlan_cfg_ctx;
2909 	max_mac_rings = wlan_cfg_get_num_mac_rings(pdev_cfg_ctx);
2910 
2911 	for (i = 0; i < max_mac_rings; i++) {
2912 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
2913 			 "%s: pdev_id %d mac_id %d",
2914 			 __func__, pdev->pdev_id, i);
2915 		if (dp_srng_setup(soc, &pdev->rx_mac_buf_ring[i],
2916 			RXDMA_BUF, 1, i,
2917 			wlan_cfg_get_rx_dma_buf_ring_size(pdev_cfg_ctx))) {
2918 			QDF_TRACE(QDF_MODULE_ID_DP,
2919 				 QDF_TRACE_LEVEL_ERROR,
2920 				 FL("failed rx mac ring setup"));
2921 			return QDF_STATUS_E_FAILURE;
2922 		}
2923 	}
2924 	return QDF_STATUS_SUCCESS;
2925 }
2926 #else
2927 static int dp_rxdma_ring_setup(struct dp_soc *soc,
2928 	 struct dp_pdev *pdev)
2929 {
2930 	return QDF_STATUS_SUCCESS;
2931 }
2932 #endif
2933 
2934 /**
2935  * dp_dscp_tid_map_setup(): Initialize the dscp-tid maps
2936  * @pdev: DP_PDEV handle
2937  *
2938  * Return: void
2939  */
2940 static inline void
2941 dp_dscp_tid_map_setup(struct dp_pdev *pdev)
2942 {
2943 	uint8_t map_id;
2944 	struct dp_soc *soc = pdev->soc;
2945 
2946 	if (!soc)
2947 		return;
2948 
2949 	for (map_id = 0; map_id < DP_MAX_TID_MAPS; map_id++) {
2950 		qdf_mem_copy(pdev->dscp_tid_map[map_id],
2951 			     default_dscp_tid_map,
2952 			     sizeof(default_dscp_tid_map));
2953 	}
2954 
2955 	for (map_id = 0; map_id < soc->num_hw_dscp_tid_map; map_id++) {
2956 		hal_tx_set_dscp_tid_map(soc->hal_soc,
2957 					default_dscp_tid_map,
2958 					map_id);
2959 	}
2960 }
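
/*
 * Example (assuming the stock default_dscp_tid_map, where entry n holds
 * n >> 3): DSCP 46 (EF, typically voice) maps to TID 5 and DSCP 0 (best
 * effort) maps to TID 0. Every map_id is programmed with the same default
 * table here until the control path overrides it.
 */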
2961 
2962 #ifdef IPA_OFFLOAD
2963 /**
2964  * dp_setup_ipa_rx_refill_buf_ring - Setup second Rx refill buffer ring
2965  * @soc: data path instance
2966  * @pdev: core txrx pdev context
2967  *
2968  * Return: QDF_STATUS_SUCCESS: success
2969  *         QDF_STATUS_E_RESOURCES: Error return
2970  */
2971 static int dp_setup_ipa_rx_refill_buf_ring(struct dp_soc *soc,
2972 					   struct dp_pdev *pdev)
2973 {
2974 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
2975 	int entries;
2976 
2977 	soc_cfg_ctx = soc->wlan_cfg_ctx;
2978 	entries = wlan_cfg_get_dp_soc_rxdma_refill_ring_size(soc_cfg_ctx);
2979 
2980 	/* Setup second Rx refill buffer ring */
2981 	if (dp_srng_setup(soc, &pdev->rx_refill_buf_ring2, RXDMA_BUF,
2982 			  IPA_RX_REFILL_BUF_RING_IDX,
2983 			  pdev->pdev_id,
2984 			  entries)) {
2985 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2986 			FL("dp_srng_setup failed second rx refill ring"));
2987 		return QDF_STATUS_E_FAILURE;
2988 	}
2989 	return QDF_STATUS_SUCCESS;
2990 }
2991 
2992 /**
2993  * dp_cleanup_ipa_rx_refill_buf_ring - Cleanup second Rx refill buffer ring
2994  * @soc: data path instance
2995  * @pdev: core txrx pdev context
2996  *
2997  * Return: void
2998  */
2999 static void dp_cleanup_ipa_rx_refill_buf_ring(struct dp_soc *soc,
3000 					      struct dp_pdev *pdev)
3001 {
3002 	dp_srng_cleanup(soc, &pdev->rx_refill_buf_ring2, RXDMA_BUF,
3003 			IPA_RX_REFILL_BUF_RING_IDX);
3004 }
3005 
3006 #else
3007 static int dp_setup_ipa_rx_refill_buf_ring(struct dp_soc *soc,
3008 					   struct dp_pdev *pdev)
3009 {
3010 	return QDF_STATUS_SUCCESS;
3011 }
3012 
3013 static void dp_cleanup_ipa_rx_refill_buf_ring(struct dp_soc *soc,
3014 					      struct dp_pdev *pdev)
3015 {
3016 }
3017 #endif
3018 
3019 #if !defined(DISABLE_MON_CONFIG)
3020 /**
3021  * dp_mon_rings_setup() - Initialize Monitor rings based on target
3022  * @soc: soc handle
3023  * @pdev: physical device handle
3024  *
3025  * Return: nonzero on failure and zero on success
3026  */
3027 static
3028 QDF_STATUS dp_mon_rings_setup(struct dp_soc *soc, struct dp_pdev *pdev)
3029 {
3030 	int mac_id = 0;
3031 	int pdev_id = pdev->pdev_id;
3032 	int entries;
3033 	struct wlan_cfg_dp_pdev_ctxt *pdev_cfg_ctx;
3034 
3035 	pdev_cfg_ctx = pdev->wlan_cfg_ctx;
3036 
3037 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
3038 		int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, pdev_id);
3039 
3040 		if (soc->wlan_cfg_ctx->rxdma1_enable) {
3041 			entries =
3042 			   wlan_cfg_get_dma_mon_buf_ring_size(pdev_cfg_ctx);
3043 			if (dp_srng_setup(soc,
3044 					  &pdev->rxdma_mon_buf_ring[mac_id],
3045 					  RXDMA_MONITOR_BUF, 0, mac_for_pdev,
3046 					  entries)) {
3047 				QDF_TRACE(QDF_MODULE_ID_DP,
3048 					  QDF_TRACE_LEVEL_ERROR,
3049 					  FL(RNG_ERR "rxdma_mon_buf_ring "));
3050 				return QDF_STATUS_E_NOMEM;
3051 			}
3052 
3053 			entries =
3054 			   wlan_cfg_get_dma_mon_dest_ring_size(pdev_cfg_ctx);
3055 			if (dp_srng_setup(soc,
3056 					  &pdev->rxdma_mon_dst_ring[mac_id],
3057 					  RXDMA_MONITOR_DST, 0, mac_for_pdev,
3058 					  entries)) {
3059 				QDF_TRACE(QDF_MODULE_ID_DP,
3060 					  QDF_TRACE_LEVEL_ERROR,
3061 					  FL(RNG_ERR "rxdma_mon_dst_ring"));
3062 				return QDF_STATUS_E_NOMEM;
3063 			}
3064 
3065 			entries =
3066 			    wlan_cfg_get_dma_mon_stat_ring_size(pdev_cfg_ctx);
3067 			if (dp_srng_setup(soc,
3068 					  &pdev->rxdma_mon_status_ring[mac_id],
3069 					  RXDMA_MONITOR_STATUS, 0, mac_for_pdev,
3070 					  entries)) {
3071 				QDF_TRACE(QDF_MODULE_ID_DP,
3072 					  QDF_TRACE_LEVEL_ERROR,
3073 					  FL(RNG_ERR "rxdma_mon_status_ring"));
3074 				return QDF_STATUS_E_NOMEM;
3075 			}
3076 
3077 			entries =
3078 			   wlan_cfg_get_dma_mon_desc_ring_size(pdev_cfg_ctx);
3079 			if (dp_srng_setup(soc,
3080 					  &pdev->rxdma_mon_desc_ring[mac_id],
3081 					  RXDMA_MONITOR_DESC, 0, mac_for_pdev,
3082 					  entries)) {
3083 				QDF_TRACE(QDF_MODULE_ID_DP,
3084 					  QDF_TRACE_LEVEL_ERROR,
3085 					  FL(RNG_ERR "rxdma_mon_desc_ring"));
3086 				return QDF_STATUS_E_NOMEM;
3087 			}
3088 		} else {
3089 			entries =
3090 			   wlan_cfg_get_dma_mon_stat_ring_size(pdev_cfg_ctx);
3091 			if (dp_srng_setup(soc,
3092 					  &pdev->rxdma_mon_status_ring[mac_id],
3093 					  RXDMA_MONITOR_STATUS, 0, mac_for_pdev,
3094 					  entries)) {
3095 				QDF_TRACE(QDF_MODULE_ID_DP,
3096 					  QDF_TRACE_LEVEL_ERROR,
3097 					  FL(RNG_ERR "rxdma_mon_status_ring"));
3098 				return QDF_STATUS_E_NOMEM;
3099 			}
3100 		}
3101 	}
3102 
3103 	return QDF_STATUS_SUCCESS;
3104 }
3105 #else
3106 static
3107 QDF_STATUS dp_mon_rings_setup(struct dp_soc *soc, struct dp_pdev *pdev)
3108 {
3109 	return QDF_STATUS_SUCCESS;
3110 }
3111 #endif
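
/*
 * Summary of the split above (an interpretation): targets with
 * rxdma1_enable set get four dedicated monitor rings per mac (buf, dst,
 * status and desc), while other targets presumably share the base RXDMA
 * engine and only need the monitor status ring.
 */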
3112 
3113 /* dp_iterate_update_peer_list() - update peer stats on cal client timer
3114  * @pdev_hdl: pdev handle
3115  */
3116 #ifdef ATH_SUPPORT_EXT_STAT
3117 void  dp_iterate_update_peer_list(void *pdev_hdl)
3118 {
3119 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_hdl;
3120 	struct dp_soc *soc = pdev->soc;
3121 	struct dp_vdev *vdev = NULL;
3122 	struct dp_peer *peer = NULL;
3123 
3124 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
3125 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
3126 	DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
3127 		DP_VDEV_ITERATE_PEER_LIST(vdev, peer) {
3128 			dp_cal_client_update_peer_stats(&peer->stats);
3129 		}
3130 	}
3131 	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
3132 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
3133 }
3134 #else
3135 void  dp_iterate_update_peer_list(void *pdev_hdl)
3136 {
3137 }
3138 #endif
3139 
3140 /*
3141 * dp_pdev_attach_wifi3() - attach txrx pdev
3142 * @txrx_soc: Datapath SOC handle
3143 * @ctrl_pdev: Opaque PDEV object
3144 * @htc_handle: HTC handle for host-target interface
3145 * @qdf_osdev: QDF OS device
3146 * @pdev_id: PDEV ID
3147 *
3148 * Return: DP PDEV handle on success, NULL on failure
3149 */
3150 static struct cdp_pdev *dp_pdev_attach_wifi3(struct cdp_soc_t *txrx_soc,
3151 	struct cdp_ctrl_objmgr_pdev *ctrl_pdev,
3152 	HTC_HANDLE htc_handle, qdf_device_t qdf_osdev, uint8_t pdev_id)
3153 {
3154 	int tx_ring_size;
3155 	int tx_comp_ring_size;
3156 	int reo_dst_ring_size;
3157 	int entries;
3158 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
3159 	int nss_cfg;
3160 
3161 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
3162 	struct dp_pdev *pdev = NULL;
3163 
3164 	if (soc->dp_soc_reinit)
3165 		pdev = soc->pdev_list[pdev_id];
3166 	else
3167 		pdev = qdf_mem_malloc(sizeof(*pdev));
3168 
3169 	if (!pdev) {
3170 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3171 			FL("DP PDEV memory allocation failed"));
3172 		goto fail0;
3173 	}
3174 
3175 	/*
3176 	 * Variable to prevent double pdev deinitialization during
3177 	 * radio detach execution, i.e. in the absence of any vdev.
3178 	 */
3179 	pdev->pdev_deinit = 0;
3180 	pdev->invalid_peer = qdf_mem_malloc(sizeof(struct dp_peer));
3181 
3182 	if (!pdev->invalid_peer) {
3183 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3184 			  FL("Invalid peer memory allocation failed"));
3185 		qdf_mem_free(pdev);
3186 		goto fail0;
3187 	}
3188 
3189 	soc_cfg_ctx = soc->wlan_cfg_ctx;
3190 	pdev->wlan_cfg_ctx = wlan_cfg_pdev_attach(soc->ctrl_psoc);
3191 
3192 	if (!pdev->wlan_cfg_ctx) {
3193 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3194 			FL("pdev cfg_attach failed"));
3195 
3196 		qdf_mem_free(pdev->invalid_peer);
3197 		qdf_mem_free(pdev);
3198 		goto fail0;
3199 	}
3200 
3201 	/*
3202 	 * set nss pdev config based on soc config
3203 	 */
3204 	nss_cfg = wlan_cfg_get_dp_soc_nss_cfg(soc_cfg_ctx);
3205 	wlan_cfg_set_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx,
3206 			(nss_cfg & (1 << pdev_id)));
3207 
3208 	pdev->soc = soc;
3209 	pdev->ctrl_pdev = ctrl_pdev;
3210 	pdev->pdev_id = pdev_id;
3211 	soc->pdev_list[pdev_id] = pdev;
3212 
3213 	pdev->lmac_id = wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, pdev_id);
3214 	soc->pdev_count++;
3215 
3216 	TAILQ_INIT(&pdev->vdev_list);
3217 	qdf_spinlock_create(&pdev->vdev_list_lock);
3218 	pdev->vdev_count = 0;
3219 
3220 	qdf_spinlock_create(&pdev->tx_mutex);
3221 	qdf_spinlock_create(&pdev->neighbour_peer_mutex);
3222 	TAILQ_INIT(&pdev->neighbour_peers_list);
3223 	pdev->neighbour_peers_added = false;
3224 	pdev->monitor_configured = false;
3225 
3226 	if (dp_soc_cmn_setup(soc)) {
3227 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3228 			FL("dp_soc_cmn_setup failed"));
3229 		goto fail1;
3230 	}
3231 
3232 	/* Setup per PDEV TCL rings if configured */
3233 	if (wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
3234 		tx_ring_size =
3235 			wlan_cfg_tx_ring_size(soc_cfg_ctx);
3236 		tx_comp_ring_size =
3237 			wlan_cfg_tx_comp_ring_size(soc_cfg_ctx);
3238 
3239 		if (dp_srng_setup(soc, &soc->tcl_data_ring[pdev_id], TCL_DATA,
3240 			pdev_id, pdev_id, tx_ring_size)) {
3241 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3242 				FL("dp_srng_setup failed for tcl_data_ring"));
3243 			goto fail1;
3244 		}
3245 		if (dp_srng_setup(soc, &soc->tx_comp_ring[pdev_id],
3246 			WBM2SW_RELEASE, pdev_id, pdev_id, tx_comp_ring_size)) {
3247 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3248 				FL("dp_srng_setup failed for tx_comp_ring"));
3249 			goto fail1;
3250 		}
3251 		soc->num_tcl_data_rings++;
3252 	}
3253 
3254 	/* Tx specific init */
3255 	if (dp_tx_pdev_attach(pdev)) {
3256 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3257 			FL("dp_tx_pdev_attach failed"));
3258 		goto fail1;
3259 	}
3260 
3261 	reo_dst_ring_size = wlan_cfg_get_reo_dst_ring_size(soc->wlan_cfg_ctx);
3262 	/* Setup per PDEV REO rings if configured */
3263 	if (wlan_cfg_per_pdev_rx_ring(soc_cfg_ctx)) {
3264 		if (dp_srng_setup(soc, &soc->reo_dest_ring[pdev_id], REO_DST,
3265 			pdev_id, pdev_id, reo_dst_ring_size)) {
3266 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3267 				FL("dp_srng_setup failed for reo_dest_ringn"));
3268 			goto fail1;
3269 		}
3270 		soc->num_reo_dest_rings++;
3271 
3272 	}
3273 	if (dp_srng_setup(soc, &pdev->rx_refill_buf_ring, RXDMA_BUF, 0, pdev_id,
3274 		wlan_cfg_get_dp_soc_rxdma_refill_ring_size(soc_cfg_ctx))) {
3275 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3276 			 FL("dp_srng_setup failed rx refill ring"));
3277 		goto fail1;
3278 	}
3279 
3280 	if (dp_rxdma_ring_setup(soc, pdev)) {
3281 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3282 			 FL("RXDMA ring config failed"));
3283 		goto fail1;
3284 	}
3285 
3286 	if (dp_mon_rings_setup(soc, pdev)) {
3287 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3288 			  FL("MONITOR rings setup failed"));
3289 		goto fail1;
3290 	}
3291 
3292 	entries = wlan_cfg_get_dp_soc_rxdma_err_dst_ring_size(soc_cfg_ctx);
3293 	if (wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx)) {
3294 		if (dp_srng_setup(soc, &pdev->rxdma_err_dst_ring[0], RXDMA_DST,
3295 				  0, pdev_id,
3296 				  entries)) {
3297 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3298 				  FL(RNG_ERR "rxdma_err_dst_ring"));
3299 			goto fail1;
3300 		}
3301 	}
3302 
3303 	if (dp_setup_ipa_rx_refill_buf_ring(soc, pdev))
3304 		goto fail1;
3305 
3306 	if (dp_ipa_ring_resource_setup(soc, pdev))
3307 		goto fail1;
3308 
3309 	if (dp_ipa_uc_attach(soc, pdev) != QDF_STATUS_SUCCESS) {
3310 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3311 			FL("dp_ipa_uc_attach failed"));
3312 		goto fail1;
3313 	}
3314 
3315 	/* Rx specific init */
3316 	if (dp_rx_pdev_attach(pdev)) {
3317 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3318 			  FL("dp_rx_pdev_attach failed"));
3319 		goto fail1;
3320 	}
3321 
3322 	DP_STATS_INIT(pdev);
3323 
3324 	/* Monitor filter init */
3325 	pdev->mon_filter_mode = MON_FILTER_ALL;
3326 	pdev->fp_mgmt_filter = FILTER_MGMT_ALL;
3327 	pdev->fp_ctrl_filter = FILTER_CTRL_ALL;
3328 	pdev->fp_data_filter = FILTER_DATA_ALL;
3329 	pdev->mo_mgmt_filter = FILTER_MGMT_ALL;
3330 	pdev->mo_ctrl_filter = FILTER_CTRL_ALL;
3331 	pdev->mo_data_filter = FILTER_DATA_ALL;
3332 
3333 	dp_local_peer_id_pool_init(pdev);
3334 
3335 	dp_dscp_tid_map_setup(pdev);
3336 
3337 	/* Rx monitor mode specific init */
3338 	if (dp_rx_pdev_mon_attach(pdev)) {
3339 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
3340 				"dp_rx_pdev_mon_attach failed");
3341 		goto fail1;
3342 	}
3343 
3344 	if (dp_wdi_event_attach(pdev)) {
3345 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
3346 				"dp_wdi_evet_attach failed");
3347 		goto fail1;
3348 	}
3349 
3350 	/* set the reo destination during initialization */
3351 	pdev->reo_dest = pdev->pdev_id + 1;
3352 
3353 	/*
3354 	 * initialize ppdu tlv list
3355 	 */
3356 	TAILQ_INIT(&pdev->ppdu_info_list);
3357 	pdev->tlv_count = 0;
3358 	pdev->list_depth = 0;
3359 
3360 	qdf_mem_zero(&pdev->sojourn_stats, sizeof(struct cdp_tx_sojourn_stats));
3361 
3362 	pdev->sojourn_buf = qdf_nbuf_alloc(pdev->soc->osdev,
3363 			      sizeof(struct cdp_tx_sojourn_stats), 0, 4,
3364 			      TRUE);
3365 
3366 	/* initialize cal client timer */
3367 	dp_cal_client_attach(&pdev->cal_client_ctx, pdev, pdev->soc->osdev,
3368 			     &dp_iterate_update_peer_list);
3369 
3370 	return (struct cdp_pdev *)pdev;
3371 
3372 fail1:
3373 	dp_pdev_detach((struct cdp_pdev *)pdev, 0);
3374 
3375 fail0:
3376 	return NULL;
3377 }
3378 
3379 /*
3380 * dp_rxdma_ring_cleanup() - configure the RX DMA rings
3381 * @soc: data path SoC handle
3382 * @pdev: Physical device handle
3383 *
3384 * Return: void
3385 */
3386 #ifdef QCA_HOST2FW_RXBUF_RING
3387 static void dp_rxdma_ring_cleanup(struct dp_soc *soc,
3388 	 struct dp_pdev *pdev)
3389 {
3390 	int max_mac_rings =
3391 		 wlan_cfg_get_num_mac_rings(pdev->wlan_cfg_ctx);
3392 	int i;
3393 
3394 	max_mac_rings = max_mac_rings < MAX_RX_MAC_RINGS ?
3395 				max_mac_rings : MAX_RX_MAC_RINGS;
3396 	for (i = 0; i < MAX_RX_MAC_RINGS; i++)
3397 		dp_srng_cleanup(soc, &pdev->rx_mac_buf_ring[i],
3398 			 RXDMA_BUF, 1);
3399 
3400 	qdf_timer_free(&soc->mon_reap_timer);
3401 }
3402 #else
3403 static void dp_rxdma_ring_cleanup(struct dp_soc *soc,
3404 	 struct dp_pdev *pdev)
3405 {
3406 }
3407 #endif
3408 
3409 /*
3410  * dp_neighbour_peers_detach() - Detach neighbour peers (nac clients)
3411  * @pdev: device object
3412  *
3413  * Return: void
3414  */
3415 static void dp_neighbour_peers_detach(struct dp_pdev *pdev)
3416 {
3417 	struct dp_neighbour_peer *peer = NULL;
3418 	struct dp_neighbour_peer *temp_peer = NULL;
3419 
3420 	TAILQ_FOREACH_SAFE(peer, &pdev->neighbour_peers_list,
3421 			neighbour_peer_list_elem, temp_peer) {
3422 		/* delete this peer from the list */
3423 		TAILQ_REMOVE(&pdev->neighbour_peers_list,
3424 				peer, neighbour_peer_list_elem);
3425 		qdf_mem_free(peer);
3426 	}
3427 
3428 	qdf_spinlock_destroy(&pdev->neighbour_peer_mutex);
3429 }
3430 
3431 /**
3432 * dp_htt_ppdu_stats_detach() - detach stats resources
3433 * @pdev: Datapath PDEV handle
3434 *
3435 * Return: void
3436 */
3437 static void dp_htt_ppdu_stats_detach(struct dp_pdev *pdev)
3438 {
3439 	struct ppdu_info *ppdu_info, *ppdu_info_next;
3440 
3441 	TAILQ_FOREACH_SAFE(ppdu_info, &pdev->ppdu_info_list,
3442 			ppdu_info_list_elem, ppdu_info_next) {
3443 		if (!ppdu_info)
3444 			break;
3445 		qdf_assert_always(ppdu_info->nbuf);
3446 		qdf_nbuf_free(ppdu_info->nbuf);
3447 		qdf_mem_free(ppdu_info);
3448 	}
3449 }
3450 
3451 #if !defined(DISABLE_MON_CONFIG)
3452 
3453 static
3454 void dp_mon_ring_cleanup(struct dp_soc *soc, struct dp_pdev *pdev,
3455 			 int mac_id)
3456 {
3457 	if (soc->wlan_cfg_ctx->rxdma1_enable) {
3458 		dp_srng_cleanup(soc,
3459 				&pdev->rxdma_mon_buf_ring[mac_id],
3460 				RXDMA_MONITOR_BUF, 0);
3461 
3462 		dp_srng_cleanup(soc,
3463 				&pdev->rxdma_mon_dst_ring[mac_id],
3464 				RXDMA_MONITOR_DST, 0);
3465 
3466 		dp_srng_cleanup(soc,
3467 				&pdev->rxdma_mon_status_ring[mac_id],
3468 				RXDMA_MONITOR_STATUS, 0);
3469 
3470 		dp_srng_cleanup(soc,
3471 				&pdev->rxdma_mon_desc_ring[mac_id],
3472 				RXDMA_MONITOR_DESC, 0);
3473 
3474 		dp_srng_cleanup(soc,
3475 				&pdev->rxdma_err_dst_ring[mac_id],
3476 				RXDMA_DST, 0);
3477 	} else {
3478 		dp_srng_cleanup(soc,
3479 				&pdev->rxdma_mon_status_ring[mac_id],
3480 				RXDMA_MONITOR_STATUS, 0);
3481 
3482 		dp_srng_cleanup(soc,
3483 				&pdev->rxdma_err_dst_ring[mac_id],
3484 				RXDMA_DST, 0);
3485 	}
3487 }
3488 #else
3489 static void dp_mon_ring_cleanup(struct dp_soc *soc, struct dp_pdev *pdev,
3490 				int mac_id)
3491 {
3492 }
3493 #endif
3494 
3495 /**
3496  * dp_mon_ring_deinit() - Placeholder to deinitialize Monitor rings
3497  *
3498  * @soc: soc handle
3499  * @pdev: datapath physical dev handle
3500  * @mac_id: mac number
3501  *
3502  * Return: None
3503  */
3504 static void dp_mon_ring_deinit(struct dp_soc *soc, struct dp_pdev *pdev,
3505 			       int mac_id)
3506 {
3507 }
3508 
3509 /**
3510  * dp_pdev_mem_reset() - Reset txrx pdev memory
3511  * @pdev: dp pdev handle
3512  *
3513  * Return: None
3514  */
3515 static void dp_pdev_mem_reset(struct dp_pdev *pdev)
3516 {
3517 	uint16_t len = 0;
3518 	uint8_t *dp_pdev_offset = (uint8_t *)pdev;
3519 
3520 	len = sizeof(struct dp_pdev) -
3521 		offsetof(struct dp_pdev, pdev_deinit) -
3522 		sizeof(pdev->pdev_deinit);
3523 	dp_pdev_offset = dp_pdev_offset +
3524 			 offsetof(struct dp_pdev, pdev_deinit) +
3525 			 sizeof(pdev->pdev_deinit);
3526 
3527 	qdf_mem_zero(dp_pdev_offset, len);
3528 }
3529 
3530 /**
3531  * dp_pdev_deinit() - Deinit txrx pdev
3532  * @txrx_pdev: Datapath PDEV handle
3533  * @force: Force deinit
3534  *
3535  * Return: None
3536  */
3537 static void dp_pdev_deinit(struct cdp_pdev *txrx_pdev, int force)
3538 {
3539 	struct dp_pdev *pdev = (struct dp_pdev *)txrx_pdev;
3540 	struct dp_soc *soc = pdev->soc;
3541 	qdf_nbuf_t curr_nbuf, next_nbuf;
3542 	int mac_id;
3543 
3544 	/*
3545 	 * Prevent double pdev deinitialization during radio detach
3546 	 * execution .i.e. in the absence of any vdev
3547 	 */
3548 	if (pdev->pdev_deinit)
3549 		return;
3550 
3551 	pdev->pdev_deinit = 1;
3552 
3553 	dp_wdi_event_detach(pdev);
3554 
3555 	dp_tx_pdev_detach(pdev);
3556 
3557 	if (wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
3558 		dp_srng_deinit(soc, &soc->tcl_data_ring[pdev->pdev_id],
3559 			       TCL_DATA, pdev->pdev_id);
3560 		dp_srng_deinit(soc, &soc->tx_comp_ring[pdev->pdev_id],
3561 			       WBM2SW_RELEASE, pdev->pdev_id);
3562 	}
3563 
3564 	dp_pktlogmod_exit(pdev);
3565 
3566 	dp_rx_pdev_detach(pdev);
3567 	dp_rx_pdev_mon_detach(pdev);
3568 	dp_neighbour_peers_detach(pdev);
3569 	qdf_spinlock_destroy(&pdev->tx_mutex);
3570 	qdf_spinlock_destroy(&pdev->vdev_list_lock);
3571 
3572 	dp_ipa_uc_detach(soc, pdev);
3573 
3574 	dp_cleanup_ipa_rx_refill_buf_ring(soc, pdev);
3575 
3576 	/* Cleanup per PDEV REO rings if configured */
3577 	if (wlan_cfg_per_pdev_rx_ring(soc->wlan_cfg_ctx)) {
3578 		dp_srng_deinit(soc, &soc->reo_dest_ring[pdev->pdev_id],
3579 			       REO_DST, pdev->pdev_id);
3580 	}
3581 
3582 	dp_srng_deinit(soc, &pdev->rx_refill_buf_ring, RXDMA_BUF, 0);
3583 
3584 	dp_rxdma_ring_cleanup(soc, pdev);
3585 
3586 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
3587 		dp_mon_ring_deinit(soc, pdev, mac_id);
3588 		dp_srng_deinit(soc, &pdev->rxdma_err_dst_ring[mac_id],
3589 			       RXDMA_DST, 0);
3590 	}
3591 
3592 	curr_nbuf = pdev->invalid_peer_head_msdu;
3593 	while (curr_nbuf) {
3594 		next_nbuf = qdf_nbuf_next(curr_nbuf);
3595 		qdf_nbuf_free(curr_nbuf);
3596 		curr_nbuf = next_nbuf;
3597 	}
3598 	pdev->invalid_peer_head_msdu = NULL;
3599 	pdev->invalid_peer_tail_msdu = NULL;
3600 
3601 	dp_htt_ppdu_stats_detach(pdev);
3602 
3603 	qdf_nbuf_free(pdev->sojourn_buf);
3604 
3605 	dp_cal_client_detach(&pdev->cal_client_ctx);
3606 
3607 	soc->pdev_count--;
3608 	wlan_cfg_pdev_detach(pdev->wlan_cfg_ctx);
3609 	qdf_mem_free(pdev->invalid_peer);
3610 	qdf_mem_free(pdev->dp_txrx_handle);
3611 	dp_pdev_mem_reset(pdev);
3612 }
3613 
3614 /**
3615  * dp_pdev_deinit_wifi3() - Deinit txrx pdev
3616  * @txrx_pdev: Datapath PDEV handle
3617  * @force: Force deinit
3618  *
3619  * Return: None
3620  */
3621 static void dp_pdev_deinit_wifi3(struct cdp_pdev *txrx_pdev, int force)
3622 {
3623 	dp_pdev_deinit(txrx_pdev, force);
3624 }
3625 
3626 /*
3627  * dp_pdev_detach() - Complete rest of pdev detach
3628  * @txrx_pdev: Datapath PDEV handle
 * @force: Force detach
3630  *
3631  * Return: None
3632  */
3633 static void dp_pdev_detach(struct cdp_pdev *txrx_pdev, int force)
3634 {
3635 	struct dp_pdev *pdev = (struct dp_pdev *)txrx_pdev;
3636 	struct dp_soc *soc = pdev->soc;
3637 	int mac_id;
3638 
3639 	if (wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
3640 		dp_srng_cleanup(soc, &soc->tcl_data_ring[pdev->pdev_id],
3641 				TCL_DATA, pdev->pdev_id);
3642 		dp_srng_cleanup(soc, &soc->tx_comp_ring[pdev->pdev_id],
3643 				WBM2SW_RELEASE, pdev->pdev_id);
3644 	}
3645 
3646 	dp_mon_link_free(pdev);
3647 
3648 	/* Cleanup per PDEV REO rings if configured */
3649 	if (wlan_cfg_per_pdev_rx_ring(soc->wlan_cfg_ctx)) {
3650 		dp_srng_cleanup(soc, &soc->reo_dest_ring[pdev->pdev_id],
3651 				REO_DST, pdev->pdev_id);
3652 	}
3653 
3654 	dp_srng_cleanup(soc, &pdev->rx_refill_buf_ring, RXDMA_BUF, 0);
3655 
3656 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
3657 		dp_mon_ring_cleanup(soc, pdev, mac_id);
3658 		dp_srng_cleanup(soc, &pdev->rxdma_err_dst_ring[mac_id],
3659 				RXDMA_DST, 0);
3660 	}
3661 
3662 	soc->pdev_list[pdev->pdev_id] = NULL;
3663 	qdf_mem_free(pdev);
3664 }
3665 
3666 /*
3667  * dp_pdev_detach_wifi3() - detach txrx pdev
3668  * @txrx_pdev: Datapath PDEV handle
3669  * @force: Force detach
3670  *
3671  * Return: None
3672  */
3673 static void dp_pdev_detach_wifi3(struct cdp_pdev *txrx_pdev, int force)
3674 {
3675 	struct dp_pdev *pdev = (struct dp_pdev *)txrx_pdev;
3676 	struct dp_soc *soc = pdev->soc;
3677 
3678 	if (soc->dp_soc_reinit) {
3679 		dp_pdev_detach(txrx_pdev, force);
3680 	} else {
3681 		dp_pdev_deinit(txrx_pdev, force);
3682 		dp_pdev_detach(txrx_pdev, force);
3683 	}
3684 }
3685 
3686 /*
3687  * dp_reo_desc_freelist_destroy() - Flush REO descriptors from deferred freelist
3688  * @soc: DP SOC handle
3689  */
3690 static inline void dp_reo_desc_freelist_destroy(struct dp_soc *soc)
3691 {
3692 	struct reo_desc_list_node *desc;
3693 	struct dp_rx_tid *rx_tid;
3694 
3695 	qdf_spin_lock_bh(&soc->reo_desc_freelist_lock);
3696 	while (qdf_list_remove_front(&soc->reo_desc_freelist,
3697 		(qdf_list_node_t **)&desc) == QDF_STATUS_SUCCESS) {
3698 		rx_tid = &desc->rx_tid;
3699 		qdf_mem_unmap_nbytes_single(soc->osdev,
3700 			rx_tid->hw_qdesc_paddr,
3701 			QDF_DMA_BIDIRECTIONAL,
3702 			rx_tid->hw_qdesc_alloc_size);
3703 		qdf_mem_free(rx_tid->hw_qdesc_vaddr_unaligned);
3704 		qdf_mem_free(desc);
3705 	}
3706 	qdf_spin_unlock_bh(&soc->reo_desc_freelist_lock);
3707 	qdf_list_destroy(&soc->reo_desc_freelist);
3708 	qdf_spinlock_destroy(&soc->reo_desc_freelist_lock);
3709 }
3710 
3711 /**
3712  * dp_soc_mem_reset() - Reset Dp Soc memory
3713  * @soc: DP handle
3714  *
3715  * Return: None
3716  */
3717 static void dp_soc_mem_reset(struct dp_soc *soc)
3718 {
3719 	uint16_t len = 0;
3720 	uint8_t *dp_soc_offset = (uint8_t *)soc;
3721 
3722 	len = sizeof(struct dp_soc) -
3723 		offsetof(struct dp_soc, dp_soc_reinit) -
3724 		sizeof(soc->dp_soc_reinit);
3725 	dp_soc_offset = dp_soc_offset +
3726 			offsetof(struct dp_soc, dp_soc_reinit) +
3727 			sizeof(soc->dp_soc_reinit);
3728 
3729 	qdf_mem_zero(dp_soc_offset, len);
3730 }
3731 
3732 /**
3733  * dp_soc_deinit() - Deinitialize txrx SOC
3734  * @txrx_soc: Opaque DP SOC handle
3735  *
3736  * Return: None
3737  */
3738 static void dp_soc_deinit(void *txrx_soc)
3739 {
3740 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
3741 	int i;
3742 
3743 	qdf_atomic_set(&soc->cmn_init_done, 0);
3744 
3745 	for (i = 0; i < MAX_PDEV_CNT; i++) {
3746 		if (soc->pdev_list[i])
3747 			dp_pdev_deinit((struct cdp_pdev *)
3748 					soc->pdev_list[i], 1);
3749 	}
3750 
3751 	qdf_flush_work(&soc->htt_stats.work);
3752 	qdf_disable_work(&soc->htt_stats.work);
3753 
3754 	/* Free pending htt stats messages */
3755 	qdf_nbuf_queue_free(&soc->htt_stats.msg);
3756 
3757 	dp_reo_cmdlist_destroy(soc);
3758 
3759 	dp_peer_find_detach(soc);
3760 
3761 	/* Free the ring memories */
3762 	/* Common rings */
3763 	dp_srng_deinit(soc, &soc->wbm_desc_rel_ring, SW2WBM_RELEASE, 0);
3764 
3765 	/* Tx data rings */
3766 	if (!wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
3767 		for (i = 0; i < soc->num_tcl_data_rings; i++) {
3768 			dp_srng_deinit(soc, &soc->tcl_data_ring[i],
3769 				       TCL_DATA, i);
3770 			dp_srng_deinit(soc, &soc->tx_comp_ring[i],
3771 				       WBM2SW_RELEASE, i);
3772 		}
3773 	}
3774 
3775 	/* TCL command and status rings */
3776 	dp_srng_deinit(soc, &soc->tcl_cmd_ring, TCL_CMD, 0);
3777 	dp_srng_deinit(soc, &soc->tcl_status_ring, TCL_STATUS, 0);
3778 
3779 	/* Rx data rings */
3780 	if (!wlan_cfg_per_pdev_rx_ring(soc->wlan_cfg_ctx)) {
3781 		soc->num_reo_dest_rings =
3782 			wlan_cfg_num_reo_dest_rings(soc->wlan_cfg_ctx);
3783 		for (i = 0; i < soc->num_reo_dest_rings; i++) {
3784 			/* TODO: Get number of rings and ring sizes
3785 			 * from wlan_cfg
3786 			 */
3787 			dp_srng_deinit(soc, &soc->reo_dest_ring[i],
3788 				       REO_DST, i);
3789 		}
3790 	}
3791 	/* REO reinjection ring */
3792 	dp_srng_deinit(soc, &soc->reo_reinject_ring, REO_REINJECT, 0);
3793 
3794 	/* Rx release ring */
3795 	dp_srng_deinit(soc, &soc->rx_rel_ring, WBM2SW_RELEASE, 0);
3796 
3797 	/* Rx exception ring */
3798 	/* TODO: Better to store ring_type and ring_num in
3799 	 * dp_srng during setup
3800 	 */
3801 	dp_srng_deinit(soc, &soc->reo_exception_ring, REO_EXCEPTION, 0);
3802 
3803 	/* REO command and status rings */
3804 	dp_srng_deinit(soc, &soc->reo_cmd_ring, REO_CMD, 0);
3805 	dp_srng_deinit(soc, &soc->reo_status_ring, REO_STATUS, 0);
3806 
3807 	dp_soc_wds_detach(soc);
3808 
3809 	qdf_spinlock_destroy(&soc->peer_ref_mutex);
3810 	qdf_spinlock_destroy(&soc->htt_stats.lock);
3811 
3812 	htt_soc_htc_dealloc(soc->htt_handle);
3813 
3814 	qdf_spinlock_destroy(&soc->rx.defrag.defrag_lock);
3815 
3816 	dp_reo_cmdlist_destroy(soc);
3817 	qdf_spinlock_destroy(&soc->rx.reo_cmd_lock);
3818 	dp_reo_desc_freelist_destroy(soc);
3819 
3820 	qdf_spinlock_destroy(&soc->ast_lock);
3821 
3822 	dp_soc_mem_reset(soc);
3823 }
3824 
3825 /**
3826  * dp_soc_deinit_wifi3() - Deinitialize txrx SOC
3827  * @txrx_soc: Opaque DP SOC handle
3828  *
3829  * Return: None
3830  */
3831 static void dp_soc_deinit_wifi3(void *txrx_soc)
3832 {
3833 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
3834 
3835 	soc->dp_soc_reinit = 1;
3836 	dp_soc_deinit(txrx_soc);
3837 }
3838 
3839 /*
3840  * dp_soc_detach() - Detach rest of txrx SOC
3841  * @txrx_soc: DP SOC handle, struct cdp_soc_t is first element of struct dp_soc.
3842  *
3843  * Return: None
3844  */
3845 static void dp_soc_detach(void *txrx_soc)
3846 {
3847 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
3848 	int i;
3849 
3850 	qdf_atomic_set(&soc->cmn_init_done, 0);
3851 
3852 	/* TBD: Call Tx and Rx cleanup functions to free buffers and
3853 	 * SW descriptors
3854 	 */
3855 
3856 	for (i = 0; i < MAX_PDEV_CNT; i++) {
3857 		if (soc->pdev_list[i])
3858 			dp_pdev_detach((struct cdp_pdev *)
3859 					     soc->pdev_list[i], 1);
3860 	}
3861 
3862 	/* Free the ring memories */
3863 	/* Common rings */
3864 	dp_srng_cleanup(soc, &soc->wbm_desc_rel_ring, SW2WBM_RELEASE, 0);
3865 
3866 	dp_tx_soc_detach(soc);
3867 
3868 	/* Tx data rings */
3869 	if (!wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
3870 		for (i = 0; i < soc->num_tcl_data_rings; i++) {
3871 			dp_srng_cleanup(soc, &soc->tcl_data_ring[i],
3872 				TCL_DATA, i);
3873 			dp_srng_cleanup(soc, &soc->tx_comp_ring[i],
3874 				WBM2SW_RELEASE, i);
3875 		}
3876 	}
3877 
3878 	/* TCL command and status rings */
3879 	dp_srng_cleanup(soc, &soc->tcl_cmd_ring, TCL_CMD, 0);
3880 	dp_srng_cleanup(soc, &soc->tcl_status_ring, TCL_STATUS, 0);
3881 
3882 	/* Rx data rings */
3883 	if (!wlan_cfg_per_pdev_rx_ring(soc->wlan_cfg_ctx)) {
3884 		soc->num_reo_dest_rings =
3885 			wlan_cfg_num_reo_dest_rings(soc->wlan_cfg_ctx);
3886 		for (i = 0; i < soc->num_reo_dest_rings; i++) {
3887 			/* TODO: Get number of rings and ring sizes
3888 			 * from wlan_cfg
3889 			 */
3890 			dp_srng_cleanup(soc, &soc->reo_dest_ring[i],
3891 				REO_DST, i);
3892 		}
3893 	}
3894 	/* REO reinjection ring */
3895 	dp_srng_cleanup(soc, &soc->reo_reinject_ring, REO_REINJECT, 0);
3896 
3897 	/* Rx release ring */
3898 	dp_srng_cleanup(soc, &soc->rx_rel_ring, WBM2SW_RELEASE, 0);
3899 
3900 	/* Rx exception ring */
3901 	/* TODO: Better to store ring_type and ring_num in
3902 	 * dp_srng during setup
3903 	 */
3904 	dp_srng_cleanup(soc, &soc->reo_exception_ring, REO_EXCEPTION, 0);
3905 
3906 	/* REO command and status rings */
3907 	dp_srng_cleanup(soc, &soc->reo_cmd_ring, REO_CMD, 0);
3908 	dp_srng_cleanup(soc, &soc->reo_status_ring, REO_STATUS, 0);
3909 	dp_hw_link_desc_pool_cleanup(soc);
3910 
3911 	htt_soc_detach(soc->htt_handle);
3912 	soc->dp_soc_reinit = 0;
3913 
3914 	wlan_cfg_soc_detach(soc->wlan_cfg_ctx);
3915 
3916 	qdf_mem_free(soc);
3917 }
3918 
3919 /*
3920  * dp_soc_detach_wifi3() - Detach txrx SOC
3921  * @txrx_soc: DP SOC handle, struct cdp_soc_t is first element of struct dp_soc.
3922  *
3923  * Return: None
3924  */
3925 static void dp_soc_detach_wifi3(void *txrx_soc)
3926 {
3927 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
3928 
3929 	if (soc->dp_soc_reinit) {
3930 		dp_soc_detach(txrx_soc);
3931 	} else {
3932 		dp_soc_deinit(txrx_soc);
3933 		dp_soc_detach(txrx_soc);
3934 	}
3935 
3936 }
3937 
3938 #if !defined(DISABLE_MON_CONFIG)
3939 /**
3940  * dp_mon_htt_srng_setup() - Prepare HTT messages for Monitor rings
3941  * @soc: soc handle
3942  * @pdev: physical device handle
3943  * @mac_id: ring number
3944  * @mac_for_pdev: mac_id
3945  *
3946  * Return: non-zero for failure, zero for success
3947  */
3948 static QDF_STATUS dp_mon_htt_srng_setup(struct dp_soc *soc,
3949 					struct dp_pdev *pdev,
3950 					int mac_id,
3951 					int mac_for_pdev)
3952 {
3953 	QDF_STATUS status = QDF_STATUS_SUCCESS;
3954 
3955 	if (soc->wlan_cfg_ctx->rxdma1_enable) {
3956 		status = htt_srng_setup(soc->htt_handle, mac_for_pdev,
3957 					pdev->rxdma_mon_buf_ring[mac_id]
3958 					.hal_srng,
3959 					RXDMA_MONITOR_BUF);
3960 
3961 		if (status != QDF_STATUS_SUCCESS) {
3962 			dp_err("Failed to send htt srng setup message for Rxdma mon buf ring");
3963 			return status;
3964 		}
3965 
3966 		status = htt_srng_setup(soc->htt_handle, mac_for_pdev,
3967 					pdev->rxdma_mon_dst_ring[mac_id]
3968 					.hal_srng,
3969 					RXDMA_MONITOR_DST);
3970 
3971 		if (status != QDF_STATUS_SUCCESS) {
3972 			dp_err("Failed to send htt srng setup message for Rxdma mon dst ring");
3973 			return status;
3974 		}
3975 
3976 		status = htt_srng_setup(soc->htt_handle, mac_for_pdev,
3977 					pdev->rxdma_mon_status_ring[mac_id]
3978 					.hal_srng,
3979 					RXDMA_MONITOR_STATUS);
3980 
3981 		if (status != QDF_STATUS_SUCCESS) {
3982 			dp_err("Failed to send htt srng setup message for Rxdma mon status ring");
3983 			return status;
3984 		}
3985 
3986 		status = htt_srng_setup(soc->htt_handle, mac_for_pdev,
3987 					pdev->rxdma_mon_desc_ring[mac_id]
3988 					.hal_srng,
3989 					RXDMA_MONITOR_DESC);
3990 
3991 		if (status != QDF_STATUS_SUCCESS) {
3992 			dp_err("Failed to send htt srng message for Rxdma mon desc ring");
3993 			return status;
3994 		}
3995 	} else {
3996 		status = htt_srng_setup(soc->htt_handle, mac_for_pdev,
3997 					pdev->rxdma_mon_status_ring[mac_id]
3998 					.hal_srng,
3999 					RXDMA_MONITOR_STATUS);
4000 
4001 		if (status != QDF_STATUS_SUCCESS) {
4002 			dp_err("Failed to send htt srng setup message for Rxdma mon status ring");
4003 			return status;
4004 		}
4005 	}
4006 
4007 	return status;
4008 
4009 }
4010 #else
4011 static QDF_STATUS dp_mon_htt_srng_setup(struct dp_soc *soc,
4012 					struct dp_pdev *pdev,
4013 					int mac_id,
4014 					int mac_for_pdev)
4015 {
4016 	return QDF_STATUS_SUCCESS;
4017 }
4018 #endif
4019 
4020 /*
4021  * dp_rxdma_ring_config() - configure the RX DMA rings
4022  *
4023  * This function is used to configure the MAC rings.
4024  * On MCL host provides buffers in Host2FW ring
4025  * FW refills (copies) buffers to the ring and updates
4026  * ring_idx in register
4027  *
4028  * @soc: data path SoC handle
4029  *
4030  * Return: zero on success, non-zero on failure
4031  */
4032 #ifdef QCA_HOST2FW_RXBUF_RING
4033 static QDF_STATUS dp_rxdma_ring_config(struct dp_soc *soc)
4034 {
4035 	int i;
4036 	QDF_STATUS status = QDF_STATUS_SUCCESS;
4037 	for (i = 0; i < MAX_PDEV_CNT; i++) {
4038 		struct dp_pdev *pdev = soc->pdev_list[i];
4039 
4040 		if (pdev) {
4041 			int mac_id;
			bool dbs_enable = false;
4043 			int max_mac_rings =
4044 				 wlan_cfg_get_num_mac_rings
4045 				(pdev->wlan_cfg_ctx);
4046 
4047 			htt_srng_setup(soc->htt_handle, 0,
4048 				 pdev->rx_refill_buf_ring.hal_srng,
4049 				 RXDMA_BUF);
4050 
4051 			if (pdev->rx_refill_buf_ring2.hal_srng)
4052 				htt_srng_setup(soc->htt_handle, 0,
4053 					pdev->rx_refill_buf_ring2.hal_srng,
4054 					RXDMA_BUF);
4055 
			if (soc->cdp_soc.ol_ops->is_hw_dbs_2x2_capable) {
				dbs_enable = soc->cdp_soc.ol_ops->
					is_hw_dbs_2x2_capable(soc->ctrl_psoc);
			}
4061 
4062 			if (dbs_enable) {
4063 				QDF_TRACE(QDF_MODULE_ID_TXRX,
4064 				QDF_TRACE_LEVEL_ERROR,
4065 				FL("DBS enabled max_mac_rings %d"),
4066 					 max_mac_rings);
4067 			} else {
4068 				max_mac_rings = 1;
4069 				QDF_TRACE(QDF_MODULE_ID_TXRX,
4070 					 QDF_TRACE_LEVEL_ERROR,
4071 					 FL("DBS disabled, max_mac_rings %d"),
4072 					 max_mac_rings);
4073 			}
4074 
4075 			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
4076 					 FL("pdev_id %d max_mac_rings %d"),
4077 					 pdev->pdev_id, max_mac_rings);
4078 
4079 			for (mac_id = 0; mac_id < max_mac_rings; mac_id++) {
4080 				int mac_for_pdev = dp_get_mac_id_for_pdev(
4081 							mac_id, pdev->pdev_id);
4082 
4083 				QDF_TRACE(QDF_MODULE_ID_TXRX,
4084 					 QDF_TRACE_LEVEL_ERROR,
4085 					 FL("mac_id %d"), mac_for_pdev);
4086 
4087 				htt_srng_setup(soc->htt_handle, mac_for_pdev,
4088 					 pdev->rx_mac_buf_ring[mac_id]
4089 						.hal_srng,
4090 					 RXDMA_BUF);
4091 				htt_srng_setup(soc->htt_handle, mac_for_pdev,
4092 					pdev->rxdma_err_dst_ring[mac_id]
4093 						.hal_srng,
4094 					RXDMA_DST);
4095 
4096 				/* Configure monitor mode rings */
4097 				status = dp_mon_htt_srng_setup(soc, pdev,
4098 							       mac_id,
4099 							       mac_for_pdev);
4100 				if (status != QDF_STATUS_SUCCESS) {
4101 					dp_err("Failed to send htt monitor messages to target");
4102 					return status;
4103 				}
4104 
4105 			}
4106 		}
4107 	}
4108 
4109 	/*
4110 	 * Timer to reap rxdma status rings.
4111 	 * Needed until we enable ppdu end interrupts
4112 	 */
4113 	qdf_timer_init(soc->osdev, &soc->mon_reap_timer,
4114 			dp_service_mon_rings, (void *)soc,
4115 			QDF_TIMER_TYPE_WAKE_APPS);
4116 	soc->reap_timer_init = 1;
4117 	return status;
4118 }
4119 #else
4120 /* This is only for WIN */
4121 static QDF_STATUS dp_rxdma_ring_config(struct dp_soc *soc)
4122 {
4123 	int i;
4124 	int mac_id;
4125 	QDF_STATUS status = QDF_STATUS_SUCCESS;
4126 
4127 	for (i = 0; i < MAX_PDEV_CNT; i++) {
4128 		struct dp_pdev *pdev = soc->pdev_list[i];
4129 
4130 		if (pdev == NULL)
4131 			continue;
4132 
4133 		for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
4134 			int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, i);
4135 
4136 			htt_srng_setup(soc->htt_handle, mac_for_pdev,
4137 				pdev->rx_refill_buf_ring.hal_srng, RXDMA_BUF);
4138 #ifndef DISABLE_MON_CONFIG
4139 			htt_srng_setup(soc->htt_handle, mac_for_pdev,
4140 				pdev->rxdma_mon_buf_ring[mac_id].hal_srng,
4141 				RXDMA_MONITOR_BUF);
4142 			htt_srng_setup(soc->htt_handle, mac_for_pdev,
4143 				pdev->rxdma_mon_dst_ring[mac_id].hal_srng,
4144 				RXDMA_MONITOR_DST);
4145 			htt_srng_setup(soc->htt_handle, mac_for_pdev,
4146 				pdev->rxdma_mon_status_ring[mac_id].hal_srng,
4147 				RXDMA_MONITOR_STATUS);
4148 			htt_srng_setup(soc->htt_handle, mac_for_pdev,
4149 				pdev->rxdma_mon_desc_ring[mac_id].hal_srng,
4150 				RXDMA_MONITOR_DESC);
4151 #endif
4152 			htt_srng_setup(soc->htt_handle, mac_for_pdev,
4153 				pdev->rxdma_err_dst_ring[mac_id].hal_srng,
4154 				RXDMA_DST);
4155 		}
4156 	}
4157 	return status;
4158 }
4159 #endif
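
/*
 * Editor's illustrative sketch (not additional driver code): for each
 * attached pdev, dp_rxdma_ring_config() sends one htt_srng_setup()
 * message per ring so the target learns the host SRNG addresses. For
 * a WIN pdev with monitor config enabled, the per-mac sequence is
 * roughly:
 *
 *	htt_srng_setup(htt, mac_for_pdev, rx_refill_buf_ring, RXDMA_BUF);
 *	htt_srng_setup(htt, mac_for_pdev, mon_buf_ring, RXDMA_MONITOR_BUF);
 *	htt_srng_setup(htt, mac_for_pdev, mon_dst_ring, RXDMA_MONITOR_DST);
 *	htt_srng_setup(htt, mac_for_pdev, mon_status_ring,
 *		       RXDMA_MONITOR_STATUS);
 *	htt_srng_setup(htt, mac_for_pdev, mon_desc_ring, RXDMA_MONITOR_DESC);
 *	htt_srng_setup(htt, mac_for_pdev, err_dst_ring, RXDMA_DST);
 *
 * where mac_for_pdev comes from dp_get_mac_id_for_pdev(mac_id, pdev_id)
 * and the ring names abbreviate the pdev fields used above.
 */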
4160 
4161 /*
4162  * dp_soc_attach_target_wifi3() - SOC initialization in the target
4163  * @cdp_soc: Opaque Datapath SOC handle
4164  *
4165  * Return: zero on success, non-zero on failure
4166  */
4167 static QDF_STATUS
4168 dp_soc_attach_target_wifi3(struct cdp_soc_t *cdp_soc)
4169 {
4170 	struct dp_soc *soc = (struct dp_soc *)cdp_soc;
4171 	QDF_STATUS status = QDF_STATUS_SUCCESS;
4172 
4173 	htt_soc_attach_target(soc->htt_handle);
4174 
4175 	status = dp_rxdma_ring_config(soc);
4176 	if (status != QDF_STATUS_SUCCESS) {
4177 		dp_err("Failed to send htt srng setup messages to target");
4178 		return status;
4179 	}
4180 
4181 	DP_STATS_INIT(soc);
4182 
4183 	/* initialize work queue for stats processing */
4184 	qdf_create_work(0, &soc->htt_stats.work, htt_t2h_stats_handler, soc);
4185 
4186 	return QDF_STATUS_SUCCESS;
4187 }
4188 
4189 /*
4190  * dp_soc_get_nss_cfg_wifi3() - SOC get nss config
4191  * @txrx_soc: Datapath SOC handle
4192  */
4193 static int dp_soc_get_nss_cfg_wifi3(struct cdp_soc_t *cdp_soc)
4194 {
4195 	struct dp_soc *dsoc = (struct dp_soc *)cdp_soc;
4196 	return wlan_cfg_get_dp_soc_nss_cfg(dsoc->wlan_cfg_ctx);
4197 }
4198 /*
4199  * dp_soc_set_nss_cfg_wifi3() - SOC set nss config
4200  * @txrx_soc: Datapath SOC handle
4201  * @nss_cfg: nss config
4202  */
4203 static void dp_soc_set_nss_cfg_wifi3(struct cdp_soc_t *cdp_soc, int config)
4204 {
4205 	struct dp_soc *dsoc = (struct dp_soc *)cdp_soc;
4206 	struct wlan_cfg_dp_soc_ctxt *wlan_cfg_ctx = dsoc->wlan_cfg_ctx;
4207 
4208 	wlan_cfg_set_dp_soc_nss_cfg(wlan_cfg_ctx, config);
4209 
4210 	/*
4211 	 * TODO: masked out based on the per offloaded radio
4212 	 */
4213 	switch (config) {
4214 	case dp_nss_cfg_default:
4215 		break;
4216 	case dp_nss_cfg_dbdc:
4217 	case dp_nss_cfg_dbtc:
4218 		wlan_cfg_set_num_tx_desc_pool(wlan_cfg_ctx, 0);
4219 		wlan_cfg_set_num_tx_ext_desc_pool(wlan_cfg_ctx, 0);
4220 		wlan_cfg_set_num_tx_desc(wlan_cfg_ctx, 0);
4221 		wlan_cfg_set_num_tx_ext_desc(wlan_cfg_ctx, 0);
4222 		break;
4223 	default:
4224 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
4225 			  "Invalid offload config %d", config);
4226 	}
4227 
4228 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
4229 		  FL("nss-wifi<0> nss config is enabled"));
4230 }
4231 /*
4232 * dp_vdev_attach_wifi3() - attach txrx vdev
4233 * @txrx_pdev: Datapath PDEV handle
4234 * @vdev_mac_addr: MAC address of the virtual interface
4235 * @vdev_id: VDEV Id
4236 * @wlan_op_mode: VDEV operating mode
4237 *
4238 * Return: DP VDEV handle on success, NULL on failure
4239 */
4240 static struct cdp_vdev *dp_vdev_attach_wifi3(struct cdp_pdev *txrx_pdev,
4241 	uint8_t *vdev_mac_addr, uint8_t vdev_id, enum wlan_op_mode op_mode)
4242 {
4243 	struct dp_pdev *pdev = (struct dp_pdev *)txrx_pdev;
4244 	struct dp_soc *soc = pdev->soc;
4245 	struct dp_vdev *vdev = qdf_mem_malloc(sizeof(*vdev));
4246 
4247 	if (!vdev) {
4248 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
4249 			FL("DP VDEV memory allocation failed"));
4250 		goto fail0;
4251 	}
4252 
4253 	vdev->pdev = pdev;
4254 	vdev->vdev_id = vdev_id;
4255 	vdev->opmode = op_mode;
4256 	vdev->osdev = soc->osdev;
4257 
4258 	vdev->osif_rx = NULL;
4259 	vdev->osif_rsim_rx_decap = NULL;
4260 	vdev->osif_get_key = NULL;
4261 	vdev->osif_rx_mon = NULL;
4262 	vdev->osif_tx_free_ext = NULL;
4263 	vdev->osif_vdev = NULL;
4264 
4265 	vdev->delete.pending = 0;
4266 	vdev->safemode = 0;
4267 	vdev->drop_unenc = 1;
4268 	vdev->sec_type = cdp_sec_type_none;
4269 #ifdef notyet
4270 	vdev->filters_num = 0;
4271 #endif
4272 
4273 	qdf_mem_copy(
4274 		&vdev->mac_addr.raw[0], vdev_mac_addr, OL_TXRX_MAC_ADDR_LEN);
4275 
4276 	/* TODO: Initialize default HTT meta data that will be used in
4277 	 * TCL descriptors for packets transmitted from this VDEV
4278 	 */
4279 
4280 	TAILQ_INIT(&vdev->peer_list);
4281 
4282 	if ((soc->intr_mode == DP_INTR_POLL) &&
4283 	    wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx) != 0) {
4284 		if ((pdev->vdev_count == 0) ||
4285 		    (wlan_op_mode_monitor == vdev->opmode))
4286 			qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
4287 	}
4288 
4289 	if (wlan_op_mode_monitor == vdev->opmode) {
4290 		pdev->monitor_vdev = vdev;
4291 		return (struct cdp_vdev *)vdev;
4292 	}
4293 
4294 	vdev->tx_encap_type = wlan_cfg_pkt_type(soc->wlan_cfg_ctx);
4295 	vdev->rx_decap_type = wlan_cfg_pkt_type(soc->wlan_cfg_ctx);
4296 	vdev->dscp_tid_map_id = 0;
4297 	vdev->mcast_enhancement_en = 0;
4298 	vdev->raw_mode_war = wlan_cfg_get_raw_mode_war(soc->wlan_cfg_ctx);
4299 
4300 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
4301 	/* add this vdev into the pdev's list */
4302 	TAILQ_INSERT_TAIL(&pdev->vdev_list, vdev, vdev_list_elem);
4303 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
4304 	pdev->vdev_count++;
4305 
4306 	dp_tx_vdev_attach(vdev);
4307 
4308 	if (pdev->vdev_count == 1)
4309 		dp_lro_hash_setup(soc, pdev);
4310 
4311 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
4312 		"Created vdev %pK (%pM)", vdev, vdev->mac_addr.raw);
4313 	DP_STATS_INIT(vdev);
4314 
4315 	if (wlan_op_mode_sta == vdev->opmode)
4316 		dp_peer_create_wifi3((struct cdp_vdev *)vdev,
4317 							vdev->mac_addr.raw,
4318 							NULL);
4319 
4320 	return (struct cdp_vdev *)vdev;
4321 
4322 fail0:
4323 	return NULL;
4324 }
4325 
4326 /**
4327  * dp_vdev_register_wifi3() - Register VDEV operations from osif layer
4328  * @vdev: Datapath VDEV handle
4329  * @osif_vdev: OSIF vdev handle
4330  * @ctrl_vdev: UMAC vdev handle
4331  * @txrx_ops: Tx and Rx operations
4332  *
4333  * Return: DP VDEV handle on success, NULL on failure
4334  */
4335 static void dp_vdev_register_wifi3(struct cdp_vdev *vdev_handle,
4336 	void *osif_vdev, struct cdp_ctrl_objmgr_vdev *ctrl_vdev,
4337 	struct ol_txrx_ops *txrx_ops)
4338 {
4339 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
4340 	vdev->osif_vdev = osif_vdev;
4341 	vdev->ctrl_vdev = ctrl_vdev;
4342 	vdev->osif_rx = txrx_ops->rx.rx;
4343 	vdev->osif_rx_stack = txrx_ops->rx.rx_stack;
4344 	vdev->osif_rsim_rx_decap = txrx_ops->rx.rsim_rx_decap;
4345 	vdev->osif_get_key = txrx_ops->get_key;
4346 	vdev->osif_rx_mon = txrx_ops->rx.mon;
4347 	vdev->osif_tx_free_ext = txrx_ops->tx.tx_free_ext;
4348 #ifdef notyet
4349 #if ATH_SUPPORT_WAPI
4350 	vdev->osif_check_wai = txrx_ops->rx.wai_check;
4351 #endif
4352 #endif
4353 #ifdef UMAC_SUPPORT_PROXY_ARP
4354 	vdev->osif_proxy_arp = txrx_ops->proxy_arp;
4355 #endif
4356 	vdev->me_convert = txrx_ops->me_convert;
4357 
4358 	/* TODO: Enable the following once Tx code is integrated */
4359 	if (vdev->mesh_vdev)
4360 		txrx_ops->tx.tx = dp_tx_send_mesh;
4361 	else
4362 		txrx_ops->tx.tx = dp_tx_send;
4363 
4364 	txrx_ops->tx.tx_exception = dp_tx_send_exception;
4365 
4366 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW,
4367 		"DP Vdev Register success");
4368 }
4369 
4370 /**
 * dp_vdev_flush_peers() - Forcibly flush peers of vdev
4372  * @vdev: Datapath VDEV handle
4373  *
4374  * Return: void
4375  */
4376 static void dp_vdev_flush_peers(struct dp_vdev *vdev)
4377 {
4378 	struct dp_pdev *pdev = vdev->pdev;
4379 	struct dp_soc *soc = pdev->soc;
4380 	struct dp_peer *peer;
4381 	uint16_t *peer_ids;
4382 	uint8_t i = 0, j = 0;
4383 
4384 	peer_ids = qdf_mem_malloc(soc->max_peers * sizeof(peer_ids[0]));
4385 	if (!peer_ids) {
4386 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
4387 			"DP alloc failure - unable to flush peers");
4388 		return;
4389 	}
4390 
4391 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
4392 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
4393 		for (i = 0; i < MAX_NUM_PEER_ID_PER_PEER; i++)
4394 			if (peer->peer_ids[i] != HTT_INVALID_PEER)
4395 				if (j < soc->max_peers)
4396 					peer_ids[j++] = peer->peer_ids[i];
4397 	}
4398 	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
4399 
4400 	for (i = 0; i < j ; i++) {
4401 		peer = dp_peer_find_by_id(soc, peer_ids[i]);
4402 		if (peer) {
4403 			dp_info("peer: %pM is getting flush",
4404 				peer->mac_addr.raw);
4405 			dp_peer_delete_wifi3(peer, 0);
4406 			/*
4407 			 * we need to call dp_peer_unref_del_find_by_id()
4408 			 * to remove additional ref count incremented
4409 			 * by dp_peer_find_by_id() call.
4410 			 *
4411 			 * Hold the ref count while executing
4412 			 * dp_peer_delete_wifi3() call.
4413 			 *
4414 			 */
4415 			dp_peer_unref_del_find_by_id(peer);
4416 			dp_rx_peer_unmap_handler(soc, peer_ids[i],
4417 						 vdev->vdev_id,
4418 						 peer->mac_addr.raw, 0);
4419 		}
4420 	}
4421 
4422 	qdf_mem_free(peer_ids);
4423 
4424 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
4425 		FL("Flushed peers for vdev object %pK "), vdev);
4426 }
4427 
4428 /*
4429  * dp_vdev_detach_wifi3() - Detach txrx vdev
4430  * @txrx_vdev:		Datapath VDEV handle
4431  * @callback:		Callback OL_IF on completion of detach
4432  * @cb_context:	Callback context
4433  *
4434  */
4435 static void dp_vdev_detach_wifi3(struct cdp_vdev *vdev_handle,
4436 	ol_txrx_vdev_delete_cb callback, void *cb_context)
4437 {
4438 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
4439 	struct dp_pdev *pdev = vdev->pdev;
4440 	struct dp_soc *soc = pdev->soc;
4441 	struct dp_neighbour_peer *peer = NULL;
4442 	struct dp_neighbour_peer *temp_peer = NULL;
4443 
4444 	/* preconditions */
4445 	qdf_assert(vdev);
4446 
4447 	if (wlan_op_mode_monitor == vdev->opmode)
4448 		goto free_vdev;
4449 
4450 	if (wlan_op_mode_sta == vdev->opmode)
4451 		dp_peer_delete_wifi3(vdev->vap_bss_peer, 0);
4452 
4453 	/*
4454 	 * If Target is hung, flush all peers before detaching vdev
4455 	 * this will free all references held due to missing
4456 	 * unmap commands from Target
4457 	 */
4458 	if ((hif_get_target_status(soc->hif_handle) == TARGET_STATUS_RESET) ||
4459 	    !hif_is_target_ready(HIF_GET_SOFTC(soc->hif_handle)))
4460 		dp_vdev_flush_peers(vdev);
4461 
4462 	/*
4463 	 * Use peer_ref_mutex while accessing peer_list, in case
4464 	 * a peer is in the process of being removed from the list.
4465 	 */
4466 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
4467 	/* check that the vdev has no peers allocated */
4468 	if (!TAILQ_EMPTY(&vdev->peer_list)) {
4469 		/* debug print - will be removed later */
4470 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_WARN,
4471 			FL("not deleting vdev object %pK (%pM)"
4472 			"until deletion finishes for all its peers"),
4473 			vdev, vdev->mac_addr.raw);
4474 		/* indicate that the vdev needs to be deleted */
4475 		vdev->delete.pending = 1;
4476 		vdev->delete.callback = callback;
4477 		vdev->delete.context = cb_context;
4478 		qdf_spin_unlock_bh(&soc->peer_ref_mutex);
4479 		return;
4480 	}
4481 	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
4482 
4483 	qdf_spin_lock_bh(&pdev->neighbour_peer_mutex);
4484 	if (!soc->hw_nac_monitor_support) {
4485 		TAILQ_FOREACH(peer, &pdev->neighbour_peers_list,
4486 			      neighbour_peer_list_elem) {
4487 			QDF_ASSERT(peer->vdev != vdev);
4488 		}
4489 	} else {
4490 		TAILQ_FOREACH_SAFE(peer, &pdev->neighbour_peers_list,
4491 				   neighbour_peer_list_elem, temp_peer) {
4492 			if (peer->vdev == vdev) {
4493 				TAILQ_REMOVE(&pdev->neighbour_peers_list, peer,
4494 					     neighbour_peer_list_elem);
4495 				qdf_mem_free(peer);
4496 			}
4497 		}
4498 	}
4499 	qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex);
4500 
4501 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
4502 	dp_tx_vdev_detach(vdev);
4503 	/* remove the vdev from its parent pdev's list */
4504 	TAILQ_REMOVE(&pdev->vdev_list, vdev, vdev_list_elem);
4505 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
4506 		FL("deleting vdev object %pK (%pM)"), vdev, vdev->mac_addr.raw);
4507 
4508 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
4509 free_vdev:
4510 	qdf_mem_free(vdev);
4511 
4512 	if (callback)
4513 		callback(cb_context);
4514 }
4515 
4516 /*
4517  * dp_peer_delete_ast_entries(): Delete all AST entries for a peer
4518  * @soc - datapath soc handle
4519  * @peer - datapath peer handle
4520  *
4521  * Delete the AST entries belonging to a peer
4522  */
4523 #ifdef FEATURE_AST
4524 static inline void dp_peer_delete_ast_entries(struct dp_soc *soc,
4525 					      struct dp_peer *peer)
4526 {
4527 	struct dp_ast_entry *ast_entry, *temp_ast_entry;
4528 
4529 	qdf_spin_lock_bh(&soc->ast_lock);
4530 	DP_PEER_ITERATE_ASE_LIST(peer, ast_entry, temp_ast_entry)
4531 		dp_peer_del_ast(soc, ast_entry);
4532 
4533 	peer->self_ast_entry = NULL;
4534 	qdf_spin_unlock_bh(&soc->ast_lock);
4535 }
4536 #else
4537 static inline void dp_peer_delete_ast_entries(struct dp_soc *soc,
4538 					      struct dp_peer *peer)
4539 {
4540 }
4541 #endif
4542 
4543 #if ATH_SUPPORT_WRAP
4544 static inline struct dp_peer *dp_peer_can_reuse(struct dp_vdev *vdev,
4545 						uint8_t *peer_mac_addr)
4546 {
4547 	struct dp_peer *peer;
4548 
4549 	peer = dp_peer_find_hash_find(vdev->pdev->soc, peer_mac_addr,
4550 				      0, vdev->vdev_id);
4551 	if (!peer)
4552 		return NULL;
4553 
4554 	if (peer->bss_peer)
4555 		return peer;
4556 
4557 	dp_peer_unref_delete(peer);
4558 	return NULL;
4559 }
4560 #else
4561 static inline struct dp_peer *dp_peer_can_reuse(struct dp_vdev *vdev,
4562 						uint8_t *peer_mac_addr)
4563 {
4564 	struct dp_peer *peer;
4565 
4566 	peer = dp_peer_find_hash_find(vdev->pdev->soc, peer_mac_addr,
4567 				      0, vdev->vdev_id);
4568 	if (!peer)
4569 		return NULL;
4570 
4571 	if (peer->bss_peer && (peer->vdev->vdev_id == vdev->vdev_id))
4572 		return peer;
4573 
4574 	dp_peer_unref_delete(peer);
4575 	return NULL;
4576 }
4577 #endif
4578 
4579 #ifdef FEATURE_AST
4580 static inline void dp_peer_ast_handle_roam_del(struct dp_soc *soc,
4581 					       uint8_t *peer_mac_addr)
4582 {
4583 	struct dp_ast_entry *ast_entry;
4584 
4585 	qdf_spin_lock_bh(&soc->ast_lock);
4586 	ast_entry = dp_peer_ast_hash_find_soc(soc, peer_mac_addr);
4587 
4588 	if (ast_entry && ast_entry->next_hop &&
4589 	    !ast_entry->delete_in_progress)
4590 		dp_peer_del_ast(soc, ast_entry);
4591 
4592 	qdf_spin_unlock_bh(&soc->ast_lock);
4593 }
4594 #endif
4595 
4596 /*
4597  * dp_peer_create_wifi3() - attach txrx peer
4598  * @txrx_vdev: Datapath VDEV handle
4599  * @peer_mac_addr: Peer MAC address
4600  *
4601  * Return: DP peeer handle on success, NULL on failure
4602  */
4603 static void *dp_peer_create_wifi3(struct cdp_vdev *vdev_handle,
4604 		uint8_t *peer_mac_addr, struct cdp_ctrl_objmgr_peer *ctrl_peer)
4605 {
4606 	struct dp_peer *peer;
4607 	int i;
4608 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
4609 	struct dp_pdev *pdev;
4610 	struct dp_soc *soc;
4611 	enum cdp_txrx_ast_entry_type ast_type = CDP_TXRX_AST_TYPE_STATIC;
4612 
4613 	/* preconditions */
4614 	qdf_assert(vdev);
4615 	qdf_assert(peer_mac_addr);
4616 
4617 	pdev = vdev->pdev;
4618 	soc = pdev->soc;
4619 
4620 	/*
4621 	 * If a peer entry with given MAC address already exists,
4622 	 * reuse the peer and reset the state of peer.
4623 	 */
4624 	peer = dp_peer_can_reuse(vdev, peer_mac_addr);
4625 
4626 	if (peer) {
4627 		qdf_atomic_init(&peer->is_default_route_set);
4628 		dp_peer_cleanup(vdev, peer);
4629 
4630 		peer->delete_in_progress = false;
4631 
4632 		dp_peer_delete_ast_entries(soc, peer);
4633 
4634 		if ((vdev->opmode == wlan_op_mode_sta) &&
4635 		    !qdf_mem_cmp(peer_mac_addr, &vdev->mac_addr.raw[0],
4636 		     DP_MAC_ADDR_LEN)) {
4637 			ast_type = CDP_TXRX_AST_TYPE_SELF;
4638 		}
4639 
4640 		dp_peer_add_ast(soc, peer, peer_mac_addr, ast_type, 0);
4641 
4642 		/*
4643 		* Control path maintains a node count which is incremented
4644 		* for every new peer create command. Since new peer is not being
4645 		* created and earlier reference is reused here,
4646 		* peer_unref_delete event is sent to control path to
4647 		* increment the count back.
4648 		*/
4649 		if (soc->cdp_soc.ol_ops->peer_unref_delete) {
4650 			soc->cdp_soc.ol_ops->peer_unref_delete(pdev->ctrl_pdev,
4651 				peer->mac_addr.raw, vdev->mac_addr.raw,
4652 				vdev->opmode, peer->ctrl_peer, ctrl_peer);
4653 		}
4654 		peer->ctrl_peer = ctrl_peer;
4655 
4656 		dp_local_peer_id_alloc(pdev, peer);
4657 		DP_STATS_INIT(peer);
4658 
4659 		return (void *)peer;
4660 	} else {
4661 		/*
4662 		 * When a STA roams from RPTR AP to ROOT AP and vice versa, we
4663 		 * need to remove the AST entry which was earlier added as a WDS
4664 		 * entry.
		 * If an AST entry exists, but no peer entry exists with a given
		 * MAC address, we can deduce it to be a WDS entry.
4667 		 */
4668 		dp_peer_ast_handle_roam_del(soc, peer_mac_addr);
4669 	}
4670 
4671 #ifdef notyet
4672 	peer = (struct dp_peer *)qdf_mempool_alloc(soc->osdev,
4673 		soc->mempool_ol_ath_peer);
4674 #else
4675 	peer = (struct dp_peer *)qdf_mem_malloc(sizeof(*peer));
4676 #endif
4677 
4678 	if (!peer)
4679 		return NULL; /* failure */
4680 
4681 	qdf_mem_zero(peer, sizeof(struct dp_peer));
4682 
4683 	TAILQ_INIT(&peer->ast_entry_list);
4684 
4685 	/* store provided params */
4686 	peer->vdev = vdev;
4687 	peer->ctrl_peer = ctrl_peer;
4688 
4689 	if ((vdev->opmode == wlan_op_mode_sta) &&
4690 	    !qdf_mem_cmp(peer_mac_addr, &vdev->mac_addr.raw[0],
4691 			 DP_MAC_ADDR_LEN)) {
4692 		ast_type = CDP_TXRX_AST_TYPE_SELF;
4693 	}
4694 
4695 	dp_peer_add_ast(soc, peer, peer_mac_addr, ast_type, 0);
4696 
4697 	qdf_spinlock_create(&peer->peer_info_lock);
4698 
4699 	qdf_mem_copy(
4700 		&peer->mac_addr.raw[0], peer_mac_addr, OL_TXRX_MAC_ADDR_LEN);
4701 
4702 	/* TODO: See of rx_opt_proc is really required */
4703 	peer->rx_opt_proc = soc->rx_opt_proc;
4704 
4705 	/* initialize the peer_id */
4706 	for (i = 0; i < MAX_NUM_PEER_ID_PER_PEER; i++)
4707 		peer->peer_ids[i] = HTT_INVALID_PEER;
4708 
4709 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
4710 
4711 	qdf_atomic_init(&peer->ref_cnt);
4712 
4713 	/* keep one reference for attach */
4714 	qdf_atomic_inc(&peer->ref_cnt);
4715 
4716 	/* add this peer into the vdev's list */
4717 	if (wlan_op_mode_sta == vdev->opmode)
4718 		TAILQ_INSERT_HEAD(&vdev->peer_list, peer, peer_list_elem);
4719 	else
4720 		TAILQ_INSERT_TAIL(&vdev->peer_list, peer, peer_list_elem);
4721 
4722 	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
4723 
4724 	/* TODO: See if hash based search is required */
4725 	dp_peer_find_hash_add(soc, peer);
4726 
4727 	/* Initialize the peer state */
4728 	peer->state = OL_TXRX_PEER_STATE_DISC;
4729 
4730 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
4731 		"vdev %pK created peer %pK (%pM) ref_cnt: %d",
4732 		vdev, peer, peer->mac_addr.raw,
4733 		qdf_atomic_read(&peer->ref_cnt));
4734 	/*
4735 	 * For every peer MAp message search and set if bss_peer
4736 	 */
4737 	if (memcmp(peer->mac_addr.raw, vdev->mac_addr.raw, 6) == 0) {
4738 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
4739 			"vdev bss_peer!!!!");
4740 		peer->bss_peer = 1;
4741 		vdev->vap_bss_peer = peer;
4742 	}
4743 	for (i = 0; i < DP_MAX_TIDS; i++)
4744 		qdf_spinlock_create(&peer->rx_tid[i].tid_lock);
4745 
4746 	dp_local_peer_id_alloc(pdev, peer);
4747 	DP_STATS_INIT(peer);
4748 	return (void *)peer;
4749 }
4750 
4751 /*
4752  * dp_vdev_get_default_reo_hash() - get reo dest ring and hash values for a vdev
4753  * @vdev: Datapath VDEV handle
4754  * @reo_dest: pointer to default reo_dest ring for vdev to be populated
4755  * @hash_based: pointer to hash value (enabled/disabled) to be populated
4756  *
4757  * Return: None
4758  */
4759 static
4760 void dp_vdev_get_default_reo_hash(struct dp_vdev *vdev,
4761 				  enum cdp_host_reo_dest_ring *reo_dest,
4762 				  bool *hash_based)
4763 {
4764 	struct dp_soc *soc;
4765 	struct dp_pdev *pdev;
4766 
4767 	pdev = vdev->pdev;
4768 	soc = pdev->soc;
4769 	/*
4770 	 * hash based steering is disabled for Radios which are offloaded
4771 	 * to NSS
4772 	 */
4773 	if (!wlan_cfg_get_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx))
4774 		*hash_based = wlan_cfg_is_rx_hash_enabled(soc->wlan_cfg_ctx);
4775 
4776 	/*
4777 	 * Below line of code will ensure the proper reo_dest ring is chosen
4778 	 * for cases where toeplitz hash cannot be generated (ex: non TCP/UDP)
4779 	 */
4780 	*reo_dest = pdev->reo_dest;
4781 }
4782 
4783 #ifdef IPA_OFFLOAD
4784 /*
4785  * dp_peer_setup_get_reo_hash() - get reo dest ring and hash values for a peer
4786  * @vdev: Datapath VDEV handle
4787  * @reo_dest: pointer to default reo_dest ring for vdev to be populated
4788  * @hash_based: pointer to hash value (enabled/disabled) to be populated
4789  *
4790  * If IPA is enabled in ini, for SAP mode, disable hash based
4791  * steering, use default reo_dst ring for RX. Use config values for other modes.
4792  * Return: None
4793  */
4794 static void dp_peer_setup_get_reo_hash(struct dp_vdev *vdev,
4795 				       enum cdp_host_reo_dest_ring *reo_dest,
4796 				       bool *hash_based)
4797 {
4798 	struct dp_soc *soc;
4799 	struct dp_pdev *pdev;
4800 
4801 	pdev = vdev->pdev;
4802 	soc = pdev->soc;
4803 
4804 	dp_vdev_get_default_reo_hash(vdev, reo_dest, hash_based);
4805 
4806 	/*
4807 	 * If IPA is enabled, disable hash-based flow steering and set
4808 	 * reo_dest_ring_4 as the REO ring to receive packets on.
4809 	 * IPA is configured to reap reo_dest_ring_4.
4810 	 *
4811 	 * Note - REO DST indexes are from 0 - 3, while cdp_host_reo_dest_ring
4812 	 * value enum value is from 1 - 4.
4813 	 * Hence, *reo_dest = IPA_REO_DEST_RING_IDX + 1
4814 	 */
4815 	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) {
4816 		if (vdev->opmode == wlan_op_mode_ap) {
4817 			*reo_dest = IPA_REO_DEST_RING_IDX + 1;
4818 			*hash_based = 0;
4819 		}
4820 	}
4821 }
4822 
4823 #else
4824 
4825 /*
4826  * dp_peer_setup_get_reo_hash() - get reo dest ring and hash values for a peer
4827  * @vdev: Datapath VDEV handle
4828  * @reo_dest: pointer to default reo_dest ring for vdev to be populated
4829  * @hash_based: pointer to hash value (enabled/disabled) to be populated
4830  *
4831  * Use system config values for hash based steering.
4832  * Return: None
4833  */
4834 
4835 static void dp_peer_setup_get_reo_hash(struct dp_vdev *vdev,
4836 				       enum cdp_host_reo_dest_ring *reo_dest,
4837 				       bool *hash_based)
4838 {
4839 	dp_vdev_get_default_reo_hash(vdev, reo_dest, hash_based);
4840 }
4841 #endif /* IPA_OFFLOAD */
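
/*
 * Editor's note on the index mapping used above, with a worked
 * example: HW REO destination rings are indexed 0..3 while the
 * cdp_host_reo_dest_ring enum values run 1..4, so converting an HW
 * ring index to the enum adds one:
 *
 *	HW REO DST index:	0  1  2  3
 *	cdp enum value:		1  2  3  4
 *
 * That is why the IPA case selects IPA_REO_DEST_RING_IDX + 1 when it
 * pins SAP RX onto the ring that IPA reaps.
 */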
4842 
4843 /*
4844  * dp_peer_setup_wifi3() - initialize the peer
4845  * @vdev_hdl: virtual device object
4846  * @peer: Peer object
4847  *
4848  * Return: void
4849  */
4850 static void dp_peer_setup_wifi3(struct cdp_vdev *vdev_hdl, void *peer_hdl)
4851 {
4852 	struct dp_peer *peer = (struct dp_peer *)peer_hdl;
4853 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_hdl;
4854 	struct dp_pdev *pdev;
4855 	struct dp_soc *soc;
4856 	bool hash_based = 0;
4857 	enum cdp_host_reo_dest_ring reo_dest;
4858 
4859 	/* preconditions */
4860 	qdf_assert(vdev);
4861 	qdf_assert(peer);
4862 
4863 	pdev = vdev->pdev;
4864 	soc = pdev->soc;
4865 
4866 	peer->last_assoc_rcvd = 0;
4867 	peer->last_disassoc_rcvd = 0;
4868 	peer->last_deauth_rcvd = 0;
4869 
4870 	dp_peer_setup_get_reo_hash(vdev, &reo_dest, &hash_based);
4871 
4872 	dp_info("pdev: %d vdev :%d opmode:%u hash-based-steering:%d default-reo_dest:%u",
4873 		pdev->pdev_id, vdev->vdev_id,
		vdev->opmode, hash_based, reo_dest);

	/*
	 * There are corner cases where AD1 = AD2 = "VAP's address",
	 * i.e. both devices have the same MAC address. In these
	 * cases we want such pkts to be processed in the NULL Q handler,
	 * which is the REO2TCL ring. For this reason we should
	 * not set up reo_queues and a default route for the bss_peer.
	 */
4884 	if (peer->bss_peer && vdev->opmode == wlan_op_mode_ap)
4885 		return;
4886 
4887 	if (soc->cdp_soc.ol_ops->peer_set_default_routing) {
4888 		/* TODO: Check the destination ring number to be passed to FW */
4889 		soc->cdp_soc.ol_ops->peer_set_default_routing(
4890 				pdev->ctrl_pdev, peer->mac_addr.raw,
4891 				peer->vdev->vdev_id, hash_based, reo_dest);
4892 	}
4893 
4894 	qdf_atomic_set(&peer->is_default_route_set, 1);
4895 
	dp_peer_rx_init(pdev, peer);
4898 }
4899 
4900 /*
4901  * dp_set_vdev_tx_encap_type() - set the encap type of the vdev
4902  * @vdev_handle: virtual device object
 * @val: encap type of pkt (htt_cmn_pkt_type)
4904  *
4905  * Return: void
4906  */
4907 static void dp_set_vdev_tx_encap_type(struct cdp_vdev *vdev_handle,
4908 	 enum htt_cmn_pkt_type val)
4909 {
4910 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
4911 	vdev->tx_encap_type = val;
4912 }
4913 
4914 /*
4915  * dp_set_vdev_rx_decap_type() - set the decap type of the vdev
4916  * @vdev_handle: virtual device object
 * @val: decap type of pkt (htt_cmn_pkt_type)
4918  *
4919  * Return: void
4920  */
4921 static void dp_set_vdev_rx_decap_type(struct cdp_vdev *vdev_handle,
4922 	 enum htt_cmn_pkt_type val)
4923 {
4924 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
4925 	vdev->rx_decap_type = val;
4926 }
4927 
4928 /*
4929  * dp_set_ba_aging_timeout() - set ba aging timeout per AC
4930  * @txrx_soc: cdp soc handle
4931  * @ac: Access category
4932  * @value: timeout value in millisec
4933  *
4934  * Return: void
4935  */
4936 static void dp_set_ba_aging_timeout(struct cdp_soc_t *txrx_soc,
4937 				    uint8_t ac, uint32_t value)
4938 {
4939 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
4940 
4941 	hal_set_ba_aging_timeout(soc->hal_soc, ac, value);
4942 }
4943 
4944 /*
4945  * dp_get_ba_aging_timeout() - get ba aging timeout per AC
4946  * @txrx_soc: cdp soc handle
4947  * @ac: access category
 * @value: pointer to store the timeout value in milliseconds
4949  *
4950  * Return: void
4951  */
4952 static void dp_get_ba_aging_timeout(struct cdp_soc_t *txrx_soc,
4953 				    uint8_t ac, uint32_t *value)
4954 {
4955 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
4956 
4957 	hal_get_ba_aging_timeout(soc->hal_soc, ac, value);
4958 }
4959 
4960 /*
4961  * dp_set_pdev_reo_dest() - set the reo destination ring for this pdev
4962  * @pdev_handle: physical device object
4963  * @val: reo destination ring index (1 - 4)
4964  *
4965  * Return: void
4966  */
4967 static void dp_set_pdev_reo_dest(struct cdp_pdev *pdev_handle,
4968 	 enum cdp_host_reo_dest_ring val)
4969 {
4970 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
4971 
4972 	if (pdev)
4973 		pdev->reo_dest = val;
4974 }
4975 
4976 /*
4977  * dp_get_pdev_reo_dest() - get the reo destination for this pdev
4978  * @pdev_handle: physical device object
4979  *
4980  * Return: reo destination ring index
4981  */
4982 static enum cdp_host_reo_dest_ring
4983 dp_get_pdev_reo_dest(struct cdp_pdev *pdev_handle)
4984 {
4985 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
4986 
4987 	if (pdev)
4988 		return pdev->reo_dest;
4989 	else
4990 		return cdp_host_reo_dest_ring_unknown;
4991 }
4992 
4993 /*
4994  * dp_set_filter_neighbour_peers() - set filter neighbour peers for smart mesh
4995  * @pdev_handle: device object
4996  * @val: value to be set
4997  *
4998  * Return: void
4999  */
5000 static int dp_set_filter_neighbour_peers(struct cdp_pdev *pdev_handle,
5001 	 uint32_t val)
5002 {
5003 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
5004 
5005 	/* Enable/Disable smart mesh filtering. This flag will be checked
5006 	 * during rx processing to check if packets are from NAC clients.
5007 	 */
5008 	pdev->filter_neighbour_peers = val;
5009 	return 0;
5010 }
5011 
5012 /*
5013  * dp_update_filter_neighbour_peers() - set neighbour peers(nac clients)
5014  * address for smart mesh filtering
5015  * @vdev_handle: virtual device object
5016  * @cmd: Add/Del command
5017  * @macaddr: nac client mac address
5018  *
5019  * Return: void
5020  */
5021 static int dp_update_filter_neighbour_peers(struct cdp_vdev *vdev_handle,
5022 					    uint32_t cmd, uint8_t *macaddr)
5023 {
5024 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
5025 	struct dp_pdev *pdev = vdev->pdev;
5026 	struct dp_neighbour_peer *peer = NULL;
5027 
5028 	if (!macaddr)
5029 		goto fail0;
5030 
5031 	/* Store address of NAC (neighbour peer) which will be checked
5032 	 * against TA of received packets.
5033 	 */
5034 	if (cmd == DP_NAC_PARAM_ADD) {
5035 		peer = (struct dp_neighbour_peer *) qdf_mem_malloc(
5036 				sizeof(*peer));
5037 
5038 		if (!peer) {
5039 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
5040 				FL("DP neighbour peer node memory allocation failed"));
5041 			goto fail0;
5042 		}
5043 
5044 		qdf_mem_copy(&peer->neighbour_peers_macaddr.raw[0],
5045 			macaddr, DP_MAC_ADDR_LEN);
5046 		peer->vdev = vdev;
5047 
5048 		qdf_spin_lock_bh(&pdev->neighbour_peer_mutex);
5049 
5050 		/* add this neighbour peer into the list */
5051 		TAILQ_INSERT_TAIL(&pdev->neighbour_peers_list, peer,
5052 				neighbour_peer_list_elem);
5053 		qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex);
5054 
5055 		/* first neighbour */
5056 		if (!pdev->neighbour_peers_added) {
5057 			pdev->neighbour_peers_added = true;
5058 			dp_ppdu_ring_cfg(pdev);
5059 		}
5060 		return 1;
5061 
5062 	} else if (cmd == DP_NAC_PARAM_DEL) {
5063 		qdf_spin_lock_bh(&pdev->neighbour_peer_mutex);
5064 		TAILQ_FOREACH(peer, &pdev->neighbour_peers_list,
5065 				neighbour_peer_list_elem) {
5066 			if (!qdf_mem_cmp(&peer->neighbour_peers_macaddr.raw[0],
5067 				macaddr, DP_MAC_ADDR_LEN)) {
5068 				/* delete this peer from the list */
5069 				TAILQ_REMOVE(&pdev->neighbour_peers_list,
5070 					peer, neighbour_peer_list_elem);
5071 				qdf_mem_free(peer);
5072 				break;
5073 			}
5074 		}
5075 		/* last neighbour deleted */
5076 		if (TAILQ_EMPTY(&pdev->neighbour_peers_list)) {
5077 			pdev->neighbour_peers_added = false;
5078 			dp_ppdu_ring_cfg(pdev);
5079 		}
5080 
5081 		qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex);
5082 
5083 		if (!pdev->mcopy_mode && !pdev->neighbour_peers_added &&
5084 		    !pdev->enhanced_stats_en)
5085 			dp_ppdu_ring_reset(pdev);
5086 		return 1;
5087 
5088 	}
5089 
5090 fail0:
5091 	return 0;
5092 }
5093 
5094 /*
5095  * dp_get_sec_type() - Get the security type
5096  * @peer:		Datapath peer handle
5097  * @sec_idx:    Security id (mcast, ucast)
5098  *
5099  * return sec_type: Security type
5100  */
5101 static int dp_get_sec_type(struct cdp_peer *peer, uint8_t sec_idx)
5102 {
5103 	struct dp_peer *dpeer = (struct dp_peer *)peer;
5104 
5105 	return dpeer->security[sec_idx].sec_type;
5106 }
5107 
5108 /*
5109  * dp_peer_authorize() - authorize txrx peer
5110  * @peer_handle:		Datapath peer handle
5111  * @authorize
5112  *
5113  */
5114 static void dp_peer_authorize(struct cdp_peer *peer_handle, uint32_t authorize)
5115 {
5116 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
5117 	struct dp_soc *soc;
5118 
5119 	if (peer != NULL) {
5120 		soc = peer->vdev->pdev->soc;
5121 		qdf_spin_lock_bh(&soc->peer_ref_mutex);
5122 		peer->authorize = authorize ? 1 : 0;
5123 		qdf_spin_unlock_bh(&soc->peer_ref_mutex);
5124 	}
5125 }
5126 
5127 static void dp_reset_and_release_peer_mem(struct dp_soc *soc,
5128 					  struct dp_pdev *pdev,
5129 					  struct dp_peer *peer,
5130 					  uint32_t vdev_id)
5131 {
5132 	struct dp_vdev *vdev = NULL;
5133 	struct dp_peer *bss_peer = NULL;
5134 	uint8_t *m_addr = NULL;
5135 
5136 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
5137 	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
5138 		if (vdev->vdev_id == vdev_id)
5139 			break;
5140 	}
5141 	if (!vdev) {
5142 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
5143 			  "vdev is NULL");
	} else {
		if (vdev->vap_bss_peer == peer)
			vdev->vap_bss_peer = NULL;
		m_addr = peer->mac_addr.raw;
		if (soc->cdp_soc.ol_ops->peer_unref_delete)
			soc->cdp_soc.ol_ops->peer_unref_delete(pdev->ctrl_pdev,
				m_addr, vdev->mac_addr.raw, vdev->opmode,
				peer->ctrl_peer, NULL);

		if (vdev->vap_bss_peer) {
			bss_peer = vdev->vap_bss_peer;
			DP_UPDATE_STATS(vdev, peer);
		}
	}
5158 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
5159 
5160 	/*
5161 	 * Peer AST list hast to be empty here
5162 	 */
5163 	DP_AST_ASSERT(TAILQ_EMPTY(&peer->ast_entry_list));
5164 
5165 	qdf_mem_free(peer);
5166 }
5167 
5168 /**
5169  * dp_delete_pending_vdev() - check and process vdev delete
5170  * @pdev: DP specific pdev pointer
5171  * @vdev: DP specific vdev pointer
5172  * @vdev_id: vdev id corresponding to vdev
5173  *
 * This API does the following:
 * 1) It releases tx flow pool buffers, as the vdev is
 *    going down and no peers are associated.
 * 2) It also detaches the vdev before cleaning up the vdev
 *    (struct dp_vdev) memory.
5178  */
5179 static void dp_delete_pending_vdev(struct dp_pdev *pdev, struct dp_vdev *vdev,
5180 				   uint8_t vdev_id)
5181 {
5182 	ol_txrx_vdev_delete_cb vdev_delete_cb = NULL;
5183 	void *vdev_delete_context = NULL;
5184 
5185 	vdev_delete_cb = vdev->delete.callback;
5186 	vdev_delete_context = vdev->delete.context;
5187 
5188 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
5189 		  FL("deleting vdev object %pK (%pM)- its last peer is done"),
5190 		  vdev, vdev->mac_addr.raw);
5191 	/* all peers are gone, go ahead and delete it */
5192 	dp_tx_flow_pool_unmap_handler(pdev, vdev_id,
5193 			FLOW_TYPE_VDEV, vdev_id);
5194 	dp_tx_vdev_detach(vdev);
5195 
5196 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
5197 	TAILQ_REMOVE(&pdev->vdev_list, vdev, vdev_list_elem);
5198 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
5199 
5200 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
5201 		  FL("deleting vdev object %pK (%pM)"),
5202 		  vdev, vdev->mac_addr.raw);
5203 	qdf_mem_free(vdev);
5204 	vdev = NULL;
5205 
5206 	if (vdev_delete_cb)
5207 		vdev_delete_cb(vdev_delete_context);
5208 }
5209 
5210 /*
5211  * dp_peer_unref_delete() - unref and delete peer
5212  * @peer_handle:		Datapath peer handle
5213  *
5214  */
5215 void dp_peer_unref_delete(void *peer_handle)
5216 {
5217 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
5218 	struct dp_vdev *vdev = peer->vdev;
5219 	struct dp_pdev *pdev = vdev->pdev;
5220 	struct dp_soc *soc = pdev->soc;
5221 	struct dp_peer *tmppeer;
5222 	int found = 0;
5223 	uint16_t peer_id;
5224 	uint16_t vdev_id;
5225 	bool delete_vdev;
5226 
5227 	/*
5228 	 * Hold the lock all the way from checking if the peer ref count
5229 	 * is zero until the peer references are removed from the hash
5230 	 * table and vdev list (if the peer ref count is zero).
5231 	 * This protects against a new HL tx operation starting to use the
5232 	 * peer object just after this function concludes it's done being used.
5233 	 * Furthermore, the lock needs to be held while checking whether the
5234 	 * vdev's list of peers is empty, to make sure that list is not modified
5235 	 * concurrently with the empty check.
5236 	 */
5237 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
5238 	if (qdf_atomic_dec_and_test(&peer->ref_cnt)) {
5239 		peer_id = peer->peer_ids[0];
5240 		vdev_id = vdev->vdev_id;
5241 
5242 		/*
5243 		 * Make sure that the reference to the peer in
5244 		 * peer object map is removed
5245 		 */
5246 		if (peer_id != HTT_INVALID_PEER)
5247 			soc->peer_id_to_obj_map[peer_id] = NULL;
5248 
5249 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
5250 			"Deleting peer %pK (%pM)", peer, peer->mac_addr.raw);
5251 
5252 		/* remove the reference to the peer from the hash table */
5253 		dp_peer_find_hash_remove(soc, peer);
5254 
5255 		qdf_spin_lock_bh(&soc->ast_lock);
5256 		if (peer->self_ast_entry) {
5257 			dp_peer_del_ast(soc, peer->self_ast_entry);
5258 			peer->self_ast_entry = NULL;
5259 		}
5260 		qdf_spin_unlock_bh(&soc->ast_lock);
5261 
5262 		TAILQ_FOREACH(tmppeer, &peer->vdev->peer_list, peer_list_elem) {
5263 			if (tmppeer == peer) {
5264 				found = 1;
5265 				break;
5266 			}
5267 		}
5268 
5269 		if (found) {
5270 			TAILQ_REMOVE(&peer->vdev->peer_list, peer,
5271 				peer_list_elem);
5272 		} else {
5273 			/*Ignoring the remove operation as peer not found*/
5274 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
5275 				  "peer:%pK not found in vdev:%pK peerlist:%pK",
5276 				  peer, vdev, &peer->vdev->peer_list);
5277 		}
5278 
5279 		/* cleanup the peer data */
5280 		dp_peer_cleanup(vdev, peer);
5281 
5282 		/* check whether the parent vdev has no peers left */
5283 		if (TAILQ_EMPTY(&vdev->peer_list)) {
5284 			/*
5285 			 * capture vdev delete pending flag's status
5286 			 * while holding peer_ref_mutex lock
5287 			 */
5288 			delete_vdev = vdev->delete.pending;
5289 			/*
5290 			 * Now that there are no references to the peer, we can
5291 			 * release the peer reference lock.
5292 			 */
5293 			qdf_spin_unlock_bh(&soc->peer_ref_mutex);
5294 			/*
5295 			 * Check if the parent vdev was waiting for its peers
5296 			 * to be deleted, in order for it to be deleted too.
5297 			 */
5298 			if (delete_vdev)
5299 				dp_delete_pending_vdev(pdev, vdev, vdev_id);
5300 		} else {
5301 			qdf_spin_unlock_bh(&soc->peer_ref_mutex);
5302 		}
5303 		dp_reset_and_release_peer_mem(soc, pdev, peer, vdev_id);
5304 
5305 	} else {
5306 		qdf_spin_unlock_bh(&soc->peer_ref_mutex);
5307 	}
5308 }
5309 
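/*
 * A minimal usage sketch (illustrative only, not driver code): callers
 * that pick a peer up directly from a list are expected to take a
 * reference before using it and to drop it through this function:
 *
 *	qdf_atomic_inc(&peer->ref_cnt);
 *	... use peer ...
 *	dp_peer_unref_delete(peer);
 *
 * dp_aggregate_pdev_ctrl_frames_stats() later in this file follows
 * exactly this pattern.
 */
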
5310 /*
 * dp_peer_delete_wifi3() - Delete txrx peer
5312  * @peer_handle: Datapath peer handle
5313  * @bitmap: bitmap indicating special handling of request.
5314  *
5315  */
5316 static void dp_peer_delete_wifi3(void *peer_handle, uint32_t bitmap)
5317 {
5318 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
5319 
5320 	/* redirect the peer's rx delivery function to point to a
5321 	 * discard func
5322 	 */
5323 
5324 	peer->rx_opt_proc = dp_rx_discard;
5325 
	/* Do not set ctrl_peer to NULL for connected STA peers.
	 * ctrl_peer is needed to release, during dp peer free, the
	 * reference that was held on the obj_mgr peer when the dp
	 * peer was created.
	 */
5331 	if (!(peer->vdev && (peer->vdev->opmode != wlan_op_mode_sta) &&
5332 	      !peer->bss_peer))
5333 		peer->ctrl_peer = NULL;
5334 
5335 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
5336 		FL("peer %pK (%pM)"),  peer, peer->mac_addr.raw);
5337 
5338 	dp_local_peer_id_free(peer->vdev->pdev, peer);
5339 	qdf_spinlock_destroy(&peer->peer_info_lock);
5340 
5341 	/*
5342 	 * Remove the reference added during peer_attach.
5343 	 * The peer will still be left allocated until the
5344 	 * PEER_UNMAP message arrives to remove the other
5345 	 * reference, added by the PEER_MAP message.
5346 	 */
5347 	dp_peer_unref_delete(peer_handle);
5348 }
5349 
/*
 * dp_get_vdev_mac_addr_wifi3() - Get vdev MAC address
 * @pvdev: Datapath VDEV handle
 *
 * Return: pointer to the vdev's raw MAC address
 */
5355 static uint8 *dp_get_vdev_mac_addr_wifi3(struct cdp_vdev *pvdev)
5356 {
5357 	struct dp_vdev *vdev = (struct dp_vdev *)pvdev;
5358 	return vdev->mac_addr.raw;
5359 }
5360 
5361 /*
 * dp_vdev_set_wds() - Enable/disable WDS on the vdev
 * @vdev_handle: DP VDEV handle
 * @val: WDS enable/disable value
 *
 * Return: 0 on success
5367  */
5368 static int dp_vdev_set_wds(void *vdev_handle, uint32_t val)
5369 {
5370 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
5371 
5372 	vdev->wds_enabled = val;
5373 	return 0;
5374 }
5375 
/*
 * dp_get_vdev_from_vdev_id_wifi3() - Find vdev by vdev id
 * @dev: Datapath PDEV handle
 * @vdev_id: vdev id to look up
 *
 * Return: VDEV handle on success, NULL if no match is found
 */
5381 static struct cdp_vdev *dp_get_vdev_from_vdev_id_wifi3(struct cdp_pdev *dev,
5382 						uint8_t vdev_id)
5383 {
5384 	struct dp_pdev *pdev = (struct dp_pdev *)dev;
5385 	struct dp_vdev *vdev = NULL;
5386 
5387 	if (qdf_unlikely(!pdev))
5388 		return NULL;
5389 
5390 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
5391 	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
5392 		if (vdev->vdev_id == vdev_id)
5393 			break;
5394 	}
5395 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
5396 
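	/*
	 * Note: the vdev_list_lock is dropped before returning, so the
	 * returned handle is not protected against a concurrent vdev
	 * delete; the caller must ensure the vdev remains valid.
	 */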
5397 	return (struct cdp_vdev *)vdev;
5398 }
5399 
5400 /*
5401  * dp_get_mon_vdev_from_pdev_wifi3() - Get vdev handle of monitor mode
5402  * @dev: PDEV handle
5403  *
5404  * Return: VDEV handle of monitor mode
5405  */
5406 
5407 static struct cdp_vdev *dp_get_mon_vdev_from_pdev_wifi3(struct cdp_pdev *dev)
5408 {
5409 	struct dp_pdev *pdev = (struct dp_pdev *)dev;
5410 
5411 	if (qdf_unlikely(!pdev))
5412 		return NULL;
5413 
5414 	return (struct cdp_vdev *)pdev->monitor_vdev;
5415 }
5416 
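/*
 * dp_get_opmode() - Get the operating mode of a vdev
 * @vdev_handle: Datapath VDEV handle
 *
 * Return: vdev operating mode
 */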
5417 static int dp_get_opmode(struct cdp_vdev *vdev_handle)
5418 {
5419 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
5420 
5421 	return vdev->opmode;
5422 }
5423 
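/*
 * dp_get_os_rx_handles_from_vdev_wifi3() - Get OS rx handles from a vdev
 * @pvdev: Datapath VDEV handle
 * @stack_fn_p: buffer for the OS stack rx function
 * @osif_vdev_p: buffer for the OS interface vdev handle
 *
 * Return: void
 */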
5424 static
5425 void dp_get_os_rx_handles_from_vdev_wifi3(struct cdp_vdev *pvdev,
5426 					  ol_txrx_rx_fp *stack_fn_p,
5427 					  ol_osif_vdev_handle *osif_vdev_p)
5428 {
5429 	struct dp_vdev *vdev = dp_get_dp_vdev_from_cdp_vdev(pvdev);
5430 
5431 	qdf_assert(vdev);
5432 	*stack_fn_p = vdev->osif_rx_stack;
5433 	*osif_vdev_p = vdev->osif_vdev;
5434 }
5435 
5436 static struct cdp_cfg *dp_get_ctrl_pdev_from_vdev_wifi3(struct cdp_vdev *pvdev)
5437 {
5438 	struct dp_vdev *vdev = (struct dp_vdev *)pvdev;
5439 	struct dp_pdev *pdev = vdev->pdev;
5440 
5441 	return (struct cdp_cfg *)pdev->wlan_cfg_ctx;
5442 }
5443 
5444 /**
5445  * dp_monitor_mode_ring_config() - Send the tlv config to fw for monitor buffer
5446  *                                 ring based on target
5447  * @soc: soc handle
5448  * @mac_for_pdev: pdev_id
5449  * @pdev: physical device handle
5450  * @ring_num: mac id
5451  * @htt_tlv_filter: tlv filter
5452  *
5453  * Return: zero on success, non-zero on failure
5454  */
5455 static inline
5456 QDF_STATUS dp_monitor_mode_ring_config(struct dp_soc *soc, uint8_t mac_for_pdev,
5457 				       struct dp_pdev *pdev, uint8_t ring_num,
5458 				       struct htt_rx_ring_tlv_filter htt_tlv_filter)
5459 {
5460 	QDF_STATUS status;
5461 
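	/*
	 * Targets with the extra RXDMA1 engine (rxdma1_enable) post the
	 * filter on the dedicated monitor buffer ring; other targets
	 * reuse the per-MAC RX buffer ring instead.
	 */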
5462 	if (soc->wlan_cfg_ctx->rxdma1_enable)
5463 		status = htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
5464 					     pdev->rxdma_mon_buf_ring[ring_num]
5465 					     .hal_srng,
5466 					     RXDMA_MONITOR_BUF, RX_BUFFER_SIZE,
5467 					     &htt_tlv_filter);
5468 	else
5469 		status = htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
5470 					     pdev->rx_mac_buf_ring[ring_num]
5471 					     .hal_srng,
5472 					     RXDMA_BUF, RX_BUFFER_SIZE,
5473 					     &htt_tlv_filter);
5474 
5475 	return status;
5476 }
5477 
5478 /**
5479  * dp_reset_monitor_mode() - Disable monitor mode
5480  * @pdev_handle: Datapath PDEV handle
5481  *
5482  * Return: 0 on success, not 0 on failure
5483  */
5484 static QDF_STATUS dp_reset_monitor_mode(struct cdp_pdev *pdev_handle)
5485 {
5486 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
5487 	struct htt_rx_ring_tlv_filter htt_tlv_filter;
5488 	struct dp_soc *soc = pdev->soc;
5489 	uint8_t pdev_id;
5490 	int mac_id;
5491 	QDF_STATUS status = QDF_STATUS_SUCCESS;
5492 
5493 	pdev_id = pdev->pdev_id;
5495 
5496 	qdf_spin_lock_bh(&pdev->mon_lock);
5497 
5498 	qdf_mem_set(&(htt_tlv_filter), sizeof(htt_tlv_filter), 0x0);
5499 
5500 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
5501 		int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, pdev_id);
5502 
5503 		status = dp_monitor_mode_ring_config(soc, mac_for_pdev,
5504 						     pdev, mac_id,
5505 						     htt_tlv_filter);
5506 
		if (status != QDF_STATUS_SUCCESS) {
			dp_err("Failed to send tlv filter for monitor mode rings");
			qdf_spin_unlock_bh(&pdev->mon_lock);
			return status;
		}
5511 
5512 		htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
5513 			    pdev->rxdma_mon_status_ring[mac_id].hal_srng,
5514 			    RXDMA_MONITOR_STATUS, RX_BUFFER_SIZE,
5515 			    &htt_tlv_filter);
5516 	}
5517 
5518 	pdev->monitor_vdev = NULL;
5519 	pdev->mcopy_mode = 0;
5520 	pdev->monitor_configured = false;
5521 
5522 	qdf_spin_unlock_bh(&pdev->mon_lock);
5523 
5524 	return QDF_STATUS_SUCCESS;
5525 }
5526 
5527 /**
5528  * dp_set_nac() - set peer_nac
5529  * @peer_handle: Datapath PEER handle
5530  *
5531  * Return: void
5532  */
5533 static void dp_set_nac(struct cdp_peer *peer_handle)
5534 {
5535 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
5536 
5537 	peer->nac = 1;
5538 }
5539 
5540 /**
5541  * dp_get_tx_pending() - read pending tx
5542  * @pdev_handle: Datapath PDEV handle
5543  *
5544  * Return: outstanding tx
5545  */
5546 static int dp_get_tx_pending(struct cdp_pdev *pdev_handle)
5547 {
5548 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
5549 
5550 	return qdf_atomic_read(&pdev->num_tx_outstanding);
5551 }
5552 
5553 /**
5554  * dp_get_peer_mac_from_peer_id() - get peer mac
5555  * @pdev_handle: Datapath PDEV handle
5556  * @peer_id: Peer ID
5557  * @peer_mac: MAC addr of PEER
5558  *
5559  * Return: void
5560  */
5561 static void dp_get_peer_mac_from_peer_id(struct cdp_pdev *pdev_handle,
5562 	uint32_t peer_id, uint8_t *peer_mac)
5563 {
5564 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
5565 	struct dp_peer *peer;
5566 
5567 	if (pdev && peer_mac) {
5568 		peer = dp_peer_find_by_id(pdev->soc, (uint16_t)peer_id);
5569 		if (peer) {
5570 			qdf_mem_copy(peer_mac, peer->mac_addr.raw,
5571 				     DP_MAC_ADDR_LEN);
5572 			dp_peer_unref_del_find_by_id(peer);
5573 		}
5574 	}
5575 }
5576 
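/*
 * A minimal sketch of the find/unref pairing used above (illustrative
 * only; "soc" and "peer_id" are assumed to come from the caller): every
 * successful dp_peer_find_by_id() must be balanced by a matching
 * dp_peer_unref_del_find_by_id() once the peer is no longer needed.
 *
 *	struct dp_peer *peer = dp_peer_find_by_id(soc, peer_id);
 *
 *	if (peer) {
 *		... read peer fields ...
 *		dp_peer_unref_del_find_by_id(peer);
 *	}
 */
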
5577 /**
5578  * dp_pdev_configure_monitor_rings() - configure monitor rings
 * @pdev: Datapath PDEV handle
 *
 * Return: QDF_STATUS_SUCCESS on success, error status on failure
5582  */
5583 static QDF_STATUS dp_pdev_configure_monitor_rings(struct dp_pdev *pdev)
5584 {
5585 	struct htt_rx_ring_tlv_filter htt_tlv_filter;
5586 	struct dp_soc *soc;
5587 	uint8_t pdev_id;
5588 	int mac_id;
5589 	QDF_STATUS status = QDF_STATUS_SUCCESS;
5590 
5591 	pdev_id = pdev->pdev_id;
5592 	soc = pdev->soc;
5593 
5594 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
5595 		"MODE[%x] FP[%02x|%02x|%02x] MO[%02x|%02x|%02x]",
5596 		pdev->mon_filter_mode, pdev->fp_mgmt_filter,
5597 		pdev->fp_ctrl_filter, pdev->fp_data_filter,
5598 		pdev->mo_mgmt_filter, pdev->mo_ctrl_filter,
5599 		pdev->mo_data_filter);
5600 
5601 	qdf_mem_set(&(htt_tlv_filter), sizeof(htt_tlv_filter), 0x0);
5602 
5603 	htt_tlv_filter.mpdu_start = 1;
5604 	htt_tlv_filter.msdu_start = 1;
5605 	htt_tlv_filter.packet = 1;
5606 	htt_tlv_filter.msdu_end = 1;
5607 	htt_tlv_filter.mpdu_end = 1;
5608 	htt_tlv_filter.packet_header = 1;
5609 	htt_tlv_filter.attention = 1;
5610 	htt_tlv_filter.ppdu_start = 0;
5611 	htt_tlv_filter.ppdu_end = 0;
5612 	htt_tlv_filter.ppdu_end_user_stats = 0;
5613 	htt_tlv_filter.ppdu_end_user_stats_ext = 0;
5614 	htt_tlv_filter.ppdu_end_status_done = 0;
5615 	htt_tlv_filter.header_per_msdu = 1;
5616 	htt_tlv_filter.enable_fp =
5617 		(pdev->mon_filter_mode & MON_FILTER_PASS) ? 1 : 0;
5618 	htt_tlv_filter.enable_md = 0;
5619 	htt_tlv_filter.enable_mo =
5620 		(pdev->mon_filter_mode & MON_FILTER_OTHER) ? 1 : 0;
5621 	htt_tlv_filter.fp_mgmt_filter = pdev->fp_mgmt_filter;
5622 	htt_tlv_filter.fp_ctrl_filter = pdev->fp_ctrl_filter;
5623 	if (pdev->mcopy_mode)
5624 		htt_tlv_filter.fp_data_filter = 0;
5625 	else
5626 		htt_tlv_filter.fp_data_filter = pdev->fp_data_filter;
5627 	htt_tlv_filter.mo_mgmt_filter = pdev->mo_mgmt_filter;
5628 	htt_tlv_filter.mo_ctrl_filter = pdev->mo_ctrl_filter;
5629 	htt_tlv_filter.mo_data_filter = pdev->mo_data_filter;
5630 
5631 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
5632 		int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, pdev_id);
5633 
5634 		status = dp_monitor_mode_ring_config(soc, mac_for_pdev,
5635 						     pdev, mac_id,
5636 						     htt_tlv_filter);
5637 
5638 		if (status != QDF_STATUS_SUCCESS) {
5639 			dp_err("Failed to send tlv filter for monitor mode rings");
5640 			return status;
5641 		}
5642 	}
5643 
5644 	qdf_mem_set(&(htt_tlv_filter), sizeof(htt_tlv_filter), 0x0);
5645 
5646 	htt_tlv_filter.mpdu_start = 1;
5647 	htt_tlv_filter.msdu_start = 0;
5648 	htt_tlv_filter.packet = 0;
5649 	htt_tlv_filter.msdu_end = 0;
5650 	htt_tlv_filter.mpdu_end = 0;
5651 	htt_tlv_filter.attention = 0;
5652 	htt_tlv_filter.ppdu_start = 1;
5653 	htt_tlv_filter.ppdu_end = 1;
5654 	htt_tlv_filter.ppdu_end_user_stats = 1;
5655 	htt_tlv_filter.ppdu_end_user_stats_ext = 1;
5656 	htt_tlv_filter.ppdu_end_status_done = 1;
5657 	htt_tlv_filter.enable_fp = 1;
5658 	htt_tlv_filter.enable_md = 0;
5659 	htt_tlv_filter.enable_mo = 1;
5660 	if (pdev->mcopy_mode) {
5661 		htt_tlv_filter.packet_header = 1;
5662 	}
5663 	htt_tlv_filter.fp_mgmt_filter = FILTER_MGMT_ALL;
5664 	htt_tlv_filter.fp_ctrl_filter = FILTER_CTRL_ALL;
5665 	htt_tlv_filter.fp_data_filter = FILTER_DATA_ALL;
5666 	htt_tlv_filter.mo_mgmt_filter = FILTER_MGMT_ALL;
5667 	htt_tlv_filter.mo_ctrl_filter = FILTER_CTRL_ALL;
5668 	htt_tlv_filter.mo_data_filter = FILTER_DATA_ALL;
5669 
5670 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
5671 		int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id,
5672 						pdev->pdev_id);
5673 
5674 		htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
5675 			pdev->rxdma_mon_status_ring[mac_id].hal_srng,
5676 			RXDMA_MONITOR_STATUS, RX_BUFFER_SIZE, &htt_tlv_filter);
5677 	}
5678 
5679 	return status;
5680 }
5681 
5682 /**
5683  * dp_vdev_set_monitor_mode() - Set DP VDEV to monitor mode
5684  * @vdev_handle: Datapath VDEV handle
 * @smart_monitor: Flag to denote if it is smart monitor mode
5686  *
5687  * Return: 0 on success, not 0 on failure
5688  */
5689 static QDF_STATUS dp_vdev_set_monitor_mode(struct cdp_vdev *vdev_handle,
5690 					   uint8_t smart_monitor)
5691 {
5692 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
5693 	struct dp_pdev *pdev;
5694 
5695 	qdf_assert(vdev);
5696 
5697 	pdev = vdev->pdev;
5698 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_WARN,
5699 		  "pdev=%pK, pdev_id=%d, soc=%pK vdev=%pK\n",
5700 		  pdev, pdev->pdev_id, pdev->soc, vdev);
5701 
	/* Check if current pdev's monitor_vdev exists */
5703 	if (pdev->monitor_configured) {
5704 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
5705 			  "monitor vap already created vdev=%pK\n", vdev);
5706 		qdf_assert(vdev);
5707 		return QDF_STATUS_E_RESOURCES;
5708 	}
5709 
5710 	pdev->monitor_vdev = vdev;
5711 	pdev->monitor_configured = true;
5712 
5713 	/* If smart monitor mode, do not configure monitor ring */
5714 	if (smart_monitor)
5715 		return QDF_STATUS_SUCCESS;
5716 
5717 	return dp_pdev_configure_monitor_rings(pdev);
5718 }
5719 
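/*
 * A minimal life-cycle sketch (illustrative only): a monitor vdev is
 * bound to its pdev with dp_vdev_set_monitor_mode() and later unbound
 * with dp_reset_monitor_mode():
 *
 *	if (dp_vdev_set_monitor_mode(vdev_handle, 0) != QDF_STATUS_SUCCESS)
 *		... handle failure ...
 *	...
 *	dp_reset_monitor_mode(pdev_handle);
 *
 * Passing smart_monitor = 1 marks the vdev but skips the monitor ring
 * configuration entirely.
 */
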
5720 /**
5721  * dp_pdev_set_advance_monitor_filter() - Set DP PDEV monitor filter
5722  * @pdev_handle: Datapath PDEV handle
5723  * @filter_val: Flag to select Filter for monitor mode
5724  * Return: 0 on success, not 0 on failure
5725  */
5726 static QDF_STATUS
5727 dp_pdev_set_advance_monitor_filter(struct cdp_pdev *pdev_handle,
5728 				   struct cdp_monitor_filter *filter_val)
5729 {
	/* Many monitor VAPs can exist in a system but only one can be up at
	 * any time
5732 	 */
5733 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
5734 	struct dp_vdev *vdev = pdev->monitor_vdev;
5735 	struct htt_rx_ring_tlv_filter htt_tlv_filter;
5736 	struct dp_soc *soc;
5737 	uint8_t pdev_id;
5738 	int mac_id;
5739 	QDF_STATUS status = QDF_STATUS_SUCCESS;
5740 
5741 	pdev_id = pdev->pdev_id;
5742 	soc = pdev->soc;
5743 
5744 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_WARN,
5745 		"pdev=%pK, pdev_id=%d, soc=%pK vdev=%pK",
5746 		pdev, pdev_id, soc, vdev);
5747 
	/* Check if current pdev's monitor_vdev exists */
	if (!pdev->monitor_vdev) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			"monitor vdev does not exist, vdev=%pK", vdev);
5752 		qdf_assert(vdev);
5753 	}
5754 
5755 	/* update filter mode, type in pdev structure */
5756 	pdev->mon_filter_mode = filter_val->mode;
5757 	pdev->fp_mgmt_filter = filter_val->fp_mgmt;
5758 	pdev->fp_ctrl_filter = filter_val->fp_ctrl;
5759 	pdev->fp_data_filter = filter_val->fp_data;
5760 	pdev->mo_mgmt_filter = filter_val->mo_mgmt;
5761 	pdev->mo_ctrl_filter = filter_val->mo_ctrl;
5762 	pdev->mo_data_filter = filter_val->mo_data;
5763 
5764 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
5765 		"MODE[%x] FP[%02x|%02x|%02x] MO[%02x|%02x|%02x]",
5766 		pdev->mon_filter_mode, pdev->fp_mgmt_filter,
5767 		pdev->fp_ctrl_filter, pdev->fp_data_filter,
5768 		pdev->mo_mgmt_filter, pdev->mo_ctrl_filter,
5769 		pdev->mo_data_filter);
5770 
5771 	qdf_mem_set(&(htt_tlv_filter), sizeof(htt_tlv_filter), 0x0);
5772 
5773 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
5774 		int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, pdev_id);
5775 
5776 		status = dp_monitor_mode_ring_config(soc, mac_for_pdev,
5777 						     pdev, mac_id,
5778 						     htt_tlv_filter);
5779 
5780 		if (status != QDF_STATUS_SUCCESS) {
5781 			dp_err("Failed to send tlv filter for monitor mode rings");
5782 			return status;
5783 		}
5784 
5785 		htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
5786 			pdev->rxdma_mon_status_ring[mac_id].hal_srng,
5787 			RXDMA_MONITOR_STATUS, RX_BUFFER_SIZE, &htt_tlv_filter);
5788 	}
5789 
5790 	htt_tlv_filter.mpdu_start = 1;
5791 	htt_tlv_filter.msdu_start = 1;
5792 	htt_tlv_filter.packet = 1;
5793 	htt_tlv_filter.msdu_end = 1;
5794 	htt_tlv_filter.mpdu_end = 1;
5795 	htt_tlv_filter.packet_header = 1;
5796 	htt_tlv_filter.attention = 1;
5797 	htt_tlv_filter.ppdu_start = 0;
5798 	htt_tlv_filter.ppdu_end = 0;
5799 	htt_tlv_filter.ppdu_end_user_stats = 0;
5800 	htt_tlv_filter.ppdu_end_user_stats_ext = 0;
5801 	htt_tlv_filter.ppdu_end_status_done = 0;
5802 	htt_tlv_filter.header_per_msdu = 1;
5803 	htt_tlv_filter.enable_fp =
5804 		(pdev->mon_filter_mode & MON_FILTER_PASS) ? 1 : 0;
5805 	htt_tlv_filter.enable_md = 0;
5806 	htt_tlv_filter.enable_mo =
5807 		(pdev->mon_filter_mode & MON_FILTER_OTHER) ? 1 : 0;
5808 	htt_tlv_filter.fp_mgmt_filter = pdev->fp_mgmt_filter;
5809 	htt_tlv_filter.fp_ctrl_filter = pdev->fp_ctrl_filter;
5810 	if (pdev->mcopy_mode)
5811 		htt_tlv_filter.fp_data_filter = 0;
5812 	else
5813 		htt_tlv_filter.fp_data_filter = pdev->fp_data_filter;
5814 	htt_tlv_filter.mo_mgmt_filter = pdev->mo_mgmt_filter;
5815 	htt_tlv_filter.mo_ctrl_filter = pdev->mo_ctrl_filter;
5816 	htt_tlv_filter.mo_data_filter = pdev->mo_data_filter;
5817 
5818 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
5819 		int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, pdev_id);
5820 
5821 		status = dp_monitor_mode_ring_config(soc, mac_for_pdev,
5822 						     pdev, mac_id,
5823 						     htt_tlv_filter);
5824 
5825 		if (status != QDF_STATUS_SUCCESS) {
5826 			dp_err("Failed to send tlv filter for monitor mode rings");
5827 			return status;
5828 		}
5829 	}
5830 
5831 	qdf_mem_set(&(htt_tlv_filter), sizeof(htt_tlv_filter), 0x0);
5832 
5833 	htt_tlv_filter.mpdu_start = 1;
5834 	htt_tlv_filter.msdu_start = 0;
5835 	htt_tlv_filter.packet = 0;
5836 	htt_tlv_filter.msdu_end = 0;
5837 	htt_tlv_filter.mpdu_end = 0;
5838 	htt_tlv_filter.attention = 0;
5839 	htt_tlv_filter.ppdu_start = 1;
5840 	htt_tlv_filter.ppdu_end = 1;
5841 	htt_tlv_filter.ppdu_end_user_stats = 1;
5842 	htt_tlv_filter.ppdu_end_user_stats_ext = 1;
5843 	htt_tlv_filter.ppdu_end_status_done = 1;
5844 	htt_tlv_filter.enable_fp = 1;
5845 	htt_tlv_filter.enable_md = 0;
5846 	htt_tlv_filter.enable_mo = 1;
5847 	if (pdev->mcopy_mode) {
5848 		htt_tlv_filter.packet_header = 1;
5849 	}
5850 	htt_tlv_filter.fp_mgmt_filter = FILTER_MGMT_ALL;
5851 	htt_tlv_filter.fp_ctrl_filter = FILTER_CTRL_ALL;
5852 	htt_tlv_filter.fp_data_filter = FILTER_DATA_ALL;
5853 	htt_tlv_filter.mo_mgmt_filter = FILTER_MGMT_ALL;
5854 	htt_tlv_filter.mo_ctrl_filter = FILTER_CTRL_ALL;
5855 	htt_tlv_filter.mo_data_filter = FILTER_DATA_ALL;
5856 
5857 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
5858 		int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id,
5859 						pdev->pdev_id);
5860 
5861 		htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
5862 			pdev->rxdma_mon_status_ring[mac_id].hal_srng,
5863 			RXDMA_MONITOR_STATUS, RX_BUFFER_SIZE, &htt_tlv_filter);
5864 	}
5865 
5866 	return QDF_STATUS_SUCCESS;
5867 }
5868 
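/*
 * A minimal caller sketch (illustrative only; the filter values are
 * placeholders): the cdp_monitor_filter fields map one-to-one onto the
 * pdev filter fields consumed above.
 *
 *	struct cdp_monitor_filter filter = {0};
 *
 *	filter.mode = MON_FILTER_PASS | MON_FILTER_OTHER;
 *	filter.fp_mgmt = FILTER_MGMT_ALL;
 *	filter.fp_ctrl = FILTER_CTRL_ALL;
 *	filter.fp_data = FILTER_DATA_ALL;
 *	dp_pdev_set_advance_monitor_filter(pdev_handle, &filter);
 */
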
5869 /**
5870  * dp_get_pdev_id_frm_pdev() - get pdev_id
5871  * @pdev_handle: Datapath PDEV handle
5872  *
5873  * Return: pdev_id
5874  */
5875 static
5876 uint8_t dp_get_pdev_id_frm_pdev(struct cdp_pdev *pdev_handle)
5877 {
5878 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
5879 
5880 	return pdev->pdev_id;
5881 }
5882 
5883 /**
5884  * dp_pdev_set_chan_noise_floor() - set channel noise floor
5885  * @pdev_handle: Datapath PDEV handle
5886  * @chan_noise_floor: Channel Noise Floor
5887  *
5888  * Return: void
5889  */
5890 static
5891 void dp_pdev_set_chan_noise_floor(struct cdp_pdev *pdev_handle,
5892 				  int16_t chan_noise_floor)
5893 {
5894 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
5895 
5896 	pdev->chan_noise_floor = chan_noise_floor;
5897 }
5898 
5899 /**
5900  * dp_vdev_get_filter_ucast_data() - get DP VDEV monitor ucast filter
5901  * @vdev_handle: Datapath VDEV handle
5902  * Return: true on ucast filter flag set
5903  */
5904 static bool dp_vdev_get_filter_ucast_data(struct cdp_vdev *vdev_handle)
5905 {
5906 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
5907 	struct dp_pdev *pdev;
5908 
5909 	pdev = vdev->pdev;
5910 
5911 	if ((pdev->fp_data_filter & FILTER_DATA_UCAST) ||
5912 	    (pdev->mo_data_filter & FILTER_DATA_UCAST))
5913 		return true;
5914 
5915 	return false;
5916 }
5917 
5918 /**
5919  * dp_vdev_get_filter_mcast_data() - get DP VDEV monitor mcast filter
5920  * @vdev_handle: Datapath VDEV handle
5921  * Return: true on mcast filter flag set
5922  */
5923 static bool dp_vdev_get_filter_mcast_data(struct cdp_vdev *vdev_handle)
5924 {
5925 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
5926 	struct dp_pdev *pdev;
5927 
5928 	pdev = vdev->pdev;
5929 
5930 	if ((pdev->fp_data_filter & FILTER_DATA_MCAST) ||
5931 	    (pdev->mo_data_filter & FILTER_DATA_MCAST))
5932 		return true;
5933 
5934 	return false;
5935 }
5936 
5937 /**
5938  * dp_vdev_get_filter_non_data() - get DP VDEV monitor non_data filter
5939  * @vdev_handle: Datapath VDEV handle
5940  * Return: true on non data filter flag set
5941  */
5942 static bool dp_vdev_get_filter_non_data(struct cdp_vdev *vdev_handle)
5943 {
5944 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
5945 	struct dp_pdev *pdev;
5946 
5947 	pdev = vdev->pdev;
5948 
5949 	if ((pdev->fp_mgmt_filter & FILTER_MGMT_ALL) ||
5950 	    (pdev->mo_mgmt_filter & FILTER_MGMT_ALL)) {
5951 		if ((pdev->fp_ctrl_filter & FILTER_CTRL_ALL) ||
5952 		    (pdev->mo_ctrl_filter & FILTER_CTRL_ALL)) {
5953 			return true;
5954 		}
5955 	}
5956 
5957 	return false;
5958 }
5959 
5960 #ifdef MESH_MODE_SUPPORT
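/*
 * dp_peer_set_mesh_mode() - flag the vdev as a mesh vdev
 * @vdev_hdl: virtual device object
 * @val: value to be set
 *
 * Return: void
 */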
5961 void dp_peer_set_mesh_mode(struct cdp_vdev *vdev_hdl, uint32_t val)
5962 {
5963 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_hdl;
5964 
5965 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
5966 		FL("val %d"), val);
5967 	vdev->mesh_vdev = val;
5968 }
5969 
5970 /*
5971  * dp_peer_set_mesh_rx_filter() - to set the mesh rx filter
5972  * @vdev_hdl: virtual device object
5973  * @val: value to be set
5974  *
5975  * Return: void
5976  */
5977 void dp_peer_set_mesh_rx_filter(struct cdp_vdev *vdev_hdl, uint32_t val)
5978 {
5979 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_hdl;
5980 
5981 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
5982 		FL("val %d"), val);
5983 	vdev->mesh_rx_filter = val;
5984 }
5985 #endif
5986 
5987 /*
 * dp_aggregate_pdev_ctrl_frames_stats() - function to aggregate peer stats
5989  * Current scope is bar received count
5990  *
5991  * @pdev_handle: DP_PDEV handle
5992  *
5993  * Return: void
5994  */
5995 #define STATS_PROC_TIMEOUT        (HZ/1000)
5996 
5997 static void
5998 dp_aggregate_pdev_ctrl_frames_stats(struct dp_pdev *pdev)
5999 {
6000 	struct dp_vdev *vdev;
6001 	struct dp_peer *peer;
6002 	uint32_t waitcnt;
6003 
6004 	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
6005 		TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
6006 			if (!peer) {
6007 				QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
					FL("DP Invalid Peer reference"));
6009 				return;
6010 			}
6011 
6012 			if (peer->delete_in_progress) {
6013 				QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
6014 					FL("DP Peer deletion in progress"));
6015 				continue;
6016 			}
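			/*
			 * Take a reference so the peer cannot be freed
			 * while the REO stats command is in flight; the
			 * stats_cmd_complete flag polled below is set by
			 * dp_rx_bar_stats_cb().
			 */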
6017 			qdf_atomic_inc(&peer->ref_cnt);
6018 			waitcnt = 0;
6019 			dp_peer_rxtid_stats(peer, dp_rx_bar_stats_cb, pdev);
6020 			while (!(qdf_atomic_read(&(pdev->stats_cmd_complete)))
6021 				&& waitcnt < 10) {
6022 				schedule_timeout_interruptible(
6023 						STATS_PROC_TIMEOUT);
6024 				waitcnt++;
6025 			}
6026 			qdf_atomic_set(&(pdev->stats_cmd_complete), 0);
6027 			dp_peer_unref_delete(peer);
6028 		}
6029 	}
6030 }
6031 
6032 /**
6033  * dp_rx_bar_stats_cb(): BAR received stats callback
6034  * @soc: SOC handle
6035  * @cb_ctxt: Call back context
6036  * @reo_status: Reo status
6037  *
6038  * return: void
6039  */
6040 void dp_rx_bar_stats_cb(struct dp_soc *soc, void *cb_ctxt,
6041 	union hal_reo_status *reo_status)
6042 {
6043 	struct dp_pdev *pdev = (struct dp_pdev *)cb_ctxt;
6044 	struct hal_reo_queue_status *queue_status = &(reo_status->queue_status);
6045 
6046 	if (!qdf_atomic_read(&soc->cmn_init_done))
6047 		return;
6048 
6049 	if (queue_status->header.status != HAL_REO_CMD_SUCCESS) {
		DP_TRACE_STATS(FATAL, "REO stats failure %d\n",
6051 			queue_status->header.status);
6052 		qdf_atomic_set(&(pdev->stats_cmd_complete), 1);
6053 		return;
6054 	}
6055 
6056 	pdev->stats.rx.bar_recv_cnt += queue_status->bar_rcvd_cnt;
6057 	qdf_atomic_set(&(pdev->stats_cmd_complete), 1);
6058 
6059 }
6060 
6061 /**
6062  * dp_aggregate_vdev_stats(): Consolidate stats at VDEV level
 * @vdev: DP VDEV handle
 * @vdev_stats: buffer to hold the consolidated stats
6064  *
6065  * return: void
6066  */
6067 void dp_aggregate_vdev_stats(struct dp_vdev *vdev,
6068 			     struct cdp_vdev_stats *vdev_stats)
6069 {
6070 	struct dp_peer *peer = NULL;
6071 	struct dp_soc *soc = NULL;
6072 
6073 	if (!vdev || !vdev->pdev)
6074 		return;
6075 
6076 	soc = vdev->pdev->soc;
6077 
6078 	qdf_mem_copy(vdev_stats, &vdev->stats, sizeof(vdev->stats));
6079 
6080 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
6081 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem)
6082 		dp_update_vdev_stats(vdev_stats, peer);
6083 	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
6084 
6085 #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
6086 	dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, vdev->pdev->soc,
6087 			     vdev_stats, vdev->vdev_id,
6088 			     UPDATE_VDEV_STATS, vdev->pdev->pdev_id);
6089 #endif
6090 }
6091 
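/*
 * Locking note: dp_aggregate_vdev_stats() takes soc->peer_ref_mutex
 * itself, so callers must not already hold it; dp_aggregate_pdev_stats()
 * below invokes it while holding only the pdev's vdev_list_lock.
 */
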
6092 /**
6093  * dp_aggregate_pdev_stats(): Consolidate stats at PDEV level
6094  * @pdev: DP PDEV handle
6095  *
6096  * return: void
6097  */
6098 static inline void dp_aggregate_pdev_stats(struct dp_pdev *pdev)
6099 {
6100 	struct dp_vdev *vdev = NULL;
6101 	struct cdp_vdev_stats *vdev_stats =
6102 			qdf_mem_malloc(sizeof(struct cdp_vdev_stats));
6103 
6104 	if (!vdev_stats) {
6105 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "DP alloc failure - unable to allocate vdev stats");
6107 		return;
6108 	}
6109 
6110 	qdf_mem_set(&(pdev->stats.tx), sizeof(pdev->stats.tx), 0x0);
6111 	qdf_mem_set(&(pdev->stats.rx), sizeof(pdev->stats.rx), 0x0);
6112 	qdf_mem_set(&(pdev->stats.tx_i), sizeof(pdev->stats.tx_i), 0x0);
6113 
6114 	if (pdev->mcopy_mode)
6115 		DP_UPDATE_STATS(pdev, pdev->invalid_peer);
6116 
6117 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
6118 	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
6119 
6120 		dp_aggregate_vdev_stats(vdev, vdev_stats);
6121 		dp_update_pdev_stats(pdev, vdev_stats);
6122 		dp_update_pdev_ingress_stats(pdev, vdev);
6123 	}
6124 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
6125 	qdf_mem_free(vdev_stats);
6126 
6127 #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
6128 	dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, pdev->soc, &pdev->stats,
6129 			     pdev->pdev_id, UPDATE_PDEV_STATS, pdev->pdev_id);
6130 #endif
6131 }
6132 
6133 /**
6134  * dp_vdev_getstats() - get vdev packet level stats
6135  * @vdev_handle: Datapath VDEV handle
6136  * @stats: cdp network device stats structure
6137  *
6138  * Return: void
6139  */
6140 static void dp_vdev_getstats(void *vdev_handle,
6141 		struct cdp_dev_stats *stats)
6142 {
6143 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
6144 	struct cdp_vdev_stats *vdev_stats =
6145 			qdf_mem_malloc(sizeof(struct cdp_vdev_stats));
6146 
6147 	if (!vdev_stats) {
6148 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "DP alloc failure - unable to allocate vdev stats");
6150 		return;
6151 	}
6152 
6153 	dp_aggregate_vdev_stats(vdev, vdev_stats);
6154 
6155 	stats->tx_packets = vdev_stats->tx_i.rcvd.num;
6156 	stats->tx_bytes = vdev_stats->tx_i.rcvd.bytes;
6157 
6158 	stats->tx_errors = vdev_stats->tx.tx_failed +
6159 		vdev_stats->tx_i.dropped.dropped_pkt.num;
6160 	stats->tx_dropped = stats->tx_errors;
6161 
6162 	stats->rx_packets = vdev_stats->rx.unicast.num +
6163 		vdev_stats->rx.multicast.num +
6164 		vdev_stats->rx.bcast.num;
6165 	stats->rx_bytes = vdev_stats->rx.unicast.bytes +
6166 		vdev_stats->rx.multicast.bytes +
6167 		vdev_stats->rx.bcast.bytes;
6168 
6169 }
6170 
6171 
6172 /**
6173  * dp_pdev_getstats() - get pdev packet level stats
6174  * @pdev_handle: Datapath PDEV handle
6175  * @stats: cdp network device stats structure
6176  *
6177  * Return: void
6178  */
6179 static void dp_pdev_getstats(void *pdev_handle,
6180 		struct cdp_dev_stats *stats)
6181 {
6182 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
6183 
6184 	dp_aggregate_pdev_stats(pdev);
6185 
6186 	stats->tx_packets = pdev->stats.tx_i.rcvd.num;
6187 	stats->tx_bytes = pdev->stats.tx_i.rcvd.bytes;
6188 
6189 	stats->tx_errors = pdev->stats.tx.tx_failed +
6190 		pdev->stats.tx_i.dropped.dropped_pkt.num;
6191 	stats->tx_dropped = stats->tx_errors;
6192 
6193 	stats->rx_packets = pdev->stats.rx.unicast.num +
6194 		pdev->stats.rx.multicast.num +
6195 		pdev->stats.rx.bcast.num;
6196 	stats->rx_bytes = pdev->stats.rx.unicast.bytes +
6197 		pdev->stats.rx.multicast.bytes +
6198 		pdev->stats.rx.bcast.bytes;
6199 }
6200 
6201 /**
6202  * dp_get_device_stats() - get interface level packet stats
6203  * @handle: device handle
6204  * @stats: cdp network device stats structure
6205  * @type: device type pdev/vdev
6206  *
6207  * Return: void
6208  */
6209 static void dp_get_device_stats(void *handle,
6210 		struct cdp_dev_stats *stats, uint8_t type)
6211 {
6212 	switch (type) {
6213 	case UPDATE_VDEV_STATS:
6214 		dp_vdev_getstats(handle, stats);
6215 		break;
6216 	case UPDATE_PDEV_STATS:
6217 		dp_pdev_getstats(handle, stats);
6218 		break;
6219 	default:
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			"apstats cannot be updated for this input type %d",
			type);
6223 		break;
6224 	}
6225 
6226 }
6227 
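/*
 * A minimal dispatch sketch (illustrative only; "vdev_handle" is assumed
 * to come from the caller):
 *
 *	struct cdp_dev_stats stats;
 *
 *	dp_get_device_stats(vdev_handle, &stats, UPDATE_VDEV_STATS);
 *	... stats.tx_packets / stats.rx_bytes now hold vdev totals ...
 */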
6228 
6229 /**
6230  * dp_print_pdev_tx_stats(): Print Pdev level TX stats
6231  * @pdev: DP_PDEV Handle
6232  *
6233  * Return:void
6234  */
6235 static inline void
6236 dp_print_pdev_tx_stats(struct dp_pdev *pdev)
6237 {
6238 	uint8_t index = 0;
6239 
6240 	DP_PRINT_STATS("PDEV Tx Stats:\n");
6241 	DP_PRINT_STATS("Received From Stack:");
6242 	DP_PRINT_STATS("	Packets = %d",
6243 			pdev->stats.tx_i.rcvd.num);
6244 	DP_PRINT_STATS("	Bytes = %llu",
6245 			pdev->stats.tx_i.rcvd.bytes);
6246 	DP_PRINT_STATS("Processed:");
6247 	DP_PRINT_STATS("	Packets = %d",
6248 			pdev->stats.tx_i.processed.num);
6249 	DP_PRINT_STATS("	Bytes = %llu",
6250 			pdev->stats.tx_i.processed.bytes);
6251 	DP_PRINT_STATS("Total Completions:");
6252 	DP_PRINT_STATS("	Packets = %u",
6253 			pdev->stats.tx.comp_pkt.num);
6254 	DP_PRINT_STATS("	Bytes = %llu",
6255 			pdev->stats.tx.comp_pkt.bytes);
6256 	DP_PRINT_STATS("Successful Completions:");
6257 	DP_PRINT_STATS("	Packets = %u",
6258 			pdev->stats.tx.tx_success.num);
6259 	DP_PRINT_STATS("	Bytes = %llu",
6260 			pdev->stats.tx.tx_success.bytes);
6261 	DP_PRINT_STATS("Dropped:");
6262 	DP_PRINT_STATS("	Total = %d",
6263 			pdev->stats.tx_i.dropped.dropped_pkt.num);
6264 	DP_PRINT_STATS("	Dma_map_error = %d",
6265 			pdev->stats.tx_i.dropped.dma_error);
6266 	DP_PRINT_STATS("	Ring Full = %d",
6267 			pdev->stats.tx_i.dropped.ring_full);
6268 	DP_PRINT_STATS("	Descriptor Not available = %d",
6269 			pdev->stats.tx_i.dropped.desc_na.num);
	DP_PRINT_STATS("	HW enqueue failed = %d",
6271 			pdev->stats.tx_i.dropped.enqueue_fail);
6272 	DP_PRINT_STATS("	Resources Full = %d",
6273 			pdev->stats.tx_i.dropped.res_full);
6274 	DP_PRINT_STATS("	FW removed Pkts = %u",
6275 		       pdev->stats.tx.dropped.fw_rem.num);
	DP_PRINT_STATS("	FW removed bytes = %llu",
6277 		       pdev->stats.tx.dropped.fw_rem.bytes);
6278 	DP_PRINT_STATS("	FW removed transmitted = %d",
6279 			pdev->stats.tx.dropped.fw_rem_tx);
6280 	DP_PRINT_STATS("	FW removed untransmitted = %d",
6281 			pdev->stats.tx.dropped.fw_rem_notx);
6282 	DP_PRINT_STATS("	FW removed untransmitted fw_reason1 = %d",
6283 			pdev->stats.tx.dropped.fw_reason1);
6284 	DP_PRINT_STATS("	FW removed untransmitted fw_reason2 = %d",
6285 			pdev->stats.tx.dropped.fw_reason2);
6286 	DP_PRINT_STATS("	FW removed untransmitted fw_reason3 = %d",
6287 			pdev->stats.tx.dropped.fw_reason3);
6288 	DP_PRINT_STATS("	Aged Out from msdu/mpdu queues = %d",
6289 			pdev->stats.tx.dropped.age_out);
6290 	DP_PRINT_STATS("	headroom insufficient = %d",
6291 			pdev->stats.tx_i.dropped.headroom_insufficient);
6292 	DP_PRINT_STATS("	Multicast:");
6293 	DP_PRINT_STATS("	Packets: %u",
6294 		       pdev->stats.tx.mcast.num);
6295 	DP_PRINT_STATS("	Bytes: %llu",
6296 		       pdev->stats.tx.mcast.bytes);
6297 	DP_PRINT_STATS("Scatter Gather:");
6298 	DP_PRINT_STATS("	Packets = %d",
6299 			pdev->stats.tx_i.sg.sg_pkt.num);
6300 	DP_PRINT_STATS("	Bytes = %llu",
6301 			pdev->stats.tx_i.sg.sg_pkt.bytes);
6302 	DP_PRINT_STATS("	Dropped By Host = %d",
6303 			pdev->stats.tx_i.sg.dropped_host.num);
6304 	DP_PRINT_STATS("	Dropped By Target = %d",
6305 			pdev->stats.tx_i.sg.dropped_target);
6306 	DP_PRINT_STATS("TSO:");
6307 	DP_PRINT_STATS("	Number of Segments = %d",
6308 			pdev->stats.tx_i.tso.num_seg);
6309 	DP_PRINT_STATS("	Packets = %d",
6310 			pdev->stats.tx_i.tso.tso_pkt.num);
6311 	DP_PRINT_STATS("	Bytes = %llu",
6312 			pdev->stats.tx_i.tso.tso_pkt.bytes);
6313 	DP_PRINT_STATS("	Dropped By Host = %d",
6314 			pdev->stats.tx_i.tso.dropped_host.num);
6315 	DP_PRINT_STATS("Mcast Enhancement:");
6316 	DP_PRINT_STATS("	Packets = %d",
6317 			pdev->stats.tx_i.mcast_en.mcast_pkt.num);
6318 	DP_PRINT_STATS("	Bytes = %llu",
6319 			pdev->stats.tx_i.mcast_en.mcast_pkt.bytes);
6320 	DP_PRINT_STATS("	Dropped: Map Errors = %d",
6321 			pdev->stats.tx_i.mcast_en.dropped_map_error);
6322 	DP_PRINT_STATS("	Dropped: Self Mac = %d",
6323 			pdev->stats.tx_i.mcast_en.dropped_self_mac);
6324 	DP_PRINT_STATS("	Dropped: Send Fail = %d",
6325 			pdev->stats.tx_i.mcast_en.dropped_send_fail);
6326 	DP_PRINT_STATS("	Unicast sent = %d",
6327 			pdev->stats.tx_i.mcast_en.ucast);
6328 	DP_PRINT_STATS("Raw:");
6329 	DP_PRINT_STATS("	Packets = %d",
6330 			pdev->stats.tx_i.raw.raw_pkt.num);
6331 	DP_PRINT_STATS("	Bytes = %llu",
6332 			pdev->stats.tx_i.raw.raw_pkt.bytes);
6333 	DP_PRINT_STATS("	DMA map error = %d",
6334 			pdev->stats.tx_i.raw.dma_map_error);
6335 	DP_PRINT_STATS("Reinjected:");
6336 	DP_PRINT_STATS("	Packets = %d",
6337 			pdev->stats.tx_i.reinject_pkts.num);
6338 	DP_PRINT_STATS("	Bytes = %llu\n",
6339 			pdev->stats.tx_i.reinject_pkts.bytes);
6340 	DP_PRINT_STATS("Inspected:");
6341 	DP_PRINT_STATS("	Packets = %d",
6342 			pdev->stats.tx_i.inspect_pkts.num);
6343 	DP_PRINT_STATS("	Bytes = %llu",
6344 			pdev->stats.tx_i.inspect_pkts.bytes);
6345 	DP_PRINT_STATS("Nawds Multicast:");
6346 	DP_PRINT_STATS("	Packets = %d",
6347 			pdev->stats.tx_i.nawds_mcast.num);
6348 	DP_PRINT_STATS("	Bytes = %llu",
6349 			pdev->stats.tx_i.nawds_mcast.bytes);
6350 	DP_PRINT_STATS("CCE Classified:");
6351 	DP_PRINT_STATS("	CCE Classified Packets: %u",
6352 			pdev->stats.tx_i.cce_classified);
6353 	DP_PRINT_STATS("	RAW CCE Classified Packets: %u",
6354 			pdev->stats.tx_i.cce_classified_raw);
6355 	DP_PRINT_STATS("Mesh stats:");
6356 	DP_PRINT_STATS("	frames to firmware: %u",
6357 			pdev->stats.tx_i.mesh.exception_fw);
6358 	DP_PRINT_STATS("	completions from fw: %u",
6359 			pdev->stats.tx_i.mesh.completion_fw);
6360 	DP_PRINT_STATS("PPDU stats counter");
6361 	for (index = 0; index < CDP_PPDU_STATS_MAX_TAG; index++) {
6362 		DP_PRINT_STATS("	Tag[%d] = %llu", index,
6363 				pdev->stats.ppdu_stats_counter[index]);
6364 	}
6365 
6366 }
6367 
6368 /**
6369  * dp_print_pdev_rx_stats(): Print Pdev level RX stats
6370  * @pdev: DP_PDEV Handle
6371  *
6372  * Return: void
6373  */
6374 static inline void
6375 dp_print_pdev_rx_stats(struct dp_pdev *pdev)
6376 {
6377 	DP_PRINT_STATS("PDEV Rx Stats:\n");
6378 	DP_PRINT_STATS("Received From HW (Per Rx Ring):");
6379 	DP_PRINT_STATS("	Packets = %d %d %d %d",
6380 			pdev->stats.rx.rcvd_reo[0].num,
6381 			pdev->stats.rx.rcvd_reo[1].num,
6382 			pdev->stats.rx.rcvd_reo[2].num,
6383 			pdev->stats.rx.rcvd_reo[3].num);
6384 	DP_PRINT_STATS("	Bytes = %llu %llu %llu %llu",
6385 			pdev->stats.rx.rcvd_reo[0].bytes,
6386 			pdev->stats.rx.rcvd_reo[1].bytes,
6387 			pdev->stats.rx.rcvd_reo[2].bytes,
6388 			pdev->stats.rx.rcvd_reo[3].bytes);
6389 	DP_PRINT_STATS("Replenished:");
6390 	DP_PRINT_STATS("	Packets = %d",
6391 			pdev->stats.replenish.pkts.num);
6392 	DP_PRINT_STATS("	Bytes = %llu",
6393 			pdev->stats.replenish.pkts.bytes);
6394 	DP_PRINT_STATS("	Buffers Added To Freelist = %d",
6395 			pdev->stats.buf_freelist);
6396 	DP_PRINT_STATS("	Low threshold intr = %d",
6397 			pdev->stats.replenish.low_thresh_intrs);
6398 	DP_PRINT_STATS("Dropped:");
6399 	DP_PRINT_STATS("	msdu_not_done = %d",
6400 			pdev->stats.dropped.msdu_not_done);
	DP_PRINT_STATS("	mon_rx_drop = %d",
			pdev->stats.dropped.mon_rx_drop);
	DP_PRINT_STATS("	mec_drop = %d",
		       pdev->stats.rx.mec_drop.num);
6405 	DP_PRINT_STATS("	Bytes = %llu",
6406 		       pdev->stats.rx.mec_drop.bytes);
6407 	DP_PRINT_STATS("Sent To Stack:");
6408 	DP_PRINT_STATS("	Packets = %d",
6409 			pdev->stats.rx.to_stack.num);
6410 	DP_PRINT_STATS("	Bytes = %llu",
6411 			pdev->stats.rx.to_stack.bytes);
6412 	DP_PRINT_STATS("Multicast/Broadcast:");
6413 	DP_PRINT_STATS("	Packets = %d",
6414 			pdev->stats.rx.multicast.num);
6415 	DP_PRINT_STATS("	Bytes = %llu",
6416 			pdev->stats.rx.multicast.bytes);
6417 	DP_PRINT_STATS("Errors:");
	DP_PRINT_STATS("	Rxdma Ring Un-initialized = %d",
6419 			pdev->stats.replenish.rxdma_err);
	DP_PRINT_STATS("	Desc Alloc Failed = %d",
6421 			pdev->stats.err.desc_alloc_fail);
6422 	DP_PRINT_STATS("	IP checksum error = %d",
6423 		       pdev->stats.err.ip_csum_err);
6424 	DP_PRINT_STATS("	TCP/UDP checksum error = %d",
6425 		       pdev->stats.err.tcp_udp_csum_err);
6426 
6427 	/* Get bar_recv_cnt */
6428 	dp_aggregate_pdev_ctrl_frames_stats(pdev);
6429 	DP_PRINT_STATS("BAR Received Count: = %d",
6430 			pdev->stats.rx.bar_recv_cnt);
6431 
6432 }
6433 
6434 /**
6435  * dp_print_pdev_rx_mon_stats(): Print Pdev level RX monitor stats
6436  * @pdev: DP_PDEV Handle
6437  *
6438  * Return: void
6439  */
6440 static inline void
6441 dp_print_pdev_rx_mon_stats(struct dp_pdev *pdev)
6442 {
6443 	struct cdp_pdev_mon_stats *rx_mon_stats;
6444 
6445 	rx_mon_stats = &pdev->rx_mon_stats;
6446 
6447 	DP_PRINT_STATS("PDEV Rx Monitor Stats:\n");
6448 
6449 	dp_rx_mon_print_dbg_ppdu_stats(rx_mon_stats);
6450 
6451 	DP_PRINT_STATS("status_ppdu_done_cnt = %d",
6452 		       rx_mon_stats->status_ppdu_done);
6453 	DP_PRINT_STATS("dest_ppdu_done_cnt = %d",
6454 		       rx_mon_stats->dest_ppdu_done);
6455 	DP_PRINT_STATS("dest_mpdu_done_cnt = %d",
6456 		       rx_mon_stats->dest_mpdu_done);
6457 	DP_PRINT_STATS("dest_mpdu_drop_cnt = %d",
6458 		       rx_mon_stats->dest_mpdu_drop);
6459 	DP_PRINT_STATS("dup_mon_linkdesc_cnt = %d",
6460 		       rx_mon_stats->dup_mon_linkdesc_cnt);
6461 	DP_PRINT_STATS("dup_mon_buf_cnt = %d",
6462 		       rx_mon_stats->dup_mon_buf_cnt);
6463 }
6464 
6465 /**
 * dp_print_soc_tx_stats(): Print SOC level stats
 * @soc: DP_SOC Handle
6468  *
6469  * Return: void
6470  */
6471 static inline void
6472 dp_print_soc_tx_stats(struct dp_soc *soc)
6473 {
6474 	uint8_t desc_pool_id;
6475 	soc->stats.tx.desc_in_use = 0;
6476 
6477 	DP_PRINT_STATS("SOC Tx Stats:\n");
6478 
6479 	for (desc_pool_id = 0;
6480 	     desc_pool_id < wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
6481 	     desc_pool_id++)
6482 		soc->stats.tx.desc_in_use +=
6483 			soc->tx_desc[desc_pool_id].num_allocated;
6484 
6485 	DP_PRINT_STATS("Tx Descriptors In Use = %d",
6486 			soc->stats.tx.desc_in_use);
6487 	DP_PRINT_STATS("Tx Invalid peer:");
6488 	DP_PRINT_STATS("	Packets = %d",
6489 			soc->stats.tx.tx_invalid_peer.num);
6490 	DP_PRINT_STATS("	Bytes = %llu",
6491 			soc->stats.tx.tx_invalid_peer.bytes);
6492 	DP_PRINT_STATS("Packets dropped due to TCL ring full = %d %d %d",
6493 			soc->stats.tx.tcl_ring_full[0],
6494 			soc->stats.tx.tcl_ring_full[1],
6495 			soc->stats.tx.tcl_ring_full[2]);
6496 
6497 }
6498 /**
6499  * dp_print_soc_rx_stats: Print SOC level Rx stats
6500  * @soc: DP_SOC Handle
6501  *
6502  * Return:void
6503  */
6504 static inline void
6505 dp_print_soc_rx_stats(struct dp_soc *soc)
6506 {
6507 	uint32_t i;
6508 	char reo_error[DP_REO_ERR_LENGTH];
6509 	char rxdma_error[DP_RXDMA_ERR_LENGTH];
6510 	uint8_t index = 0;
6511 
6512 	DP_PRINT_STATS("SOC Rx Stats:\n");
6513 	DP_PRINT_STATS("Fragmented packets: %u",
6514 		       soc->stats.rx.rx_frags);
6515 	DP_PRINT_STATS("Reo reinjected packets: %u",
6516 		       soc->stats.rx.reo_reinject);
6517 	DP_PRINT_STATS("Errors:\n");
6518 	DP_PRINT_STATS("Rx Decrypt Errors = %d",
6519 			(soc->stats.rx.err.rxdma_error[HAL_RXDMA_ERR_DECRYPT] +
6520 			soc->stats.rx.err.rxdma_error[HAL_RXDMA_ERR_TKIP_MIC]));
6521 	DP_PRINT_STATS("Invalid RBM = %d",
6522 			soc->stats.rx.err.invalid_rbm);
6523 	DP_PRINT_STATS("Invalid Vdev = %d",
6524 			soc->stats.rx.err.invalid_vdev);
6525 	DP_PRINT_STATS("Invalid Pdev = %d",
6526 			soc->stats.rx.err.invalid_pdev);
6527 	DP_PRINT_STATS("Invalid Peer = %d",
6528 			soc->stats.rx.err.rx_invalid_peer.num);
6529 	DP_PRINT_STATS("HAL Ring Access Fail = %d",
6530 			soc->stats.rx.err.hal_ring_access_fail);
6531 	DP_PRINT_STATS("RX frags: %d", soc->stats.rx.rx_frags);
6532 	DP_PRINT_STATS("RX frag wait: %d", soc->stats.rx.rx_frag_wait);
6533 	DP_PRINT_STATS("RX frag err: %d", soc->stats.rx.rx_frag_err);
6534 	DP_PRINT_STATS("RX HP out_of_sync: %d", soc->stats.rx.hp_oos);
6535 	DP_PRINT_STATS("RX DUP DESC: %d",
6536 		       soc->stats.rx.err.hal_reo_dest_dup);
6537 	DP_PRINT_STATS("RX REL DUP DESC: %d",
6538 		       soc->stats.rx.err.hal_wbm_rel_dup);
6539 
6540 	for (i = 0; i < HAL_RXDMA_ERR_MAX; i++) {
6541 		index += qdf_snprint(&rxdma_error[index],
6542 				DP_RXDMA_ERR_LENGTH - index,
6543 				" %d", soc->stats.rx.err.rxdma_error[i]);
6544 	}
6545 	DP_PRINT_STATS("RXDMA Error (0-31):%s",
6546 			rxdma_error);
6547 
6548 	index = 0;
6549 	for (i = 0; i < HAL_REO_ERR_MAX; i++) {
6550 		index += qdf_snprint(&reo_error[index],
6551 				DP_REO_ERR_LENGTH - index,
6552 				" %d", soc->stats.rx.err.reo_error[i]);
6553 	}
6554 	DP_PRINT_STATS("REO Error(0-14):%s",
6555 			reo_error);
6556 }
6557 
6558 /**
 * dp_srng_get_str_from_hal_ring_type() - Return string name for a ring
6560  * @ring_type: Ring
6561  *
6562  * Return: char const pointer
6563  */
6564 static inline const
6565 char *dp_srng_get_str_from_hal_ring_type(enum hal_ring_type ring_type)
6566 {
6567 	switch (ring_type) {
6568 	case REO_DST:
6569 		return "Reo_dst";
6570 	case REO_EXCEPTION:
6571 		return "Reo_exception";
6572 	case REO_CMD:
6573 		return "Reo_cmd";
6574 	case REO_REINJECT:
6575 		return "Reo_reinject";
6576 	case REO_STATUS:
6577 		return "Reo_status";
6578 	case WBM2SW_RELEASE:
6579 		return "wbm2sw_release";
6580 	case TCL_DATA:
6581 		return "tcl_data";
6582 	case TCL_CMD:
6583 		return "tcl_cmd";
6584 	case TCL_STATUS:
6585 		return "tcl_status";
6586 	case SW2WBM_RELEASE:
6587 		return "sw2wbm_release";
6588 	case RXDMA_BUF:
6589 		return "Rxdma_buf";
6590 	case RXDMA_DST:
6591 		return "Rxdma_dst";
6592 	case RXDMA_MONITOR_BUF:
6593 		return "Rxdma_monitor_buf";
6594 	case RXDMA_MONITOR_DESC:
6595 		return "Rxdma_monitor_desc";
6596 	case RXDMA_MONITOR_STATUS:
6597 		return "Rxdma_monitor_status";
6598 	default:
6599 		dp_err("Invalid ring type");
6600 		break;
6601 	}
6602 	return "Invalid";
6603 }
6604 
6605 /**
6606  * dp_print_ring_stat_from_hal(): Print hal level ring stats
6607  * @soc: DP_SOC handle
6608  * @srng: DP_SRNG handle
 * @ring_type: srng src/dst ring
6611  *
6612  * Return: void
6613  */
6614 static void
dp_print_ring_stat_from_hal(struct dp_soc *soc, struct dp_srng *srng,
6616 			    enum hal_ring_type ring_type)
6617 {
6618 	uint32_t tailp;
6619 	uint32_t headp;
6620 	int32_t hw_headp = -1;
6621 	int32_t hw_tailp = -1;
6622 	const char *ring_name;
6623 	struct hal_soc *hal_soc;
6624 
6625 	if (soc && srng && srng->hal_srng) {
6626 		hal_soc = (struct hal_soc *)soc->hal_soc;
6627 		ring_name = dp_srng_get_str_from_hal_ring_type(ring_type);
6628 
6629 		hal_get_sw_hptp(soc->hal_soc, srng->hal_srng, &tailp, &headp);
6630 
6631 		DP_PRINT_STATS("%s:SW:Head pointer = %d Tail Pointer = %d\n",
6632 			       ring_name, headp, tailp);
6633 
6634 		hal_get_hw_hptp(hal_soc, srng->hal_srng, &hw_headp,
6635 				&hw_tailp, ring_type);
6636 
6637 		DP_PRINT_STATS("%s:HW:Head pointer = %d Tail Pointer = %d\n",
6638 			       ring_name, hw_headp, hw_tailp);
6639 	}
6640 
6641 }
6642 
6643 /**
6644  * dp_print_mon_ring_stats_from_hal() - Print stat for monitor rings based
6645  *					on target
6646  * @pdev: physical device handle
6647  * @mac_id: mac id
6648  *
6649  * Return: void
6650  */
6651 static inline
6652 void dp_print_mon_ring_stat_from_hal(struct dp_pdev *pdev, uint8_t mac_id)
6653 {
6654 	if (pdev->soc->wlan_cfg_ctx->rxdma1_enable) {
6655 		dp_print_ring_stat_from_hal(pdev->soc,
6656 					    &pdev->rxdma_mon_buf_ring[mac_id],
6657 					    RXDMA_MONITOR_BUF);
6658 		dp_print_ring_stat_from_hal(pdev->soc,
6659 					    &pdev->rxdma_mon_dst_ring[mac_id],
6660 					    RXDMA_MONITOR_DST);
6661 		dp_print_ring_stat_from_hal(pdev->soc,
6662 					    &pdev->rxdma_mon_desc_ring[mac_id],
6663 					    RXDMA_MONITOR_DESC);
6664 	}
6665 
6666 	dp_print_ring_stat_from_hal(pdev->soc,
6667 				    &pdev->rxdma_mon_status_ring[mac_id],
6668 				    RXDMA_MONITOR_STATUS);
6669 }
6670 
6671 /**
6672  * dp_print_ring_stats(): Print tail and head pointer
6673  * @pdev: DP_PDEV handle
6674  *
6675  * Return:void
6676  */
6677 static inline void
6678 dp_print_ring_stats(struct dp_pdev *pdev)
6679 {
6680 	uint32_t i;
6681 	int mac_id;
6682 
6683 	dp_print_ring_stat_from_hal(pdev->soc,
6684 				    &pdev->soc->reo_exception_ring,
6685 				    REO_EXCEPTION);
6686 	dp_print_ring_stat_from_hal(pdev->soc,
6687 				    &pdev->soc->reo_reinject_ring,
6688 				    REO_REINJECT);
6689 	dp_print_ring_stat_from_hal(pdev->soc,
6690 				    &pdev->soc->reo_cmd_ring,
6691 				    REO_CMD);
6692 	dp_print_ring_stat_from_hal(pdev->soc,
6693 				    &pdev->soc->reo_status_ring,
6694 				    REO_STATUS);
6695 	dp_print_ring_stat_from_hal(pdev->soc,
6696 				    &pdev->soc->rx_rel_ring,
6697 				    WBM2SW_RELEASE);
6698 	dp_print_ring_stat_from_hal(pdev->soc,
6699 				    &pdev->soc->tcl_cmd_ring,
6700 				    TCL_CMD);
6701 	dp_print_ring_stat_from_hal(pdev->soc,
6702 				    &pdev->soc->tcl_status_ring,
6703 				    TCL_STATUS);
6704 	dp_print_ring_stat_from_hal(pdev->soc,
6705 				    &pdev->soc->wbm_desc_rel_ring,
6706 				    SW2WBM_RELEASE);
6707 	for (i = 0; i < MAX_REO_DEST_RINGS; i++)
6708 		dp_print_ring_stat_from_hal(pdev->soc,
6709 					    &pdev->soc->reo_dest_ring[i],
6710 					    REO_DST);
6711 
6712 	for (i = 0; i < pdev->soc->num_tcl_data_rings; i++)
6713 		dp_print_ring_stat_from_hal(pdev->soc,
6714 					    &pdev->soc->tcl_data_ring[i],
6715 					    TCL_DATA);
6716 	for (i = 0; i < MAX_TCL_DATA_RINGS; i++)
6717 		dp_print_ring_stat_from_hal(pdev->soc,
6718 					    &pdev->soc->tx_comp_ring[i],
6719 					    WBM2SW_RELEASE);
6720 
6721 	dp_print_ring_stat_from_hal(pdev->soc,
6722 				    &pdev->rx_refill_buf_ring,
6723 				    RXDMA_BUF);
6724 
6725 	dp_print_ring_stat_from_hal(pdev->soc,
6726 				    &pdev->rx_refill_buf_ring2,
6727 				    RXDMA_BUF);
6728 
6729 	for (i = 0; i < MAX_RX_MAC_RINGS; i++)
6730 		dp_print_ring_stat_from_hal(pdev->soc,
6731 					    &pdev->rx_mac_buf_ring[i],
6732 					    RXDMA_BUF);
6733 
6734 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++)
6735 		dp_print_mon_ring_stat_from_hal(pdev, mac_id);
6736 
6737 	for (i = 0; i < NUM_RXDMA_RINGS_PER_PDEV; i++)
6738 		dp_print_ring_stat_from_hal(pdev->soc,
6739 					    &pdev->rxdma_err_dst_ring[i],
6740 					    RXDMA_DST);
6741 
6742 }
6743 
6744 /**
6745  * dp_txrx_host_stats_clr(): Reinitialize the txrx stats
6746  * @vdev: DP_VDEV handle
6747  *
6748  * Return:void
6749  */
6750 static inline void
6751 dp_txrx_host_stats_clr(struct dp_vdev *vdev)
6752 {
6753 	struct dp_peer *peer = NULL;
6754 
6755 	if (!vdev || !vdev->pdev)
6756 		return;
6757 
6758 	DP_STATS_CLR(vdev->pdev);
6759 	DP_STATS_CLR(vdev->pdev->soc);
6760 	DP_STATS_CLR(vdev);
6761 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
6762 		if (!peer)
6763 			return;
6764 		DP_STATS_CLR(peer);
6765 
6766 #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
6767 		dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, vdev->pdev->soc,
6768 				     &peer->stats,  peer->peer_ids[0],
6769 				     UPDATE_PEER_STATS, vdev->pdev->pdev_id);
6770 #endif
6771 	}
6772 
6773 #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
6774 	dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, vdev->pdev->soc,
6775 			     &vdev->stats,  vdev->vdev_id,
6776 			     UPDATE_VDEV_STATS, vdev->pdev->pdev_id);
6777 #endif
6778 }
6779 
6780 /**
6781  * dp_print_common_rates_info(): Print common rate for tx or rx
6782  * @pkt_type_array: rate type array contains rate info
6783  *
6784  * Return:void
6785  */
6786 static inline void
6787 dp_print_common_rates_info(struct cdp_pkt_type *pkt_type_array)
6788 {
6789 	uint8_t mcs, pkt_type;
6790 
6791 	for (pkt_type = 0; pkt_type < DOT11_MAX; pkt_type++) {
6792 		for (mcs = 0; mcs < MAX_MCS; mcs++) {
6793 			if (!dp_rate_string[pkt_type][mcs].valid)
6794 				continue;
6795 
6796 			DP_PRINT_STATS("	%s = %d",
6797 				       dp_rate_string[pkt_type][mcs].mcs_type,
6798 				       pkt_type_array[pkt_type].mcs_count[mcs]);
6799 		}
6800 
6801 		DP_PRINT_STATS("\n");
6802 	}
6803 }
6804 
6805 /**
6806  * dp_print_rx_rates(): Print Rx rate stats
6807  * @vdev: DP_VDEV handle
6808  *
6809  * Return:void
6810  */
6811 static inline void
6812 dp_print_rx_rates(struct dp_vdev *vdev)
6813 {
6814 	struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;
6815 	uint8_t i;
6816 	uint8_t index = 0;
6817 	char nss[DP_NSS_LENGTH];
6818 
6819 	DP_PRINT_STATS("Rx Rate Info:\n");
6820 	dp_print_common_rates_info(pdev->stats.rx.pkt_type);
6821 
6822 
6823 	index = 0;
6824 	for (i = 0; i < SS_COUNT; i++) {
6825 		index += qdf_snprint(&nss[index], DP_NSS_LENGTH - index,
6826 				" %d", pdev->stats.rx.nss[i]);
6827 	}
6828 	DP_PRINT_STATS("NSS(1-8) = %s",
6829 			nss);
6830 
6831 	DP_PRINT_STATS("SGI ="
6832 			" 0.8us %d,"
6833 			" 0.4us %d,"
6834 			" 1.6us %d,"
6835 			" 3.2us %d,",
6836 			pdev->stats.rx.sgi_count[0],
6837 			pdev->stats.rx.sgi_count[1],
6838 			pdev->stats.rx.sgi_count[2],
6839 			pdev->stats.rx.sgi_count[3]);
6840 	DP_PRINT_STATS("BW Counts = 20MHZ %d, 40MHZ %d, 80MHZ %d, 160MHZ %d",
6841 			pdev->stats.rx.bw[0], pdev->stats.rx.bw[1],
6842 			pdev->stats.rx.bw[2], pdev->stats.rx.bw[3]);
6843 	DP_PRINT_STATS("Reception Type ="
6844 			" SU: %d,"
6845 			" MU_MIMO:%d,"
6846 			" MU_OFDMA:%d,"
6847 			" MU_OFDMA_MIMO:%d\n",
6848 			pdev->stats.rx.reception_type[0],
6849 			pdev->stats.rx.reception_type[1],
6850 			pdev->stats.rx.reception_type[2],
6851 			pdev->stats.rx.reception_type[3]);
6852 	DP_PRINT_STATS("Aggregation:\n");
6853 	DP_PRINT_STATS("Number of Msdu's Part of Ampdus = %d",
6854 			pdev->stats.rx.ampdu_cnt);
	DP_PRINT_STATS("Number of Msdu's With No Mpdu Level Aggregation: %d",
6856 			pdev->stats.rx.non_ampdu_cnt);
6857 	DP_PRINT_STATS("Number of Msdu's Part of Amsdu: %d",
6858 			pdev->stats.rx.amsdu_cnt);
6859 	DP_PRINT_STATS("Number of Msdu's With No Msdu Level Aggregation: %d",
6860 			pdev->stats.rx.non_amsdu_cnt);
6861 }
6862 
6863 /**
6864  * dp_print_tx_rates(): Print tx rates
6865  * @vdev: DP_VDEV handle
6866  *
6867  * Return:void
6868  */
6869 static inline void
6870 dp_print_tx_rates(struct dp_vdev *vdev)
6871 {
6872 	struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;
6873 	uint8_t index;
6874 	char nss[DP_NSS_LENGTH];
6875 	int nss_index;
6876 
6877 	DP_PRINT_STATS("Tx Rate Info:\n");
6878 	dp_print_common_rates_info(pdev->stats.tx.pkt_type);
6879 
6880 	DP_PRINT_STATS("SGI ="
6881 			" 0.8us %d"
6882 			" 0.4us %d"
6883 			" 1.6us %d"
6884 			" 3.2us %d",
6885 			pdev->stats.tx.sgi_count[0],
6886 			pdev->stats.tx.sgi_count[1],
6887 			pdev->stats.tx.sgi_count[2],
6888 			pdev->stats.tx.sgi_count[3]);
6889 
6890 	DP_PRINT_STATS("BW Counts = 20MHZ %d, 40MHZ %d, 80MHZ %d, 160MHZ %d",
6891 			pdev->stats.tx.bw[0], pdev->stats.tx.bw[1],
6892 			pdev->stats.tx.bw[2], pdev->stats.tx.bw[3]);
6893 
6894 	index = 0;
6895 	for (nss_index = 0; nss_index < SS_COUNT; nss_index++) {
6896 		index += qdf_snprint(&nss[index], DP_NSS_LENGTH - index,
6897 				" %d", pdev->stats.tx.nss[nss_index]);
6898 	}
6899 
6900 	DP_PRINT_STATS("NSS(1-8) = %s", nss);
6901 	DP_PRINT_STATS("OFDMA = %d", pdev->stats.tx.ofdma);
6902 	DP_PRINT_STATS("STBC = %d", pdev->stats.tx.stbc);
6903 	DP_PRINT_STATS("LDPC = %d", pdev->stats.tx.ldpc);
6904 	DP_PRINT_STATS("Retries = %d", pdev->stats.tx.retries);
6905 	DP_PRINT_STATS("Last ack rssi = %d\n", pdev->stats.tx.last_ack_rssi);
6906 
6907 	DP_PRINT_STATS("Aggregation:\n");
6908 	DP_PRINT_STATS("Number of Msdu's Part of Amsdu = %d",
6909 			pdev->stats.tx.amsdu_cnt);
6910 	DP_PRINT_STATS("Number of Msdu's With No Msdu Level Aggregation = %d",
6911 			pdev->stats.tx.non_amsdu_cnt);
6912 }
6913 
6914 /**
6915  * dp_print_peer_stats():print peer stats
6916  * @peer: DP_PEER handle
6917  *
6918  * return void
6919  */
6920 static inline void dp_print_peer_stats(struct dp_peer *peer)
6921 {
6922 	uint8_t i;
6923 	uint32_t index;
6924 	char nss[DP_NSS_LENGTH];
6925 	DP_PRINT_STATS("Node Tx Stats:\n");
6926 	DP_PRINT_STATS("Total Packet Completions = %d",
6927 			peer->stats.tx.comp_pkt.num);
6928 	DP_PRINT_STATS("Total Bytes Completions = %llu",
6929 			peer->stats.tx.comp_pkt.bytes);
6930 	DP_PRINT_STATS("Success Packets = %d",
6931 			peer->stats.tx.tx_success.num);
6932 	DP_PRINT_STATS("Success Bytes = %llu",
6933 			peer->stats.tx.tx_success.bytes);
6934 	DP_PRINT_STATS("Unicast Success Packets = %d",
6935 			peer->stats.tx.ucast.num);
6936 	DP_PRINT_STATS("Unicast Success Bytes = %llu",
6937 			peer->stats.tx.ucast.bytes);
6938 	DP_PRINT_STATS("Multicast Success Packets = %d",
6939 			peer->stats.tx.mcast.num);
6940 	DP_PRINT_STATS("Multicast Success Bytes = %llu",
6941 			peer->stats.tx.mcast.bytes);
6942 	DP_PRINT_STATS("Broadcast Success Packets = %d",
6943 			peer->stats.tx.bcast.num);
6944 	DP_PRINT_STATS("Broadcast Success Bytes = %llu",
6945 			peer->stats.tx.bcast.bytes);
6946 	DP_PRINT_STATS("Packets Failed = %d",
6947 			peer->stats.tx.tx_failed);
6948 	DP_PRINT_STATS("Packets In OFDMA = %d",
6949 			peer->stats.tx.ofdma);
6950 	DP_PRINT_STATS("Packets In STBC = %d",
6951 			peer->stats.tx.stbc);
6952 	DP_PRINT_STATS("Packets In LDPC = %d",
6953 			peer->stats.tx.ldpc);
6954 	DP_PRINT_STATS("Packet Retries = %d",
6955 			peer->stats.tx.retries);
6956 	DP_PRINT_STATS("MSDU's Part of AMSDU = %d",
6957 			peer->stats.tx.amsdu_cnt);
6958 	DP_PRINT_STATS("Last Packet RSSI = %d",
6959 			peer->stats.tx.last_ack_rssi);
6960 	DP_PRINT_STATS("Dropped At FW: Removed Pkts = %u",
6961 		       peer->stats.tx.dropped.fw_rem.num);
6962 	DP_PRINT_STATS("Dropped At FW: Removed bytes = %llu",
6963 		       peer->stats.tx.dropped.fw_rem.bytes);
6964 	DP_PRINT_STATS("Dropped At FW: Removed transmitted = %d",
6965 			peer->stats.tx.dropped.fw_rem_tx);
6966 	DP_PRINT_STATS("Dropped At FW: Removed Untransmitted = %d",
6967 			peer->stats.tx.dropped.fw_rem_notx);
6968 	DP_PRINT_STATS("Dropped : Age Out = %d",
6969 			peer->stats.tx.dropped.age_out);
6970 	DP_PRINT_STATS("NAWDS : ");
6971 	DP_PRINT_STATS("	Nawds multicast Drop Tx Packet = %d",
6972 			peer->stats.tx.nawds_mcast_drop);
6973 	DP_PRINT_STATS("	Nawds multicast  Tx Packet Count = %d",
6974 			peer->stats.tx.nawds_mcast.num);
6975 	DP_PRINT_STATS("	Nawds multicast  Tx Packet Bytes = %llu",
6976 			peer->stats.tx.nawds_mcast.bytes);
6977 
6978 	DP_PRINT_STATS("Rate Info:");
6979 	dp_print_common_rates_info(peer->stats.tx.pkt_type);
6980 
6981 
6982 	DP_PRINT_STATS("SGI = "
6983 			" 0.8us %d"
6984 			" 0.4us %d"
6985 			" 1.6us %d"
6986 			" 3.2us %d",
6987 			peer->stats.tx.sgi_count[0],
6988 			peer->stats.tx.sgi_count[1],
6989 			peer->stats.tx.sgi_count[2],
6990 			peer->stats.tx.sgi_count[3]);
6991 	DP_PRINT_STATS("Excess Retries per AC ");
6992 	DP_PRINT_STATS("	 Best effort = %d",
6993 			peer->stats.tx.excess_retries_per_ac[0]);
6994 	DP_PRINT_STATS("	 Background= %d",
6995 			peer->stats.tx.excess_retries_per_ac[1]);
6996 	DP_PRINT_STATS("	 Video = %d",
6997 			peer->stats.tx.excess_retries_per_ac[2]);
6998 	DP_PRINT_STATS("	 Voice = %d",
6999 			peer->stats.tx.excess_retries_per_ac[3]);
7000 	DP_PRINT_STATS("BW Counts = 20MHZ %d 40MHZ %d 80MHZ %d 160MHZ %d\n",
7001 			peer->stats.tx.bw[0], peer->stats.tx.bw[1],
7002 			peer->stats.tx.bw[2], peer->stats.tx.bw[3]);
7003 
7004 	index = 0;
7005 	for (i = 0; i < SS_COUNT; i++) {
7006 		index += qdf_snprint(&nss[index], DP_NSS_LENGTH - index,
7007 				" %d", peer->stats.tx.nss[i]);
7008 	}
7009 	DP_PRINT_STATS("NSS(1-8) = %s",
7010 			nss);
7011 
7012 	DP_PRINT_STATS("Aggregation:");
7013 	DP_PRINT_STATS("	Number of Msdu's Part of Amsdu = %d",
7014 			peer->stats.tx.amsdu_cnt);
7015 	DP_PRINT_STATS("	Number of Msdu's With No Msdu Level Aggregation = %d\n",
7016 			peer->stats.tx.non_amsdu_cnt);
7017 
	DP_PRINT_STATS("Bytes and Packets transmitted in last one sec:");
7019 	DP_PRINT_STATS("	Bytes transmitted in last sec: %d",
7020 		       peer->stats.tx.tx_byte_rate);
7021 	DP_PRINT_STATS("	Data transmitted in last sec: %d",
7022 		       peer->stats.tx.tx_data_rate);
7023 
7024 	DP_PRINT_STATS("Node Rx Stats:");
7025 	DP_PRINT_STATS("Packets Sent To Stack = %d",
7026 			peer->stats.rx.to_stack.num);
7027 	DP_PRINT_STATS("Bytes Sent To Stack = %llu",
7028 			peer->stats.rx.to_stack.bytes);
7029 	for (i = 0; i <  CDP_MAX_RX_RINGS; i++) {
7030 		DP_PRINT_STATS("Ring Id = %d", i);
7031 		DP_PRINT_STATS("	Packets Received = %d",
7032 				peer->stats.rx.rcvd_reo[i].num);
7033 		DP_PRINT_STATS("	Bytes Received = %llu",
7034 				peer->stats.rx.rcvd_reo[i].bytes);
7035 	}
7036 	DP_PRINT_STATS("Multicast Packets Received = %d",
7037 			peer->stats.rx.multicast.num);
7038 	DP_PRINT_STATS("Multicast Bytes Received = %llu",
7039 			peer->stats.rx.multicast.bytes);
7040 	DP_PRINT_STATS("Broadcast Packets Received = %d",
7041 			peer->stats.rx.bcast.num);
7042 	DP_PRINT_STATS("Broadcast Bytes Received = %llu",
7043 			peer->stats.rx.bcast.bytes);
7044 	DP_PRINT_STATS("Intra BSS Packets Received = %d",
7045 			peer->stats.rx.intra_bss.pkts.num);
7046 	DP_PRINT_STATS("Intra BSS Bytes Received = %llu",
7047 			peer->stats.rx.intra_bss.pkts.bytes);
7048 	DP_PRINT_STATS("Raw Packets Received = %d",
7049 			peer->stats.rx.raw.num);
7050 	DP_PRINT_STATS("Raw Bytes Received = %llu",
7051 			peer->stats.rx.raw.bytes);
7052 	DP_PRINT_STATS("Errors: MIC Errors = %d",
7053 			peer->stats.rx.err.mic_err);
	DP_PRINT_STATS("Errors: Decryption Errors = %d",
			peer->stats.rx.err.decrypt_err);
	DP_PRINT_STATS("Msdu's Received With No Mpdu Level Aggregation = %d",
			peer->stats.rx.non_ampdu_cnt);
	DP_PRINT_STATS("Msdu's Received As Part of Ampdu = %d",
			peer->stats.rx.ampdu_cnt);
	DP_PRINT_STATS("Msdu's Received Not Part of Amsdu's = %d",
			peer->stats.rx.non_amsdu_cnt);
	DP_PRINT_STATS("Msdu's Received As Part of Amsdu = %d",
			peer->stats.rx.amsdu_cnt);
7064 	DP_PRINT_STATS("NAWDS : ");
7065 	DP_PRINT_STATS("	Nawds multicast Drop Rx Packet = %d",
7066 			peer->stats.rx.nawds_mcast_drop);
7067 	DP_PRINT_STATS("SGI ="
7068 			" 0.8us %d"
7069 			" 0.4us %d"
7070 			" 1.6us %d"
7071 			" 3.2us %d",
7072 			peer->stats.rx.sgi_count[0],
7073 			peer->stats.rx.sgi_count[1],
7074 			peer->stats.rx.sgi_count[2],
7075 			peer->stats.rx.sgi_count[3]);
7076 	DP_PRINT_STATS("BW Counts = 20MHZ %d 40MHZ %d 80MHZ %d 160MHZ %d",
7077 			peer->stats.rx.bw[0], peer->stats.rx.bw[1],
7078 			peer->stats.rx.bw[2], peer->stats.rx.bw[3]);
7079 	DP_PRINT_STATS("Reception Type ="
7080 			" SU %d,"
7081 			" MU_MIMO %d,"
7082 			" MU_OFDMA %d,"
7083 			" MU_OFDMA_MIMO %d",
7084 			peer->stats.rx.reception_type[0],
7085 			peer->stats.rx.reception_type[1],
7086 			peer->stats.rx.reception_type[2],
7087 			peer->stats.rx.reception_type[3]);
7088 
7089 	dp_print_common_rates_info(peer->stats.rx.pkt_type);
7090 
7091 	index = 0;
7092 	for (i = 0; i < SS_COUNT; i++) {
7093 		index += qdf_snprint(&nss[index], DP_NSS_LENGTH - index,
7094 				" %d", peer->stats.rx.nss[i]);
7095 	}
7096 	DP_PRINT_STATS("NSS(1-8) = %s",
7097 			nss);
7098 
7099 	DP_PRINT_STATS("Aggregation:");
7100 	DP_PRINT_STATS("	Msdu's Part of Ampdu = %d",
7101 			peer->stats.rx.ampdu_cnt);
7102 	DP_PRINT_STATS("	Msdu's With No Mpdu Level Aggregation = %d",
7103 			peer->stats.rx.non_ampdu_cnt);
7104 	DP_PRINT_STATS("	Msdu's Part of Amsdu = %d",
7105 			peer->stats.rx.amsdu_cnt);
7106 	DP_PRINT_STATS("	Msdu's With No Msdu Level Aggregation = %d",
7107 			peer->stats.rx.non_amsdu_cnt);
7108 
7109 	DP_PRINT_STATS("Bytes and Packets received in last one sec:");
7110 	DP_PRINT_STATS("	Bytes received in last sec: %d",
7111 		       peer->stats.rx.rx_byte_rate);
7112 	DP_PRINT_STATS("	Data received in last sec: %d",
7113 		       peer->stats.rx.rx_data_rate);
7114 }
7115 
7116 /*
7117  * dp_get_host_peer_stats()- function to print peer stats
7118  * @pdev_handle: DP_PDEV handle
7119  * @mac_addr: mac address of the peer
7120  *
7121  * Return: void
7122  */
7123 static void
7124 dp_get_host_peer_stats(struct cdp_pdev *pdev_handle, char *mac_addr)
7125 {
7126 	struct dp_peer *peer;
7127 	uint8_t local_id;
7128 
7129 	if (!mac_addr) {
7130 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
7131 			  "Invalid MAC address\n");
7132 		return;
7133 	}
7134 
7135 	peer = (struct dp_peer *)dp_find_peer_by_addr(pdev_handle, mac_addr,
7136 			&local_id);
7137 
7138 	if (!peer) {
7139 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
7140 			  "%s: Invalid peer\n", __func__);
7141 		return;
7142 	}
7143 
7144 	dp_print_peer_stats(peer);
7145 	dp_peer_rxtid_stats(peer, dp_rx_tid_stats_cb, NULL);
7146 }
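
/*
 * Usage sketch (illustrative only, not wired into any cdp ops table):
 * dumping host-side stats for a peer given its raw MAC bytes. The helper
 * name, the handle and the MAC value are hypothetical, supplied here
 * purely for demonstration.
 */
static inline void
dp_example_dump_peer_stats(struct cdp_pdev *pdev_handle)
{
	/* hypothetical peer MAC, purely for illustration */
	char peer_mac[QDF_MAC_ADDR_SIZE] = {0x00, 0x03, 0x7f, 0x01, 0x02, 0x03};

	dp_get_host_peer_stats(pdev_handle, peer_mac);
}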
7147 
7148 /**
7149  * dp_print_soc_cfg_params()- Dump soc wlan config parameters
 * @soc: Soc handle
7151  *
7152  * Return: void
7153  */
7154 static void
7155 dp_print_soc_cfg_params(struct dp_soc *soc)
7156 {
7157 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
7158 	uint8_t index = 0, i = 0;
7159 	char ring_mask[DP_MAX_INT_CONTEXTS_STRING_LENGTH];
7160 	int num_of_int_contexts;
7161 
7162 	if (!soc) {
7163 		dp_err("Context is null");
7164 		return;
7165 	}
7166 
7167 	soc_cfg_ctx = soc->wlan_cfg_ctx;
7168 
7169 	if (!soc_cfg_ctx) {
7170 		dp_err("Context is null");
7171 		return;
7172 	}
7173 
7174 	num_of_int_contexts =
7175 			wlan_cfg_get_num_contexts(soc_cfg_ctx);
7176 
7177 	DP_TRACE_STATS(DEBUG, "No. of interrupt contexts: %u",
7178 		       soc_cfg_ctx->num_int_ctxts);
7179 	DP_TRACE_STATS(DEBUG, "Max clients: %u",
7180 		       soc_cfg_ctx->max_clients);
7181 	DP_TRACE_STATS(DEBUG, "Max alloc size: %u ",
7182 		       soc_cfg_ctx->max_alloc_size);
7183 	DP_TRACE_STATS(DEBUG, "Per pdev tx ring: %u ",
7184 		       soc_cfg_ctx->per_pdev_tx_ring);
7185 	DP_TRACE_STATS(DEBUG, "Num tcl data rings: %u ",
7186 		       soc_cfg_ctx->num_tcl_data_rings);
7187 	DP_TRACE_STATS(DEBUG, "Per pdev rx ring: %u ",
7188 		       soc_cfg_ctx->per_pdev_rx_ring);
7189 	DP_TRACE_STATS(DEBUG, "Per pdev lmac ring: %u ",
7190 		       soc_cfg_ctx->per_pdev_lmac_ring);
7191 	DP_TRACE_STATS(DEBUG, "Num of reo dest rings: %u ",
7192 		       soc_cfg_ctx->num_reo_dest_rings);
7193 	DP_TRACE_STATS(DEBUG, "Num tx desc pool: %u ",
7194 		       soc_cfg_ctx->num_tx_desc_pool);
7195 	DP_TRACE_STATS(DEBUG, "Num tx ext desc pool: %u ",
7196 		       soc_cfg_ctx->num_tx_ext_desc_pool);
7197 	DP_TRACE_STATS(DEBUG, "Num tx desc: %u ",
7198 		       soc_cfg_ctx->num_tx_desc);
7199 	DP_TRACE_STATS(DEBUG, "Num tx ext desc: %u ",
7200 		       soc_cfg_ctx->num_tx_ext_desc);
7201 	DP_TRACE_STATS(DEBUG, "Htt packet type: %u ",
7202 		       soc_cfg_ctx->htt_packet_type);
7203 	DP_TRACE_STATS(DEBUG, "Max peer_ids: %u ",
7204 		       soc_cfg_ctx->max_peer_id);
7205 	DP_TRACE_STATS(DEBUG, "Tx ring size: %u ",
7206 		       soc_cfg_ctx->tx_ring_size);
7207 	DP_TRACE_STATS(DEBUG, "Tx comp ring size: %u ",
7208 		       soc_cfg_ctx->tx_comp_ring_size);
7209 	DP_TRACE_STATS(DEBUG, "Tx comp ring size nss: %u ",
7210 		       soc_cfg_ctx->tx_comp_ring_size_nss);
7211 	DP_TRACE_STATS(DEBUG, "Int batch threshold tx: %u ",
7212 		       soc_cfg_ctx->int_batch_threshold_tx);
7213 	DP_TRACE_STATS(DEBUG, "Int timer threshold tx: %u ",
7214 		       soc_cfg_ctx->int_timer_threshold_tx);
7215 	DP_TRACE_STATS(DEBUG, "Int batch threshold rx: %u ",
7216 		       soc_cfg_ctx->int_batch_threshold_rx);
7217 	DP_TRACE_STATS(DEBUG, "Int timer threshold rx: %u ",
7218 		       soc_cfg_ctx->int_timer_threshold_rx);
7219 	DP_TRACE_STATS(DEBUG, "Int batch threshold other: %u ",
7220 		       soc_cfg_ctx->int_batch_threshold_other);
7221 	DP_TRACE_STATS(DEBUG, "Int timer threshold other: %u ",
7222 		       soc_cfg_ctx->int_timer_threshold_other);
7223 
7224 	for (i = 0; i < num_of_int_contexts; i++) {
7225 		index += qdf_snprint(&ring_mask[index],
7226 				     DP_MAX_INT_CONTEXTS_STRING_LENGTH - index,
7227 				     " %d",
7228 				     soc_cfg_ctx->int_tx_ring_mask[i]);
7229 	}
7230 
7231 	DP_TRACE_STATS(DEBUG, "Tx ring mask (0-%d):%s",
7232 		       num_of_int_contexts, ring_mask);
7233 
7234 	index = 0;
7235 	for (i = 0; i < num_of_int_contexts; i++) {
7236 		index += qdf_snprint(&ring_mask[index],
7237 				     DP_MAX_INT_CONTEXTS_STRING_LENGTH - index,
7238 				     " %d",
7239 				     soc_cfg_ctx->int_rx_ring_mask[i]);
7240 	}
7241 
7242 	DP_TRACE_STATS(DEBUG, "Rx ring mask (0-%d):%s",
7243 		       num_of_int_contexts, ring_mask);
7244 
7245 	index = 0;
7246 	for (i = 0; i < num_of_int_contexts; i++) {
7247 		index += qdf_snprint(&ring_mask[index],
7248 				     DP_MAX_INT_CONTEXTS_STRING_LENGTH - index,
7249 				     " %d",
7250 				     soc_cfg_ctx->int_rx_mon_ring_mask[i]);
7251 	}
7252 
7253 	DP_TRACE_STATS(DEBUG, "Rx mon ring mask (0-%d):%s",
7254 		       num_of_int_contexts, ring_mask);
7255 
7256 	index = 0;
7257 	for (i = 0; i < num_of_int_contexts; i++) {
7258 		index += qdf_snprint(&ring_mask[index],
7259 				     DP_MAX_INT_CONTEXTS_STRING_LENGTH - index,
7260 				     " %d",
7261 				     soc_cfg_ctx->int_rx_err_ring_mask[i]);
7262 	}
7263 
7264 	DP_TRACE_STATS(DEBUG, "Rx err ring mask (0-%d):%s",
7265 		       num_of_int_contexts, ring_mask);
7266 
7267 	index = 0;
7268 	for (i = 0; i < num_of_int_contexts; i++) {
7269 		index += qdf_snprint(&ring_mask[index],
7270 				     DP_MAX_INT_CONTEXTS_STRING_LENGTH - index,
7271 				     " %d",
7272 				     soc_cfg_ctx->int_rx_wbm_rel_ring_mask[i]);
7273 	}
7274 
7275 	DP_TRACE_STATS(DEBUG, "Rx wbm rel ring mask (0-%d):%s",
7276 		       num_of_int_contexts, ring_mask);
7277 
7278 	index = 0;
7279 	for (i = 0; i < num_of_int_contexts; i++) {
7280 		index += qdf_snprint(&ring_mask[index],
7281 				     DP_MAX_INT_CONTEXTS_STRING_LENGTH - index,
7282 				     " %d",
7283 				     soc_cfg_ctx->int_reo_status_ring_mask[i]);
7284 	}
7285 
7286 	DP_TRACE_STATS(DEBUG, "Reo ring mask (0-%d):%s",
7287 		       num_of_int_contexts, ring_mask);
7288 
7289 	index = 0;
7290 	for (i = 0; i < num_of_int_contexts; i++) {
7291 		index += qdf_snprint(&ring_mask[index],
7292 				     DP_MAX_INT_CONTEXTS_STRING_LENGTH - index,
7293 				     " %d",
7294 				     soc_cfg_ctx->int_rxdma2host_ring_mask[i]);
7295 	}
7296 
7297 	DP_TRACE_STATS(DEBUG, "Rxdma2host ring mask (0-%d):%s",
7298 		       num_of_int_contexts, ring_mask);
7299 
7300 	index = 0;
7301 	for (i = 0; i < num_of_int_contexts; i++) {
7302 		index += qdf_snprint(&ring_mask[index],
7303 				     DP_MAX_INT_CONTEXTS_STRING_LENGTH - index,
7304 				     " %d",
7305 				     soc_cfg_ctx->int_host2rxdma_ring_mask[i]);
7306 	}
7307 
7308 	DP_TRACE_STATS(DEBUG, "Host2rxdma ring mask (0-%d):%s",
7309 		       num_of_int_contexts, ring_mask);
7310 
7311 	DP_TRACE_STATS(DEBUG, "Rx hash: %u ",
7312 		       soc_cfg_ctx->rx_hash);
7313 	DP_TRACE_STATS(DEBUG, "Tso enabled: %u ",
7314 		       soc_cfg_ctx->tso_enabled);
7315 	DP_TRACE_STATS(DEBUG, "Lro enabled: %u ",
7316 		       soc_cfg_ctx->lro_enabled);
7317 	DP_TRACE_STATS(DEBUG, "Sg enabled: %u ",
7318 		       soc_cfg_ctx->sg_enabled);
7319 	DP_TRACE_STATS(DEBUG, "Gro enabled: %u ",
7320 		       soc_cfg_ctx->gro_enabled);
7321 	DP_TRACE_STATS(DEBUG, "rawmode enabled: %u ",
7322 		       soc_cfg_ctx->rawmode_enabled);
7323 	DP_TRACE_STATS(DEBUG, "peer flow ctrl enabled: %u ",
7324 		       soc_cfg_ctx->peer_flow_ctrl_enabled);
7325 	DP_TRACE_STATS(DEBUG, "napi enabled: %u ",
7326 		       soc_cfg_ctx->napi_enabled);
7327 	DP_TRACE_STATS(DEBUG, "Tcp Udp checksum offload: %u ",
7328 		       soc_cfg_ctx->tcp_udp_checksumoffload);
7329 	DP_TRACE_STATS(DEBUG, "Defrag timeout check: %u ",
7330 		       soc_cfg_ctx->defrag_timeout_check);
7331 	DP_TRACE_STATS(DEBUG, "Rx defrag min timeout: %u ",
7332 		       soc_cfg_ctx->rx_defrag_min_timeout);
7333 	DP_TRACE_STATS(DEBUG, "WBM release ring: %u ",
7334 		       soc_cfg_ctx->wbm_release_ring);
7335 	DP_TRACE_STATS(DEBUG, "TCL CMD ring: %u ",
7336 		       soc_cfg_ctx->tcl_cmd_ring);
7337 	DP_TRACE_STATS(DEBUG, "TCL Status ring: %u ",
7338 		       soc_cfg_ctx->tcl_status_ring);
7339 	DP_TRACE_STATS(DEBUG, "REO Reinject ring: %u ",
7340 		       soc_cfg_ctx->reo_reinject_ring);
7341 	DP_TRACE_STATS(DEBUG, "RX release ring: %u ",
7342 		       soc_cfg_ctx->rx_release_ring);
7343 	DP_TRACE_STATS(DEBUG, "REO Exception ring: %u ",
7344 		       soc_cfg_ctx->reo_exception_ring);
7345 	DP_TRACE_STATS(DEBUG, "REO CMD ring: %u ",
7346 		       soc_cfg_ctx->reo_cmd_ring);
7347 	DP_TRACE_STATS(DEBUG, "REO STATUS ring: %u ",
7348 		       soc_cfg_ctx->reo_status_ring);
7349 	DP_TRACE_STATS(DEBUG, "RXDMA refill ring: %u ",
7350 		       soc_cfg_ctx->rxdma_refill_ring);
7351 	DP_TRACE_STATS(DEBUG, "RXDMA err dst ring: %u ",
7352 		       soc_cfg_ctx->rxdma_err_dst_ring);
7353 }
7354 
7355 /**
 * dp_print_pdev_cfg_params() - Print the pdev cfg parameters
 * @pdev: DP pdev handle
 *
 * Return: void
7360  */
7361 static void
7362 dp_print_pdev_cfg_params(struct dp_pdev *pdev)
7363 {
7364 	struct wlan_cfg_dp_pdev_ctxt *pdev_cfg_ctx;
7365 
7366 	if (!pdev) {
7367 		dp_err("Context is null");
7368 		return;
7369 	}
7370 
7371 	pdev_cfg_ctx = pdev->wlan_cfg_ctx;
7372 
7373 	if (!pdev_cfg_ctx) {
7374 		dp_err("Context is null");
7375 		return;
7376 	}
7377 
7378 	DP_TRACE_STATS(DEBUG, "Rx dma buf ring size: %d ",
7379 		       pdev_cfg_ctx->rx_dma_buf_ring_size);
7380 	DP_TRACE_STATS(DEBUG, "DMA Mon buf ring size: %d ",
7381 		       pdev_cfg_ctx->dma_mon_buf_ring_size);
7382 	DP_TRACE_STATS(DEBUG, "DMA Mon dest ring size: %d ",
7383 		       pdev_cfg_ctx->dma_mon_dest_ring_size);
7384 	DP_TRACE_STATS(DEBUG, "DMA Mon status ring size: %d ",
7385 		       pdev_cfg_ctx->dma_mon_status_ring_size);
7386 	DP_TRACE_STATS(DEBUG, "Rxdma monitor desc ring: %d",
7387 		       pdev_cfg_ctx->rxdma_monitor_desc_ring);
7388 	DP_TRACE_STATS(DEBUG, "Num mac rings: %d ",
7389 		       pdev_cfg_ctx->num_mac_rings);
7390 }
7391 
7392 /**
7393  * dp_txrx_stats_help() - Helper function for Txrx_Stats
7394  *
7395  * Return: None
7396  */
7397 static void dp_txrx_stats_help(void)
7398 {
7399 	dp_info("Command: iwpriv wlan0 txrx_stats <stats_option> <mac_id>");
7400 	dp_info("stats_option:");
7401 	dp_info("  1 -- HTT Tx Statistics");
7402 	dp_info("  2 -- HTT Rx Statistics");
7403 	dp_info("  3 -- HTT Tx HW Queue Statistics");
7404 	dp_info("  4 -- HTT Tx HW Sched Statistics");
7405 	dp_info("  5 -- HTT Error Statistics");
7406 	dp_info("  6 -- HTT TQM Statistics");
7407 	dp_info("  7 -- HTT TQM CMDQ Statistics");
7408 	dp_info("  8 -- HTT TX_DE_CMN Statistics");
7409 	dp_info("  9 -- HTT Tx Rate Statistics");
7410 	dp_info(" 10 -- HTT Rx Rate Statistics");
7411 	dp_info(" 11 -- HTT Peer Statistics");
7412 	dp_info(" 12 -- HTT Tx SelfGen Statistics");
7413 	dp_info(" 13 -- HTT Tx MU HWQ Statistics");
7414 	dp_info(" 14 -- HTT RING_IF_INFO Statistics");
7415 	dp_info(" 15 -- HTT SRNG Statistics");
7416 	dp_info(" 16 -- HTT SFM Info Statistics");
7417 	dp_info(" 17 -- HTT PDEV_TX_MU_MIMO_SCHED INFO Statistics");
7418 	dp_info(" 18 -- HTT Peer List Details");
7419 	dp_info(" 20 -- Clear Host Statistics");
7420 	dp_info(" 21 -- Host Rx Rate Statistics");
7421 	dp_info(" 22 -- Host Tx Rate Statistics");
7422 	dp_info(" 23 -- Host Tx Statistics");
7423 	dp_info(" 24 -- Host Rx Statistics");
7424 	dp_info(" 25 -- Host AST Statistics");
7425 	dp_info(" 26 -- Host SRNG PTR Statistics");
7426 	dp_info(" 27 -- Host Mon Statistics");
7427 	dp_info(" 28 -- Host REO Queue Statistics");
7428 	dp_info(" 29 -- Host Soc cfg param Statistics");
7429 	dp_info(" 30 -- Host pdev cfg param Statistics");
7430 }
7431 
7432 /**
7433  * dp_print_host_stats()- Function to print the stats aggregated at host
7434  * @vdev_handle: DP_VDEV handle
 * @req: host stats request, carrying the stats option selected by the user
 *
 * Return: 0 always; prints the help text if the stats option is invalid
7438  */
7439 static int
7440 dp_print_host_stats(struct cdp_vdev *vdev_handle,
7441 		    struct cdp_txrx_stats_req *req)
7442 {
7443 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
7444 	struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;
7445 	enum cdp_host_txrx_stats type =
7446 			dp_stats_mapping_table[req->stats][STATS_HOST];
7447 
7448 	dp_aggregate_pdev_stats(pdev);
7449 
7450 	switch (type) {
7451 	case TXRX_CLEAR_STATS:
7452 		dp_txrx_host_stats_clr(vdev);
7453 		break;
7454 	case TXRX_RX_RATE_STATS:
7455 		dp_print_rx_rates(vdev);
7456 		break;
7457 	case TXRX_TX_RATE_STATS:
7458 		dp_print_tx_rates(vdev);
7459 		break;
7460 	case TXRX_TX_HOST_STATS:
7461 		dp_print_pdev_tx_stats(pdev);
7462 		dp_print_soc_tx_stats(pdev->soc);
7463 		break;
7464 	case TXRX_RX_HOST_STATS:
7465 		dp_print_pdev_rx_stats(pdev);
7466 		dp_print_soc_rx_stats(pdev->soc);
7467 		break;
7468 	case TXRX_AST_STATS:
7469 		dp_print_ast_stats(pdev->soc);
7470 		dp_print_peer_table(vdev);
7471 		break;
7472 	case TXRX_SRNG_PTR_STATS:
7473 		dp_print_ring_stats(pdev);
7474 		break;
7475 	case TXRX_RX_MON_STATS:
7476 		dp_print_pdev_rx_mon_stats(pdev);
7477 		break;
7478 	case TXRX_REO_QUEUE_STATS:
7479 		dp_get_host_peer_stats((struct cdp_pdev *)pdev, req->peer_addr);
7480 		break;
7481 	case TXRX_SOC_CFG_PARAMS:
7482 		dp_print_soc_cfg_params(pdev->soc);
7483 		break;
7484 	case TXRX_PDEV_CFG_PARAMS:
7485 		dp_print_pdev_cfg_params(pdev);
7486 		break;
7487 	default:
7488 		dp_info("Wrong Input For TxRx Host Stats");
7489 		dp_txrx_stats_help();
7490 		break;
7491 	}
7492 	return 0;
7493 }
7494 
7495 /*
7496  * dp_ppdu_ring_reset()- Reset PPDU Stats ring
7497  * @pdev: DP_PDEV handle
7498  *
7499  * Return: void
7500  */
7501 static void
7502 dp_ppdu_ring_reset(struct dp_pdev *pdev)
7503 {
7504 	struct htt_rx_ring_tlv_filter htt_tlv_filter;
7505 	int mac_id;
7506 
7507 	qdf_mem_set(&(htt_tlv_filter), sizeof(htt_tlv_filter), 0x0);
7508 
7509 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
7510 		int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id,
7511 							pdev->pdev_id);
7512 
7513 		htt_h2t_rx_ring_cfg(pdev->soc->htt_handle, mac_for_pdev,
7514 			pdev->rxdma_mon_status_ring[mac_id].hal_srng,
7515 			RXDMA_MONITOR_STATUS, RX_BUFFER_SIZE, &htt_tlv_filter);
7516 	}
7517 }
7518 
7519 /*
7520  * dp_ppdu_ring_cfg()- Configure PPDU Stats ring
7521  * @pdev: DP_PDEV handle
7522  *
7523  * Return: void
7524  */
7525 static void
7526 dp_ppdu_ring_cfg(struct dp_pdev *pdev)
7527 {
7528 	struct htt_rx_ring_tlv_filter htt_tlv_filter = {0};
7529 	int mac_id;
7530 
7531 	htt_tlv_filter.mpdu_start = 1;
7532 	htt_tlv_filter.msdu_start = 0;
7533 	htt_tlv_filter.packet = 0;
7534 	htt_tlv_filter.msdu_end = 0;
7535 	htt_tlv_filter.mpdu_end = 0;
7536 	htt_tlv_filter.attention = 0;
7537 	htt_tlv_filter.ppdu_start = 1;
7538 	htt_tlv_filter.ppdu_end = 1;
7539 	htt_tlv_filter.ppdu_end_user_stats = 1;
7540 	htt_tlv_filter.ppdu_end_user_stats_ext = 1;
7541 	htt_tlv_filter.ppdu_end_status_done = 1;
7542 	htt_tlv_filter.enable_fp = 1;
7543 	htt_tlv_filter.enable_md = 0;
7544 	if (pdev->neighbour_peers_added &&
7545 	    pdev->soc->hw_nac_monitor_support) {
7546 		htt_tlv_filter.enable_md = 1;
7547 		htt_tlv_filter.packet_header = 1;
7548 	}
7549 	if (pdev->mcopy_mode) {
7550 		htt_tlv_filter.packet_header = 1;
7551 		htt_tlv_filter.enable_mo = 1;
7552 	}
7553 	htt_tlv_filter.fp_mgmt_filter = FILTER_MGMT_ALL;
7554 	htt_tlv_filter.fp_ctrl_filter = FILTER_CTRL_ALL;
7555 	htt_tlv_filter.fp_data_filter = FILTER_DATA_ALL;
7556 	htt_tlv_filter.mo_mgmt_filter = FILTER_MGMT_ALL;
7557 	htt_tlv_filter.mo_ctrl_filter = FILTER_CTRL_ALL;
7558 	htt_tlv_filter.mo_data_filter = FILTER_DATA_ALL;
7559 	if (pdev->neighbour_peers_added &&
7560 	    pdev->soc->hw_nac_monitor_support)
7561 		htt_tlv_filter.md_data_filter = FILTER_DATA_ALL;
7562 
7563 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
7564 		int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id,
7565 						pdev->pdev_id);
7566 
7567 		htt_h2t_rx_ring_cfg(pdev->soc->htt_handle, mac_for_pdev,
7568 			pdev->rxdma_mon_status_ring[mac_id].hal_srng,
7569 			RXDMA_MONITOR_STATUS, RX_BUFFER_SIZE, &htt_tlv_filter);
7570 	}
7571 }
7572 
/*
 * is_ppdu_txrx_capture_enabled() - API to check whether pktlog,
 *			debug_sniffer and mcopy modes are all disabled,
 *			i.e. whether the ppdu stats configuration can be
 *			changed safely.
 * @pdev: dp pdev handle.
 *
 * Return: bool
 */
static inline bool is_ppdu_txrx_capture_enabled(struct dp_pdev *pdev)
{
	return !pdev->pktlog_ppdu_stats && !pdev->tx_sniffer_enable &&
	       !pdev->mcopy_mode;
}
7588 
/*
 * dp_set_bpr_enable() - API to enable/disable bpr feature
 * @pdev_handle: DP_PDEV handle.
 * @val: Provided value.
 *
 * Return: 0 for success. nonzero for failure.
 */
7596 static QDF_STATUS
7597 dp_set_bpr_enable(struct cdp_pdev *pdev_handle, int val)
7598 {
7599 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
7600 
7601 	switch (val) {
7602 	case CDP_BPR_DISABLE:
7603 		pdev->bpr_enable = CDP_BPR_DISABLE;
7604 		if (!pdev->pktlog_ppdu_stats && !pdev->enhanced_stats_en &&
7605 		    !pdev->tx_sniffer_enable && !pdev->mcopy_mode) {
7606 			dp_h2t_cfg_stats_msg_send(pdev, 0, pdev->pdev_id);
7607 		} else if (pdev->enhanced_stats_en &&
7608 			   !pdev->tx_sniffer_enable && !pdev->mcopy_mode &&
7609 			   !pdev->pktlog_ppdu_stats) {
7610 			dp_h2t_cfg_stats_msg_send(pdev,
7611 						  DP_PPDU_STATS_CFG_ENH_STATS,
7612 						  pdev->pdev_id);
7613 		}
7614 		break;
7615 	case CDP_BPR_ENABLE:
7616 		pdev->bpr_enable = CDP_BPR_ENABLE;
7617 		if (!pdev->enhanced_stats_en && !pdev->tx_sniffer_enable &&
7618 		    !pdev->mcopy_mode && !pdev->pktlog_ppdu_stats) {
7619 			dp_h2t_cfg_stats_msg_send(pdev,
7620 						  DP_PPDU_STATS_CFG_BPR,
7621 						  pdev->pdev_id);
7622 		} else if (pdev->enhanced_stats_en &&
7623 			   !pdev->tx_sniffer_enable && !pdev->mcopy_mode &&
7624 			   !pdev->pktlog_ppdu_stats) {
7625 			dp_h2t_cfg_stats_msg_send(pdev,
7626 						  DP_PPDU_STATS_CFG_BPR_ENH,
7627 						  pdev->pdev_id);
7628 		} else if (pdev->pktlog_ppdu_stats) {
7629 			dp_h2t_cfg_stats_msg_send(pdev,
7630 						  DP_PPDU_STATS_CFG_BPR_PKTLOG,
7631 						  pdev->pdev_id);
7632 		}
7633 		break;
7634 	default:
7635 		break;
7636 	}
7637 
7638 	return QDF_STATUS_SUCCESS;
7639 }
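
/*
 * Illustrative sketch (assumption: called from a control path that owns
 * a valid pdev handle): toggling the BPR feature through the setter
 * above. The helper name is hypothetical.
 */
static inline void
dp_example_toggle_bpr(struct cdp_pdev *pdev_handle, bool enable)
{
	(void)dp_set_bpr_enable(pdev_handle,
				enable ? CDP_BPR_ENABLE : CDP_BPR_DISABLE);
}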
7640 
7641 /*
7642  * dp_config_debug_sniffer()- API to enable/disable debug sniffer
7643  * @pdev_handle: DP_PDEV handle
7644  * @val: user provided value
7645  *
7646  * Return: 0 for success. nonzero for failure.
7647  */
7648 static QDF_STATUS
7649 dp_config_debug_sniffer(struct cdp_pdev *pdev_handle, int val)
7650 {
7651 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
7652 	QDF_STATUS status = QDF_STATUS_SUCCESS;
7653 
7654 	if (pdev->mcopy_mode)
7655 		dp_reset_monitor_mode(pdev_handle);
7656 
7657 	switch (val) {
7658 	case 0:
7659 		pdev->tx_sniffer_enable = 0;
7660 		pdev->mcopy_mode = 0;
7661 		pdev->monitor_configured = false;
7662 
7663 		if (!pdev->pktlog_ppdu_stats && !pdev->enhanced_stats_en &&
7664 		    !pdev->bpr_enable) {
7665 			dp_h2t_cfg_stats_msg_send(pdev, 0, pdev->pdev_id);
7666 			dp_ppdu_ring_reset(pdev);
7667 		} else if (pdev->enhanced_stats_en && !pdev->bpr_enable) {
7668 			dp_h2t_cfg_stats_msg_send(pdev,
7669 				DP_PPDU_STATS_CFG_ENH_STATS, pdev->pdev_id);
7670 		} else if (!pdev->enhanced_stats_en && pdev->bpr_enable) {
7671 			dp_h2t_cfg_stats_msg_send(pdev,
7672 						  DP_PPDU_STATS_CFG_BPR_ENH,
7673 						  pdev->pdev_id);
7674 		} else {
7675 			dp_h2t_cfg_stats_msg_send(pdev,
7676 						  DP_PPDU_STATS_CFG_BPR,
7677 						  pdev->pdev_id);
7678 		}
7679 		break;
7680 
7681 	case 1:
7682 		pdev->tx_sniffer_enable = 1;
7683 		pdev->mcopy_mode = 0;
7684 		pdev->monitor_configured = false;
7685 
7686 		if (!pdev->pktlog_ppdu_stats)
7687 			dp_h2t_cfg_stats_msg_send(pdev,
7688 				DP_PPDU_STATS_CFG_SNIFFER, pdev->pdev_id);
7689 		break;
7690 	case 2:
7691 		if (pdev->monitor_vdev) {
7692 			status = QDF_STATUS_E_RESOURCES;
7693 			break;
7694 		}
7695 
7696 		pdev->mcopy_mode = 1;
7697 		dp_pdev_configure_monitor_rings(pdev);
7698 		pdev->monitor_configured = true;
7699 		pdev->tx_sniffer_enable = 0;
7700 
7701 		if (!pdev->pktlog_ppdu_stats)
7702 			dp_h2t_cfg_stats_msg_send(pdev,
7703 				DP_PPDU_STATS_CFG_SNIFFER, pdev->pdev_id);
7704 		break;
7705 	default:
7706 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
7707 			"Invalid value");
7708 		break;
7709 	}
7710 	return status;
7711 }
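
/*
 * Illustrative sketch: enabling M_COPY mode (val = 2) and handling the
 * case where a monitor vdev already owns the monitor rings. The helper
 * name is hypothetical.
 */
static inline void
dp_example_enable_mcopy(struct cdp_pdev *pdev_handle)
{
	if (dp_config_debug_sniffer(pdev_handle, 2) == QDF_STATUS_E_RESOURCES)
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "monitor vdev active, M_COPY not enabled");
}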
7712 
7713 /*
 * dp_enable_enhanced_stats()- API to enable enhanced statistics
7715  * @pdev_handle: DP_PDEV handle
7716  *
7717  * Return: void
7718  */
7719 static void
7720 dp_enable_enhanced_stats(struct cdp_pdev *pdev_handle)
7721 {
7722 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
7723 
7724 	if (pdev->enhanced_stats_en == 0)
7725 		dp_cal_client_timer_start(pdev->cal_client_ctx);
7726 
7727 	pdev->enhanced_stats_en = 1;
7728 
7729 	if (!pdev->mcopy_mode && !pdev->neighbour_peers_added &&
7730 	    !pdev->monitor_vdev)
7731 		dp_ppdu_ring_cfg(pdev);
7732 
7733 	if (is_ppdu_txrx_capture_enabled(pdev) && !pdev->bpr_enable) {
		dp_h2t_cfg_stats_msg_send(pdev, DP_PPDU_STATS_CFG_ENH_STATS,
					  pdev->pdev_id);
7735 	} else if (is_ppdu_txrx_capture_enabled(pdev) && pdev->bpr_enable) {
7736 		dp_h2t_cfg_stats_msg_send(pdev,
7737 					  DP_PPDU_STATS_CFG_BPR_ENH,
7738 					  pdev->pdev_id);
7739 	}
7740 }
7741 
7742 /*
 * dp_disable_enhanced_stats()- API to disable enhanced statistics
7744  * @pdev_handle: DP_PDEV handle
7745  *
7746  * Return: void
7747  */
7748 static void
7749 dp_disable_enhanced_stats(struct cdp_pdev *pdev_handle)
7750 {
7751 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
7752 
7753 	if (pdev->enhanced_stats_en == 1)
7754 		dp_cal_client_timer_stop(pdev->cal_client_ctx);
7755 
7756 	pdev->enhanced_stats_en = 0;
7757 
7758 	if (is_ppdu_txrx_capture_enabled(pdev) && !pdev->bpr_enable) {
7759 		dp_h2t_cfg_stats_msg_send(pdev, 0, pdev->pdev_id);
7760 	} else if (is_ppdu_txrx_capture_enabled(pdev) && pdev->bpr_enable) {
7761 		dp_h2t_cfg_stats_msg_send(pdev,
7762 					  DP_PPDU_STATS_CFG_BPR,
7763 					  pdev->pdev_id);
7764 	}
7765 
7766 	if (!pdev->mcopy_mode && !pdev->neighbour_peers_added &&
7767 	    !pdev->monitor_vdev)
7768 		dp_ppdu_ring_reset(pdev);
7769 }
7770 
7771 /*
 * dp_get_fw_peer_stats()- function to request peer stats from firmware
 * @pdev_handle: DP_PDEV handle
 * @mac_addr: mac address of the peer
 * @cap: Type of htt stats requested
 *
 * Currently only MAC-address-based requests are supported:
7778  *	1: HTT_PEER_STATS_REQ_MODE_NO_QUERY
7779  *	2: HTT_PEER_STATS_REQ_MODE_QUERY_TQM
7780  *	3: HTT_PEER_STATS_REQ_MODE_FLUSH_TQM
7781  *
7782  * Return: void
7783  */
7784 static void
7785 dp_get_fw_peer_stats(struct cdp_pdev *pdev_handle, uint8_t *mac_addr,
7786 		uint32_t cap)
7787 {
7788 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
7789 	int i;
7790 	uint32_t config_param0 = 0;
7791 	uint32_t config_param1 = 0;
7792 	uint32_t config_param2 = 0;
7793 	uint32_t config_param3 = 0;
7794 
7795 	HTT_DBG_EXT_STATS_PEER_INFO_IS_MAC_ADDR_SET(config_param0, 1);
7796 	config_param0 |= (1 << (cap + 1));
7797 
	for (i = 0; i < HTT_PEER_STATS_MAX_TLV; i++)
		config_param1 |= (1 << i);
7801 
7802 	config_param2 |= (mac_addr[0] & 0x000000ff);
7803 	config_param2 |= ((mac_addr[1] << 8) & 0x0000ff00);
7804 	config_param2 |= ((mac_addr[2] << 16) & 0x00ff0000);
7805 	config_param2 |= ((mac_addr[3] << 24) & 0xff000000);
7806 
7807 	config_param3 |= (mac_addr[4] & 0x000000ff);
7808 	config_param3 |= ((mac_addr[5] << 8) & 0x0000ff00);
7809 
7810 	dp_h2t_ext_stats_msg_send(pdev, HTT_DBG_EXT_STATS_PEER_INFO,
7811 			config_param0, config_param1, config_param2,
7812 			config_param3, 0, 0, 0);
}
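
/*
 * Worked example of the MAC packing above (illustrative only): for a
 * peer with MAC aa:bb:cc:dd:ee:ff the code yields
 *	config_param2 = 0xddccbbaa	(bytes 0..3, packed LSB first)
 *	config_param3 = 0x0000ffee	(bytes 4..5)
 * i.e. the address is packed LSB-first into the two config words.
 */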
7815 
/* This struct definition will be removed from here
 * once it gets added to FW headers
 */
struct httstats_cmd_req {
	uint32_t config_param0;
	uint32_t config_param1;
	uint32_t config_param2;
	uint32_t config_param3;
	int cookie;
	u_int8_t stats_id;
};
7826 
7827 /*
 * dp_get_htt_stats: function to process the httstats request
7829  * @pdev_handle: DP pdev handle
7830  * @data: pointer to request data
7831  * @data_len: length for request data
7832  *
7833  * return: void
7834  */
7835 static void
7836 dp_get_htt_stats(struct cdp_pdev *pdev_handle, void *data, uint32_t data_len)
7837 {
7838 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
7839 	struct httstats_cmd_req *req = (struct httstats_cmd_req *)data;
7840 
7841 	QDF_ASSERT(data_len == sizeof(struct httstats_cmd_req));
7842 	dp_h2t_ext_stats_msg_send(pdev, req->stats_id,
7843 				req->config_param0, req->config_param1,
7844 				req->config_param2, req->config_param3,
7845 				req->cookie, 0, 0);
7846 }
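
/*
 * Usage sketch (illustrative only): issuing an HTT stats request through
 * dp_get_htt_stats(). The stats id and TLV mask are assumptions chosen
 * for demonstration, and the helper name is hypothetical.
 */
static inline void
dp_example_request_htt_stats(struct cdp_pdev *pdev_handle)
{
	struct httstats_cmd_req req = {0};

	req.stats_id = HTT_DBG_EXT_STATS_PDEV_TX; /* assumed stats id */
	req.config_param1 = 0xFFFFFFFF;		  /* request all TLVs */

	dp_get_htt_stats(pdev_handle, &req, sizeof(req));
}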
7847 
7848 /*
7849  * dp_set_pdev_param: function to set parameters in pdev
7850  * @pdev_handle: DP pdev handle
7851  * @param: parameter type to be set
7852  * @val: value of parameter to be set
7853  *
7854  * Return: 0 for success. nonzero for failure.
7855  */
7856 static QDF_STATUS dp_set_pdev_param(struct cdp_pdev *pdev_handle,
7857 				    enum cdp_pdev_param_type param,
7858 				    uint8_t val)
7859 {
	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;

	switch (param) {
7862 	case CDP_CONFIG_DEBUG_SNIFFER:
7863 		return dp_config_debug_sniffer(pdev_handle, val);
7864 	case CDP_CONFIG_BPR_ENABLE:
7865 		return dp_set_bpr_enable(pdev_handle, val);
7866 	case CDP_CONFIG_PRIMARY_RADIO:
7867 		pdev->is_primary = val;
7868 		break;
7869 	default:
7870 		return QDF_STATUS_E_INVAL;
7871 	}
7872 	return QDF_STATUS_SUCCESS;
7873 }
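
/*
 * Illustrative sketch: marking a radio as primary through the generic
 * pdev-param path instead of a dedicated API. The wrapper name is
 * hypothetical.
 */
static inline QDF_STATUS
dp_example_set_primary_radio(struct cdp_pdev *pdev_handle)
{
	return dp_set_pdev_param(pdev_handle, CDP_CONFIG_PRIMARY_RADIO, 1);
}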
7874 
/*
 * dp_get_vdev_param: function to get parameters from vdev
 * @vdev_handle: DP vdev handle
 * @param: parameter type to get value
 *
 * Return: parameter value, or -1 for an unknown parameter type
 */
7881 static uint32_t dp_get_vdev_param(struct cdp_vdev *vdev_handle,
7882 				  enum cdp_vdev_param_type param)
7883 {
7884 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
7885 	uint32_t val;
7886 
7887 	switch (param) {
7888 	case CDP_ENABLE_WDS:
7889 		val = vdev->wds_enabled;
7890 		break;
7891 	case CDP_ENABLE_MEC:
7892 		val = vdev->mec_enabled;
7893 		break;
7894 	case CDP_ENABLE_DA_WAR:
7895 		val = vdev->da_war_enabled;
7896 		break;
7897 	default:
7898 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "unhandled vdev param %d\n",
7900 			  param);
7901 		val = -1;
7902 		break;
7903 	}
7904 
7905 	return val;
7906 }
7907 
/*
 * dp_set_vdev_param: function to set parameters in vdev
 * @vdev_handle: DP vdev handle
 * @param: parameter type to be set
 * @val: value of parameter to be set
 *
 * Return: void
7914  */
7915 static void dp_set_vdev_param(struct cdp_vdev *vdev_handle,
7916 		enum cdp_vdev_param_type param, uint32_t val)
7917 {
	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;

	switch (param) {
7920 	case CDP_ENABLE_WDS:
7921 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
7922 			  "wds_enable %d for vdev(%p) id(%d)\n",
7923 			  val, vdev, vdev->vdev_id);
7924 		vdev->wds_enabled = val;
7925 		break;
7926 	case CDP_ENABLE_MEC:
7927 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
7928 			  "mec_enable %d for vdev(%p) id(%d)\n",
7929 			  val, vdev, vdev->vdev_id);
7930 		vdev->mec_enabled = val;
7931 		break;
7932 	case CDP_ENABLE_DA_WAR:
7933 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
7934 			  "da_war_enable %d for vdev(%p) id(%d)\n",
7935 			  val, vdev, vdev->vdev_id);
7936 		vdev->da_war_enabled = val;
7937 		break;
7938 	case CDP_ENABLE_NAWDS:
7939 		vdev->nawds_enabled = val;
7940 		break;
7941 	case CDP_ENABLE_MCAST_EN:
7942 		vdev->mcast_enhancement_en = val;
7943 		break;
7944 	case CDP_ENABLE_PROXYSTA:
7945 		vdev->proxysta_vdev = val;
7946 		break;
7947 	case CDP_UPDATE_TDLS_FLAGS:
7948 		vdev->tdls_link_connected = val;
7949 		break;
7950 	case CDP_CFG_WDS_AGING_TIMER:
7951 		if (val == 0)
7952 			qdf_timer_stop(&vdev->pdev->soc->ast_aging_timer);
7953 		else if (val != vdev->wds_aging_timer_val)
7954 			qdf_timer_mod(&vdev->pdev->soc->ast_aging_timer, val);
7955 
7956 		vdev->wds_aging_timer_val = val;
7957 		break;
7958 	case CDP_ENABLE_AP_BRIDGE:
7959 		if (wlan_op_mode_sta != vdev->opmode)
7960 			vdev->ap_bridge_enabled = val;
7961 		else
7962 			vdev->ap_bridge_enabled = false;
7963 		break;
7964 	case CDP_ENABLE_CIPHER:
7965 		vdev->sec_type = val;
7966 		break;
7967 	case CDP_ENABLE_QWRAP_ISOLATION:
7968 		vdev->isolation_vdev = val;
7969 		break;
7970 	default:
7971 		break;
7972 	}
7973 
7974 	dp_tx_vdev_update_search_flags(vdev);
7975 }
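
/*
 * Illustrative sketch: enabling WDS together with the DA war on a vdev
 * via the generic setter; dp_tx_vdev_update_search_flags() runs once per
 * call, so back-to-back param updates remain cheap. The helper name is
 * hypothetical.
 */
static inline void
dp_example_enable_wds(struct cdp_vdev *vdev_handle)
{
	dp_set_vdev_param(vdev_handle, CDP_ENABLE_WDS, 1);
	dp_set_vdev_param(vdev_handle, CDP_ENABLE_DA_WAR, 1);
}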
7976 
7977 /**
7978  * dp_peer_set_nawds: set nawds bit in peer
7979  * @peer_handle: pointer to peer
7980  * @value: enable/disable nawds
7981  *
7982  * return: void
7983  */
7984 static void dp_peer_set_nawds(struct cdp_peer *peer_handle, uint8_t value)
7985 {
	struct dp_peer *peer = (struct dp_peer *)peer_handle;

	peer->nawds_enabled = value;
7988 }
7989 
7990 /*
7991  * dp_set_vdev_dscp_tid_map_wifi3(): Update Map ID selected for particular vdev
7992  * @vdev_handle: DP_VDEV handle
 * @map_id: ID of map that needs to be updated
7994  *
7995  * Return: void
7996  */
7997 static void dp_set_vdev_dscp_tid_map_wifi3(struct cdp_vdev *vdev_handle,
7998 		uint8_t map_id)
7999 {
	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;

	vdev->dscp_tid_map_id = map_id;
}
8004 
8005 /* dp_txrx_get_pdev_stats - Returns cdp_pdev_stats
 * @pdev_handle: DP pdev handle
8007  *
8008  * return : cdp_pdev_stats pointer
8009  */
8010 static struct cdp_pdev_stats*
8011 dp_txrx_get_pdev_stats(struct cdp_pdev *pdev_handle)
8012 {
8013 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
8014 
8015 	dp_aggregate_pdev_stats(pdev);
8016 
8017 	return &pdev->stats;
8018 }
8019 
8020 /* dp_txrx_get_peer_stats - will return cdp_peer_stats
8021  * @peer_handle: DP_PEER handle
8022  *
8023  * return : cdp_peer_stats pointer
8024  */
8025 static struct cdp_peer_stats*
8026 		dp_txrx_get_peer_stats(struct cdp_peer *peer_handle)
8027 {
8028 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
8029 
8030 	qdf_assert(peer);
8031 
8032 	return &peer->stats;
8033 }
8034 
8035 /* dp_txrx_reset_peer_stats - reset cdp_peer_stats for particular peer
8036  * @peer_handle: DP_PEER handle
8037  *
8038  * return : void
8039  */
8040 static void dp_txrx_reset_peer_stats(struct cdp_peer *peer_handle)
8041 {
8042 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
8043 
8044 	qdf_assert(peer);
8045 
8046 	qdf_mem_set(&peer->stats, sizeof(peer->stats), 0);
8047 }
8048 
8049 /* dp_txrx_get_vdev_stats - Update buffer with cdp_vdev_stats
8050  * @vdev_handle: DP_VDEV handle
 * @buf: buffer for vdev stats
 * @is_aggregate: if true, aggregate fresh stats from all peers; otherwise
 *		  copy the cached vdev stats
 *
8053  * return : int
8054  */
8055 static int  dp_txrx_get_vdev_stats(struct cdp_vdev *vdev_handle, void *buf,
8056 				   bool is_aggregate)
8057 {
8058 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
8059 	struct cdp_vdev_stats *vdev_stats = (struct cdp_vdev_stats *)buf;
8060 
8061 	if (is_aggregate)
8062 		dp_aggregate_vdev_stats(vdev, buf);
8063 	else
8064 		qdf_mem_copy(vdev_stats, &vdev->stats, sizeof(vdev->stats));
8065 
8066 	return 0;
8067 }
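
/*
 * Usage sketch (illustrative only): pulling aggregated vdev stats into a
 * heap buffer; cdp_vdev_stats is sizable, so stack allocation is avoided.
 * The helper name is hypothetical.
 */
static inline void
dp_example_read_vdev_stats(struct cdp_vdev *vdev_handle)
{
	struct cdp_vdev_stats *stats = qdf_mem_malloc(sizeof(*stats));

	if (!stats)
		return;

	if (!dp_txrx_get_vdev_stats(vdev_handle, stats, true))
		DP_TRACE_STATS(INFO_HIGH, "vdev tx success msdus: %u",
			       stats->tx.tx_success.num);

	qdf_mem_free(stats);
}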
8068 
8069 /*
 * dp_get_total_per(): get total PER (packet error rate)
8071  * @pdev_handle: DP_PDEV handle
8072  *
8073  * Return: % error rate using retries per packet and success packets
8074  */
8075 static int dp_get_total_per(struct cdp_pdev *pdev_handle)
8076 {
8077 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
8078 
8079 	dp_aggregate_pdev_stats(pdev);
8080 	if ((pdev->stats.tx.tx_success.num + pdev->stats.tx.retries) == 0)
8081 		return 0;
8082 	return ((pdev->stats.tx.retries * 100) /
8083 		((pdev->stats.tx.tx_success.num) + (pdev->stats.tx.retries)));
8084 }
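
/*
 * Worked example (illustrative): with tx_success.num = 300 and
 * retries = 100, dp_get_total_per() returns (100 * 100) / (300 + 100),
 * i.e. a PER of 25.
 */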
8085 
8086 /*
8087  * dp_txrx_stats_publish(): publish pdev stats into a buffer
8088  * @pdev_handle: DP_PDEV handle
8089  * @buf: to hold pdev_stats
8090  *
8091  * Return: int
8092  */
8093 static int
8094 dp_txrx_stats_publish(struct cdp_pdev *pdev_handle, void *buf)
8095 {
8096 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
8097 	struct cdp_pdev_stats *buffer = (struct cdp_pdev_stats *) buf;
8098 	struct cdp_txrx_stats_req req = {0,};
8099 
8100 	dp_aggregate_pdev_stats(pdev);
8101 	req.stats = (enum cdp_stats)HTT_DBG_EXT_STATS_PDEV_TX;
8102 	req.cookie_val = 1;
8103 	dp_h2t_ext_stats_msg_send(pdev, req.stats, req.param0,
8104 				req.param1, req.param2, req.param3, 0,
8105 				req.cookie_val, 0);
8106 
8107 	msleep(DP_MAX_SLEEP_TIME);
8108 
8109 	req.stats = (enum cdp_stats)HTT_DBG_EXT_STATS_PDEV_RX;
8110 	req.cookie_val = 1;
8111 	dp_h2t_ext_stats_msg_send(pdev, req.stats, req.param0,
8112 				req.param1, req.param2, req.param3, 0,
8113 				req.cookie_val, 0);
8114 
8115 	msleep(DP_MAX_SLEEP_TIME);
8116 	qdf_mem_copy(buffer, &pdev->stats, sizeof(pdev->stats));
8117 
8118 	return TXRX_STATS_LEVEL;
8119 }
8120 
8121 /**
8122  * dp_set_pdev_dscp_tid_map_wifi3(): update dscp tid map in pdev
 * @pdev_handle: DP_PDEV handle
8124  * @map_id: ID of map that needs to be updated
8125  * @tos: index value in map
8126  * @tid: tid value passed by the user
8127  *
8128  * Return: void
8129  */
8130 static void dp_set_pdev_dscp_tid_map_wifi3(struct cdp_pdev *pdev_handle,
8131 		uint8_t map_id, uint8_t tos, uint8_t tid)
8132 {
8133 	uint8_t dscp;
8134 	struct dp_pdev *pdev = (struct dp_pdev *) pdev_handle;
8135 	struct dp_soc *soc = pdev->soc;
8136 
8137 	if (!soc)
8138 		return;
8139 
8140 	dscp = (tos >> DP_IP_DSCP_SHIFT) & DP_IP_DSCP_MASK;
8141 	pdev->dscp_tid_map[map_id][dscp] = tid;
8142 
8143 	if (map_id < soc->num_hw_dscp_tid_map)
8144 		hal_tx_update_dscp_tid(soc->hal_soc, tid,
8145 				       map_id, dscp);
}
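
/*
 * Illustrative sketch: steering DSCP 46 (Expedited Forwarding) to TID 6
 * on map 0. The ToS byte carries DSCP in its upper six bits, hence the
 * << 2 when building the tos argument (assuming DP_IP_DSCP_SHIFT is the
 * usual 2). The helper name is hypothetical.
 */
static inline void
dp_example_map_ef_to_tid6(struct cdp_pdev *pdev_handle)
{
	dp_set_pdev_dscp_tid_map_wifi3(pdev_handle, 0, 46 << 2, 6);
}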
8148 
8149 /**
8150  * dp_hmmc_tid_override_en_wifi3(): Function to enable hmmc tid override.
8151  * @pdev_handle: pdev handle
8152  * @val: hmmc-dscp flag value
8153  *
8154  * Return: void
8155  */
8156 static void dp_hmmc_tid_override_en_wifi3(struct cdp_pdev *pdev_handle,
8157 					  bool val)
8158 {
8159 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
8160 
8161 	pdev->hmmc_tid_override_en = val;
8162 }
8163 
8164 /**
8165  * dp_set_hmmc_tid_val_wifi3(): Function to set hmmc tid value.
8166  * @pdev_handle: pdev handle
8167  * @tid: tid value
8168  *
8169  * Return: void
8170  */
8171 static void dp_set_hmmc_tid_val_wifi3(struct cdp_pdev *pdev_handle,
8172 				      uint8_t tid)
8173 {
8174 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
8175 
8176 	pdev->hmmc_tid = tid;
8177 }
8178 
8179 /**
8180  * dp_fw_stats_process(): Process TxRX FW stats request
8181  * @vdev_handle: DP VDEV handle
8182  * @req: stats request
8183  *
8184  * return: int
8185  */
8186 static int dp_fw_stats_process(struct cdp_vdev *vdev_handle,
8187 		struct cdp_txrx_stats_req *req)
8188 {
8189 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
8190 	struct dp_pdev *pdev = NULL;
8191 	uint32_t stats = req->stats;
8192 	uint8_t mac_id = req->mac_id;
8193 
8194 	if (!vdev) {
8195 		DP_TRACE(NONE, "VDEV not found");
8196 		return 1;
8197 	}
8198 	pdev = vdev->pdev;
8199 
	/*
	 * For the HTT_DBG_EXT_STATS_RESET command, the FW expects
	 * config_param0 to config_param3 to be set as follows:
	 *
	 * PARAM:
	 *   - config_param0 : start_offset (stats type)
	 *   - config_param1 : stats bmask from start offset
	 *   - config_param2 : stats bmask from start offset + 32
	 *   - config_param3 : stats bmask from start offset + 64
	 */
8210 	if (req->stats == CDP_TXRX_STATS_0) {
8211 		req->param0 = HTT_DBG_EXT_STATS_PDEV_TX;
8212 		req->param1 = 0xFFFFFFFF;
8213 		req->param2 = 0xFFFFFFFF;
8214 		req->param3 = 0xFFFFFFFF;
8215 	} else if (req->stats == (uint8_t)HTT_DBG_EXT_STATS_PDEV_TX_MU) {
8216 		req->param0 = HTT_DBG_EXT_STATS_SET_VDEV_MASK(vdev->vdev_id);
8217 	}
8218 
8219 	return dp_h2t_ext_stats_msg_send(pdev, stats, req->param0,
8220 				req->param1, req->param2, req->param3,
8221 				0, 0, mac_id);
8222 }
8223 
8224 /**
8225  * dp_txrx_stats_request - function to map to firmware and host stats
8226  * @vdev: virtual handle
8227  * @req: stats request
8228  *
8229  * Return: QDF_STATUS
8230  */
8231 static
8232 QDF_STATUS dp_txrx_stats_request(struct cdp_vdev *vdev,
8233 				 struct cdp_txrx_stats_req *req)
8234 {
8235 	int host_stats;
8236 	int fw_stats;
8237 	enum cdp_stats stats;
8238 	int num_stats;
8239 
8240 	if (!vdev || !req) {
8241 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
8242 				"Invalid vdev/req instance");
8243 		return QDF_STATUS_E_INVAL;
8244 	}
8245 
8246 	stats = req->stats;
8247 	if (stats >= CDP_TXRX_MAX_STATS)
8248 		return QDF_STATUS_E_INVAL;
8249 
8250 	/*
8251 	 * DP_CURR_FW_STATS_AVAIL: no of FW stats currently available
8252 	 *			has to be updated if new FW HTT stats added
8253 	 */
8254 	if (stats > CDP_TXRX_STATS_HTT_MAX)
8255 		stats = stats + DP_CURR_FW_STATS_AVAIL - DP_HTT_DBG_EXT_STATS_MAX;
8256 
8257 	num_stats  = QDF_ARRAY_SIZE(dp_stats_mapping_table);
8258 
8259 	if (stats >= num_stats) {
8260 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
8261 			  "%s: Invalid stats option: %d", __func__, stats);
8262 		return QDF_STATUS_E_INVAL;
8263 	}
8264 
8265 	req->stats = stats;
8266 	fw_stats = dp_stats_mapping_table[stats][STATS_FW];
8267 	host_stats = dp_stats_mapping_table[stats][STATS_HOST];
8268 
8269 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
8270 		 "stats: %u fw_stats_type: %d host_stats: %d",
8271 		  stats, fw_stats, host_stats);
8272 
8273 	if (fw_stats != TXRX_FW_STATS_INVALID) {
8274 		/* update request with FW stats type */
8275 		req->stats = fw_stats;
8276 		return dp_fw_stats_process(vdev, req);
8277 	}
8278 
8279 	if ((host_stats != TXRX_HOST_STATS_INVALID) &&
8280 			(host_stats <= TXRX_HOST_STATS_MAX))
8281 		return dp_print_host_stats(vdev, req);
8282 	else
8283 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
8284 				"Wrong Input for TxRx Stats");
8285 
8286 	return QDF_STATUS_SUCCESS;
8287 }
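
/*
 * Usage sketch (illustrative only): requesting the host rx stats dump,
 * option 24 in dp_txrx_stats_help(). CDP_TXRX_STATS_24 is assumed to be
 * the cdp_stats enumerator backing that option, and the helper name is
 * hypothetical.
 */
static inline QDF_STATUS
dp_example_request_host_rx_stats(struct cdp_vdev *vdev)
{
	struct cdp_txrx_stats_req req = {0};

	req.stats = CDP_TXRX_STATS_24;
	return dp_txrx_stats_request(vdev, &req);
}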
8288 
8289 /*
8290  * dp_print_napi_stats(): NAPI stats
8291  * @soc - soc handle
8292  */
8293 static void dp_print_napi_stats(struct dp_soc *soc)
8294 {
8295 	hif_print_napi_stats(soc->hif_handle);
8296 }
8297 
8298 /*
8299  * dp_print_per_ring_stats(): Packet count per ring
8300  * @soc - soc handle
8301  */
8302 static void dp_print_per_ring_stats(struct dp_soc *soc)
8303 {
8304 	uint8_t ring;
8305 	uint16_t core;
8306 	uint64_t total_packets;
8307 
8308 	DP_TRACE_STATS(INFO_HIGH, "Reo packets per ring:");
8309 	for (ring = 0; ring < MAX_REO_DEST_RINGS; ring++) {
8310 		total_packets = 0;
8311 		DP_TRACE_STATS(INFO_HIGH,
8312 			       "Packets on ring %u:", ring);
8313 		for (core = 0; core < NR_CPUS; core++) {
8314 			DP_TRACE_STATS(INFO_HIGH,
8315 				       "Packets arriving on core %u: %llu",
8316 				       core,
8317 				       soc->stats.rx.ring_packets[core][ring]);
8318 			total_packets += soc->stats.rx.ring_packets[core][ring];
8319 		}
8320 		DP_TRACE_STATS(INFO_HIGH,
8321 			       "Total packets on ring %u: %llu",
8322 			       ring, total_packets);
8323 	}
8324 }
8325 
8326 /*
8327  * dp_txrx_path_stats() - Function to display dump stats
8328  * @soc - soc handle
8329  *
8330  * return: none
8331  */
8332 static void dp_txrx_path_stats(struct dp_soc *soc)
8333 {
8334 	uint8_t error_code;
8335 	uint8_t loop_pdev;
8336 	struct dp_pdev *pdev;
8337 	uint8_t i;
8338 
8339 	if (!soc) {
8340 		DP_TRACE(ERROR, "%s: Invalid access",
8341 			 __func__);
8342 		return;
8343 	}
8344 
	for (loop_pdev = 0; loop_pdev < soc->pdev_count; loop_pdev++) {
		pdev = soc->pdev_list[loop_pdev];
8348 		dp_aggregate_pdev_stats(pdev);
8349 		DP_TRACE_STATS(INFO_HIGH, "Tx path Statistics:");
8350 		DP_TRACE_STATS(INFO_HIGH, "from stack: %u msdus (%llu bytes)",
8351 			       pdev->stats.tx_i.rcvd.num,
8352 			       pdev->stats.tx_i.rcvd.bytes);
8353 		DP_TRACE_STATS(INFO_HIGH,
8354 			       "processed from host: %u msdus (%llu bytes)",
8355 			       pdev->stats.tx_i.processed.num,
8356 			       pdev->stats.tx_i.processed.bytes);
8357 		DP_TRACE_STATS(INFO_HIGH,
8358 			       "successfully transmitted: %u msdus (%llu bytes)",
8359 			       pdev->stats.tx.tx_success.num,
8360 			       pdev->stats.tx.tx_success.bytes);
8361 
8362 		DP_TRACE_STATS(INFO_HIGH, "Dropped in host:");
8363 		DP_TRACE_STATS(INFO_HIGH, "Total packets dropped: %u,",
8364 			       pdev->stats.tx_i.dropped.dropped_pkt.num);
8365 		DP_TRACE_STATS(INFO_HIGH, "Descriptor not available: %u",
8366 			       pdev->stats.tx_i.dropped.desc_na.num);
8367 		DP_TRACE_STATS(INFO_HIGH, "Ring full: %u",
8368 			       pdev->stats.tx_i.dropped.ring_full);
8369 		DP_TRACE_STATS(INFO_HIGH, "Enqueue fail: %u",
8370 			       pdev->stats.tx_i.dropped.enqueue_fail);
8371 		DP_TRACE_STATS(INFO_HIGH, "DMA Error: %u",
8372 			       pdev->stats.tx_i.dropped.dma_error);
8373 
8374 		DP_TRACE_STATS(INFO_HIGH, "Dropped in hardware:");
8375 		DP_TRACE_STATS(INFO_HIGH, "total packets dropped: %u",
8376 			       pdev->stats.tx.tx_failed);
8377 		DP_TRACE_STATS(INFO_HIGH, "mpdu age out: %u",
8378 			       pdev->stats.tx.dropped.age_out);
8379 		DP_TRACE_STATS(INFO_HIGH, "firmware removed packets: %u",
8380 			       pdev->stats.tx.dropped.fw_rem.num);
8381 		DP_TRACE_STATS(INFO_HIGH, "firmware removed bytes: %llu",
8382 			       pdev->stats.tx.dropped.fw_rem.bytes);
8383 		DP_TRACE_STATS(INFO_HIGH, "firmware removed tx: %u",
8384 			       pdev->stats.tx.dropped.fw_rem_tx);
		DP_TRACE_STATS(INFO_HIGH, "firmware removed notx: %u",
8386 			       pdev->stats.tx.dropped.fw_rem_notx);
8387 		DP_TRACE_STATS(INFO_HIGH, "Invalid peer on tx path: %u",
8388 			       pdev->soc->stats.tx.tx_invalid_peer.num);
8389 
8390 		DP_TRACE_STATS(INFO_HIGH, "Tx packets sent per interrupt:");
8391 		DP_TRACE_STATS(INFO_HIGH, "Single Packet: %u",
8392 			       pdev->stats.tx_comp_histogram.pkts_1);
8393 		DP_TRACE_STATS(INFO_HIGH, "2-20 Packets:  %u",
8394 			       pdev->stats.tx_comp_histogram.pkts_2_20);
8395 		DP_TRACE_STATS(INFO_HIGH, "21-40 Packets: %u",
8396 			       pdev->stats.tx_comp_histogram.pkts_21_40);
8397 		DP_TRACE_STATS(INFO_HIGH, "41-60 Packets: %u",
8398 			       pdev->stats.tx_comp_histogram.pkts_41_60);
8399 		DP_TRACE_STATS(INFO_HIGH, "61-80 Packets: %u",
8400 			       pdev->stats.tx_comp_histogram.pkts_61_80);
8401 		DP_TRACE_STATS(INFO_HIGH, "81-100 Packets: %u",
8402 			       pdev->stats.tx_comp_histogram.pkts_81_100);
8403 		DP_TRACE_STATS(INFO_HIGH, "101-200 Packets: %u",
8404 			       pdev->stats.tx_comp_histogram.pkts_101_200);
8405 		DP_TRACE_STATS(INFO_HIGH, "   201+ Packets: %u",
8406 			       pdev->stats.tx_comp_histogram.pkts_201_plus);
8407 
8408 		DP_TRACE_STATS(INFO_HIGH, "Rx path statistics");
8409 
8410 		DP_TRACE_STATS(INFO_HIGH,
8411 			       "delivered %u msdus ( %llu bytes),",
8412 			       pdev->stats.rx.to_stack.num,
8413 			       pdev->stats.rx.to_stack.bytes);
8414 		for (i = 0; i <  CDP_MAX_RX_RINGS; i++)
8415 			DP_TRACE_STATS(INFO_HIGH,
8416 				       "received on reo[%d] %u msdus( %llu bytes),",
8417 				       i, pdev->stats.rx.rcvd_reo[i].num,
8418 				       pdev->stats.rx.rcvd_reo[i].bytes);
8419 		DP_TRACE_STATS(INFO_HIGH,
8420 			       "intra-bss packets %u msdus ( %llu bytes),",
8421 			       pdev->stats.rx.intra_bss.pkts.num,
8422 			       pdev->stats.rx.intra_bss.pkts.bytes);
8423 		DP_TRACE_STATS(INFO_HIGH,
8424 			       "intra-bss fails %u msdus ( %llu bytes),",
8425 			       pdev->stats.rx.intra_bss.fail.num,
8426 			       pdev->stats.rx.intra_bss.fail.bytes);
8427 		DP_TRACE_STATS(INFO_HIGH,
8428 			       "raw packets %u msdus ( %llu bytes),",
8429 			       pdev->stats.rx.raw.num,
8430 			       pdev->stats.rx.raw.bytes);
		DP_TRACE_STATS(INFO_HIGH, "dropped: mic errors %u msdus",
8432 			       pdev->stats.rx.err.mic_err);
8433 		DP_TRACE_STATS(INFO_HIGH, "Invalid peer on rx path: %u",
8434 			       pdev->soc->stats.rx.err.rx_invalid_peer.num);
8435 
8436 		DP_TRACE_STATS(INFO_HIGH, "Reo Statistics");
8437 		DP_TRACE_STATS(INFO_HIGH, "rbm error: %u msdus",
8438 			       pdev->soc->stats.rx.err.invalid_rbm);
8439 		DP_TRACE_STATS(INFO_HIGH, "hal ring access fail: %u msdus",
8440 			       pdev->soc->stats.rx.err.hal_ring_access_fail);
8441 
8442 		for (error_code = 0; error_code < HAL_REO_ERR_MAX;
8443 				error_code++) {
8444 			if (!pdev->soc->stats.rx.err.reo_error[error_code])
8445 				continue;
8446 			DP_TRACE_STATS(INFO_HIGH,
8447 				       "Reo error number (%u): %u msdus",
8448 				       error_code,
8449 				       pdev->soc->stats.rx.err
8450 				       .reo_error[error_code]);
8451 		}
8452 
8453 		for (error_code = 0; error_code < HAL_RXDMA_ERR_MAX;
8454 				error_code++) {
8455 			if (!pdev->soc->stats.rx.err.rxdma_error[error_code])
8456 				continue;
8457 			DP_TRACE_STATS(INFO_HIGH,
8458 				       "Rxdma error number (%u): %u msdus",
8459 				       error_code,
8460 				       pdev->soc->stats.rx.err
8461 				       .rxdma_error[error_code]);
8462 		}
8463 
8464 		DP_TRACE_STATS(INFO_HIGH, "Rx packets reaped per interrupt:");
8465 		DP_TRACE_STATS(INFO_HIGH, "Single Packet: %u",
8466 			       pdev->stats.rx_ind_histogram.pkts_1);
8467 		DP_TRACE_STATS(INFO_HIGH, "2-20 Packets:  %u",
8468 			       pdev->stats.rx_ind_histogram.pkts_2_20);
8469 		DP_TRACE_STATS(INFO_HIGH, "21-40 Packets: %u",
8470 			       pdev->stats.rx_ind_histogram.pkts_21_40);
8471 		DP_TRACE_STATS(INFO_HIGH, "41-60 Packets: %u",
8472 			       pdev->stats.rx_ind_histogram.pkts_41_60);
8473 		DP_TRACE_STATS(INFO_HIGH, "61-80 Packets: %u",
8474 			       pdev->stats.rx_ind_histogram.pkts_61_80);
8475 		DP_TRACE_STATS(INFO_HIGH, "81-100 Packets: %u",
8476 			       pdev->stats.rx_ind_histogram.pkts_81_100);
8477 		DP_TRACE_STATS(INFO_HIGH, "101-200 Packets: %u",
8478 			       pdev->stats.rx_ind_histogram.pkts_101_200);
8479 		DP_TRACE_STATS(INFO_HIGH, "   201+ Packets: %u",
8480 			       pdev->stats.rx_ind_histogram.pkts_201_plus);
8481 
		DP_TRACE_STATS(INFO_HIGH,
			       "%s: tso_enable: %u lro_enable: %u rx_hash: %u napi_enable: %u",
			       __func__,
			       pdev->soc->wlan_cfg_ctx->tso_enabled,
			       pdev->soc->wlan_cfg_ctx->lro_enabled,
			       pdev->soc->wlan_cfg_ctx->rx_hash,
			       pdev->soc->wlan_cfg_ctx->napi_enabled);
8492 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
8493 		DP_TRACE_STATS(INFO_HIGH, "%s: Tx flow stop queue: %u tx flow start queue offset: %u",
8494 			       __func__,
8495 			       pdev->soc->wlan_cfg_ctx
8496 			       ->tx_flow_stop_queue_threshold,
8497 			       pdev->soc->wlan_cfg_ctx
8498 			       ->tx_flow_start_queue_offset);
8499 #endif
8500 	}
8501 }
8502 
8503 /*
 * dp_txrx_dump_stats() - Dump statistics
 * @psoc: soc handle
 * @value: statistics option
 * @level: verbosity level of the dump
 *
 * Return: QDF_STATUS
8506  */
8507 static QDF_STATUS dp_txrx_dump_stats(void *psoc, uint16_t value,
8508 				     enum qdf_stats_verbosity_level level)
8509 {
8510 	struct dp_soc *soc =
8511 		(struct dp_soc *)psoc;
8512 	QDF_STATUS status = QDF_STATUS_SUCCESS;
8513 
8514 	if (!soc) {
8515 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
8516 			"%s: soc is NULL", __func__);
8517 		return QDF_STATUS_E_INVAL;
8518 	}
8519 
8520 	switch (value) {
8521 	case CDP_TXRX_PATH_STATS:
8522 		dp_txrx_path_stats(soc);
8523 		break;
8524 
8525 	case CDP_RX_RING_STATS:
8526 		dp_print_per_ring_stats(soc);
8527 		break;
8528 
8529 	case CDP_TXRX_TSO_STATS:
8530 		/* TODO: NOT IMPLEMENTED */
8531 		break;
8532 
8533 	case CDP_DUMP_TX_FLOW_POOL_INFO:
8534 		cdp_dump_flow_pool_info((struct cdp_soc_t *)soc);
8535 		break;
8536 
8537 	case CDP_DP_NAPI_STATS:
8538 		dp_print_napi_stats(soc);
8539 		break;
8540 
8541 	case CDP_TXRX_DESC_STATS:
8542 		/* TODO: NOT IMPLEMENTED */
8543 		break;
8544 
8545 	default:
8546 		status = QDF_STATUS_E_INVAL;
8547 		break;
8548 	}
8549 
8550 	return status;
8551 
8552 }
8553 
8554 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
8555 /**
8556  * dp_update_flow_control_parameters() - API to store datapath
8557  *                            config parameters
8558  * @soc: soc handle
 * @params: ini parameter handle
8560  *
8561  * Return: void
8562  */
8563 static inline
8564 void dp_update_flow_control_parameters(struct dp_soc *soc,
8565 				struct cdp_config_params *params)
8566 {
8567 	soc->wlan_cfg_ctx->tx_flow_stop_queue_threshold =
8568 					params->tx_flow_stop_queue_threshold;
8569 	soc->wlan_cfg_ctx->tx_flow_start_queue_offset =
8570 					params->tx_flow_start_queue_offset;
8571 }
8572 #else
8573 static inline
8574 void dp_update_flow_control_parameters(struct dp_soc *soc,
8575 				struct cdp_config_params *params)
8576 {
8577 }
8578 #endif
8579 
8580 /**
8581  * dp_update_config_parameters() - API to store datapath
8582  *                            config parameters
 * @psoc: soc handle
 * @params: ini parameter handle
8585  *
8586  * Return: status
8587  */
8588 static
8589 QDF_STATUS dp_update_config_parameters(struct cdp_soc *psoc,
8590 				struct cdp_config_params *params)
8591 {
8592 	struct dp_soc *soc = (struct dp_soc *)psoc;
8593 
	if (!soc) {
8595 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
8596 				"%s: Invalid handle", __func__);
8597 		return QDF_STATUS_E_INVAL;
8598 	}
8599 
8600 	soc->wlan_cfg_ctx->tso_enabled = params->tso_enable;
8601 	soc->wlan_cfg_ctx->lro_enabled = params->lro_enable;
8602 	soc->wlan_cfg_ctx->rx_hash = params->flow_steering_enable;
8603 	soc->wlan_cfg_ctx->tcp_udp_checksumoffload =
8604 				params->tcp_udp_checksumoffload;
8605 	soc->wlan_cfg_ctx->napi_enabled = params->napi_enable;
8606 	soc->wlan_cfg_ctx->ipa_enabled = params->ipa_enable;
8607 	soc->wlan_cfg_ctx->gro_enabled = params->gro_enable;
8608 
8609 	dp_update_flow_control_parameters(soc, params);
8610 
8611 	return QDF_STATUS_SUCCESS;
8612 }
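
/*
 * Illustrative sketch: a hypothetical attach path pushing ini-derived
 * datapath knobs into the soc config context in one shot. The helper
 * name and the chosen values are assumptions.
 */
static inline QDF_STATUS
dp_example_push_ini_cfg(struct cdp_soc *psoc)
{
	struct cdp_config_params params = {0};

	params.tso_enable = 1;
	params.flow_steering_enable = 1;
	params.tcp_udp_checksumoffload = 1;
	params.napi_enable = 1;

	return dp_update_config_parameters(psoc, &params);
}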
8613 
8614 /**
 * dp_txrx_set_wds_rx_policy() - API to set the WDS rx policy on a vdev
 * @vdev_handle: datapath vdev handle
 * @val: WDS rx policy bitmap
 *
 * Return: void
8621  */
8622 #ifdef WDS_VENDOR_EXTENSION
8623 void
8624 dp_txrx_set_wds_rx_policy(
8625 		struct cdp_vdev *vdev_handle,
8626 		u_int32_t val)
8627 {
8628 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
	struct dp_peer *peer;

	if (vdev->opmode == wlan_op_mode_ap) {
8631 		/* for ap, set it on bss_peer */
8632 		TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
8633 			if (peer->bss_peer) {
8634 				peer->wds_ecm.wds_rx_filter = 1;
8635 				peer->wds_ecm.wds_rx_ucast_4addr = (val & WDS_POLICY_RX_UCAST_4ADDR) ? 1:0;
8636 				peer->wds_ecm.wds_rx_mcast_4addr = (val & WDS_POLICY_RX_MCAST_4ADDR) ? 1:0;
8637 				break;
8638 			}
8639 		}
8640 	} else if (vdev->opmode == wlan_op_mode_sta) {
8641 		peer = TAILQ_FIRST(&vdev->peer_list);
8642 		peer->wds_ecm.wds_rx_filter = 1;
8643 		peer->wds_ecm.wds_rx_ucast_4addr = (val & WDS_POLICY_RX_UCAST_4ADDR) ? 1:0;
8644 		peer->wds_ecm.wds_rx_mcast_4addr = (val & WDS_POLICY_RX_MCAST_4ADDR) ? 1:0;
8645 	}
8646 }
8647 
8648 /**
8649  * dp_txrx_peer_wds_tx_policy_update() - API to set tx wds policy
8650  *
8651  * @peer_handle - datapath peer handle
8652  * @wds_tx_ucast: policy for unicast transmission
8653  * @wds_tx_mcast: policy for multicast transmission
8654  *
8655  * Return: void
8656  */
8657 void
8658 dp_txrx_peer_wds_tx_policy_update(struct cdp_peer *peer_handle,
8659 		int wds_tx_ucast, int wds_tx_mcast)
8660 {
	struct dp_peer *peer = (struct dp_peer *)peer_handle;

	if (wds_tx_ucast || wds_tx_mcast) {
8663 		peer->wds_enabled = 1;
8664 		peer->wds_ecm.wds_tx_ucast_4addr = wds_tx_ucast;
8665 		peer->wds_ecm.wds_tx_mcast_4addr = wds_tx_mcast;
8666 	} else {
8667 		peer->wds_enabled = 0;
8668 		peer->wds_ecm.wds_tx_ucast_4addr = 0;
8669 		peer->wds_ecm.wds_tx_mcast_4addr = 0;
8670 	}
8671 
8672 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
8673 		  FL("Policy Update set to: wds_enabled %d"
8674 		     " wds_tx_ucast_4addr %d"
8675 		     " wds_tx_mcast_4addr %d"),
8676 		  peer->wds_enabled,
8677 		  peer->wds_ecm.wds_tx_ucast_4addr,
8678 		  peer->wds_ecm.wds_tx_mcast_4addr);
8680 }
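
/*
 * Illustrative sketch: enabling 4-address transmission for unicast only;
 * any non-zero policy marks the peer WDS-enabled, as implemented above.
 *
 *	dp_txrx_peer_wds_tx_policy_update(peer_handle, 1, 0);
 */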
8681 #endif
8682 
8683 static struct cdp_wds_ops dp_ops_wds = {
8684 	.vdev_set_wds = dp_vdev_set_wds,
8685 #ifdef WDS_VENDOR_EXTENSION
8686 	.txrx_set_wds_rx_policy = dp_txrx_set_wds_rx_policy,
8687 	.txrx_wds_peer_tx_policy_update = dp_txrx_peer_wds_tx_policy_update,
8688 #endif
8689 };
8690 
8691 /*
8692  * dp_txrx_data_tx_cb_set(): set the callback for non standard tx
8693  * @vdev_handle: datapath vdev handle
8694  * @callback: callback function
8695  * @ctxt: callback context
8696  *
 * Return: void
8697  */
8698 static void
8699 dp_txrx_data_tx_cb_set(struct cdp_vdev *vdev_handle,
8700 		       ol_txrx_data_tx_cb callback, void *ctxt)
8701 {
8702 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
8703 
8704 	vdev->tx_non_std_data_callback.func = callback;
8705 	vdev->tx_non_std_data_callback.ctxt = ctxt;
8706 }
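
/*
 * Illustrative sketch (callback body and my_ctxt are hypothetical; the
 * prototype is assumed to match the ol_txrx_data_tx_cb typedef):
 *
 *	static void my_tx_complete(void *ctxt, qdf_nbuf_t tx_frm,
 *				   int had_error)
 *	{
 *		qdf_nbuf_free(tx_frm);
 *	}
 *
 *	dp_txrx_data_tx_cb_set(vdev_handle, my_tx_complete, my_ctxt);
 */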
8707 
8708 /**
8709  * dp_pdev_get_dp_txrx_handle() - get dp handle from pdev
8710  * @pdev_hdl: datapath pdev handle
8711  *
8712  * Return: opaque pointer to dp txrx handle
8713  */
8714 static void *dp_pdev_get_dp_txrx_handle(struct cdp_pdev *pdev_hdl)
8715 {
8716 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_hdl;
8717 
8718 	return pdev->dp_txrx_handle;
8719 }
8720 
8721 /**
8722  * dp_pdev_set_dp_txrx_handle() - set dp handle in pdev
8723  * @pdev_hdl: datapath pdev handle
8724  * @dp_txrx_hdl: opaque pointer for dp_txrx_handle
8725  *
8726  * Return: void
8727  */
8728 static void
8729 dp_pdev_set_dp_txrx_handle(struct cdp_pdev *pdev_hdl, void *dp_txrx_hdl)
8730 {
8731 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_hdl;
8732 
8733 	pdev->dp_txrx_handle = dp_txrx_hdl;
8734 }
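
/*
 * Illustrative sketch: the handle is opaque to DP and is returned
 * unchanged by the getter (my_ctx is a hypothetical module context).
 *
 *	dp_pdev_set_dp_txrx_handle(pdev_hdl, my_ctx);
 *	qdf_assert(dp_pdev_get_dp_txrx_handle(pdev_hdl) == my_ctx);
 */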
8735 
8736 /**
8737  * dp_soc_get_dp_txrx_handle() - get context for external-dp from dp soc
8738  * @soc_handle: datapath soc handle
8739  *
8740  * Return: opaque pointer to external dp (non-core DP)
8741  */
8742 static void *dp_soc_get_dp_txrx_handle(struct cdp_soc *soc_handle)
8743 {
8744 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
8745 
8746 	return soc->external_txrx_handle;
8747 }
8748 
8749 /**
8750  * dp_soc_set_dp_txrx_handle() - set external dp handle in soc
8751  * @soc_handle: datapath soc handle
8752  * @txrx_handle: opaque pointer to external dp (non-core DP)
8753  *
8754  * Return: void
8755  */
8756 static void
8757 dp_soc_set_dp_txrx_handle(struct cdp_soc *soc_handle, void *txrx_handle)
8758 {
8759 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
8760 
8761 	soc->external_txrx_handle = txrx_handle;
8762 }
8763 
8764 /**
8765  * dp_get_cfg_capabilities() - get dp capabilities
8766  * @soc_handle: datapath soc handle
8767  * @dp_caps: enum for dp capabilities
8768  *
8769  * Return: true if the given dp capability is enabled
8770  */
8771 static bool
8772 dp_get_cfg_capabilities(struct cdp_soc_t *soc_handle,
8773 			enum cdp_capabilities dp_caps)
8774 {
8775 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
8776 
8777 	return wlan_cfg_get_dp_caps(soc->wlan_cfg_ctx, dp_caps);
8778 }
8779 
8780 #ifdef FEATURE_AST
/**
 * dp_peer_teardown_wifi3() - mark peer for deletion and remove its
 *			      AST entries
 * @vdev_hdl: DP vdev handle
 * @peer_hdl: DP peer handle
 *
 * Return: void
 */
8781 static void dp_peer_teardown_wifi3(struct cdp_vdev *vdev_hdl, void *peer_hdl)
8782 {
8783 	struct dp_vdev *vdev = (struct dp_vdev *) vdev_hdl;
8784 	struct dp_peer *peer = (struct dp_peer *) peer_hdl;
8785 	struct dp_soc *soc = (struct dp_soc *) vdev->pdev->soc;
8786 
8787 	/*
8788 	 * For the BSS peer, a new peer is not created on alloc_node if a
8789 	 * peer with the same address already exists; instead the refcnt of
8790 	 * the existing peer is increased. Correspondingly, in the delete
8791 	 * path only the refcnt is decreased and the peer is deleted only
8792 	 * when all references are dropped. So delete_in_progress should not
8793 	 * be set for the bss_peer unless only 2 references remain (the peer
8794 	 * map reference and the peer hash table reference).
8795 	 */
8796 	if (peer->bss_peer && (qdf_atomic_read(&peer->ref_cnt) > 2))
8797 		return;
8799 
8800 	peer->delete_in_progress = true;
8801 	dp_peer_delete_ast_entries(soc, peer);
8802 }
8803 #endif
8804 
8805 #ifdef ATH_SUPPORT_NAC_RSSI
8806 /**
8807  * dp_vdev_get_neighbour_rssi(): Retrieve RSSI for a configured NAC
8808  * @vdev_hdl: DP vdev handle
 * @mac_addr: neighbour peer mac address
8809  * @rssi: rssi value
8810  *
8811  * Return: QDF_STATUS_SUCCESS if the neighbour is found, error code otherwise
8812  */
8813 static QDF_STATUS dp_vdev_get_neighbour_rssi(struct cdp_vdev *vdev_hdl,
8814 					      char *mac_addr,
8815 					      uint8_t *rssi)
8816 {
8817 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_hdl;
8818 	struct dp_pdev *pdev = vdev->pdev;
8819 	struct dp_neighbour_peer *peer = NULL;
8820 	QDF_STATUS status = QDF_STATUS_E_FAILURE;
8821 
8822 	*rssi = 0;
8823 	qdf_spin_lock_bh(&pdev->neighbour_peer_mutex);
8824 	TAILQ_FOREACH(peer, &pdev->neighbour_peers_list,
8825 		      neighbour_peer_list_elem) {
8826 		if (qdf_mem_cmp(&peer->neighbour_peers_macaddr.raw[0],
8827 				mac_addr, DP_MAC_ADDR_LEN) == 0) {
8828 			*rssi = peer->rssi;
8829 			status = QDF_STATUS_SUCCESS;
8830 			break;
8831 		}
8832 	}
8833 	qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex);
8834 	return status;
8835 }
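
/*
 * Illustrative sketch (client_mac and the reporting helper are
 * hypothetical): read back the last RSSI snooped for a configured
 * neighbour client.
 *
 *	uint8_t rssi;
 *
 *	if (dp_vdev_get_neighbour_rssi(vdev_hdl, client_mac, &rssi) ==
 *	    QDF_STATUS_SUCCESS)
 *		report_nac_rssi(client_mac, rssi);
 */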
8836 
/**
 * dp_config_for_nac_rssi() - configure NAC RSSI measurement
 * @vdev_handle: DP vdev handle
 * @cmd: add/del command for the neighbour (NAC) entry
 * @bssid: BSSID to be configured in FW for NAC RSSI measurement
 * @client_macaddr: neighbour client mac address
 * @chan_num: channel number
 *
 * Return: QDF_STATUS
 */
8837 static QDF_STATUS dp_config_for_nac_rssi(struct cdp_vdev *vdev_handle,
8838 		enum cdp_nac_param_cmd cmd, char *bssid, char *client_macaddr,
8839 		uint8_t chan_num)
8840 {
8842 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
8843 	struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;
8844 	struct dp_soc *soc = pdev->soc;
8845 
8846 	pdev->nac_rssi_filtering = 1;
8847 	/* Store address of NAC (neighbour peer) which will be checked
8848 	 * against TA of received packets.
8849 	 */
8850 
8851 	if (cmd == CDP_NAC_PARAM_ADD) {
8852 		dp_update_filter_neighbour_peers(vdev_handle, DP_NAC_PARAM_ADD,
8853 						 client_macaddr);
8854 	} else if (cmd == CDP_NAC_PARAM_DEL) {
8855 		dp_update_filter_neighbour_peers(vdev_handle,
8856 						 DP_NAC_PARAM_DEL,
8857 						 client_macaddr);
8858 	}
8859 
8860 	if (soc->cdp_soc.ol_ops->config_bssid_in_fw_for_nac_rssi)
8861 		soc->cdp_soc.ol_ops->config_bssid_in_fw_for_nac_rssi
8862 			((void *)vdev->pdev->ctrl_pdev,
8863 			 vdev->vdev_id, cmd, bssid);
8864 
8865 	return QDF_STATUS_SUCCESS;
8866 }
8867 #endif
8868 
8869 /**
8870  * dp_enable_peer_based_pktlog() - Set Flag for peer based filtering
8871  * for pktlog
8872  * @txrx_pdev_handle: cdp_pdev handle
 * @mac_addr: peer mac address
8873  * @enb_dsb: Enable or disable peer based filtering
8874  *
8875  * Return: QDF_STATUS
8876  */
8877 static int
8878 dp_enable_peer_based_pktlog(
8879 	struct cdp_pdev *txrx_pdev_handle,
8880 	char *mac_addr, uint8_t enb_dsb)
8881 {
8882 	struct dp_peer *peer;
8883 	uint8_t local_id;
8884 	struct dp_pdev *pdev = (struct dp_pdev *)txrx_pdev_handle;
8885 
8886 	peer = (struct dp_peer *)dp_find_peer_by_addr(txrx_pdev_handle,
8887 			mac_addr, &local_id);
8888 
8889 	if (!peer) {
8890 		dp_err("Invalid Peer");
8891 		return QDF_STATUS_E_FAILURE;
8892 	}
8893 
8894 	peer->peer_based_pktlog_filter = enb_dsb;
8895 	pdev->dp_peer_based_pktlog = enb_dsb;
8896 
8897 	return QDF_STATUS_SUCCESS;
8898 }
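
/*
 * Illustrative sketch (peer_mac assumed at the call site): turn on
 * pktlog filtering for a single peer by mac address.
 *
 *	dp_enable_peer_based_pktlog(txrx_pdev_handle, peer_mac, 1);
 */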
8899 
/**
 * dp_peer_map_attach_wifi3() - set max peers and peer map version
 * @soc_hdl: DP soc handle
 * @max_peers: maximum number of peers supported by FW
 * @peer_map_unmap_v2: flag to use the v2 peer map/unmap messages
 *
 * Return: QDF_STATUS
 */
8900 static QDF_STATUS dp_peer_map_attach_wifi3(struct cdp_soc_t *soc_hdl,
8901 					   uint32_t max_peers,
8902 					   bool peer_map_unmap_v2)
8903 {
8904 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
8905 
8906 	soc->max_peers = max_peers;
8907 
8908 	qdf_print("%s max_peers %u\n", __func__, max_peers);
8909 
8910 	if (dp_peer_find_attach(soc))
8911 		return QDF_STATUS_E_FAILURE;
8912 
8913 	soc->is_peer_map_unmap_v2 = peer_map_unmap_v2;
8914 
8915 	return QDF_STATUS_SUCCESS;
8916 }
8917 
8918 /**
8919  * dp_pdev_set_ctrl_pdev() - set ctrl pdev handle in dp pdev
8920  * @dp_pdev: dp pdev handle
8921  * @ctrl_pdev: UMAC ctrl pdev handle
8922  *
8923  * Return: void
8924  */
8925 static void dp_pdev_set_ctrl_pdev(struct cdp_pdev *dp_pdev,
8926 				  struct cdp_ctrl_objmgr_pdev *ctrl_pdev)
8927 {
8928 	struct dp_pdev *pdev = (struct dp_pdev *)dp_pdev;
8929 
8930 	pdev->ctrl_pdev = ctrl_pdev;
8931 }
8932 
8933 /*
8934  * dp_get_cfg() - get dp cfg
8935  * @soc: cdp soc handle
8936  * @cfg: cfg enum
8937  *
8938  * Return: cfg value
8939  */
8940 static uint32_t dp_get_cfg(void *soc, enum cdp_dp_cfg cfg)
8941 {
8942 	struct dp_soc *dpsoc = (struct dp_soc *)soc;
8943 	uint32_t value = 0;
8944 
8945 	switch (cfg) {
8946 	case cfg_dp_enable_data_stall:
8947 		value = dpsoc->wlan_cfg_ctx->enable_data_stall_detection;
8948 		break;
8949 	case cfg_dp_enable_ip_tcp_udp_checksum_offload:
8950 		value = dpsoc->wlan_cfg_ctx->tcp_udp_checksumoffload;
8951 		break;
8952 	case cfg_dp_tso_enable:
8953 		value = dpsoc->wlan_cfg_ctx->tso_enabled;
8954 		break;
8955 	case cfg_dp_lro_enable:
8956 		value = dpsoc->wlan_cfg_ctx->lro_enabled;
8957 		break;
8958 	case cfg_dp_gro_enable:
8959 		value = dpsoc->wlan_cfg_ctx->gro_enabled;
8960 		break;
8961 	case cfg_dp_tx_flow_start_queue_offset:
8962 		value = dpsoc->wlan_cfg_ctx->tx_flow_start_queue_offset;
8963 		break;
8964 	case cfg_dp_tx_flow_stop_queue_threshold:
8965 		value = dpsoc->wlan_cfg_ctx->tx_flow_stop_queue_threshold;
8966 		break;
8967 	case cfg_dp_disable_intra_bss_fwd:
8968 		value = dpsoc->wlan_cfg_ctx->disable_intra_bss_fwd;
8969 		break;
8970 	default:
8971 		value =  0;
8972 	}
8973 
8974 	return value;
8975 }
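
/*
 * Illustrative sketch: callers read individual cfg items as plain
 * uint32_t values, e.g. to decide whether to enable GRO in the OS shim:
 *
 *	if (dp_get_cfg(soc, cfg_dp_gro_enable))
 *		...enable GRO on the netdev...
 */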
8976 
8977 static struct cdp_cmn_ops dp_ops_cmn = {
8978 	.txrx_soc_attach_target = dp_soc_attach_target_wifi3,
8979 	.txrx_vdev_attach = dp_vdev_attach_wifi3,
8980 	.txrx_vdev_detach = dp_vdev_detach_wifi3,
8981 	.txrx_pdev_attach = dp_pdev_attach_wifi3,
8982 	.txrx_pdev_detach = dp_pdev_detach_wifi3,
8983 	.txrx_pdev_deinit = dp_pdev_deinit_wifi3,
8984 	.txrx_peer_create = dp_peer_create_wifi3,
8985 	.txrx_peer_setup = dp_peer_setup_wifi3,
8986 #ifdef FEATURE_AST
8987 	.txrx_peer_teardown = dp_peer_teardown_wifi3,
8988 #else
8989 	.txrx_peer_teardown = NULL,
8990 #endif
8991 	.txrx_peer_add_ast = dp_peer_add_ast_wifi3,
8992 	.txrx_peer_update_ast = dp_peer_update_ast_wifi3,
8993 	.txrx_peer_get_ast_info_by_soc = dp_peer_get_ast_info_by_soc_wifi3,
8994 	.txrx_peer_get_ast_info_by_pdev =
8995 		dp_peer_get_ast_info_by_pdevid_wifi3,
8996 	.txrx_peer_ast_delete_by_soc =
8997 		dp_peer_ast_entry_del_by_soc,
8998 	.txrx_peer_ast_delete_by_pdev =
8999 		dp_peer_ast_entry_del_by_pdev,
9000 	.txrx_peer_delete = dp_peer_delete_wifi3,
9001 	.txrx_vdev_register = dp_vdev_register_wifi3,
9002 	.txrx_soc_detach = dp_soc_detach_wifi3,
9003 	.txrx_soc_deinit = dp_soc_deinit_wifi3,
9004 	.txrx_soc_init = dp_soc_init_wifi3,
9005 	.txrx_tso_soc_attach = dp_tso_soc_attach,
9006 	.txrx_tso_soc_detach = dp_tso_soc_detach,
9007 	.txrx_get_vdev_mac_addr = dp_get_vdev_mac_addr_wifi3,
9008 	.txrx_get_vdev_from_vdev_id = dp_get_vdev_from_vdev_id_wifi3,
9009 	.txrx_get_mon_vdev_from_pdev = dp_get_mon_vdev_from_pdev_wifi3,
9010 	.txrx_get_ctrl_pdev_from_vdev = dp_get_ctrl_pdev_from_vdev_wifi3,
9011 	.txrx_ath_getstats = dp_get_device_stats,
9012 	.addba_requestprocess = dp_addba_requestprocess_wifi3,
9013 	.addba_responsesetup = dp_addba_responsesetup_wifi3,
9014 	.addba_resp_tx_completion = dp_addba_resp_tx_completion_wifi3,
9015 	.delba_process = dp_delba_process_wifi3,
9016 	.set_addba_response = dp_set_addba_response,
9017 	.get_peer_mac_addr_frm_id = dp_get_peer_mac_addr_frm_id,
9018 	.flush_cache_rx_queue = NULL,
9019 	/* TODO: get API's for dscp-tid need to be added*/
9020 	.set_vdev_dscp_tid_map = dp_set_vdev_dscp_tid_map_wifi3,
9021 	.set_pdev_dscp_tid_map = dp_set_pdev_dscp_tid_map_wifi3,
9022 	.hmmc_tid_override_en = dp_hmmc_tid_override_en_wifi3,
9023 	.set_hmmc_tid_val = dp_set_hmmc_tid_val_wifi3,
9024 	.txrx_get_total_per = dp_get_total_per,
9025 	.txrx_stats_request = dp_txrx_stats_request,
9026 	.txrx_set_monitor_mode = dp_vdev_set_monitor_mode,
9027 	.txrx_get_pdev_id_frm_pdev = dp_get_pdev_id_frm_pdev,
9028 	.txrx_get_vow_config_frm_pdev = NULL,
9029 	.txrx_pdev_set_chan_noise_floor = dp_pdev_set_chan_noise_floor,
9030 	.txrx_set_nac = dp_set_nac,
9031 	.txrx_get_tx_pending = dp_get_tx_pending,
9032 	.txrx_set_pdev_tx_capture = dp_config_debug_sniffer,
9033 	.txrx_get_peer_mac_from_peer_id = dp_get_peer_mac_from_peer_id,
9034 	.display_stats = dp_txrx_dump_stats,
9035 	.txrx_soc_set_nss_cfg = dp_soc_set_nss_cfg_wifi3,
9036 	.txrx_soc_get_nss_cfg = dp_soc_get_nss_cfg_wifi3,
9037 	.txrx_intr_attach = dp_soc_interrupt_attach_wrapper,
9038 	.txrx_intr_detach = dp_soc_interrupt_detach,
9039 	.set_pn_check = dp_set_pn_check_wifi3,
9040 	.update_config_parameters = dp_update_config_parameters,
9041 	/* TODO: Add other functions */
9042 	.txrx_data_tx_cb_set = dp_txrx_data_tx_cb_set,
9043 	.get_dp_txrx_handle = dp_pdev_get_dp_txrx_handle,
9044 	.set_dp_txrx_handle = dp_pdev_set_dp_txrx_handle,
9045 	.get_soc_dp_txrx_handle = dp_soc_get_dp_txrx_handle,
9046 	.set_soc_dp_txrx_handle = dp_soc_set_dp_txrx_handle,
9047 	.txrx_set_ba_aging_timeout = dp_set_ba_aging_timeout,
9048 	.txrx_get_ba_aging_timeout = dp_get_ba_aging_timeout,
9049 	.tx_send = dp_tx_send,
9050 	.txrx_peer_reset_ast = dp_wds_reset_ast_wifi3,
9051 	.txrx_peer_reset_ast_table = dp_wds_reset_ast_table_wifi3,
9052 	.txrx_peer_flush_ast_table = dp_wds_flush_ast_table_wifi3,
9053 	.txrx_peer_map_attach = dp_peer_map_attach_wifi3,
9054 	.txrx_pdev_set_ctrl_pdev = dp_pdev_set_ctrl_pdev,
9055 	.txrx_get_os_rx_handles_from_vdev =
9056 					dp_get_os_rx_handles_from_vdev_wifi3,
9057 	.delba_tx_completion = dp_delba_tx_completion_wifi3,
9058 	.get_dp_capabilities = dp_get_cfg_capabilities,
9059 	.txrx_get_cfg = dp_get_cfg,
9060 };
9061 
9062 static struct cdp_ctrl_ops dp_ops_ctrl = {
9063 	.txrx_peer_authorize = dp_peer_authorize,
9064 	.txrx_set_vdev_rx_decap_type = dp_set_vdev_rx_decap_type,
9065 	.txrx_set_tx_encap_type = dp_set_vdev_tx_encap_type,
9066 #ifdef MESH_MODE_SUPPORT
9067 	.txrx_set_mesh_mode  = dp_peer_set_mesh_mode,
9068 	.txrx_set_mesh_rx_filter = dp_peer_set_mesh_rx_filter,
9069 #endif
9070 	.txrx_set_vdev_param = dp_set_vdev_param,
9071 	.txrx_peer_set_nawds = dp_peer_set_nawds,
9072 	.txrx_set_pdev_reo_dest = dp_set_pdev_reo_dest,
9073 	.txrx_get_pdev_reo_dest = dp_get_pdev_reo_dest,
9074 	.txrx_set_filter_neighbour_peers = dp_set_filter_neighbour_peers,
9075 	.txrx_update_filter_neighbour_peers =
9076 		dp_update_filter_neighbour_peers,
9077 	.txrx_get_sec_type = dp_get_sec_type,
9078 	/* TODO: Add other functions */
9079 	.txrx_wdi_event_sub = dp_wdi_event_sub,
9080 	.txrx_wdi_event_unsub = dp_wdi_event_unsub,
9081 #ifdef WDI_EVENT_ENABLE
9082 	.txrx_get_pldev = dp_get_pldev,
9083 #endif
9084 	.txrx_set_pdev_param = dp_set_pdev_param,
9085 #ifdef ATH_SUPPORT_NAC_RSSI
9086 	.txrx_vdev_config_for_nac_rssi = dp_config_for_nac_rssi,
9087 	.txrx_vdev_get_neighbour_rssi = dp_vdev_get_neighbour_rssi,
9088 #endif
9089 	.set_key = dp_set_michael_key,
9090 	.txrx_get_vdev_param = dp_get_vdev_param,
9091 	.enable_peer_based_pktlog = dp_enable_peer_based_pktlog,
9092 };
9093 
9094 static struct cdp_me_ops dp_ops_me = {
9095 #ifdef ATH_SUPPORT_IQUE
9096 	.tx_me_alloc_descriptor = dp_tx_me_alloc_descriptor,
9097 	.tx_me_free_descriptor = dp_tx_me_free_descriptor,
9098 	.tx_me_convert_ucast = dp_tx_me_send_convert_ucast,
9099 #endif
9100 	.tx_me_find_ast_entry = NULL,
9101 };
9102 
9103 static struct cdp_mon_ops dp_ops_mon = {
9104 	.txrx_monitor_set_filter_ucast_data = NULL,
9105 	.txrx_monitor_set_filter_mcast_data = NULL,
9106 	.txrx_monitor_set_filter_non_data = NULL,
9107 	.txrx_monitor_get_filter_ucast_data = dp_vdev_get_filter_ucast_data,
9108 	.txrx_monitor_get_filter_mcast_data = dp_vdev_get_filter_mcast_data,
9109 	.txrx_monitor_get_filter_non_data = dp_vdev_get_filter_non_data,
9110 	.txrx_reset_monitor_mode = dp_reset_monitor_mode,
9111 	/* Added support for HK advance filter */
9112 	.txrx_set_advance_monitor_filter = dp_pdev_set_advance_monitor_filter,
9113 };
9114 
9115 static struct cdp_host_stats_ops dp_ops_host_stats = {
9116 	.txrx_per_peer_stats = dp_get_host_peer_stats,
9117 	.get_fw_peer_stats = dp_get_fw_peer_stats,
9118 	.get_htt_stats = dp_get_htt_stats,
9119 	.txrx_enable_enhanced_stats = dp_enable_enhanced_stats,
9120 	.txrx_disable_enhanced_stats = dp_disable_enhanced_stats,
9121 	.txrx_stats_publish = dp_txrx_stats_publish,
9122 	.txrx_get_vdev_stats  = dp_txrx_get_vdev_stats,
9123 	.txrx_get_peer_stats = dp_txrx_get_peer_stats,
9124 	.txrx_reset_peer_stats = dp_txrx_reset_peer_stats,
9125 	.txrx_get_pdev_stats = dp_txrx_get_pdev_stats,
9126 	/* TODO */
9127 };
9128 
9129 static struct cdp_raw_ops dp_ops_raw = {
9130 	/* TODO */
9131 };
9132 
9133 #ifdef CONFIG_WIN
9134 static struct cdp_pflow_ops dp_ops_pflow = {
9135 	/* TODO */
9136 };
9137 #endif /* CONFIG_WIN */
9138 
9139 #ifdef FEATURE_RUNTIME_PM
9140 /**
9141  * dp_runtime_suspend() - ensure DP is ready to runtime suspend
9142  * @opaque_pdev: DP pdev context
9143  *
9144  * DP is ready to runtime suspend if there are no pending TX packets.
9145  *
9146  * Return: QDF_STATUS
9147  */
9148 static QDF_STATUS dp_runtime_suspend(struct cdp_pdev *opaque_pdev)
9149 {
9150 	struct dp_pdev *pdev = (struct dp_pdev *)opaque_pdev;
9151 	struct dp_soc *soc = pdev->soc;
9152 
9153 	/* Abort if there are any pending TX packets */
9154 	if (dp_get_tx_pending(opaque_pdev) > 0) {
9155 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
9156 			  FL("Abort suspend due to pending TX packets"));
9157 		return QDF_STATUS_E_AGAIN;
9158 	}
9159 
9160 	if (soc->intr_mode == DP_INTR_POLL)
9161 		qdf_timer_stop(&soc->int_timer);
9162 
9163 	return QDF_STATUS_SUCCESS;
9164 }
9165 
9166 /**
9167  * dp_runtime_resume() - ensure DP is ready to runtime resume
9168  * @opaque_pdev: DP pdev context
9169  *
9170  * Resume DP for runtime PM.
9171  *
9172  * Return: QDF_STATUS
9173  */
9174 static QDF_STATUS dp_runtime_resume(struct cdp_pdev *opaque_pdev)
9175 {
9176 	struct dp_pdev *pdev = (struct dp_pdev *)opaque_pdev;
9177 	struct dp_soc *soc = pdev->soc;
9178 	void *hal_srng;
9179 	int i;
9180 
9181 	if (soc->intr_mode == DP_INTR_POLL)
9182 		qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
9183 
9184 	for (i = 0; i < MAX_TCL_DATA_RINGS; i++) {
9185 		hal_srng = soc->tcl_data_ring[i].hal_srng;
9186 		if (hal_srng) {
9187 			/* We actually only need to acquire the lock */
9188 			hal_srng_access_start(soc->hal_soc, hal_srng);
9189 			/*
			 * Update SRC ring head pointer for HW to send
			 * all pending packets
			 */
9191 			hal_srng_access_end(soc->hal_soc, hal_srng);
9192 		}
9193 	}
9194 
9195 	return QDF_STATUS_SUCCESS;
9196 }
9197 #endif /* FEATURE_RUNTIME_PM */
9198 
9199 static QDF_STATUS dp_bus_suspend(struct cdp_pdev *opaque_pdev)
9200 {
9201 	struct dp_pdev *pdev = (struct dp_pdev *)opaque_pdev;
9202 	struct dp_soc *soc = pdev->soc;
9203 	int timeout = SUSPEND_DRAIN_WAIT;
9204 	int drain_wait_delay = 50; /* 50 ms */
9205 
9206 	/* Abort if there are any pending TX packets */
9207 	while (dp_get_tx_pending(opaque_pdev) > 0) {
9208 		qdf_sleep(drain_wait_delay);
9209 		if (timeout <= 0) {
9210 			dp_err("TX frames are pending, abort suspend");
9211 			return QDF_STATUS_E_TIMEOUT;
9212 		}
9213 		timeout = timeout - drain_wait_delay;
9214 	}
9215 
9217 	if (soc->intr_mode == DP_INTR_POLL)
9218 		qdf_timer_stop(&soc->int_timer);
9219 
9220 	return QDF_STATUS_SUCCESS;
9221 }
9222 
9223 static QDF_STATUS dp_bus_resume(struct cdp_pdev *opaque_pdev)
9224 {
9225 	struct dp_pdev *pdev = (struct dp_pdev *)opaque_pdev;
9226 	struct dp_soc *soc = pdev->soc;
9227 
9228 	if (soc->intr_mode == DP_INTR_POLL)
9229 		qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
9230 
9231 	return QDF_STATUS_SUCCESS;
9232 }
9233 
9234 #ifndef CONFIG_WIN
9235 static struct cdp_misc_ops dp_ops_misc = {
9236 	.tx_non_std = dp_tx_non_std,
9237 	.get_opmode = dp_get_opmode,
9238 #ifdef FEATURE_RUNTIME_PM
9239 	.runtime_suspend = dp_runtime_suspend,
9240 	.runtime_resume = dp_runtime_resume,
9241 #endif /* FEATURE_RUNTIME_PM */
9242 	.pkt_log_init = dp_pkt_log_init,
9243 	.pkt_log_con_service = dp_pkt_log_con_service,
9244 	.get_num_rx_contexts = dp_get_num_rx_contexts,
9245 };
9246 
9247 static struct cdp_flowctl_ops dp_ops_flowctl = {
9248 	/* WIFI 3.0 DP implement as required. */
9249 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
9250 	.flow_pool_map_handler = dp_tx_flow_pool_map,
9251 	.flow_pool_unmap_handler = dp_tx_flow_pool_unmap,
9252 	.register_pause_cb = dp_txrx_register_pause_cb,
9253 	.dump_flow_pool_info = dp_tx_dump_flow_pool_info,
9254 	.tx_desc_thresh_reached = dp_tx_desc_thresh_reached,
9255 #endif /* QCA_LL_TX_FLOW_CONTROL_V2 */
9256 };
9257 
9258 static struct cdp_lflowctl_ops dp_ops_l_flowctl = {
9259 	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
9260 };
9261 
9262 #ifdef IPA_OFFLOAD
9263 static struct cdp_ipa_ops dp_ops_ipa = {
9264 	.ipa_get_resource = dp_ipa_get_resource,
9265 	.ipa_set_doorbell_paddr = dp_ipa_set_doorbell_paddr,
9266 	.ipa_op_response = dp_ipa_op_response,
9267 	.ipa_register_op_cb = dp_ipa_register_op_cb,
9268 	.ipa_get_stat = dp_ipa_get_stat,
9269 	.ipa_tx_data_frame = dp_tx_send_ipa_data_frame,
9270 	.ipa_enable_autonomy = dp_ipa_enable_autonomy,
9271 	.ipa_disable_autonomy = dp_ipa_disable_autonomy,
9272 	.ipa_setup = dp_ipa_setup,
9273 	.ipa_cleanup = dp_ipa_cleanup,
9274 	.ipa_setup_iface = dp_ipa_setup_iface,
9275 	.ipa_cleanup_iface = dp_ipa_cleanup_iface,
9276 	.ipa_enable_pipes = dp_ipa_enable_pipes,
9277 	.ipa_disable_pipes = dp_ipa_disable_pipes,
9278 	.ipa_set_perf_level = dp_ipa_set_perf_level
9279 };
9280 #endif
9281 
9282 static struct cdp_bus_ops dp_ops_bus = {
9283 	.bus_suspend = dp_bus_suspend,
9284 	.bus_resume = dp_bus_resume
9285 };
9286 
9287 static struct cdp_ocb_ops dp_ops_ocb = {
9288 	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
9289 };
9290 
9291 
9292 static struct cdp_throttle_ops dp_ops_throttle = {
9293 	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
9294 };
9295 
9296 static struct cdp_mob_stats_ops dp_ops_mob_stats = {
9297 	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
9298 };
9299 
9300 static struct cdp_cfg_ops dp_ops_cfg = {
9301 	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
9302 };
9303 
9304 /*
9305  * dp_peer_get_ref_find_by_addr - find peer by mac addr and take a reference
9306  * @dev: physical device instance
9307  * @peer_mac_addr: peer mac address
9308  * @local_id: local id for the peer
9309  * @debug_id: to track enum peer access
9310  *
9311  * Return: peer handle with its reference count incremented; the
 *	   caller must release it via dp_peer_release_ref()
9312  */
9313 static inline void *
9314 dp_peer_get_ref_find_by_addr(struct cdp_pdev *dev, uint8_t *peer_mac_addr,
9315 			     uint8_t *local_id,
9316 			     enum peer_debug_id_type debug_id)
9317 {
9318 	struct dp_pdev *pdev = (struct dp_pdev *)dev;
9319 	struct dp_peer *peer;
9320 
9321 	peer = dp_peer_find_hash_find(pdev->soc, peer_mac_addr, 0, DP_VDEV_ALL);
9322 
9323 	if (!peer)
9324 		return NULL;
9325 
9326 	*local_id = peer->local_id;
9327 	DP_TRACE(INFO, "%s: peer %pK id %d", __func__, peer, *local_id);
9328 
9329 	return peer;
9330 }
9331 
9332 /*
9333  * dp_peer_release_ref - release peer ref count
9334  * @peer: peer handle
9335  * @debug_id: to track enum peer access
9336  *
9337  * Return: None
9338  */
9339 static inline
9340 void dp_peer_release_ref(void *peer, enum peer_debug_id_type debug_id)
9341 {
9342 	dp_peer_unref_delete(peer);
9343 }
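
/*
 * Illustrative sketch: every successful dp_peer_get_ref_find_by_addr()
 * must be paired with dp_peer_release_ref() once the caller is done with
 * the peer (the debug id value shown is an example).
 *
 *	uint8_t local_id;
 *	void *peer;
 *
 *	peer = dp_peer_get_ref_find_by_addr(dev, mac, &local_id,
 *					    PEER_DEBUG_ID_OL_INTERNAL);
 *	if (peer) {
 *		...use peer...
 *		dp_peer_release_ref(peer, PEER_DEBUG_ID_OL_INTERNAL);
 *	}
 */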
9344 
9345 static struct cdp_peer_ops dp_ops_peer = {
9346 	.register_peer = dp_register_peer,
9347 	.clear_peer = dp_clear_peer,
9348 	.find_peer_by_addr = dp_find_peer_by_addr,
9349 	.find_peer_by_addr_and_vdev = dp_find_peer_by_addr_and_vdev,
9350 	.peer_get_ref_by_addr = dp_peer_get_ref_find_by_addr,
9351 	.peer_release_ref = dp_peer_release_ref,
9352 	.local_peer_id = dp_local_peer_id,
9353 	.peer_find_by_local_id = dp_peer_find_by_local_id,
9354 	.peer_state_update = dp_peer_state_update,
9355 	.get_vdevid = dp_get_vdevid,
9356 	.get_vdev_by_sta_id = dp_get_vdev_by_sta_id,
9357 	.peer_get_peer_mac_addr = dp_peer_get_peer_mac_addr,
9358 	.get_vdev_for_peer = dp_get_vdev_for_peer,
9359 	.get_peer_state = dp_get_peer_state,
9360 };
9361 #endif
9362 
9363 static struct cdp_ops dp_txrx_ops = {
9364 	.cmn_drv_ops = &dp_ops_cmn,
9365 	.ctrl_ops = &dp_ops_ctrl,
9366 	.me_ops = &dp_ops_me,
9367 	.mon_ops = &dp_ops_mon,
9368 	.host_stats_ops = &dp_ops_host_stats,
9369 	.wds_ops = &dp_ops_wds,
9370 	.raw_ops = &dp_ops_raw,
9371 #ifdef CONFIG_WIN
9372 	.pflow_ops = &dp_ops_pflow,
9373 #endif /* CONFIG_WIN */
9374 #ifndef CONFIG_WIN
9375 	.misc_ops = &dp_ops_misc,
9376 	.cfg_ops = &dp_ops_cfg,
9377 	.flowctl_ops = &dp_ops_flowctl,
9378 	.l_flowctl_ops = &dp_ops_l_flowctl,
9379 #ifdef IPA_OFFLOAD
9380 	.ipa_ops = &dp_ops_ipa,
9381 #endif
9382 	.bus_ops = &dp_ops_bus,
9383 	.ocb_ops = &dp_ops_ocb,
9384 	.peer_ops = &dp_ops_peer,
9385 	.throttle_ops = &dp_ops_throttle,
9386 	.mob_stats_ops = &dp_ops_mob_stats,
9387 #endif
9388 };
9389 
9390 /*
9391  * dp_soc_set_txrx_ring_map() - fill the default tx/rx CPU ring map
9392  * @soc: DP soc handle
9393  *
9394  * Return: Void
9395  */
9396 static void dp_soc_set_txrx_ring_map(struct dp_soc *soc)
9397 {
9398 	uint32_t i;

9399 	for (i = 0; i < WLAN_CFG_INT_NUM_CONTEXTS; i++)
9400 		soc->tx_ring_map[i] = dp_cpu_ring_map[DP_NSS_DEFAULT_MAP][i];
9402 }
9403 
9404 #ifdef QCA_WIFI_QCA8074
9405 
9406 #ifndef QCA_MEM_ATTACH_ON_WIFI3
9407 
9408 /**
9409  * dp_soc_attach_wifi3() - Attach txrx SOC
9410  * @ctrl_psoc: Opaque SOC handle from control plane
9411  * @hif_handle: Opaque HIF handle
9412  * @htc_handle: Opaque HTC handle
9413  * @qdf_osdev: QDF device
9414  * @ol_ops: Offload Operations
9415  * @device_id: Device ID
9416  *
9417  * Return: DP SOC handle on success, NULL on failure
9418  */
9419 void *dp_soc_attach_wifi3(void *ctrl_psoc, void *hif_handle,
9420 			  HTC_HANDLE htc_handle, qdf_device_t qdf_osdev,
9421 			  struct ol_if_ops *ol_ops, uint16_t device_id)
9422 {
9423 	struct dp_soc *dp_soc = NULL;
9424 
9425 	dp_soc = dp_soc_attach(ctrl_psoc, htc_handle, qdf_osdev,
9426 			       ol_ops, device_id);
9427 	if (!dp_soc)
9428 		return NULL;
9429 
9430 	if (!dp_soc_init(dp_soc, htc_handle, hif_handle))
9431 		return NULL;
9432 
9433 	return (void *)dp_soc;
9434 }
9435 #else
9436 
9437 /**
9438  * dp_soc_attach_wifi3() - Attach txrx SOC
9439  * @ctrl_psoc: Opaque SOC handle from control plane
9440  * @hif_handle: Opaque HIF handle
9441  * @htc_handle: Opaque HTC handle
9442  * @qdf_osdev: QDF device
9443  * @ol_ops: Offload Operations
9444  * @device_id: Device ID
9445  *
9446  * Return: DP SOC handle on success, NULL on failure
9447  */
9448 void *dp_soc_attach_wifi3(void *ctrl_psoc, void *hif_handle,
9449 			  HTC_HANDLE htc_handle, qdf_device_t qdf_osdev,
9450 			  struct ol_if_ops *ol_ops, uint16_t device_id)
9451 {
9452 	struct dp_soc *dp_soc = NULL;
9453 
9454 	dp_soc = dp_soc_attach(ctrl_psoc, htc_handle, qdf_osdev,
9455 			       ol_ops, device_id);
9456 	return (void *)dp_soc;
9457 }
9458 
9459 #endif
9460 
9461 /**
9462  * dp_soc_attach() - Attach txrx SOC
9463  * @ctrl_psoc: Opaque SOC handle from control plane
9464  * @htc_handle: Opaque HTC handle
9465  * @qdf_osdev: QDF device
9466  * @ol_ops: Offload Operations
9467  * @device_id: Device ID
9468  *
9469  * Return: DP SOC handle on success, NULL on failure
9470  */
9471 static struct dp_soc *
9472 dp_soc_attach(void *ctrl_psoc, HTC_HANDLE htc_handle, qdf_device_t qdf_osdev,
9473 	      struct ol_if_ops *ol_ops, uint16_t device_id)
9474 {
9476 	struct dp_soc *soc = NULL;
9477 	struct htt_soc *htt_soc = NULL;
9478 
9479 	soc = qdf_mem_malloc(sizeof(*soc));
9480 
9481 	if (!soc) {
9482 		dp_err("DP SOC memory allocation failed");
9483 		goto fail0;
9484 	}
9485 
9487 	soc->device_id = device_id;
9488 	soc->cdp_soc.ops = &dp_txrx_ops;
9489 	soc->cdp_soc.ol_ops = ol_ops;
9490 	soc->ctrl_psoc = ctrl_psoc;
9491 	soc->osdev = qdf_osdev;
9492 	soc->num_hw_dscp_tid_map = HAL_MAX_HW_DSCP_TID_MAPS;
9493 
9494 	soc->wlan_cfg_ctx = wlan_cfg_soc_attach(soc->ctrl_psoc);
9495 	if (!soc->wlan_cfg_ctx) {
9496 		dp_err("wlan_cfg_ctx failed");
9497 		goto fail1;
9498 	}
9499 	htt_soc = qdf_mem_malloc(sizeof(*htt_soc));
9500 	if (!htt_soc) {
9501 		dp_err("HTT soc memory allocation failed");
9502 		goto fail1;
9503 	}
9504 	soc->htt_handle = htt_soc;
9505 	htt_soc->dp_soc = soc;
9506 	htt_soc->htc_soc = htc_handle;
9507 
9508 	if (htt_soc_htc_prealloc(htt_soc) != QDF_STATUS_SUCCESS)
9509 		goto fail2;
9510 
9511 	return soc;
9512 fail2:
9513 	qdf_mem_free(htt_soc);
9514 fail1:
9515 	qdf_mem_free(soc);
9516 fail0:
9517 	return NULL;
9518 }
9519 
9520 /**
9521  * dp_soc_init() - Initialize txrx SOC
9522  * @dpsoc: Opaque DP SOC handle
9523  * @htc_handle: Opaque HTC handle
9524  * @hif_handle: Opaque HIF handle
9525  *
9526  * Return: DP SOC handle on success, NULL on failure
9527  */
9528 void *dp_soc_init(void *dpsoc, HTC_HANDLE htc_handle, void *hif_handle)
9529 {
9530 	int target_type;
9531 	struct dp_soc *soc = (struct dp_soc *)dpsoc;
9532 	struct htt_soc *htt_soc = (struct htt_soc *)soc->htt_handle;
9533 
9534 	htt_soc->htc_soc = htc_handle;
9535 	soc->hif_handle = hif_handle;
9536 
9537 	soc->hal_soc = hif_get_hal_handle(soc->hif_handle);
9538 	if (!soc->hal_soc)
9539 		return NULL;
9540 
9541 	htt_soc_initialize(soc->htt_handle, soc->ctrl_psoc, htt_soc->htc_soc,
9542 			   soc->hal_soc, soc->osdev);
9543 	target_type = hal_get_target_type(soc->hal_soc);
9544 	switch (target_type) {
9545 	case TARGET_TYPE_QCA6290:
9546 		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
9547 					       REO_DST_RING_SIZE_QCA6290);
9548 		soc->ast_override_support = 1;
9549 		break;
9550 #ifdef QCA_WIFI_QCA6390
9551 	case TARGET_TYPE_QCA6390:
9552 		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
9553 					       REO_DST_RING_SIZE_QCA6290);
9554 		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, true);
9555 		soc->ast_override_support = 1;
9556 		if (con_mode_monitor == QDF_GLOBAL_MONITOR_MODE) {
9557 			int int_ctx;
9558 
9559 			for (int_ctx = 0; int_ctx < WLAN_CFG_INT_NUM_CONTEXTS; int_ctx++) {
9560 				soc->wlan_cfg_ctx->int_rx_ring_mask[int_ctx] = 0;
9561 				soc->wlan_cfg_ctx->int_rxdma2host_ring_mask[int_ctx] = 0;
9562 			}
9563 		}
9564 		soc->wlan_cfg_ctx->rxdma1_enable = 0;
9565 		break;
9566 #endif
9567 	case TARGET_TYPE_QCA8074:
9568 		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
9569 					       REO_DST_RING_SIZE_QCA8074);
9570 		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, true);
9571 		break;
9572 	case TARGET_TYPE_QCA8074V2:
9573 	case TARGET_TYPE_QCA6018:
9574 		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
9575 					       REO_DST_RING_SIZE_QCA8074);
9576 		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, false);
9577 		soc->hw_nac_monitor_support = 1;
9578 		soc->ast_override_support = 1;
9579 		soc->per_tid_basize_max_tid = 8;
9580 		soc->num_hw_dscp_tid_map = HAL_MAX_HW_DSCP_TID_V2_MAPS;
9581 		break;
9582 	default:
9583 		qdf_print("%s: Unknown tgt type %d\n", __func__, target_type);
9584 		qdf_assert_always(0);
9585 		break;
9586 	}
9587 
9588 	wlan_cfg_set_rx_hash(soc->wlan_cfg_ctx,
9589 			     cfg_get(soc->ctrl_psoc, CFG_DP_RX_HASH));
9590 	soc->cce_disable = false;
9591 
9592 	if (soc->cdp_soc.ol_ops->get_dp_cfg_param) {
9593 		int ret = soc->cdp_soc.ol_ops->get_dp_cfg_param(soc->ctrl_psoc,
9594 				CDP_CFG_MAX_PEER_ID);
9595 
9596 		if (ret != -EINVAL) {
9597 			wlan_cfg_set_max_peer_id(soc->wlan_cfg_ctx, ret);
9598 		}
9599 
9600 		ret = soc->cdp_soc.ol_ops->get_dp_cfg_param(soc->ctrl_psoc,
9601 				CDP_CFG_CCE_DISABLE);
9602 		if (ret == 1)
9603 			soc->cce_disable = true;
9604 	}
9605 
9606 	qdf_spinlock_create(&soc->peer_ref_mutex);
9607 	qdf_spinlock_create(&soc->ast_lock);
9608 	dp_soc_wds_attach(soc);
9609 
9610 	qdf_spinlock_create(&soc->reo_desc_freelist_lock);
9611 	qdf_list_create(&soc->reo_desc_freelist, REO_DESC_FREELIST_SIZE);
9612 
9613 	/* fill the tx/rx cpu ring map */
9614 	dp_soc_set_txrx_ring_map(soc);
9615 
9616 	qdf_spinlock_create(&soc->htt_stats.lock);
9617 	/* initialize work queue for stats processing */
9618 	qdf_create_work(0, &soc->htt_stats.work, htt_t2h_stats_handler, soc);
9619 
9620 	return soc;
9622 }
9623 
9624 /**
9625  * dp_soc_init_wifi3() - Initialize txrx SOC
9626  * @dp_soc: Opaque DP SOC handle
9627  * @ctrl_psoc: Opaque SOC handle from control plane (Unused)
9628  * @hif_handle: Opaque HIF handle
9629  * @htc_handle: Opaque HTC handle
9630  * @qdf_osdev: QDF device (Unused)
9631  * @ol_ops: Offload Operations (Unused)
9632  * @device_id: Device ID (Unused)
9633  *
9634  * Return: DP SOC handle on success, NULL on failure
9635  */
9636 void *dp_soc_init_wifi3(void *dpsoc, void *ctrl_psoc, void *hif_handle,
9637 			HTC_HANDLE htc_handle, qdf_device_t qdf_osdev,
9638 			struct ol_if_ops *ol_ops, uint16_t device_id)
9639 {
9640 	return dp_soc_init(dpsoc, htc_handle, hif_handle);
9641 }
9642 
9643 #endif
9644 
9645 /*
9646  * dp_get_pdev_for_mac_id() -  Return pdev for mac_id
9647  *
9648  * @soc: handle to DP soc
9649  * @mac_id: MAC id
9650  *
9651  * Return: pdev corresponding to the MAC id
9652  */
9653 void *dp_get_pdev_for_mac_id(struct dp_soc *soc, uint32_t mac_id)
9654 {
9655 	if (wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx))
9656 		return soc->pdev_list[mac_id];
9657 
9658 	/* Typically for MCL, as there is only 1 PDEV */
9659 	return soc->pdev_list[0];
9660 }
9661 
9662 /*
9663  * dp_is_hw_dbs_enable() - check if DBS is supported and clamp ring count
9664  * @soc:		DP SoC context
9665  * @max_mac_rings:	pointer to no. of MAC rings; set to 1 when
 *			DBS 2x2 is not supported
9666  *
9667  * Return: None
9668  */
9669 static
9670 void dp_is_hw_dbs_enable(struct dp_soc *soc,
9671 				int *max_mac_rings)
9672 {
9673 	bool dbs_enable = false;
9674 	if (soc->cdp_soc.ol_ops->is_hw_dbs_2x2_capable)
9675 		dbs_enable = soc->cdp_soc.ol_ops->
9676 		is_hw_dbs_2x2_capable(soc->ctrl_psoc);
9677 
9678 	*max_mac_rings = dbs_enable ? *max_mac_rings : 1;
9679 }
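
/*
 * Illustrative sketch: callers seed max_mac_rings from cfg and let this
 * helper clamp it to 1 when the target is not DBS 2x2 capable; this
 * mirrors the use in dp_set_pktlog_wifi3() below.
 *
 *	int max_mac_rings = wlan_cfg_get_num_mac_rings(pdev->wlan_cfg_ctx);
 *
 *	dp_is_hw_dbs_enable(soc, &max_mac_rings);
 */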
9680 
9681 /*
9682 * dp_set_pktlog_wifi3() - enable/disable pktlog for the given WDI event
9683 * @pdev: Datapath PDEV handle
9684 * @event: which event's notifications are being subscribed to
9685 * @enable: WDI event subscribe or not. (True or False)
9686 *
9687 * Return: 0 on success
9688 */
9689 #ifdef WDI_EVENT_ENABLE
9690 int dp_set_pktlog_wifi3(struct dp_pdev *pdev, uint32_t event,
9691 	bool enable)
9692 {
9693 	struct dp_soc *soc = NULL;
9694 	struct htt_rx_ring_tlv_filter htt_tlv_filter = {0};
9695 	int max_mac_rings = wlan_cfg_get_num_mac_rings
9696 					(pdev->wlan_cfg_ctx);
9697 	uint8_t mac_id = 0;
9698 
9699 	soc = pdev->soc;
9700 	dp_is_hw_dbs_enable(soc, &max_mac_rings);
9701 
9702 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
9703 		  FL("Max_mac_rings %d"), max_mac_rings);
9705 
9706 	if (enable) {
9707 		switch (event) {
9708 		case WDI_EVENT_RX_DESC:
9709 			if (pdev->monitor_vdev) {
9710 				/* Nothing needs to be done if monitor mode is
9711 				 * enabled
9712 				 */
9713 				return 0;
9714 			}
9715 			if (pdev->rx_pktlog_mode != DP_RX_PKTLOG_FULL) {
9716 				pdev->rx_pktlog_mode = DP_RX_PKTLOG_FULL;
9717 				htt_tlv_filter.mpdu_start = 1;
9718 				htt_tlv_filter.msdu_start = 1;
9719 				htt_tlv_filter.msdu_end = 1;
9720 				htt_tlv_filter.mpdu_end = 1;
9721 				htt_tlv_filter.packet_header = 1;
9722 				htt_tlv_filter.attention = 1;
9723 				htt_tlv_filter.ppdu_start = 1;
9724 				htt_tlv_filter.ppdu_end = 1;
9725 				htt_tlv_filter.ppdu_end_user_stats = 1;
9726 				htt_tlv_filter.ppdu_end_user_stats_ext = 1;
9727 				htt_tlv_filter.ppdu_end_status_done = 1;
9728 				htt_tlv_filter.enable_fp = 1;
9729 				htt_tlv_filter.fp_mgmt_filter = FILTER_MGMT_ALL;
9730 				htt_tlv_filter.fp_ctrl_filter = FILTER_CTRL_ALL;
9731 				htt_tlv_filter.fp_data_filter = FILTER_DATA_ALL;
9732 				htt_tlv_filter.mo_mgmt_filter = FILTER_MGMT_ALL;
9733 				htt_tlv_filter.mo_ctrl_filter = FILTER_CTRL_ALL;
9734 				htt_tlv_filter.mo_data_filter = FILTER_DATA_ALL;
9735 
9736 				for (mac_id = 0; mac_id < max_mac_rings;
9737 								mac_id++) {
9738 					int mac_for_pdev =
9739 						dp_get_mac_id_for_pdev(mac_id,
9740 								pdev->pdev_id);
9741 
9742 					htt_h2t_rx_ring_cfg(soc->htt_handle,
9743 					 mac_for_pdev,
9744 					 pdev->rxdma_mon_status_ring[mac_id]
9745 					 .hal_srng,
9746 					 RXDMA_MONITOR_STATUS,
9747 					 RX_BUFFER_SIZE,
9748 					 &htt_tlv_filter);
9749 
9750 				}
9751 
9752 				if (soc->reap_timer_init)
9753 					qdf_timer_mod(&soc->mon_reap_timer,
9754 					DP_INTR_POLL_TIMER_MS);
9755 			}
9756 			break;
9757 
9758 		case WDI_EVENT_LITE_RX:
9759 			if (pdev->monitor_vdev) {
9760 				/* Nothing needs to be done if monitor mode is
9761 				 * enabled
9762 				 */
9763 				return 0;
9764 			}
9765 
9766 			if (pdev->rx_pktlog_mode != DP_RX_PKTLOG_LITE) {
9767 				pdev->rx_pktlog_mode = DP_RX_PKTLOG_LITE;
9768 
9769 				htt_tlv_filter.ppdu_start = 1;
9770 				htt_tlv_filter.ppdu_end = 1;
9771 				htt_tlv_filter.ppdu_end_user_stats = 1;
9772 				htt_tlv_filter.ppdu_end_user_stats_ext = 1;
9773 				htt_tlv_filter.ppdu_end_status_done = 1;
9774 				htt_tlv_filter.mpdu_start = 1;
9775 				htt_tlv_filter.enable_fp = 1;
9776 				htt_tlv_filter.fp_mgmt_filter = FILTER_MGMT_ALL;
9777 				htt_tlv_filter.fp_ctrl_filter = FILTER_CTRL_ALL;
9778 				htt_tlv_filter.fp_data_filter = FILTER_DATA_ALL;
9779 				htt_tlv_filter.mo_mgmt_filter = FILTER_MGMT_ALL;
9780 				htt_tlv_filter.mo_ctrl_filter = FILTER_CTRL_ALL;
9781 				htt_tlv_filter.mo_data_filter = FILTER_DATA_ALL;
9782 
9783 				for (mac_id = 0; mac_id < max_mac_rings;
9784 								mac_id++) {
9785 					int mac_for_pdev =
9786 						dp_get_mac_id_for_pdev(mac_id,
9787 								pdev->pdev_id);
9788 
9789 					htt_h2t_rx_ring_cfg(soc->htt_handle,
9790 					mac_for_pdev,
9791 					pdev->rxdma_mon_status_ring[mac_id]
9792 					.hal_srng,
9793 					RXDMA_MONITOR_STATUS,
9794 					RX_BUFFER_SIZE_PKTLOG_LITE,
9795 					&htt_tlv_filter);
9796 				}
9797 
9798 				if (soc->reap_timer_init)
9799 					qdf_timer_mod(&soc->mon_reap_timer,
9800 					DP_INTR_POLL_TIMER_MS);
9801 			}
9802 			break;
9803 
9804 		case WDI_EVENT_LITE_T2H:
9805 			if (pdev->monitor_vdev) {
9806 				/* Nothing needs to be done if monitor mode is
9807 				 * enabled
9808 				 */
9809 				return 0;
9810 			}
9811 
9812 			for (mac_id = 0; mac_id < max_mac_rings; mac_id++) {
9813 				int mac_for_pdev = dp_get_mac_id_for_pdev(
9814 							mac_id,	pdev->pdev_id);
9815 
9816 				pdev->pktlog_ppdu_stats = true;
9817 				dp_h2t_cfg_stats_msg_send(pdev,
9818 					DP_PPDU_TXLITE_STATS_BITMASK_CFG,
9819 					mac_for_pdev);
9820 			}
9821 			break;
9822 
9823 		default:
9824 			/* Nothing needs to be done for other pktlog types */
9825 			break;
9826 		}
9827 	} else {
9828 		switch (event) {
9829 		case WDI_EVENT_RX_DESC:
9830 		case WDI_EVENT_LITE_RX:
9831 			if (pdev->monitor_vdev) {
9832 				/* Nothing needs to be done if monitor mode is
9833 				 * enabled
9834 				 */
9835 				return 0;
9836 			}
9837 			if (pdev->rx_pktlog_mode != DP_RX_PKTLOG_DISABLED) {
9838 				pdev->rx_pktlog_mode = DP_RX_PKTLOG_DISABLED;
9839 
9840 				for (mac_id = 0; mac_id < max_mac_rings;
9841 								mac_id++) {
9842 					int mac_for_pdev =
9843 						dp_get_mac_id_for_pdev(mac_id,
9844 								pdev->pdev_id);
9845 
9846 					htt_h2t_rx_ring_cfg(soc->htt_handle,
9847 					  mac_for_pdev,
9848 					  pdev->rxdma_mon_status_ring[mac_id]
9849 					  .hal_srng,
9850 					  RXDMA_MONITOR_STATUS,
9851 					  RX_BUFFER_SIZE,
9852 					  &htt_tlv_filter);
9853 				}
9854 
9855 				if (soc->reap_timer_init)
9856 					qdf_timer_stop(&soc->mon_reap_timer);
9857 			}
9858 			break;
9859 		case WDI_EVENT_LITE_T2H:
9860 			if (pdev->monitor_vdev) {
9861 				/* Nothing needs to be done if monitor mode is
9862 				 * enabled
9863 				 */
9864 				return 0;
9865 			}
9866 			/* Pass value 0 to disable HTT_H2T_MSG_TYPE_PPDU_STATS_CFG
9867 			 * in FW. Once these macros are defined in the htt header
9868 			 * file, the proper macros will be used here.
9869 			 */
9870 			for (mac_id = 0; mac_id < max_mac_rings; mac_id++) {
9871 				int mac_for_pdev =
9872 						dp_get_mac_id_for_pdev(mac_id,
9873 								pdev->pdev_id);
9874 
9875 				pdev->pktlog_ppdu_stats = false;
9876 				if (!pdev->enhanced_stats_en && !pdev->tx_sniffer_enable && !pdev->mcopy_mode) {
9877 					dp_h2t_cfg_stats_msg_send(pdev, 0,
9878 								mac_for_pdev);
9879 				} else if (pdev->tx_sniffer_enable || pdev->mcopy_mode) {
9880 					dp_h2t_cfg_stats_msg_send(pdev, DP_PPDU_STATS_CFG_SNIFFER,
9881 								mac_for_pdev);
9882 				} else if (pdev->enhanced_stats_en) {
9883 					dp_h2t_cfg_stats_msg_send(pdev, DP_PPDU_STATS_CFG_ENH_STATS,
9884 								mac_for_pdev);
9885 				}
9886 			}
9887 
9888 			break;
9889 		default:
9890 			/* Nothing needs to be done for other pktlog types */
9891 			break;
9892 		}
9893 	}
9894 	return 0;
9895 }
9896 #endif
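
/*
 * Illustrative sketch: pktlog glue code would toggle lite RX capture
 * through the handler above, e.g.
 *
 *	dp_set_pktlog_wifi3(pdev, WDI_EVENT_LITE_RX, true);
 *	...
 *	dp_set_pktlog_wifi3(pdev, WDI_EVENT_LITE_RX, false);
 */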
9897