/*
 * Copyright (c) 2016-2019 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include <qdf_types.h>
#include <qdf_lock.h>
#include <qdf_net_types.h>
#include <qdf_lro.h>
#include <qdf_module.h>
#include <hal_hw_headers.h>
#include <hal_api.h>
#include <hif.h>
#include <htt.h>
#include <wdi_event.h>
#include <queue.h>
#include "dp_htt.h"
#include "dp_types.h"
#include "dp_internal.h"
#include "dp_tx.h"
#include "dp_tx_desc.h"
#include "dp_rx.h"
#include <cdp_txrx_handle.h>
#include <wlan_cfg.h>
#include "cdp_txrx_cmn_struct.h"
#include "cdp_txrx_stats_struct.h"
#include "cdp_txrx_cmn_reg.h"
#include <qdf_util.h>
#include "dp_peer.h"
#include "dp_rx_mon.h"
#include "htt_stats.h"
#include "qdf_mem.h"   /* qdf_mem_malloc,free */
#include "cfg_ucfg_api.h"
#ifdef QCA_LL_TX_FLOW_CONTROL_V2
#include "cdp_txrx_flow_ctrl_v2.h"
#else
static inline void
cdp_dump_flow_pool_info(struct cdp_soc_t *soc)
{
	return;
}
#endif
#include "dp_ipa.h"
#include "dp_cal_client_api.h"
#ifdef CONFIG_MCL
extern int con_mode_monitor;
#ifndef REMOVE_PKT_LOG
#include <pktlog_ac_api.h>
#include <pktlog_ac.h>
#endif
#endif
void *dp_soc_init(void *dpsoc, HTC_HANDLE htc_handle, void *hif_handle);
static void dp_pdev_detach(struct cdp_pdev *txrx_pdev, int force);
static struct dp_soc *
dp_soc_attach(void *ctrl_psoc, HTC_HANDLE htc_handle, qdf_device_t qdf_osdev,
	      struct ol_if_ops *ol_ops, uint16_t device_id);
static void dp_pktlogmod_exit(struct dp_pdev *handle);
static void *dp_peer_create_wifi3(struct cdp_vdev *vdev_handle,
				uint8_t *peer_mac_addr,
				struct cdp_ctrl_objmgr_peer *ctrl_peer);
static void dp_peer_delete_wifi3(void *peer_handle, uint32_t bitmap);
static void dp_ppdu_ring_reset(struct dp_pdev *pdev);
static void dp_ppdu_ring_cfg(struct dp_pdev *pdev);

#define DP_INTR_POLL_TIMER_MS	10
/* Generic AST entry aging timer value */
#define DP_AST_AGING_TIMER_DEFAULT_MS	1000
/* WDS AST entry aging timer value */
#define DP_WDS_AST_AGING_TIMER_DEFAULT_MS	120000
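/*
 * Number of generic aging-timer ticks after which a WDS AST entry is
 * considered aged out: (120000 ms / 1000 ms) - 1 = 119 ticks, i.e. a WDS
 * entry must stay inactive for roughly two minutes before aging out.
 */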
#define DP_WDS_AST_AGING_TIMER_CNT \
((DP_WDS_AST_AGING_TIMER_DEFAULT_MS / DP_AST_AGING_TIMER_DEFAULT_MS) - 1)
#define DP_MCS_LENGTH (6*MAX_MCS)
#define DP_NSS_LENGTH (6*SS_COUNT)
#define DP_RXDMA_ERR_LENGTH (6*HAL_RXDMA_ERR_MAX)
#define DP_MAX_INT_CONTEXTS_STRING_LENGTH (6 * WLAN_CFG_INT_NUM_CONTEXTS)
#define DP_REO_ERR_LENGTH (6*HAL_REO_ERR_MAX)
#define DP_MAX_MCS_STRING_LEN 30
#define DP_CURR_FW_STATS_AVAIL 19
#define DP_HTT_DBG_EXT_STATS_MAX 256
#define DP_MAX_SLEEP_TIME 100
#ifndef QCA_WIFI_3_0_EMU
#define SUSPEND_DRAIN_WAIT 500
#else
#define SUSPEND_DRAIN_WAIT 3000
#endif

#ifdef IPA_OFFLOAD
/* Exclude IPA rings from the interrupt context */
#define TX_RING_MASK_VAL	0xb
#define RX_RING_MASK_VAL	0x7
#else
#define TX_RING_MASK_VAL	0xF
#define RX_RING_MASK_VAL	0xF
#endif

#define STR_MAXLEN	64

#define DP_PPDU_STATS_CFG_ALL 0xFFFF

/* PPDU stats mask sent to FW to enable enhanced stats */
#define DP_PPDU_STATS_CFG_ENH_STATS 0xE67
/* PPDU stats mask sent to FW to support debug sniffer feature */
#define DP_PPDU_STATS_CFG_SNIFFER 0x2FFF
/* PPDU stats mask sent to FW to support BPR feature */
#define DP_PPDU_STATS_CFG_BPR 0x2000
/* PPDU stats mask sent to FW to support BPR and enhanced stats features */
#define DP_PPDU_STATS_CFG_BPR_ENH (DP_PPDU_STATS_CFG_BPR | \
				   DP_PPDU_STATS_CFG_ENH_STATS)
/* PPDU stats mask sent to FW to support BPR and pktlog stats features */
#define DP_PPDU_STATS_CFG_BPR_PKTLOG (DP_PPDU_STATS_CFG_BPR | \
				      DP_PPDU_TXLITE_STATS_BITMASK_CFG)

#define RNG_ERR		"SRNG setup failed for"
/**
 * default_dscp_tid_map - Default DSCP-TID mapping
 *
 * DSCP        TID
 * 000000      0
 * 001000      1
 * 010000      2
 * 011000      3
 * 100000      4
 * 101000      5
 * 110000      6
 * 111000      7
 */
static uint8_t default_dscp_tid_map[DSCP_TID_MAP_MAX] = {
	0, 0, 0, 0, 0, 0, 0, 0,
	1, 1, 1, 1, 1, 1, 1, 1,
	2, 2, 2, 2, 2, 2, 2, 2,
	3, 3, 3, 3, 3, 3, 3, 3,
	4, 4, 4, 4, 4, 4, 4, 4,
	5, 5, 5, 5, 5, 5, 5, 5,
	6, 6, 6, 6, 6, 6, 6, 6,
	7, 7, 7, 7, 7, 7, 7, 7,
};
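/*
 * The table above is indexed by the full 6-bit DSCP value; since each run of
 * eight consecutive codepoints shares one TID, the mapping is effectively
 * tid = dscp >> 3 (only the 3 MSBs of the DSCP select the TID).
 */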

/*
 * struct dp_rate_debug
 *
 * @mcs_type: print string for a given mcs
 * @valid: valid mcs rate?
 */
struct dp_rate_debug {
	char mcs_type[DP_MAX_MCS_STRING_LEN];
	uint8_t valid;
};

#define MCS_VALID 1
#define MCS_INVALID 0

static const struct dp_rate_debug dp_rate_string[DOT11_MAX][MAX_MCS] = {

	{
		{"OFDM 48 Mbps", MCS_VALID},
		{"OFDM 24 Mbps", MCS_VALID},
		{"OFDM 12 Mbps", MCS_VALID},
		{"OFDM 6 Mbps ", MCS_VALID},
		{"OFDM 54 Mbps", MCS_VALID},
		{"OFDM 36 Mbps", MCS_VALID},
		{"OFDM 18 Mbps", MCS_VALID},
		{"OFDM 9 Mbps ", MCS_VALID},
		{"INVALID ", MCS_INVALID},
		{"INVALID ", MCS_INVALID},
		{"INVALID ", MCS_INVALID},
		{"INVALID ", MCS_INVALID},
		{"INVALID ", MCS_VALID},
	},
	{
		{"CCK 11 Mbps Long  ", MCS_VALID},
		{"CCK 5.5 Mbps Long ", MCS_VALID},
		{"CCK 2 Mbps Long   ", MCS_VALID},
		{"CCK 1 Mbps Long   ", MCS_VALID},
		{"CCK 11 Mbps Short ", MCS_VALID},
		{"CCK 5.5 Mbps Short", MCS_VALID},
		{"CCK 2 Mbps Short  ", MCS_VALID},
		{"INVALID ", MCS_INVALID},
		{"INVALID ", MCS_INVALID},
		{"INVALID ", MCS_INVALID},
		{"INVALID ", MCS_INVALID},
		{"INVALID ", MCS_INVALID},
		{"INVALID ", MCS_VALID},
	},
	{
		{"HT MCS 0 (BPSK 1/2)  ", MCS_VALID},
		{"HT MCS 1 (QPSK 1/2)  ", MCS_VALID},
		{"HT MCS 2 (QPSK 3/4)  ", MCS_VALID},
		{"HT MCS 3 (16-QAM 1/2)", MCS_VALID},
		{"HT MCS 4 (16-QAM 3/4)", MCS_VALID},
		{"HT MCS 5 (64-QAM 2/3)", MCS_VALID},
		{"HT MCS 6 (64-QAM 3/4)", MCS_VALID},
		{"HT MCS 7 (64-QAM 5/6)", MCS_VALID},
		{"INVALID ", MCS_INVALID},
		{"INVALID ", MCS_INVALID},
		{"INVALID ", MCS_INVALID},
		{"INVALID ", MCS_INVALID},
		{"INVALID ", MCS_VALID},
	},
	{
		{"VHT MCS 0 (BPSK 1/2)     ", MCS_VALID},
		{"VHT MCS 1 (QPSK 1/2)     ", MCS_VALID},
		{"VHT MCS 2 (QPSK 3/4)     ", MCS_VALID},
		{"VHT MCS 3 (16-QAM 1/2)   ", MCS_VALID},
		{"VHT MCS 4 (16-QAM 3/4)   ", MCS_VALID},
		{"VHT MCS 5 (64-QAM 2/3)   ", MCS_VALID},
		{"VHT MCS 6 (64-QAM 3/4)   ", MCS_VALID},
		{"VHT MCS 7 (64-QAM 5/6)   ", MCS_VALID},
		{"VHT MCS 8 (256-QAM 3/4)  ", MCS_VALID},
		{"VHT MCS 9 (256-QAM 5/6)  ", MCS_VALID},
		{"VHT MCS 10 (1024-QAM 3/4)", MCS_VALID},
		{"VHT MCS 11 (1024-QAM 5/6)", MCS_VALID},
		{"INVALID ", MCS_VALID},
	},
	{
		{"HE MCS 0 (BPSK 1/2)     ", MCS_VALID},
		{"HE MCS 1 (QPSK 1/2)     ", MCS_VALID},
		{"HE MCS 2 (QPSK 3/4)     ", MCS_VALID},
		{"HE MCS 3 (16-QAM 1/2)   ", MCS_VALID},
		{"HE MCS 4 (16-QAM 3/4)   ", MCS_VALID},
		{"HE MCS 5 (64-QAM 2/3)   ", MCS_VALID},
		{"HE MCS 6 (64-QAM 3/4)   ", MCS_VALID},
		{"HE MCS 7 (64-QAM 5/6)   ", MCS_VALID},
		{"HE MCS 8 (256-QAM 3/4)  ", MCS_VALID},
		{"HE MCS 9 (256-QAM 5/6)  ", MCS_VALID},
		{"HE MCS 10 (1024-QAM 3/4)", MCS_VALID},
		{"HE MCS 11 (1024-QAM 5/6)", MCS_VALID},
		{"INVALID ", MCS_VALID},
	}
};

/**
 * enum dp_cpu_ring_map_types - dp tx cpu ring map
 * @DP_NSS_DEFAULT_MAP: Default mode with no NSS offloaded
 * @DP_NSS_FIRST_RADIO_OFFLOADED_MAP: Only first radio is offloaded
 * @DP_NSS_SECOND_RADIO_OFFLOADED_MAP: Only second radio is offloaded
 * @DP_NSS_DBDC_OFFLOADED_MAP: Both radios are offloaded
 * @DP_NSS_DBTC_OFFLOADED_MAP: All three radios are offloaded
 * @DP_NSS_CPU_RING_MAP_MAX: Max cpu ring map val
 */
enum dp_cpu_ring_map_types {
	DP_NSS_DEFAULT_MAP,
	DP_NSS_FIRST_RADIO_OFFLOADED_MAP,
	DP_NSS_SECOND_RADIO_OFFLOADED_MAP,
	DP_NSS_DBDC_OFFLOADED_MAP,
	DP_NSS_DBTC_OFFLOADED_MAP,
	DP_NSS_CPU_RING_MAP_MAX
};

/**
 * @brief Cpu to tx ring map
 */
#ifdef CONFIG_WIN
static uint8_t
dp_cpu_ring_map[DP_NSS_CPU_RING_MAP_MAX][WLAN_CFG_INT_NUM_CONTEXTS] = {
	{0x0, 0x1, 0x2, 0x0, 0x0, 0x1, 0x2, 0x0, 0x0, 0x1, 0x2},
	{0x1, 0x2, 0x1, 0x2, 0x1, 0x2, 0x1, 0x2, 0x1, 0x2, 0x1},
	{0x0, 0x2, 0x0, 0x2, 0x0, 0x2, 0x0, 0x2, 0x0, 0x2, 0x0},
	{0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2},
	{0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3}
};
#else
static uint8_t
dp_cpu_ring_map[DP_NSS_CPU_RING_MAP_MAX][WLAN_CFG_INT_NUM_CONTEXTS] = {
	{0x0, 0x1, 0x2, 0x0, 0x0, 0x1, 0x2},
	{0x1, 0x2, 0x1, 0x2, 0x1, 0x2, 0x1},
	{0x0, 0x2, 0x0, 0x2, 0x0, 0x2, 0x0},
	{0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2},
	{0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3}
};
#endif
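/*
 * Each row above corresponds to one dp_cpu_ring_map_types entry and maps an
 * interrupt context (column) to the TCL data ring used for host transmit.
 * For example, in the DBDC-offloaded row every context is pinned to ring 2,
 * leaving the other rings free for the NSS-offloaded radios.
 */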

/**
 * @brief Select the type of statistics
 */
enum dp_stats_type {
	STATS_FW = 0,
	STATS_HOST = 1,
	STATS_TYPE_MAX = 2,
};

/**
 * @brief General Firmware statistics options
 *
 */
enum dp_fw_stats {
	TXRX_FW_STATS_INVALID	= -1,
};

/**
 * dp_stats_mapping_table - Firmware and Host statistics
 * currently supported
 */
const int dp_stats_mapping_table[][STATS_TYPE_MAX] = {
	{HTT_DBG_EXT_STATS_RESET, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_PDEV_TX, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_PDEV_RX, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_PDEV_TX_HWQ, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_PDEV_TX_SCHED, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_PDEV_ERROR, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_PDEV_TQM, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_TQM_CMDQ, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_TX_DE_INFO, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_PDEV_TX_RATE, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_PDEV_RX_RATE, TXRX_HOST_STATS_INVALID},
	{TXRX_FW_STATS_INVALID, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_TX_SELFGEN_INFO, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_TX_MU_HWQ, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_RING_IF_INFO, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_SRNG_INFO, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_SFM_INFO, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_PDEV_TX_MU, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_ACTIVE_PEERS_LIST, TXRX_HOST_STATS_INVALID},
	/* Last ENUM for HTT FW STATS */
	{DP_HTT_DBG_EXT_STATS_MAX, TXRX_HOST_STATS_INVALID},
	{TXRX_FW_STATS_INVALID, TXRX_CLEAR_STATS},
	{TXRX_FW_STATS_INVALID, TXRX_RX_RATE_STATS},
	{TXRX_FW_STATS_INVALID, TXRX_TX_RATE_STATS},
	{TXRX_FW_STATS_INVALID, TXRX_TX_HOST_STATS},
	{TXRX_FW_STATS_INVALID, TXRX_RX_HOST_STATS},
	{TXRX_FW_STATS_INVALID, TXRX_AST_STATS},
	{TXRX_FW_STATS_INVALID, TXRX_SRNG_PTR_STATS},
	{TXRX_FW_STATS_INVALID, TXRX_RX_MON_STATS},
	{TXRX_FW_STATS_INVALID, TXRX_REO_QUEUE_STATS},
	{TXRX_FW_STATS_INVALID, TXRX_SOC_CFG_PARAMS},
	{TXRX_FW_STATS_INVALID, TXRX_PDEV_CFG_PARAMS},
};
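/*
 * Each row of dp_stats_mapping_table is indexed by the cdp stats id from the
 * caller; column STATS_FW holds the matching HTT firmware stats id and
 * column STATS_HOST the host stats id, with the *_INVALID markers showing
 * which side does not implement that particular stat.
 */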

/* MCL specific functions */
#ifdef CONFIG_MCL
/**
 * dp_soc_get_mon_mask_for_interrupt_mode() - get mon mode mask for intr mode
 * @soc: pointer to dp_soc handle
 * @intr_ctx_num: interrupt context number for which mon mask is needed
 *
 * For MCL, monitor mode rings are processed in timer contexts (polled).
 * This function returns 0, since in interrupt mode (softirq-based RX),
 * we do not want to process monitor mode rings in a softirq.
 *
 * So, in case packet log is enabled for SAP/STA/P2P modes, regular interrupt
 * processing will not process monitor mode rings; that is done in a separate
 * timer context instead.
 *
 * Return: 0
 */
static inline
uint32_t dp_soc_get_mon_mask_for_interrupt_mode(struct dp_soc *soc, int intr_ctx_num)
{
	return 0;
}

/*
 * dp_service_mon_rings() - timer handler to reap monitor rings, required
 * because ppdu-end interrupts are not available in this mode
 * @arg: SoC Handle
 *
 * Return: none
 */
static void dp_service_mon_rings(void *arg)
{
	struct dp_soc *soc = (struct dp_soc *)arg;
	int ring = 0, work_done, mac_id;
	struct dp_pdev *pdev = NULL;

	for  (ring = 0 ; ring < MAX_PDEV_CNT; ring++) {
		pdev = soc->pdev_list[ring];
		if (!pdev)
			continue;
		for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
			int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id,
								pdev->pdev_id);
			work_done = dp_mon_process(soc, mac_for_pdev,
						   QCA_NAPI_BUDGET);

			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
				  FL("Reaped %d descs from Monitor rings"),
				  work_done);
		}
	}

	qdf_timer_mod(&soc->mon_reap_timer, DP_INTR_POLL_TIMER_MS);
}

#ifndef REMOVE_PKT_LOG
/**
 * dp_pkt_log_init() - API to initialize packet log
 * @ppdev: physical device handle
 * @scn: HIF context
 *
 * Return: none
 */
void dp_pkt_log_init(struct cdp_pdev *ppdev, void *scn)
{
	struct dp_pdev *handle = (struct dp_pdev *)ppdev;

	if (handle->pkt_log_init) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "%s: Packet log already initialized", __func__);
		return;
	}

	pktlog_sethandle(&handle->pl_dev, scn);
	pktlog_set_callback_regtype(PKTLOG_LITE_CALLBACK_REGISTRATION);

	if (pktlogmod_init(scn)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "%s: pktlogmod_init failed", __func__);
		handle->pkt_log_init = false;
	} else {
		handle->pkt_log_init = true;
	}
}

/**
 * dp_pkt_log_con_service() - connect packet log service
 * @ppdev: physical device handle
 * @scn: device context
 *
 * Return: none
 */
static void dp_pkt_log_con_service(struct cdp_pdev *ppdev, void *scn)
{
	struct dp_pdev *pdev = (struct dp_pdev *)ppdev;

	dp_pkt_log_init((struct cdp_pdev *)pdev, scn);
	pktlog_htc_attach();
}

/**
 * dp_get_num_rx_contexts() - get number of RX contexts
 * @soc_hdl: cdp opaque soc handle
 *
 * Return: number of RX contexts
 */
static int dp_get_num_rx_contexts(struct cdp_soc_t *soc_hdl)
{
	int i;
	int num_rx_contexts = 0;

	struct dp_soc *soc = (struct dp_soc *)soc_hdl;

	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++)
		if (wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, i))
			num_rx_contexts++;

	return num_rx_contexts;
}

/**
 * dp_pktlogmod_exit() - API to cleanup pktlog info
 * @handle: Pdev handle
 *
 * Return: none
 */
static void dp_pktlogmod_exit(struct dp_pdev *handle)
{
	void *scn = (void *)handle->soc->hif_handle;

	if (!scn) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "%s: Invalid hif(scn) handle", __func__);
		return;
	}

	pktlogmod_exit(scn);
	handle->pkt_log_init = false;
}
#endif
#else
static void dp_pktlogmod_exit(struct dp_pdev *handle) { }

/**
 * dp_soc_get_mon_mask_for_interrupt_mode() - get mon mode mask for intr mode
 * @soc: pointer to dp_soc handle
 * @intr_ctx_num: interrupt context number for which mon mask is needed
 *
 * Return: mon mask value
 */
static inline
uint32_t dp_soc_get_mon_mask_for_interrupt_mode(struct dp_soc *soc, int intr_ctx_num)
{
	return wlan_cfg_get_rx_mon_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
}
#endif

/**
 * dp_get_dp_vdev_from_cdp_vdev() - get dp_vdev from cdp_vdev by type-casting
 * @cdp_opaque_vdev: pointer to cdp_vdev
 *
 * Return: pointer to dp_vdev
 */
static
struct dp_vdev *dp_get_dp_vdev_from_cdp_vdev(struct cdp_vdev *cdp_opaque_vdev)
{
	return (struct dp_vdev *)cdp_opaque_vdev;
}

static int dp_peer_add_ast_wifi3(struct cdp_soc_t *soc_hdl,
					struct cdp_peer *peer_hdl,
					uint8_t *mac_addr,
					enum cdp_txrx_ast_entry_type type,
					uint32_t flags)
{
	return dp_peer_add_ast((struct dp_soc *)soc_hdl,
				(struct dp_peer *)peer_hdl,
				mac_addr,
				type,
				flags);
}

static int dp_peer_update_ast_wifi3(struct cdp_soc_t *soc_hdl,
						struct cdp_peer *peer_hdl,
						uint8_t *wds_macaddr,
						uint32_t flags)
{
	int status = -1;
	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
	struct dp_ast_entry  *ast_entry = NULL;
	struct dp_peer *peer = (struct dp_peer *)peer_hdl;

	qdf_spin_lock_bh(&soc->ast_lock);
	ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, wds_macaddr,
						    peer->vdev->pdev->pdev_id);

	if (ast_entry) {
		status = dp_peer_update_ast(soc,
					    peer,
					    ast_entry, flags);
	}

	qdf_spin_unlock_bh(&soc->ast_lock);

	return status;
}
/*
 * dp_wds_reset_ast_wifi3() - Reset the is_active param for an ast entry
 * @soc_hdl:		Datapath SOC handle
 * @wds_macaddr:	WDS entry MAC Address
 * @vdev_handle:	vdev handle used to look up the pdev
 *
 * Return: None
 */
static void dp_wds_reset_ast_wifi3(struct cdp_soc_t *soc_hdl,
				   uint8_t *wds_macaddr, void *vdev_handle)
{
	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
	struct dp_ast_entry *ast_entry = NULL;
	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;

	qdf_spin_lock_bh(&soc->ast_lock);
	ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, wds_macaddr,
						    vdev->pdev->pdev_id);

	if (ast_entry) {
		if ((ast_entry->type != CDP_TXRX_AST_TYPE_STATIC) &&
			(ast_entry->type != CDP_TXRX_AST_TYPE_SELF) &&
			(ast_entry->type != CDP_TXRX_AST_TYPE_STA_BSS)) {
			ast_entry->is_active = TRUE;
		}
	}

	qdf_spin_unlock_bh(&soc->ast_lock);
}

/*
 * dp_wds_reset_ast_table_wifi3() - Reset the is_active param for all ast
 *				    entries
 * @soc_hdl:		Datapath SOC handle
 *
 * Return: None
 */
static void dp_wds_reset_ast_table_wifi3(struct cdp_soc_t  *soc_hdl,
					 void *vdev_hdl)
{
	struct dp_soc *soc = (struct dp_soc *) soc_hdl;
	struct dp_pdev *pdev;
	struct dp_vdev *vdev;
	struct dp_peer *peer;
	struct dp_ast_entry *ase, *temp_ase;
	int i;

	qdf_spin_lock_bh(&soc->ast_lock);

	for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) {
		pdev = soc->pdev_list[i];
		qdf_spin_lock_bh(&pdev->vdev_list_lock);
		DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
			DP_VDEV_ITERATE_PEER_LIST(vdev, peer) {
				DP_PEER_ITERATE_ASE_LIST(peer, ase, temp_ase) {
					if ((ase->type ==
						CDP_TXRX_AST_TYPE_STATIC) ||
						(ase->type ==
						CDP_TXRX_AST_TYPE_SELF) ||
						(ase->type ==
						CDP_TXRX_AST_TYPE_STA_BSS))
						continue;
					ase->is_active = TRUE;
				}
			}
		}
		qdf_spin_unlock_bh(&pdev->vdev_list_lock);
	}

	qdf_spin_unlock_bh(&soc->ast_lock);
}

/*
 * dp_wds_flush_ast_table_wifi3() - Delete all wds and hmwds ast entries
 * @soc_hdl:		Datapath SOC handle
 *
 * Return: None
 */
static void dp_wds_flush_ast_table_wifi3(struct cdp_soc_t  *soc_hdl)
{
	struct dp_soc *soc = (struct dp_soc *) soc_hdl;
	struct dp_pdev *pdev;
	struct dp_vdev *vdev;
	struct dp_peer *peer;
	struct dp_ast_entry *ase, *temp_ase;
	int i;

	qdf_spin_lock_bh(&soc->ast_lock);

	for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) {
		pdev = soc->pdev_list[i];
		qdf_spin_lock_bh(&pdev->vdev_list_lock);
		DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
			DP_VDEV_ITERATE_PEER_LIST(vdev, peer) {
				DP_PEER_ITERATE_ASE_LIST(peer, ase, temp_ase) {
					if ((ase->type ==
						CDP_TXRX_AST_TYPE_STATIC) ||
						(ase->type ==
						 CDP_TXRX_AST_TYPE_SELF) ||
						(ase->type ==
						 CDP_TXRX_AST_TYPE_STA_BSS))
						continue;
					dp_peer_del_ast(soc, ase);
				}
			}
		}
		qdf_spin_unlock_bh(&pdev->vdev_list_lock);
	}

	qdf_spin_unlock_bh(&soc->ast_lock);
}

/**
 * dp_peer_get_ast_info_by_soc_wifi3() - search the soc AST hash table
 *                                       and return ast entry information
 *                                       of first ast entry found in the
 *                                       table with given mac address
 *
 * @soc_hdl : data path soc handle
 * @ast_mac_addr : AST entry mac address
 * @ast_entry_info : ast entry information
 *
 * return : true if ast entry found with ast_mac_addr
 *          false if ast entry not found
 */
static bool dp_peer_get_ast_info_by_soc_wifi3
	(struct cdp_soc_t *soc_hdl,
	 uint8_t *ast_mac_addr,
	 struct cdp_ast_entry_info *ast_entry_info)
{
	struct dp_ast_entry *ast_entry;
	struct dp_soc *soc = (struct dp_soc *)soc_hdl;

	qdf_spin_lock_bh(&soc->ast_lock);

	ast_entry = dp_peer_ast_hash_find_soc(soc, ast_mac_addr);

	if (ast_entry && !ast_entry->delete_in_progress) {
		ast_entry_info->type = ast_entry->type;
		ast_entry_info->pdev_id = ast_entry->pdev_id;
		ast_entry_info->vdev_id = ast_entry->vdev_id;
		ast_entry_info->peer_id = ast_entry->peer->peer_ids[0];
		qdf_mem_copy(&ast_entry_info->peer_mac_addr[0],
			     &ast_entry->peer->mac_addr.raw[0],
			     DP_MAC_ADDR_LEN);
		qdf_spin_unlock_bh(&soc->ast_lock);
		return true;
	}

	qdf_spin_unlock_bh(&soc->ast_lock);
	return false;
}

/**
 * dp_peer_get_ast_info_by_pdevid_wifi3() - search the soc AST hash table
 *                                          and return ast entry information
 *                                          if mac address and pdev_id match
 *
 * @soc_hdl : data path soc handle
 * @ast_mac_addr : AST entry mac address
 * @pdev_id : pdev_id
 * @ast_entry_info : ast entry information
 *
 * return : true if ast entry found with ast_mac_addr
 *          false if ast entry not found
 */
static bool dp_peer_get_ast_info_by_pdevid_wifi3
		(struct cdp_soc_t *soc_hdl,
		 uint8_t *ast_mac_addr,
		 uint8_t pdev_id,
		 struct cdp_ast_entry_info *ast_entry_info)
{
	struct dp_ast_entry *ast_entry;
	struct dp_soc *soc = (struct dp_soc *)soc_hdl;

	qdf_spin_lock_bh(&soc->ast_lock);

	ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, ast_mac_addr, pdev_id);

	if (ast_entry && !ast_entry->delete_in_progress) {
		ast_entry_info->type = ast_entry->type;
		ast_entry_info->pdev_id = ast_entry->pdev_id;
		ast_entry_info->vdev_id = ast_entry->vdev_id;
		ast_entry_info->peer_id = ast_entry->peer->peer_ids[0];
		qdf_mem_copy(&ast_entry_info->peer_mac_addr[0],
			     &ast_entry->peer->mac_addr.raw[0],
			     DP_MAC_ADDR_LEN);
		qdf_spin_unlock_bh(&soc->ast_lock);
		return true;
	}

	qdf_spin_unlock_bh(&soc->ast_lock);
	return false;
}

/**
 * dp_peer_ast_entry_del_by_soc() - delete the ast entry from soc AST hash
 *                                  table with given mac address
 *
 * @soc_handle : data path soc handle
 * @mac_addr : AST entry mac address
 * @callback : callback function to be called on ast delete response from FW
 * @cookie : argument to be passed to callback
 *
 * return : QDF_STATUS_SUCCESS if ast entry found with mac_addr and delete
 *          is sent
 *          QDF_STATUS_E_INVAL if ast entry not found
 */
static QDF_STATUS dp_peer_ast_entry_del_by_soc(struct cdp_soc_t *soc_handle,
					       uint8_t *mac_addr,
					       txrx_ast_free_cb callback,
					       void *cookie)
{
	struct dp_soc *soc = (struct dp_soc *)soc_handle;
	struct dp_ast_entry *ast_entry;
	txrx_ast_free_cb cb = NULL;
	void *arg = NULL;

	qdf_spin_lock_bh(&soc->ast_lock);
	ast_entry = dp_peer_ast_hash_find_soc(soc, mac_addr);
	if (!ast_entry) {
		qdf_spin_unlock_bh(&soc->ast_lock);
		return -QDF_STATUS_E_INVAL;
	}

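	/*
	 * Stash any callback/cookie already registered on this entry so the
	 * earlier caller can still be notified (with
	 * CDP_TXRX_AST_DELETE_IN_PROGRESS) after the entry is handed over to
	 * the new callback below.
	 */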
	if (ast_entry->callback) {
		cb = ast_entry->callback;
		arg = ast_entry->cookie;
	}

	ast_entry->callback = callback;
	ast_entry->cookie = cookie;

	/*
	 * If delete_in_progress is set, an AST delete was already sent to the
	 * target and the host is waiting for a response; do not send the
	 * delete again.
	 */
	if (!ast_entry->delete_in_progress)
		dp_peer_del_ast(soc, ast_entry);

	qdf_spin_unlock_bh(&soc->ast_lock);
	if (cb) {
		cb(soc->ctrl_psoc,
		   soc,
		   arg,
		   CDP_TXRX_AST_DELETE_IN_PROGRESS);
	}
	return QDF_STATUS_SUCCESS;
}

/**
 * dp_peer_ast_entry_del_by_pdev() - delete the ast entry from soc AST hash
 *                                   table if mac address and pdev_id match
 *
 * @soc_handle : data path soc handle
 * @mac_addr : AST entry mac address
 * @pdev_id : pdev id
 * @callback : callback function to be called on ast delete response from FW
 * @cookie : argument to be passed to callback
 *
 * return : QDF_STATUS_SUCCESS if ast entry found with mac_addr and delete
 *          is sent
 *          QDF_STATUS_E_INVAL if ast entry not found
 */
static QDF_STATUS dp_peer_ast_entry_del_by_pdev(struct cdp_soc_t *soc_handle,
						uint8_t *mac_addr,
						uint8_t pdev_id,
						txrx_ast_free_cb callback,
						void *cookie)
{
	struct dp_soc *soc = (struct dp_soc *)soc_handle;
	struct dp_ast_entry *ast_entry;
	txrx_ast_free_cb cb = NULL;
	void *arg = NULL;

	qdf_spin_lock_bh(&soc->ast_lock);
	ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, mac_addr, pdev_id);

	if (!ast_entry) {
		qdf_spin_unlock_bh(&soc->ast_lock);
		return -QDF_STATUS_E_INVAL;
	}

	if (ast_entry->callback) {
		cb = ast_entry->callback;
		arg = ast_entry->cookie;
	}

	ast_entry->callback = callback;
	ast_entry->cookie = cookie;

	/*
	 * If delete_in_progress is set, an AST delete was already sent to the
	 * target and the host is waiting for a response; do not send the
	 * delete again.
	 */
	if (!ast_entry->delete_in_progress)
		dp_peer_del_ast(soc, ast_entry);

	qdf_spin_unlock_bh(&soc->ast_lock);

	if (cb) {
		cb(soc->ctrl_psoc,
		   soc,
		   arg,
		   CDP_TXRX_AST_DELETE_IN_PROGRESS);
	}
	return QDF_STATUS_SUCCESS;
}

/**
 * dp_srng_find_ring_in_mask() - find which ext_group a ring belongs to
 * @ring_num: ring num of the ring being queried
 * @grp_mask: the grp_mask array for the ring type in question.
 *
 * The grp_mask array is indexed by group number and the bit fields correspond
 * to ring numbers. We are finding which interrupt group a ring belongs to.
 *
 * Return: the index in the grp_mask array with the ring number.
 * -QDF_STATUS_E_NOENT if no entry is found
 */
static int dp_srng_find_ring_in_mask(int ring_num, int *grp_mask)
{
	int ext_group_num;
	int mask = 1 << ring_num;

	for (ext_group_num = 0; ext_group_num < WLAN_CFG_INT_NUM_CONTEXTS;
	     ext_group_num++) {
		if (mask & grp_mask[ext_group_num])
			return ext_group_num;
	}

	return -QDF_STATUS_E_NOENT;
}
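/*
 * Example: with grp_mask[] = {0x1, 0x8, 0x0, ...} a query for ring_num 3
 * tests mask (1 << 3) = 0x8 against each group and returns ext_group 1,
 * since that is the interrupt context whose mask has bit 3 set.
 */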

static int dp_srng_calculate_msi_group(struct dp_soc *soc,
				       enum hal_ring_type ring_type,
				       int ring_num)
{
	int *grp_mask;

	switch (ring_type) {
	case WBM2SW_RELEASE:
		/* dp_tx_comp_handler - soc->tx_comp_ring */
		if (ring_num < 3)
			grp_mask = &soc->wlan_cfg_ctx->int_tx_ring_mask[0];

		/* dp_rx_wbm_err_process - soc->rx_rel_ring */
		else if (ring_num == 3) {
			/* sw treats this as a separate ring type */
			grp_mask = &soc->wlan_cfg_ctx->
				int_rx_wbm_rel_ring_mask[0];
			ring_num = 0;
		} else {
			qdf_assert(0);
			return -QDF_STATUS_E_NOENT;
		}
	break;

	case REO_EXCEPTION:
		/* dp_rx_err_process - &soc->reo_exception_ring */
		grp_mask = &soc->wlan_cfg_ctx->int_rx_err_ring_mask[0];
	break;

	case REO_DST:
		/* dp_rx_process - soc->reo_dest_ring */
		grp_mask = &soc->wlan_cfg_ctx->int_rx_ring_mask[0];
	break;

	case REO_STATUS:
		/* dp_reo_status_ring_handler - soc->reo_status_ring */
		grp_mask = &soc->wlan_cfg_ctx->int_reo_status_ring_mask[0];
	break;

	/* dp_rx_mon_status_srng_process - pdev->rxdma_mon_status_ring */
	case RXDMA_MONITOR_STATUS:
	/* dp_rx_mon_dest_process - pdev->rxdma_mon_dst_ring */
	case RXDMA_MONITOR_DST:
		/* dp_mon_process */
		grp_mask = &soc->wlan_cfg_ctx->int_rx_mon_ring_mask[0];
	break;
	case RXDMA_DST:
		/* dp_rxdma_err_process */
		grp_mask = &soc->wlan_cfg_ctx->int_rxdma2host_ring_mask[0];
	break;

	case RXDMA_BUF:
		grp_mask = &soc->wlan_cfg_ctx->int_host2rxdma_ring_mask[0];
	break;

	case RXDMA_MONITOR_BUF:
		/* TODO: support low_thresh interrupt */
		return -QDF_STATUS_E_NOENT;
	break;

	case TCL_DATA:
	case TCL_CMD:
	case REO_CMD:
	case SW2WBM_RELEASE:
	case WBM_IDLE_LINK:
		/* normally empty SW_TO_HW rings */
		return -QDF_STATUS_E_NOENT;
	break;

	case TCL_STATUS:
	case REO_REINJECT:
		/* misc unused rings */
		return -QDF_STATUS_E_NOENT;
	break;

	case CE_SRC:
	case CE_DST:
	case CE_DST_STATUS:
		/* CE_rings - currently handled by hif */
	default:
		return -QDF_STATUS_E_NOENT;
	break;
	}

	return dp_srng_find_ring_in_mask(ring_num, grp_mask);
}

static void dp_srng_msi_setup(struct dp_soc *soc, struct hal_srng_params
			      *ring_params, int ring_type, int ring_num)
{
	int msi_group_number;
	int msi_data_count;
	int ret;
	uint32_t msi_data_start, msi_irq_start, addr_low, addr_high;

	ret = pld_get_user_msi_assignment(soc->osdev->dev, "DP",
					    &msi_data_count, &msi_data_start,
					    &msi_irq_start);

	if (ret)
		return;

	msi_group_number = dp_srng_calculate_msi_group(soc, ring_type,
						       ring_num);
	if (msi_group_number < 0) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW,
			FL("ring not part of an ext_group; ring_type: %d, ring_num %d"),
			ring_type, ring_num);
		ring_params->msi_addr = 0;
		ring_params->msi_data = 0;
		return;
	}

	if (msi_group_number > msi_data_count) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_WARN,
			FL("2 msi_groups will share an msi; msi_group_num %d"),
			msi_group_number);

		QDF_ASSERT(0);
	}

	pld_get_msi_address(soc->osdev->dev, &addr_low, &addr_high);

	ring_params->msi_addr = addr_low;
	ring_params->msi_addr |= (qdf_dma_addr_t)(((uint64_t)addr_high) << 32);
	ring_params->msi_data = (msi_group_number % msi_data_count)
		+ msi_data_start;
	ring_params->flags |= HAL_SRNG_MSI_INTR;
}
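/*
 * Example of the msi_data math above: with msi_data_count = 3 and
 * msi_data_start = 1, a ring assigned to msi group 4 gets
 * msi_data = (4 % 3) + 1 = 2, i.e. group numbers wrap around onto the MSI
 * data values actually granted by the platform.
 */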

/**
 * dp_print_ast_stats() - Dump AST table contents
 * @soc: Datapath soc handle
 *
 * Return: void
 */
#ifdef FEATURE_AST
void dp_print_ast_stats(struct dp_soc *soc)
{
	uint8_t i;
	uint8_t num_entries = 0;
	struct dp_vdev *vdev;
	struct dp_pdev *pdev;
	struct dp_peer *peer;
	struct dp_ast_entry *ase, *tmp_ase;
	char type[CDP_TXRX_AST_TYPE_MAX][10] = {
			"NONE", "STATIC", "SELF", "WDS", "MEC", "HMWDS", "BSS",
			"DA", "HMWDS_SEC"};

	DP_PRINT_STATS("AST Stats:");
	DP_PRINT_STATS("	Entries Added   = %d", soc->stats.ast.added);
	DP_PRINT_STATS("	Entries Deleted = %d", soc->stats.ast.deleted);
	DP_PRINT_STATS("	Entries Agedout = %d", soc->stats.ast.aged_out);
	DP_PRINT_STATS("AST Table:");

	qdf_spin_lock_bh(&soc->ast_lock);
	for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) {
		pdev = soc->pdev_list[i];
		qdf_spin_lock_bh(&pdev->vdev_list_lock);
		DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
			DP_VDEV_ITERATE_PEER_LIST(vdev, peer) {
				DP_PEER_ITERATE_ASE_LIST(peer, ase, tmp_ase) {
					DP_PRINT_STATS("%6d mac_addr = %pM"
							" peer_mac_addr = %pM"
							" peer_id = %u"
							" type = %s"
							" next_hop = %d"
							" is_active = %d"
							" is_bss = %d"
							" ast_idx = %d"
							" ast_hash = %d"
							" delete_in_progress = %d"
							" pdev_id = %d"
							" vdev_id = %d",
							++num_entries,
							ase->mac_addr.raw,
							ase->peer->mac_addr.raw,
							ase->peer->peer_ids[0],
							type[ase->type],
							ase->next_hop,
							ase->is_active,
							ase->is_bss,
							ase->ast_idx,
							ase->ast_hash_value,
							ase->delete_in_progress,
							ase->pdev_id,
							ase->vdev_id);
				}
			}
		}
		qdf_spin_unlock_bh(&pdev->vdev_list_lock);
	}
	qdf_spin_unlock_bh(&soc->ast_lock);
}
#else
void dp_print_ast_stats(struct dp_soc *soc)
{
	DP_PRINT_STATS("AST Stats not available. Enable FEATURE_AST");
	return;
}
#endif

/**
 * dp_print_peer_table() - Dump all Peer stats
 * @vdev: Datapath Vdev handle
 *
 * Return: void
 */
static void dp_print_peer_table(struct dp_vdev *vdev)
{
	struct dp_peer *peer = NULL;

	DP_PRINT_STATS("Dumping Peer Table Stats:");
	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
		if (!peer) {
			DP_PRINT_STATS("Invalid Peer");
			return;
		}
		DP_PRINT_STATS("    peer_mac_addr = %pM"
			       " nawds_enabled = %d"
			       " bss_peer = %d"
			       " wapi = %d"
			       " wds_enabled = %d"
			       " delete in progress = %d"
			       " peer id = %d",
			       peer->mac_addr.raw,
			       peer->nawds_enabled,
			       peer->bss_peer,
			       peer->wapi,
			       peer->wds_enabled,
			       peer->delete_in_progress,
			       peer->peer_ids[0]);
	}
}

/*
 * dp_srng_setup() - Internal function to setup SRNG rings used by data path
 */
static int dp_srng_setup(struct dp_soc *soc, struct dp_srng *srng,
	int ring_type, int ring_num, int mac_id, uint32_t num_entries)
{
	void *hal_soc = soc->hal_soc;
	uint32_t entry_size = hal_srng_get_entrysize(hal_soc, ring_type);
	/* TODO: See if we should get align size from hal */
	uint32_t ring_base_align = 8;
	struct hal_srng_params ring_params;
	uint32_t max_entries = hal_srng_max_entries(hal_soc, ring_type);

	/* TODO: Currently hal layer takes care of endianness related settings.
	 * See if these settings need to be passed from DP layer
	 */
	ring_params.flags = 0;

	num_entries = (num_entries > max_entries) ? max_entries : num_entries;
	srng->hal_srng = NULL;
	srng->alloc_size = (num_entries * entry_size) + ring_base_align - 1;
	srng->num_entries = num_entries;

	if (!soc->dp_soc_reinit) {
		srng->base_vaddr_unaligned =
			qdf_mem_alloc_consistent(soc->osdev,
						 soc->osdev->dev,
						 srng->alloc_size,
						 &srng->base_paddr_unaligned);
	}

	if (!srng->base_vaddr_unaligned) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			FL("alloc failed - ring_type: %d, ring_num %d"),
			ring_type, ring_num);
		return QDF_STATUS_E_NOMEM;
	}

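	/*
	 * Derive the aligned ring base. alloc_size reserves
	 * (ring_base_align - 1) slack bytes; note that
	 * qdf_mem_alloc_consistent() typically returns memory that is already
	 * at least 8-byte aligned, so the modulo offset applied below is
	 * normally zero, and the physical address is advanced by the same
	 * offset to keep it in sync with the virtual address.
	 */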
	ring_params.ring_base_vaddr = srng->base_vaddr_unaligned +
		((unsigned long)srng->base_vaddr_unaligned % ring_base_align);
	ring_params.ring_base_paddr = srng->base_paddr_unaligned +
		((unsigned long)(ring_params.ring_base_vaddr) -
		(unsigned long)srng->base_vaddr_unaligned);
	ring_params.num_entries = num_entries;

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW,
		  FL("Ring type: %d, num:%d vaddr %pK paddr %pK entries %u"),
		  ring_type, ring_num, (void *)ring_params.ring_base_vaddr,
		  (void *)ring_params.ring_base_paddr, ring_params.num_entries);

	if (soc->intr_mode == DP_INTR_MSI) {
		dp_srng_msi_setup(soc, &ring_params, ring_type, ring_num);
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
			  FL("Using MSI for ring_type: %d, ring_num %d"),
			  ring_type, ring_num);

	} else {
		ring_params.msi_data = 0;
		ring_params.msi_addr = 0;
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
			  FL("Skipping MSI for ring_type: %d, ring_num %d"),
			  ring_type, ring_num);
	}

	/*
	 * Setup interrupt timer and batch counter thresholds for
	 * interrupt mitigation based on ring type
	 */
	if (ring_type == REO_DST) {
		ring_params.intr_timer_thres_us =
			wlan_cfg_get_int_timer_threshold_rx(soc->wlan_cfg_ctx);
		ring_params.intr_batch_cntr_thres_entries =
			wlan_cfg_get_int_batch_threshold_rx(soc->wlan_cfg_ctx);
	} else if (ring_type == WBM2SW_RELEASE && (ring_num < 3)) {
		ring_params.intr_timer_thres_us =
			wlan_cfg_get_int_timer_threshold_tx(soc->wlan_cfg_ctx);
		ring_params.intr_batch_cntr_thres_entries =
			wlan_cfg_get_int_batch_threshold_tx(soc->wlan_cfg_ctx);
	} else {
		ring_params.intr_timer_thres_us =
			wlan_cfg_get_int_timer_threshold_other(soc->wlan_cfg_ctx);
		ring_params.intr_batch_cntr_thres_entries =
			wlan_cfg_get_int_batch_threshold_other(soc->wlan_cfg_ctx);
	}

	/* Enable low threshold interrupts for rx buffer rings (regular and
	 * monitor buffer rings).
	 * TODO: See if this is required for any other ring
	 */
	if ((ring_type == RXDMA_BUF) || (ring_type == RXDMA_MONITOR_BUF) ||
		(ring_type == RXDMA_MONITOR_STATUS)) {
		/* TODO: Setting low threshold to 1/8th of ring size
		 * see if this needs to be configurable
		 */
		ring_params.low_threshold = num_entries >> 3;
		ring_params.flags |= HAL_SRNG_LOW_THRES_INTR_ENABLE;
		ring_params.intr_timer_thres_us =
			wlan_cfg_get_int_timer_threshold_rx(soc->wlan_cfg_ctx);
		ring_params.intr_batch_cntr_thres_entries = 0;
	}

	srng->hal_srng = hal_srng_setup(hal_soc, ring_type, ring_num,
		mac_id, &ring_params);

	if (!srng->hal_srng) {
		qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
				srng->alloc_size,
				srng->base_vaddr_unaligned,
				srng->base_paddr_unaligned, 0);
		/* Propagate the failure instead of reporting success with a
		 * NULL hal_srng, which callers would otherwise dereference.
		 */
		return QDF_STATUS_E_FAILURE;
	}

	return 0;
}

/*
 * dp_srng_deinit() - Internal function to deinit SRNG rings used by data path
 * @soc: DP SOC handle
 * @srng: source ring structure
 * @ring_type: type of ring
 * @ring_num: ring number
 *
 * Return: None
 */
static void dp_srng_deinit(struct dp_soc *soc, struct dp_srng *srng,
			   int ring_type, int ring_num)
{
	if (!srng->hal_srng) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("Ring type: %d, num:%d not setup"),
			  ring_type, ring_num);
		return;
	}

	hal_srng_cleanup(soc->hal_soc, srng->hal_srng);
	srng->hal_srng = NULL;
}

/**
 * dp_srng_cleanup - Internal function to cleanup SRNG rings used by data path
 * Any buffers allocated and attached to ring entries are expected to be freed
 * before calling this function.
 */
static void dp_srng_cleanup(struct dp_soc *soc, struct dp_srng *srng,
	int ring_type, int ring_num)
{
	if (!soc->dp_soc_reinit) {
		if (!srng->hal_srng && (srng->alloc_size == 0)) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  FL("Ring type: %d, num:%d not setup"),
				  ring_type, ring_num);
			return;
		}

		if (srng->hal_srng) {
			hal_srng_cleanup(soc->hal_soc, srng->hal_srng);
			srng->hal_srng = NULL;
		}
	}

	if (srng->alloc_size) {
		qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
					srng->alloc_size,
					srng->base_vaddr_unaligned,
					srng->base_paddr_unaligned, 0);
		srng->alloc_size = 0;
	}
}

/* TODO: Need this interface from HIF */
void *hif_get_hal_handle(void *hif_handle);

/*
 * dp_service_srngs() - Top level interrupt handler for DP Ring interrupts
 * @dp_ctx: DP SOC handle
 * @dp_budget: Number of frames/descriptors that can be processed in one shot
 *
 * Return: budget consumed, i.e. the number of frames/descriptors processed
 */
static uint32_t dp_service_srngs(void *dp_ctx, uint32_t dp_budget)
{
	struct dp_intr *int_ctx = (struct dp_intr *)dp_ctx;
	struct dp_soc *soc = int_ctx->soc;
	int ring = 0;
	uint32_t work_done  = 0;
	int budget = dp_budget;
	uint8_t tx_mask = int_ctx->tx_ring_mask;
	uint8_t rx_mask = int_ctx->rx_ring_mask;
	uint8_t rx_err_mask = int_ctx->rx_err_ring_mask;
	uint8_t rx_wbm_rel_mask = int_ctx->rx_wbm_rel_ring_mask;
	uint8_t reo_status_mask = int_ctx->reo_status_ring_mask;
	uint32_t remaining_quota = dp_budget;
	struct dp_pdev *pdev = NULL;
	int mac_id;

	/* Process Tx completion interrupts first to return back buffers */
	while (tx_mask) {
		if (tx_mask & 0x1) {
			work_done = dp_tx_comp_handler(soc,
					soc->tx_comp_ring[ring].hal_srng,
					remaining_quota);

			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
				  "tx mask 0x%x ring %d, budget %d, work_done %d",
				  tx_mask, ring, budget, work_done);

			budget -= work_done;
			if (budget <= 0)
				goto budget_done;

			remaining_quota = budget;
		}
		tx_mask = tx_mask >> 1;
		ring++;
	}

	/* Process REO Exception ring interrupt */
	if (rx_err_mask) {
		work_done = dp_rx_err_process(soc,
				soc->reo_exception_ring.hal_srng,
				remaining_quota);

		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			"REO Exception Ring: work_done %d budget %d",
			work_done, budget);

		budget -=  work_done;
		if (budget <= 0) {
			goto budget_done;
		}
		remaining_quota = budget;
	}

	/* Process Rx WBM release ring interrupt */
	if (rx_wbm_rel_mask) {
		work_done = dp_rx_wbm_err_process(soc,
				soc->rx_rel_ring.hal_srng, remaining_quota);

		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			"WBM Release Ring: work_done %d budget %d",
			work_done, budget);

		budget -=  work_done;
		if (budget <= 0) {
			goto budget_done;
		}
		remaining_quota = budget;
	}

	/* Process Rx interrupts */
	if (rx_mask) {
		for (ring = 0; ring < soc->num_reo_dest_rings; ring++) {
			if (rx_mask & (1 << ring)) {
				work_done = dp_rx_process(int_ctx,
					    soc->reo_dest_ring[ring].hal_srng,
					    ring,
					    remaining_quota);

				QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
					"rx mask 0x%x ring %d, work_done %d budget %d",
					rx_mask, ring, work_done, budget);

				budget -=  work_done;
				if (budget <= 0)
					goto budget_done;
				remaining_quota = budget;
			}
		}
	}

	if (reo_status_mask)
		dp_reo_status_ring_handler(soc);

	/* Process LMAC interrupts */
	for  (ring = 0 ; ring < MAX_PDEV_CNT; ring++) {
		pdev = soc->pdev_list[ring];
		if (pdev == NULL)
			continue;
		for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
			int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id,
								pdev->pdev_id);

			if (int_ctx->rx_mon_ring_mask & (1 << mac_for_pdev)) {
				work_done = dp_mon_process(soc, mac_for_pdev,
						remaining_quota);
				budget -= work_done;
				if (budget <= 0)
					goto budget_done;
				remaining_quota = budget;
			}

			if (int_ctx->rxdma2host_ring_mask &
					(1 << mac_for_pdev)) {
				work_done = dp_rxdma_err_process(soc,
							mac_for_pdev,
							remaining_quota);
				budget -=  work_done;
				if (budget <= 0)
					goto budget_done;
				remaining_quota = budget;
			}

			if (int_ctx->host2rxdma_ring_mask &
						(1 << mac_for_pdev)) {
				union dp_rx_desc_list_elem_t *desc_list = NULL;
				union dp_rx_desc_list_elem_t *tail = NULL;
				struct dp_srng *rx_refill_buf_ring =
					&pdev->rx_refill_buf_ring;

				DP_STATS_INC(pdev, replenish.low_thresh_intrs,
						1);
				dp_rx_buffers_replenish(soc, mac_for_pdev,
					rx_refill_buf_ring,
					&soc->rx_desc_buf[mac_for_pdev], 0,
					&desc_list, &tail);
			}
		}
	}

	qdf_lro_flush(int_ctx->lro_ctx);

budget_done:
	return dp_budget - budget;
}

/*
 * dp_interrupt_timer() - timer handler to poll for DP interrupts
 * @arg: SoC Handle
 *
 * Return: none
 */
static void dp_interrupt_timer(void *arg)
{
	struct dp_soc *soc = (struct dp_soc *) arg;
	int i;

	if (qdf_atomic_read(&soc->cmn_init_done)) {
		for (i = 0;
			i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++)
			dp_service_srngs(&soc->intr_ctx[i], 0xffff);

		qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
	}
}
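/*
 * While cmn_init_done is set, the handler above services every interrupt
 * context with a large budget and then re-arms itself, so in poll mode all
 * rings are reaped every DP_INTR_POLL_TIMER_MS (10 ms) without any hardware
 * interrupts.
 */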

/*
 * dp_soc_attach_poll() - Register handlers for DP interrupts
 * @txrx_soc: DP SOC handle
 *
 * The host driver will register for "DP_NUM_INTERRUPT_CONTEXTS" number of
 * NAPI contexts. Each NAPI context will have a tx_ring_mask, rx_ring_mask,
 * and rx_monitor_ring mask to indicate the rings that are processed by the
 * handler.
 *
 * Return: 0 for success, nonzero for failure.
 */
static QDF_STATUS dp_soc_attach_poll(void *txrx_soc)
{
	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
	int i;

	soc->intr_mode = DP_INTR_POLL;

	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
		soc->intr_ctx[i].dp_intr_id = i;
		soc->intr_ctx[i].tx_ring_mask =
			wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, i);
		soc->intr_ctx[i].rx_ring_mask =
			wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, i);
		soc->intr_ctx[i].rx_mon_ring_mask =
			wlan_cfg_get_rx_mon_ring_mask(soc->wlan_cfg_ctx, i);
		soc->intr_ctx[i].rx_err_ring_mask =
			wlan_cfg_get_rx_err_ring_mask(soc->wlan_cfg_ctx, i);
		soc->intr_ctx[i].rx_wbm_rel_ring_mask =
			wlan_cfg_get_rx_wbm_rel_ring_mask(soc->wlan_cfg_ctx, i);
		soc->intr_ctx[i].reo_status_ring_mask =
			wlan_cfg_get_reo_status_ring_mask(soc->wlan_cfg_ctx, i);
		soc->intr_ctx[i].rxdma2host_ring_mask =
			wlan_cfg_get_rxdma2host_ring_mask(soc->wlan_cfg_ctx, i);
		soc->intr_ctx[i].soc = soc;
		soc->intr_ctx[i].lro_ctx = qdf_lro_init();
	}

	qdf_timer_init(soc->osdev, &soc->int_timer,
			dp_interrupt_timer, (void *)soc,
			QDF_TIMER_TYPE_WAKE_APPS);

	return QDF_STATUS_SUCCESS;
}

static QDF_STATUS dp_soc_interrupt_attach(void *txrx_soc);
#if defined(CONFIG_MCL)
/*
 * dp_soc_interrupt_attach_wrapper() - Register handlers for DP interrupts
 * @txrx_soc: DP SOC handle
 *
 * Call the appropriate attach function based on the mode of operation.
 * This is a WAR for enabling monitor mode.
 *
 * Return: 0 for success. nonzero for failure.
 */
static QDF_STATUS dp_soc_interrupt_attach_wrapper(void *txrx_soc)
{
	struct dp_soc *soc = (struct dp_soc *)txrx_soc;

	if (!(soc->wlan_cfg_ctx->napi_enabled) ||
	     con_mode_monitor == QDF_GLOBAL_MONITOR_MODE) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
				  "%s: Poll mode", __func__);
		return dp_soc_attach_poll(txrx_soc);
	} else {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
				  "%s: Interrupt mode", __func__);
		return dp_soc_interrupt_attach(txrx_soc);
	}
}
#else
#if defined(DP_INTR_POLL_BASED) && DP_INTR_POLL_BASED
static QDF_STATUS dp_soc_interrupt_attach_wrapper(void *txrx_soc)
{
	return dp_soc_attach_poll(txrx_soc);
}
#else
static QDF_STATUS dp_soc_interrupt_attach_wrapper(void *txrx_soc)
{
	struct dp_soc *soc = (struct dp_soc *)txrx_soc;

	if (hif_is_polled_mode_enabled(soc->hif_handle))
		return dp_soc_attach_poll(txrx_soc);
	else
		return dp_soc_interrupt_attach(txrx_soc);
}
#endif
#endif

static void dp_soc_interrupt_map_calculate_integrated(struct dp_soc *soc,
		int intr_ctx_num, int *irq_id_map, int *num_irq_r)
{
	int j;
	int num_irq = 0;

	int tx_mask =
		wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
	int rx_mask =
		wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
	int rx_mon_mask =
		wlan_cfg_get_rx_mon_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
	int rx_err_ring_mask = wlan_cfg_get_rx_err_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int rx_wbm_rel_ring_mask = wlan_cfg_get_rx_wbm_rel_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int reo_status_ring_mask = wlan_cfg_get_reo_status_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int rxdma2host_ring_mask = wlan_cfg_get_rxdma2host_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int host2rxdma_ring_mask = wlan_cfg_get_host2rxdma_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int host2rxdma_mon_ring_mask = wlan_cfg_get_host2rxdma_mon_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);

	for (j = 0; j < HIF_MAX_GRP_IRQ; j++) {

		if (tx_mask & (1 << j)) {
			irq_id_map[num_irq++] =
				(wbm2host_tx_completions_ring1 - j);
		}

		if (rx_mask & (1 << j)) {
			irq_id_map[num_irq++] =
				(reo2host_destination_ring1 - j);
		}

		if (rxdma2host_ring_mask & (1 << j)) {
			irq_id_map[num_irq++] =
				rxdma2host_destination_ring_mac1 -
				wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, j);
		}

		if (host2rxdma_ring_mask & (1 << j)) {
			irq_id_map[num_irq++] =
				host2rxdma_host_buf_ring_mac1 -
				wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, j);
		}

		if (host2rxdma_mon_ring_mask & (1 << j)) {
			irq_id_map[num_irq++] =
				host2rxdma_monitor_ring1 -
				wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, j);
		}

		if (rx_mon_mask & (1 << j)) {
			irq_id_map[num_irq++] =
				ppdu_end_interrupts_mac1 -
				wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, j);
			irq_id_map[num_irq++] =
				rxdma2host_monitor_status_ring_mac1 -
				wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, j);
		}

		if (rx_wbm_rel_ring_mask & (1 << j))
			irq_id_map[num_irq++] = wbm2host_rx_release;

		if (rx_err_ring_mask & (1 << j))
			irq_id_map[num_irq++] = reo2host_exception;

		if (reo_status_ring_mask & (1 << j))
			irq_id_map[num_irq++] = reo2host_status;

	}
	*num_irq_r = num_irq;
}

static void dp_soc_interrupt_map_calculate_msi(struct dp_soc *soc,
		int intr_ctx_num, int *irq_id_map, int *num_irq_r,
		int msi_vector_count, int msi_vector_start)
{
	int tx_mask = wlan_cfg_get_tx_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int rx_mask = wlan_cfg_get_rx_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int rx_mon_mask = wlan_cfg_get_rx_mon_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int rx_err_ring_mask = wlan_cfg_get_rx_err_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int rx_wbm_rel_ring_mask = wlan_cfg_get_rx_wbm_rel_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int reo_status_ring_mask = wlan_cfg_get_reo_status_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int rxdma2host_ring_mask = wlan_cfg_get_rxdma2host_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);

	unsigned int vector =
		(intr_ctx_num % msi_vector_count) + msi_vector_start;
	int num_irq = 0;

	soc->intr_mode = DP_INTR_MSI;

	if (tx_mask | rx_mask | rx_mon_mask | rx_err_ring_mask |
	    rx_wbm_rel_ring_mask | reo_status_ring_mask | rxdma2host_ring_mask)
		irq_id_map[num_irq++] =
			pld_get_msi_irq(soc->osdev->dev, vector);

	*num_irq_r = num_irq;
}
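/*
 * In the MSI case all rings of an interrupt context share one IRQ: the
 * context is mapped onto vector (intr_ctx_num % msi_vector_count) +
 * msi_vector_start, round-robining the contexts across however many MSI
 * vectors the platform granted to "DP".
 */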

static void dp_soc_interrupt_map_calculate(struct dp_soc *soc, int intr_ctx_num,
				    int *irq_id_map, int *num_irq)
{
	int msi_vector_count, ret;
	uint32_t msi_base_data, msi_vector_start;

	ret = pld_get_user_msi_assignment(soc->osdev->dev, "DP",
					    &msi_vector_count,
					    &msi_base_data,
					    &msi_vector_start);
	if (ret)
		return dp_soc_interrupt_map_calculate_integrated(soc,
				intr_ctx_num, irq_id_map, num_irq);
	else
		dp_soc_interrupt_map_calculate_msi(soc,
				intr_ctx_num, irq_id_map, num_irq,
				msi_vector_count, msi_vector_start);
}
1687 
1688 /*
1689  * dp_soc_interrupt_attach() - Register handlers for DP interrupts
1690  * @txrx_soc: DP SOC handle
1691  *
1692  * Host driver will register for “DP_NUM_INTERRUPT_CONTEXTS” number of NAPI
1693  * contexts. Each NAPI context will have a tx_ring_mask , rx_ring_mask ,and
1694  * rx_monitor_ring mask to indicate the rings that are processed by the handler.
1695  *
1696  * Return: 0 for success. nonzero for failure.
1697  */
1698 static QDF_STATUS dp_soc_interrupt_attach(void *txrx_soc)
1699 {
1700 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
1701 
1702 	int i = 0;
1703 	int num_irq = 0;
1704 
1705 	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
1706 		int ret = 0;
1707 
1708 		/* Map of IRQ ids registered with one interrupt context */
1709 		int irq_id_map[HIF_MAX_GRP_IRQ];
1710 
1711 		int tx_mask =
1712 			wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, i);
1713 		int rx_mask =
1714 			wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, i);
1715 		int rx_mon_mask =
1716 			dp_soc_get_mon_mask_for_interrupt_mode(soc, i);
1717 		int rx_err_ring_mask =
1718 			wlan_cfg_get_rx_err_ring_mask(soc->wlan_cfg_ctx, i);
1719 		int rx_wbm_rel_ring_mask =
1720 			wlan_cfg_get_rx_wbm_rel_ring_mask(soc->wlan_cfg_ctx, i);
1721 		int reo_status_ring_mask =
1722 			wlan_cfg_get_reo_status_ring_mask(soc->wlan_cfg_ctx, i);
1723 		int rxdma2host_ring_mask =
1724 			wlan_cfg_get_rxdma2host_ring_mask(soc->wlan_cfg_ctx, i);
1725 		int host2rxdma_ring_mask =
1726 			wlan_cfg_get_host2rxdma_ring_mask(soc->wlan_cfg_ctx, i);
1727 		int host2rxdma_mon_ring_mask =
1728 			wlan_cfg_get_host2rxdma_mon_ring_mask(
1729 				soc->wlan_cfg_ctx, i);
1730 
1731 		soc->intr_ctx[i].dp_intr_id = i;
1732 		soc->intr_ctx[i].tx_ring_mask = tx_mask;
1733 		soc->intr_ctx[i].rx_ring_mask = rx_mask;
1734 		soc->intr_ctx[i].rx_mon_ring_mask = rx_mon_mask;
1735 		soc->intr_ctx[i].rx_err_ring_mask = rx_err_ring_mask;
1736 		soc->intr_ctx[i].rxdma2host_ring_mask = rxdma2host_ring_mask;
1737 		soc->intr_ctx[i].host2rxdma_ring_mask = host2rxdma_ring_mask;
1738 		soc->intr_ctx[i].rx_wbm_rel_ring_mask = rx_wbm_rel_ring_mask;
1739 		soc->intr_ctx[i].reo_status_ring_mask = reo_status_ring_mask;
1740 		soc->intr_ctx[i].host2rxdma_mon_ring_mask =
1741 			 host2rxdma_mon_ring_mask;
1742 
1743 		soc->intr_ctx[i].soc = soc;
1744 
1745 		num_irq = 0;
1746 
1747 		dp_soc_interrupt_map_calculate(soc, i, &irq_id_map[0],
1748 					       &num_irq);
1749 
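		/*
		 * Register the computed IRQ set as one HIF ext group;
		 * whenever any IRQ in the group fires, HIF schedules
		 * dp_service_srngs() as NAPI work against this interrupt
		 * context.
		 */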
1750 		ret = hif_register_ext_group(soc->hif_handle,
1751 				num_irq, irq_id_map, dp_service_srngs,
1752 				&soc->intr_ctx[i], "dp_intr",
1753 				HIF_EXEC_NAPI_TYPE, QCA_NAPI_DEF_SCALE_BIN_SHIFT);
1754 
1755 		if (ret) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  FL("failed, ret = %d"), ret);
1758 
1759 			return QDF_STATUS_E_FAILURE;
1760 		}
1761 		soc->intr_ctx[i].lro_ctx = qdf_lro_init();
1762 	}
1763 
1764 	hif_configure_ext_group_interrupts(soc->hif_handle);
1765 
1766 	return QDF_STATUS_SUCCESS;
1767 }
1768 
1769 /*
1770  * dp_soc_interrupt_detach() - Deregister any allocations done for interrupts
1771  * @txrx_soc: DP SOC handle
1772  *
1773  * Return: void
1774  */
1775 static void dp_soc_interrupt_detach(void *txrx_soc)
1776 {
1777 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
1778 	int i;
1779 
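	/*
	 * In poll mode the periodic timer drives ring servicing, so only the
	 * timer is torn down here; otherwise the HIF ext interrupt groups
	 * registered at attach time are deregistered.
	 */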
1780 	if (soc->intr_mode == DP_INTR_POLL) {
1781 		qdf_timer_stop(&soc->int_timer);
1782 		qdf_timer_free(&soc->int_timer);
1783 	} else {
1784 		hif_deregister_exec_group(soc->hif_handle, "dp_intr");
1785 	}
1786 
1787 	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
1788 		soc->intr_ctx[i].tx_ring_mask = 0;
1789 		soc->intr_ctx[i].rx_ring_mask = 0;
1790 		soc->intr_ctx[i].rx_mon_ring_mask = 0;
1791 		soc->intr_ctx[i].rx_err_ring_mask = 0;
1792 		soc->intr_ctx[i].rx_wbm_rel_ring_mask = 0;
1793 		soc->intr_ctx[i].reo_status_ring_mask = 0;
1794 		soc->intr_ctx[i].rxdma2host_ring_mask = 0;
1795 		soc->intr_ctx[i].host2rxdma_ring_mask = 0;
1796 		soc->intr_ctx[i].host2rxdma_mon_ring_mask = 0;
1797 
1798 		qdf_lro_deinit(soc->intr_ctx[i].lro_ctx);
1799 	}
1800 }
1801 
1802 #define AVG_MAX_MPDUS_PER_TID 128
1803 #define AVG_TIDS_PER_CLIENT 2
1804 #define AVG_FLOWS_PER_TID 2
1805 #define AVG_MSDUS_PER_FLOW 128
1806 #define AVG_MSDUS_PER_MPDU 4
1807 
1808 /*
1809  * Allocate and setup link descriptor pool that will be used by HW for
1810  * various link and queue descriptors and managed by WBM
1811  */
1812 static int dp_hw_link_desc_pool_setup(struct dp_soc *soc)
1813 {
1814 	int link_desc_size = hal_get_link_desc_size(soc->hal_soc);
1815 	int link_desc_align = hal_get_link_desc_align(soc->hal_soc);
1816 	uint32_t max_clients = wlan_cfg_get_max_clients(soc->wlan_cfg_ctx);
1817 	uint32_t num_mpdus_per_link_desc =
1818 		hal_num_mpdus_per_link_desc(soc->hal_soc);
1819 	uint32_t num_msdus_per_link_desc =
1820 		hal_num_msdus_per_link_desc(soc->hal_soc);
1821 	uint32_t num_mpdu_links_per_queue_desc =
1822 		hal_num_mpdu_links_per_queue_desc(soc->hal_soc);
1823 	uint32_t max_alloc_size = wlan_cfg_max_alloc_size(soc->wlan_cfg_ctx);
1824 	uint32_t total_link_descs, total_mem_size;
1825 	uint32_t num_mpdu_link_descs, num_mpdu_queue_descs;
1826 	uint32_t num_tx_msdu_link_descs, num_rx_msdu_link_descs;
1827 	uint32_t num_link_desc_banks;
1828 	uint32_t last_bank_size = 0;
1829 	uint32_t entry_size, num_entries;
1830 	int i;
1831 	uint32_t desc_id = 0;
1832 	qdf_dma_addr_t *baseaddr = NULL;
1833 
	/* Only Tx queue descriptors are allocated from the common link
	 * descriptor pool. Rx queue descriptors (REO queue extension
	 * descriptors) are not included because they are expected to be
	 * allocated contiguously with the REO queue descriptors.
	 */
1839 	num_mpdu_link_descs = (max_clients * AVG_TIDS_PER_CLIENT *
1840 		AVG_MAX_MPDUS_PER_TID) / num_mpdus_per_link_desc;
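	/*
	 * Illustrative sizing (assumed numbers; real values come from HAL
	 * and wlan_cfg): with max_clients = 64, 2 TIDs per client, 128 MPDUs
	 * per TID and 6 MPDUs per link descriptor, this works out to
	 * (64 * 2 * 128) / 6 = 2730 MPDU link descriptors before the
	 * power-of-2 round-up below.
	 */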
1841 
1842 	num_mpdu_queue_descs = num_mpdu_link_descs /
1843 		num_mpdu_links_per_queue_desc;
1844 
1845 	num_tx_msdu_link_descs = (max_clients * AVG_TIDS_PER_CLIENT *
1846 		AVG_FLOWS_PER_TID * AVG_MSDUS_PER_FLOW) /
1847 		num_msdus_per_link_desc;
1848 
1849 	num_rx_msdu_link_descs = (max_clients * AVG_TIDS_PER_CLIENT *
1850 		AVG_MAX_MPDUS_PER_TID * AVG_MSDUS_PER_MPDU) / 6;
1851 
1852 	num_entries = num_mpdu_link_descs + num_mpdu_queue_descs +
1853 		num_tx_msdu_link_descs + num_rx_msdu_link_descs;
1854 
1855 	/* Round up to power of 2 */
1856 	total_link_descs = 1;
1857 	while (total_link_descs < num_entries)
1858 		total_link_descs <<= 1;
1859 
1860 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
1861 		FL("total_link_descs: %u, link_desc_size: %d"),
1862 		total_link_descs, link_desc_size);
1863 	total_mem_size =  total_link_descs * link_desc_size;
1864 
1865 	total_mem_size += link_desc_align;
1866 
1867 	if (total_mem_size <= max_alloc_size) {
1868 		num_link_desc_banks = 0;
1869 		last_bank_size = total_mem_size;
1870 	} else {
1871 		num_link_desc_banks = (total_mem_size) /
1872 			(max_alloc_size - link_desc_align);
1873 		last_bank_size = total_mem_size %
1874 			(max_alloc_size - link_desc_align);
1875 	}
1876 
1877 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
1878 		FL("total_mem_size: %d, num_link_desc_banks: %u"),
1879 		total_mem_size, num_link_desc_banks);
1880 
1881 	for (i = 0; i < num_link_desc_banks; i++) {
1882 		if (!soc->dp_soc_reinit) {
1883 			baseaddr = &soc->link_desc_banks[i].
1884 					base_paddr_unaligned;
1885 			soc->link_desc_banks[i].base_vaddr_unaligned =
1886 				qdf_mem_alloc_consistent(soc->osdev,
1887 							 soc->osdev->dev,
1888 							 max_alloc_size,
1889 							 baseaddr);
1890 		}
1891 		soc->link_desc_banks[i].size = max_alloc_size;
1892 
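		/*
		 * Buffers from qdf_mem_alloc_consistent() are expected to
		 * already satisfy link_desc_align, so the alignment offset
		 * computed below is normally zero; the virtual and physical
		 * bases are offset by the same amount to keep them in sync.
		 */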
1893 		soc->link_desc_banks[i].base_vaddr = (void *)((unsigned long)(
1894 			soc->link_desc_banks[i].base_vaddr_unaligned) +
1895 			((unsigned long)(
1896 			soc->link_desc_banks[i].base_vaddr_unaligned) %
1897 			link_desc_align));
1898 
1899 		soc->link_desc_banks[i].base_paddr = (unsigned long)(
1900 			soc->link_desc_banks[i].base_paddr_unaligned) +
1901 			((unsigned long)(soc->link_desc_banks[i].base_vaddr) -
1902 			(unsigned long)(
1903 			soc->link_desc_banks[i].base_vaddr_unaligned));
1904 
1905 		if (!soc->link_desc_banks[i].base_vaddr_unaligned) {
1906 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1907 				FL("Link descriptor memory alloc failed"));
1908 			goto fail;
1909 		}
1910 	}
1911 
1912 	if (last_bank_size) {
		/* Allocate the last bank in case the total memory required
		 * is not an exact multiple of max_alloc_size
		 */
1916 		if (!soc->dp_soc_reinit) {
1917 			baseaddr = &soc->link_desc_banks[i].
1918 					base_paddr_unaligned;
1919 			soc->link_desc_banks[i].base_vaddr_unaligned =
1920 				qdf_mem_alloc_consistent(soc->osdev,
1921 							 soc->osdev->dev,
1922 							 last_bank_size,
1923 							 baseaddr);
1924 		}
1925 		soc->link_desc_banks[i].size = last_bank_size;
1926 
1927 		soc->link_desc_banks[i].base_vaddr = (void *)((unsigned long)
1928 			(soc->link_desc_banks[i].base_vaddr_unaligned) +
1929 			((unsigned long)(
1930 			soc->link_desc_banks[i].base_vaddr_unaligned) %
1931 			link_desc_align));
1932 
1933 		soc->link_desc_banks[i].base_paddr =
1934 			(unsigned long)(
1935 			soc->link_desc_banks[i].base_paddr_unaligned) +
1936 			((unsigned long)(soc->link_desc_banks[i].base_vaddr) -
1937 			(unsigned long)(
1938 			soc->link_desc_banks[i].base_vaddr_unaligned));
1939 	}
1940 
1941 
1942 	/* Allocate and setup link descriptor idle list for HW internal use */
1943 	entry_size = hal_srng_get_entrysize(soc->hal_soc, WBM_IDLE_LINK);
1944 	total_mem_size = entry_size * total_link_descs;
1945 
1946 	if (total_mem_size <= max_alloc_size) {
1947 		void *desc;
1948 
1949 		if (dp_srng_setup(soc, &soc->wbm_idle_link_ring,
1950 			WBM_IDLE_LINK, 0, 0, total_link_descs)) {
1951 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1952 				FL("Link desc idle ring setup failed"));
1953 			goto fail;
1954 		}
1955 
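		/*
		 * Walk every link descriptor bank and push one WBM idle-link
		 * ring entry per descriptor; the cookie encodes the desc_id
		 * and bank index so a descriptor can be traced back to its
		 * bank later.
		 */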
1956 		hal_srng_access_start_unlocked(soc->hal_soc,
1957 			soc->wbm_idle_link_ring.hal_srng);
1958 
1959 		for (i = 0; i < MAX_LINK_DESC_BANKS &&
1960 			soc->link_desc_banks[i].base_paddr; i++) {
1961 			uint32_t num_entries = (soc->link_desc_banks[i].size -
1962 				((unsigned long)(
1963 				soc->link_desc_banks[i].base_vaddr) -
1964 				(unsigned long)(
1965 				soc->link_desc_banks[i].base_vaddr_unaligned)))
1966 				/ link_desc_size;
1967 			unsigned long paddr = (unsigned long)(
1968 				soc->link_desc_banks[i].base_paddr);
1969 
1970 			while (num_entries && (desc = hal_srng_src_get_next(
1971 				soc->hal_soc,
1972 				soc->wbm_idle_link_ring.hal_srng))) {
1973 				hal_set_link_desc_addr(desc,
1974 					LINK_DESC_COOKIE(desc_id, i), paddr);
1975 				num_entries--;
1976 				desc_id++;
1977 				paddr += link_desc_size;
1978 			}
1979 		}
1980 		hal_srng_access_end_unlocked(soc->hal_soc,
1981 			soc->wbm_idle_link_ring.hal_srng);
1982 	} else {
1983 		uint32_t num_scatter_bufs;
1984 		uint32_t num_entries_per_buf;
1985 		uint32_t rem_entries;
1986 		uint8_t *scatter_buf_ptr;
1987 		uint16_t scatter_buf_num;
1988 		uint32_t buf_size = 0;
1989 
1990 		soc->wbm_idle_scatter_buf_size =
1991 			hal_idle_list_scatter_buf_size(soc->hal_soc);
1992 		num_entries_per_buf = hal_idle_scatter_buf_num_entries(
1993 			soc->hal_soc, soc->wbm_idle_scatter_buf_size);
1994 		num_scatter_bufs = hal_idle_list_num_scatter_bufs(
1995 					soc->hal_soc, total_mem_size,
1996 					soc->wbm_idle_scatter_buf_size);
1997 
1998 		if (num_scatter_bufs > MAX_IDLE_SCATTER_BUFS) {
1999 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2000 					FL("scatter bufs size out of bounds"));
2001 			goto fail;
2002 		}
2003 
2004 		for (i = 0; i < num_scatter_bufs; i++) {
2005 			baseaddr = &soc->wbm_idle_scatter_buf_base_paddr[i];
2006 			if (!soc->dp_soc_reinit) {
2007 				buf_size = soc->wbm_idle_scatter_buf_size;
2008 				soc->wbm_idle_scatter_buf_base_vaddr[i] =
2009 					qdf_mem_alloc_consistent(soc->osdev,
2010 								 soc->osdev->
2011 								 dev,
2012 								 buf_size,
2013 								 baseaddr);
2014 			}
2015 			if (soc->wbm_idle_scatter_buf_base_vaddr[i] == NULL) {
2016 				QDF_TRACE(QDF_MODULE_ID_DP,
2017 					  QDF_TRACE_LEVEL_ERROR,
2018 					  FL("Scatter lst memory alloc fail"));
2019 				goto fail;
2020 			}
2021 		}
2022 
2023 		/* Populate idle list scatter buffers with link descriptor
2024 		 * pointers
2025 		 */
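		/*
		 * Each scatter buffer holds num_entries_per_buf entries of
		 * entry_size bytes; once a buffer fills up, population
		 * continues from the start of the next scatter buffer until
		 * every link descriptor has been recorded.
		 */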
2026 		scatter_buf_num = 0;
2027 		scatter_buf_ptr = (uint8_t *)(
2028 			soc->wbm_idle_scatter_buf_base_vaddr[scatter_buf_num]);
2029 		rem_entries = num_entries_per_buf;
2030 
2031 		for (i = 0; i < MAX_LINK_DESC_BANKS &&
2032 			soc->link_desc_banks[i].base_paddr; i++) {
2033 			uint32_t num_link_descs =
2034 				(soc->link_desc_banks[i].size -
2035 				((unsigned long)(
2036 				soc->link_desc_banks[i].base_vaddr) -
2037 				(unsigned long)(
2038 				soc->link_desc_banks[i].base_vaddr_unaligned)))
2039 				/ link_desc_size;
2040 			unsigned long paddr = (unsigned long)(
2041 				soc->link_desc_banks[i].base_paddr);
2042 
2043 			while (num_link_descs) {
2044 				hal_set_link_desc_addr((void *)scatter_buf_ptr,
2045 					LINK_DESC_COOKIE(desc_id, i), paddr);
2046 				num_link_descs--;
2047 				desc_id++;
2048 				paddr += link_desc_size;
2049 				rem_entries--;
2050 				if (rem_entries) {
2051 					scatter_buf_ptr += entry_size;
2052 				} else {
2053 					rem_entries = num_entries_per_buf;
2054 					scatter_buf_num++;
2055 
2056 					if (scatter_buf_num >= num_scatter_bufs)
2057 						break;
2058 
2059 					scatter_buf_ptr = (uint8_t *)(
2060 						soc->wbm_idle_scatter_buf_base_vaddr[
2061 						scatter_buf_num]);
2062 				}
2063 			}
2064 		}
2065 		/* Setup link descriptor idle list in HW */
2066 		hal_setup_link_idle_list(soc->hal_soc,
2067 			soc->wbm_idle_scatter_buf_base_paddr,
2068 			soc->wbm_idle_scatter_buf_base_vaddr,
2069 			num_scatter_bufs, soc->wbm_idle_scatter_buf_size,
2070 			(uint32_t)(scatter_buf_ptr -
2071 			(uint8_t *)(soc->wbm_idle_scatter_buf_base_vaddr[
2072 			scatter_buf_num-1])), total_link_descs);
2073 	}
2074 	return 0;
2075 
2076 fail:
2077 	if (soc->wbm_idle_link_ring.hal_srng) {
2078 		dp_srng_cleanup(soc, &soc->wbm_idle_link_ring,
2079 				WBM_IDLE_LINK, 0);
2080 	}
2081 
2082 	for (i = 0; i < MAX_IDLE_SCATTER_BUFS; i++) {
2083 		if (soc->wbm_idle_scatter_buf_base_vaddr[i]) {
2084 			qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
2085 				soc->wbm_idle_scatter_buf_size,
2086 				soc->wbm_idle_scatter_buf_base_vaddr[i],
2087 				soc->wbm_idle_scatter_buf_base_paddr[i], 0);
2088 			soc->wbm_idle_scatter_buf_base_vaddr[i] = NULL;
2089 		}
2090 	}
2091 
2092 	for (i = 0; i < MAX_LINK_DESC_BANKS; i++) {
2093 		if (soc->link_desc_banks[i].base_vaddr_unaligned) {
2094 			qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
2095 				soc->link_desc_banks[i].size,
2096 				soc->link_desc_banks[i].base_vaddr_unaligned,
2097 				soc->link_desc_banks[i].base_paddr_unaligned,
2098 				0);
2099 			soc->link_desc_banks[i].base_vaddr_unaligned = NULL;
2100 		}
2101 	}
2102 	return QDF_STATUS_E_FAILURE;
2103 }
2104 
2105 /*
 * Free the link descriptor pool that was set up for use by HW
2107  */
2108 static void dp_hw_link_desc_pool_cleanup(struct dp_soc *soc)
2109 {
2110 	int i;
2111 
2112 	if (soc->wbm_idle_link_ring.hal_srng) {
2113 		dp_srng_cleanup(soc, &soc->wbm_idle_link_ring,
2114 			WBM_IDLE_LINK, 0);
2115 	}
2116 
2117 	for (i = 0; i < MAX_IDLE_SCATTER_BUFS; i++) {
2118 		if (soc->wbm_idle_scatter_buf_base_vaddr[i]) {
2119 			qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
2120 				soc->wbm_idle_scatter_buf_size,
2121 				soc->wbm_idle_scatter_buf_base_vaddr[i],
2122 				soc->wbm_idle_scatter_buf_base_paddr[i], 0);
2123 			soc->wbm_idle_scatter_buf_base_vaddr[i] = NULL;
2124 		}
2125 	}
2126 
2127 	for (i = 0; i < MAX_LINK_DESC_BANKS; i++) {
2128 		if (soc->link_desc_banks[i].base_vaddr_unaligned) {
2129 			qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
2130 				soc->link_desc_banks[i].size,
2131 				soc->link_desc_banks[i].base_vaddr_unaligned,
2132 				soc->link_desc_banks[i].base_paddr_unaligned,
2133 				0);
2134 			soc->link_desc_banks[i].base_vaddr_unaligned = NULL;
2135 		}
2136 	}
2137 }
2138 
2139 #ifdef IPA_OFFLOAD
2140 #define REO_DST_RING_SIZE_QCA6290 1023
2141 #ifndef QCA_WIFI_QCA8074_VP
2142 #define REO_DST_RING_SIZE_QCA8074 1023
2143 #else
2144 #define REO_DST_RING_SIZE_QCA8074 8
2145 #endif /* QCA_WIFI_QCA8074_VP */
2146 
2147 #else
2148 
2149 #define REO_DST_RING_SIZE_QCA6290 1024
2150 #ifndef QCA_WIFI_QCA8074_VP
2151 #define REO_DST_RING_SIZE_QCA8074 2048
2152 #else
2153 #define REO_DST_RING_SIZE_QCA8074 8
2154 #endif /* QCA_WIFI_QCA8074_VP */
2155 #endif /* IPA_OFFLOAD */
2156 
2157 /*
2158  * dp_ast_aging_timer_fn() - Timer callback function for WDS aging
2159  * @soc: Datapath SOC handle
2160  *
 * This is a timer function used to age out stale AST entries from the
 * AST table.
2163  */
2164 #ifdef FEATURE_WDS
2165 static void dp_ast_aging_timer_fn(void *soc_hdl)
2166 {
2167 	struct dp_soc *soc = (struct dp_soc *) soc_hdl;
2168 	struct dp_pdev *pdev;
2169 	struct dp_vdev *vdev;
2170 	struct dp_peer *peer;
2171 	struct dp_ast_entry *ase, *temp_ase;
2172 	int i;
2173 	bool check_wds_ase = false;
2174 
2175 	if (soc->wds_ast_aging_timer_cnt++ >= DP_WDS_AST_AGING_TIMER_CNT) {
2176 		soc->wds_ast_aging_timer_cnt = 0;
2177 		check_wds_ase = true;
2178 	}
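	/*
	 * MEC entries are considered on every timer pass, while WDS entries
	 * are only checked once every DP_WDS_AST_AGING_TIMER_CNT + 1 passes,
	 * i.e. roughly every DP_WDS_AST_AGING_TIMER_DEFAULT_MS milliseconds.
	 */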
2179 
2180 	 /* Peer list access lock */
2181 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
2182 
2183 	/* AST list access lock */
2184 	qdf_spin_lock_bh(&soc->ast_lock);
2185 
2186 	for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) {
2187 		pdev = soc->pdev_list[i];
2188 		qdf_spin_lock_bh(&pdev->vdev_list_lock);
2189 		DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
2190 			DP_VDEV_ITERATE_PEER_LIST(vdev, peer) {
2191 				DP_PEER_ITERATE_ASE_LIST(peer, ase, temp_ase) {
2192 					/*
2193 					 * Do not expire static ast entries
2194 					 * and HM WDS entries
2195 					 */
2196 					if (ase->type !=
2197 					    CDP_TXRX_AST_TYPE_WDS &&
2198 					    ase->type !=
2199 					    CDP_TXRX_AST_TYPE_MEC &&
2200 					    ase->type !=
2201 					    CDP_TXRX_AST_TYPE_DA)
2202 						continue;
2203 
					/* Expire MEC entries every n sec.
					 * They must be expired in case the
					 * STA backbone is changed to an AP
					 * backbone; in that case the entry
					 * needs to be re-added as a WDS
					 * entry.
					 */
2210 					if (ase->is_active && ase->type ==
2211 					    CDP_TXRX_AST_TYPE_MEC) {
2212 						ase->is_active = FALSE;
2213 						continue;
2214 					} else if (ase->is_active &&
2215 						   check_wds_ase) {
2216 						ase->is_active = FALSE;
2217 						continue;
2218 					}
2219 
2220 					if (ase->type ==
2221 					    CDP_TXRX_AST_TYPE_MEC) {
2222 						DP_STATS_INC(soc,
2223 							     ast.aged_out, 1);
2224 						dp_peer_del_ast(soc, ase);
2225 					} else if (check_wds_ase) {
2226 						DP_STATS_INC(soc,
2227 							     ast.aged_out, 1);
2228 						dp_peer_del_ast(soc, ase);
2229 					}
2230 				}
2231 			}
2232 		}
2233 		qdf_spin_unlock_bh(&pdev->vdev_list_lock);
2234 	}
2235 
2236 	qdf_spin_unlock_bh(&soc->ast_lock);
2237 	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
2238 
2239 	if (qdf_atomic_read(&soc->cmn_init_done))
2240 		qdf_timer_mod(&soc->ast_aging_timer,
2241 			      DP_AST_AGING_TIMER_DEFAULT_MS);
2242 }
2243 
2244 
2245 /*
2246  * dp_soc_wds_attach() - Setup WDS timer and AST table
2247  * @soc:		Datapath SOC handle
2248  *
2249  * Return: None
2250  */
2251 static void dp_soc_wds_attach(struct dp_soc *soc)
2252 {
2253 	soc->wds_ast_aging_timer_cnt = 0;
2254 	qdf_timer_init(soc->osdev, &soc->ast_aging_timer,
2255 		       dp_ast_aging_timer_fn, (void *)soc,
2256 		       QDF_TIMER_TYPE_WAKE_APPS);
2257 
2258 	qdf_timer_mod(&soc->ast_aging_timer, DP_AST_AGING_TIMER_DEFAULT_MS);
2259 }
2260 
2261 /*
2262  * dp_soc_wds_detach() - Detach WDS data structures and timers
 * @soc: DP SOC handle
2264  *
2265  * Return: None
2266  */
2267 static void dp_soc_wds_detach(struct dp_soc *soc)
2268 {
2269 	qdf_timer_stop(&soc->ast_aging_timer);
2270 	qdf_timer_free(&soc->ast_aging_timer);
2271 }
2272 #else
2273 static void dp_soc_wds_attach(struct dp_soc *soc)
2274 {
2275 }
2276 
2277 static void dp_soc_wds_detach(struct dp_soc *soc)
2278 {
2279 }
2280 #endif
2281 
2282 /*
 * dp_soc_reset_cpu_ring_map() - Reset cpu ring map
 * @soc: Datapath soc handle
 *
 * This API resets the default cpu ring map
 */
2289 static void dp_soc_reset_cpu_ring_map(struct dp_soc *soc)
2290 {
2291 	uint8_t i;
2292 	int nss_config = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
2293 
2294 	for (i = 0; i < WLAN_CFG_INT_NUM_CONTEXTS; i++) {
2295 		switch (nss_config) {
2296 		case dp_nss_cfg_first_radio:
2297 			/*
2298 			 * Setting Tx ring map for one nss offloaded radio
2299 			 */
2300 			soc->tx_ring_map[i] = dp_cpu_ring_map[DP_NSS_FIRST_RADIO_OFFLOADED_MAP][i];
2301 			break;
2302 
2303 		case dp_nss_cfg_second_radio:
2304 			/*
			 * Setting Tx ring map when the second radio is
			 * nss offloaded
2306 			 */
2307 			soc->tx_ring_map[i] = dp_cpu_ring_map[DP_NSS_SECOND_RADIO_OFFLOADED_MAP][i];
2308 			break;
2309 
2310 		case dp_nss_cfg_dbdc:
2311 			/*
2312 			 * Setting Tx ring map for 2 nss offloaded radios
2313 			 */
2314 			soc->tx_ring_map[i] =
2315 				dp_cpu_ring_map[DP_NSS_DBDC_OFFLOADED_MAP][i];
2316 			break;
2317 
2318 		case dp_nss_cfg_dbtc:
2319 			/*
2320 			 * Setting Tx ring map for 3 nss offloaded radios
2321 			 */
2322 			soc->tx_ring_map[i] =
2323 				dp_cpu_ring_map[DP_NSS_DBTC_OFFLOADED_MAP][i];
2324 			break;
2325 
2326 		default:
2327 			dp_err("tx_ring_map failed due to invalid nss cfg");
2328 			break;
2329 		}
2330 	}
2331 }
2332 
2333 /*
2334  * dp_soc_ring_if_nss_offloaded() - find if ring is offloaded to NSS
 * @soc: DP soc handle
 * @ring_type: ring type
 * @ring_num: ring number
 *
 * Return: 1 if the ring is offloaded to NSS, 0 otherwise
 */
static uint8_t dp_soc_ring_if_nss_offloaded(struct dp_soc *soc,
					    enum hal_ring_type ring_type,
					    int ring_num)
2342 {
2343 	uint8_t nss_config = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
2344 	uint8_t status = 0;
2345 
2346 	switch (ring_type) {
2347 	case WBM2SW_RELEASE:
2348 	case REO_DST:
2349 	case RXDMA_BUF:
2350 		status = ((nss_config) & (1 << ring_num));
2351 		break;
2352 	default:
2353 		break;
2354 	}
2355 
2356 	return status;
2357 }
2358 
2359 /*
2360  * dp_soc_reset_intr_mask() - reset interrupt mask
 * @soc: DP SoC handle
 *
 * Return: void
2364  */
2365 static void dp_soc_reset_intr_mask(struct dp_soc *soc)
2366 {
2367 	uint8_t j;
2368 	int *grp_mask = NULL;
2369 	int group_number, mask, num_ring;
2370 
2371 	/* number of tx ring */
2372 	num_ring = wlan_cfg_num_tcl_data_rings(soc->wlan_cfg_ctx);
2373 
2374 	/*
2375 	 * group mask for tx completion  ring.
2376 	 */
2377 	grp_mask =  &soc->wlan_cfg_ctx->int_tx_ring_mask[0];
2378 
2379 	/* loop and reset the mask for only offloaded ring */
2380 	for (j = 0; j < num_ring; j++) {
2381 		if (!dp_soc_ring_if_nss_offloaded(soc, WBM2SW_RELEASE, j)) {
2382 			continue;
2383 		}
2384 
2385 		/*
2386 		 * Group number corresponding to tx offloaded ring.
2387 		 */
2388 		group_number = dp_srng_find_ring_in_mask(j, grp_mask);
2389 		if (group_number < 0) {
2390 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2391 					FL("ring not part of any group; ring_type: %d,ring_num %d"),
2392 					WBM2SW_RELEASE, j);
2393 			return;
2394 		}
2395 
2396 		/* reset the tx mask for offloaded ring */
2397 		mask = wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, group_number);
2398 		mask &= (~(1 << j));
2399 
2400 		/*
2401 		 * reset the interrupt mask for offloaded ring.
2402 		 */
2403 		wlan_cfg_set_tx_ring_mask(soc->wlan_cfg_ctx, group_number, mask);
2404 	}
2405 
2406 	/* number of rx rings */
2407 	num_ring = wlan_cfg_num_reo_dest_rings(soc->wlan_cfg_ctx);
2408 
2409 	/*
2410 	 * group mask for reo destination ring.
2411 	 */
2412 	grp_mask = &soc->wlan_cfg_ctx->int_rx_ring_mask[0];
2413 
2414 	/* loop and reset the mask for only offloaded ring */
2415 	for (j = 0; j < num_ring; j++) {
2416 		if (!dp_soc_ring_if_nss_offloaded(soc, REO_DST, j)) {
2417 			continue;
2418 		}
2419 
2420 		/*
2421 		 * Group number corresponding to rx offloaded ring.
2422 		 */
2423 		group_number = dp_srng_find_ring_in_mask(j, grp_mask);
2424 		if (group_number < 0) {
2425 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2426 					FL("ring not part of any group; ring_type: %d,ring_num %d"),
2427 					REO_DST, j);
2428 			return;
2429 		}
2430 
		/* clear the rx interrupt mask bit for the offloaded ring */
		mask = wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, group_number);
		mask &= (~(1 << j));

		/*
		 * write back the updated interrupt mask for the group.
		 */
		wlan_cfg_set_rx_ring_mask(soc->wlan_cfg_ctx, group_number, mask);
2439 	}
2440 
2441 	/*
2442 	 * group mask for Rx buffer refill ring
2443 	 */
2444 	grp_mask = &soc->wlan_cfg_ctx->int_host2rxdma_ring_mask[0];
2445 
2446 	/* loop and reset the mask for only offloaded ring */
2447 	for (j = 0; j < MAX_PDEV_CNT; j++) {
2448 		if (!dp_soc_ring_if_nss_offloaded(soc, RXDMA_BUF, j)) {
2449 			continue;
2450 		}
2451 
2452 		/*
2453 		 * Group number corresponding to rx offloaded ring.
2454 		 */
2455 		group_number = dp_srng_find_ring_in_mask(j, grp_mask);
2456 		if (group_number < 0) {
2457 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2458 					FL("ring not part of any group; ring_type: %d,ring_num %d"),
					RXDMA_BUF, j);
2460 			return;
2461 		}
2462 
		/* clear the refill interrupt mask bit for the offloaded ring */
		mask = wlan_cfg_get_host2rxdma_ring_mask(soc->wlan_cfg_ctx,
				group_number);
		mask &= (~(1 << j));

		/*
		 * write back the updated interrupt mask for the group.
		 */
		wlan_cfg_set_host2rxdma_ring_mask(soc->wlan_cfg_ctx,
			group_number, mask);
2473 	}
2474 }
2475 
2476 #ifdef IPA_OFFLOAD
2477 /**
 * dp_reo_remap_config() - configure reo remap register value based
 *                         on nss configuration.
 *		Based on the offload_radio value below, the remap
 *		configuration is applied:
 *		0 - both Radios handled by host (remap rings 1, 2, 3 & 4)
 *		1 - 1st Radio handled by NSS (remap rings 2, 3 & 4)
 *		2 - 2nd Radio handled by NSS (remap rings 1, 3 & 4)
 *		3 - both Radios handled by NSS (remap not required)
 *		4 - IPA OFFLOAD enabled (remap rings 1, 2 & 3)
 *
 * @soc: DP soc handle
 * @remap1: output parameter indicating reo remap 1 register value
 * @remap2: output parameter indicating reo remap 2 register value
 * Return: bool type, true if remap is configured else false.
2491  */
2492 static bool dp_reo_remap_config(struct dp_soc *soc,
2493 				uint32_t *remap1,
2494 				uint32_t *remap2)
2495 {
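	/*
	 * Note: each 3-bit field below selects a REO destination (SW) ring;
	 * eight fields are packed per remap register and the whole value is
	 * shifted up by 8 to match the field offset hal_reo_setup() expects
	 * (an assumption about the register layout). Flows are hashed
	 * round-robin across SW rings 1-3 only, since REO ring 4 is reserved
	 * for IPA in this configuration.
	 */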
2496 	*remap1 = ((0x1 << 0) | (0x2 << 3) | (0x3 << 6) | (0x1 << 9) |
2497 		(0x2 << 12) | (0x3 << 15) | (0x1 << 18) | (0x2 << 21)) << 8;
2498 
2499 	*remap2 = ((0x3 << 0) | (0x1 << 3) | (0x2 << 6) | (0x3 << 9) |
2500 		(0x1 << 12) | (0x2 << 15) | (0x3 << 18) | (0x1 << 21)) << 8;
2501 
2502 	dp_debug("remap1 %x remap2 %x", *remap1, *remap2);
2503 
2504 	return true;
2505 }
2506 #else
2507 static bool dp_reo_remap_config(struct dp_soc *soc,
2508 				uint32_t *remap1,
2509 				uint32_t *remap2)
2510 {
2511 	uint8_t offload_radio = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
2512 
2513 	switch (offload_radio) {
2514 	case dp_nss_cfg_default:
2515 		*remap1 = ((0x1 << 0) | (0x2 << 3) | (0x3 << 6) |
2516 			(0x4 << 9) | (0x1 << 12) | (0x2 << 15) |
2517 			(0x3 << 18) | (0x4 << 21)) << 8;
2518 
2519 		*remap2 = ((0x1 << 0) | (0x2 << 3) | (0x3 << 6) |
2520 			(0x4 << 9) | (0x1 << 12) | (0x2 << 15) |
2521 			(0x3 << 18) | (0x4 << 21)) << 8;
2522 		break;
2523 	case dp_nss_cfg_first_radio:
2524 		*remap1 = ((0x2 << 0) | (0x3 << 3) | (0x4 << 6) |
2525 			(0x2 << 9) | (0x3 << 12) | (0x4 << 15) |
2526 			(0x2 << 18) | (0x3 << 21)) << 8;
2527 
2528 		*remap2 = ((0x4 << 0) | (0x2 << 3) | (0x3 << 6) |
2529 			(0x4 << 9) | (0x2 << 12) | (0x3 << 15) |
2530 			(0x4 << 18) | (0x2 << 21)) << 8;
2531 		break;
2532 
2533 	case dp_nss_cfg_second_radio:
2534 		*remap1 = ((0x1 << 0) | (0x3 << 3) | (0x4 << 6) |
2535 			(0x1 << 9) | (0x3 << 12) | (0x4 << 15) |
2536 			(0x1 << 18) | (0x3 << 21)) << 8;
2537 
2538 		*remap2 = ((0x4 << 0) | (0x1 << 3) | (0x3 << 6) |
2539 			(0x4 << 9) | (0x1 << 12) | (0x3 << 15) |
2540 			(0x4 << 18) | (0x1 << 21)) << 8;
2541 		break;
2542 
2543 	case dp_nss_cfg_dbdc:
2544 	case dp_nss_cfg_dbtc:
2545 		/* return false if both or all are offloaded to NSS */
2546 		return false;
2547 	}
2548 
2549 	dp_debug("remap1 %x remap2 %x offload_radio %u",
2550 		 *remap1, *remap2, offload_radio);
2551 	return true;
2552 }
2553 #endif
2554 
2555 /*
2556  * dp_reo_frag_dst_set() - configure reo register to set the
2557  *                        fragment destination ring
 * @soc: Datapath soc
 * @frag_dst_ring: output parameter to set fragment destination ring
2560  *
 * Based on the offload_radio value below, the fragment destination ring is
 * selected:
2562  * 0 - TCL
2563  * 1 - SW1
2564  * 2 - SW2
2565  * 3 - SW3
2566  * 4 - SW4
2567  * 5 - Release
2568  * 6 - FW
2569  * 7 - alternate select
2570  *
 * Return: void
2572  */
2573 static void dp_reo_frag_dst_set(struct dp_soc *soc, uint8_t *frag_dst_ring)
2574 {
2575 	uint8_t offload_radio = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
2576 
2577 	switch (offload_radio) {
2578 	case dp_nss_cfg_default:
2579 		*frag_dst_ring = HAL_SRNG_REO_EXCEPTION;
2580 		break;
2581 	case dp_nss_cfg_dbdc:
2582 	case dp_nss_cfg_dbtc:
2583 		*frag_dst_ring = HAL_SRNG_REO_ALTERNATE_SELECT;
2584 		break;
2585 	default:
2586 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2587 				FL("dp_reo_frag_dst_set invalid offload radio config"));
2588 		break;
2589 	}
2590 }
2591 
2592 /*
 * dp_soc_cmn_setup() - Common SoC level initialization
2594  * @soc:		Datapath SOC handle
2595  *
2596  * This is an internal function used to setup common SOC data structures,
2597  * to be called from PDEV attach after receiving HW mode capabilities from FW
2598  */
2599 static int dp_soc_cmn_setup(struct dp_soc *soc)
2600 {
2601 	int i;
2602 	struct hal_reo_params reo_params;
2603 	int tx_ring_size;
2604 	int tx_comp_ring_size;
2605 	int reo_dst_ring_size;
2606 	uint32_t entries;
2607 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
2608 
2609 	if (qdf_atomic_read(&soc->cmn_init_done))
2610 		return 0;
2611 
2612 	if (dp_hw_link_desc_pool_setup(soc))
2613 		goto fail1;
2614 
2615 	soc_cfg_ctx = soc->wlan_cfg_ctx;
2616 	/* Setup SRNG rings */
2617 	/* Common rings */
2618 	if (dp_srng_setup(soc, &soc->wbm_desc_rel_ring, SW2WBM_RELEASE, 0, 0,
2619 		wlan_cfg_get_dp_soc_wbm_release_ring_size(soc_cfg_ctx))) {
2620 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2621 			FL("dp_srng_setup failed for wbm_desc_rel_ring"));
2622 		goto fail1;
2623 	}
2624 
2625 	soc->num_tcl_data_rings = 0;
2626 	/* Tx data rings */
2627 	if (!wlan_cfg_per_pdev_tx_ring(soc_cfg_ctx)) {
2628 		soc->num_tcl_data_rings =
2629 			wlan_cfg_num_tcl_data_rings(soc_cfg_ctx);
2630 		tx_comp_ring_size =
2631 			wlan_cfg_tx_comp_ring_size(soc_cfg_ctx);
2632 		tx_ring_size =
2633 			wlan_cfg_tx_ring_size(soc_cfg_ctx);
2634 		for (i = 0; i < soc->num_tcl_data_rings; i++) {
2635 			if (dp_srng_setup(soc, &soc->tcl_data_ring[i],
2636 				TCL_DATA, i, 0, tx_ring_size)) {
2637 				QDF_TRACE(QDF_MODULE_ID_DP,
2638 					QDF_TRACE_LEVEL_ERROR,
2639 					FL("dp_srng_setup failed for tcl_data_ring[%d]"), i);
2640 				goto fail1;
2641 			}
2642 			/*
2643 			 * TBD: Set IPA WBM ring size with ini IPA UC tx buffer
2644 			 * count
2645 			 */
2646 			if (dp_srng_setup(soc, &soc->tx_comp_ring[i],
2647 				WBM2SW_RELEASE, i, 0, tx_comp_ring_size)) {
2648 				QDF_TRACE(QDF_MODULE_ID_DP,
2649 					QDF_TRACE_LEVEL_ERROR,
2650 					FL("dp_srng_setup failed for tx_comp_ring[%d]"), i);
2651 				goto fail1;
2652 			}
2653 		}
2654 	} else {
2655 		/* This will be incremented during per pdev ring setup */
2656 		soc->num_tcl_data_rings = 0;
2657 	}
2658 
2659 	if (dp_tx_soc_attach(soc)) {
2660 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2661 				FL("dp_tx_soc_attach failed"));
2662 		goto fail1;
2663 	}
2664 
2665 	entries = wlan_cfg_get_dp_soc_tcl_cmd_ring_size(soc_cfg_ctx);
2666 	/* TCL command and status rings */
2667 	if (dp_srng_setup(soc, &soc->tcl_cmd_ring, TCL_CMD, 0, 0,
2668 			  entries)) {
2669 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2670 			FL("dp_srng_setup failed for tcl_cmd_ring"));
2671 		goto fail1;
2672 	}
2673 
2674 	entries = wlan_cfg_get_dp_soc_tcl_status_ring_size(soc_cfg_ctx);
2675 	if (dp_srng_setup(soc, &soc->tcl_status_ring, TCL_STATUS, 0, 0,
2676 			  entries)) {
2677 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2678 			FL("dp_srng_setup failed for tcl_status_ring"));
2679 		goto fail1;
2680 	}
2681 
2682 	reo_dst_ring_size = wlan_cfg_get_reo_dst_ring_size(soc->wlan_cfg_ctx);
2683 
2684 	/* TBD: call dp_tx_init to setup Tx SW descriptors and MSDU extension
2685 	 * descriptors
2686 	 */
2687 
2688 	/* Rx data rings */
2689 	if (!wlan_cfg_per_pdev_rx_ring(soc_cfg_ctx)) {
2690 		soc->num_reo_dest_rings =
2691 			wlan_cfg_num_reo_dest_rings(soc_cfg_ctx);
2692 		QDF_TRACE(QDF_MODULE_ID_DP,
2693 			QDF_TRACE_LEVEL_INFO,
2694 			FL("num_reo_dest_rings %d"), soc->num_reo_dest_rings);
2695 		for (i = 0; i < soc->num_reo_dest_rings; i++) {
2696 			if (dp_srng_setup(soc, &soc->reo_dest_ring[i], REO_DST,
2697 				i, 0, reo_dst_ring_size)) {
2698 				QDF_TRACE(QDF_MODULE_ID_DP,
2699 					  QDF_TRACE_LEVEL_ERROR,
2700 					  FL(RNG_ERR "reo_dest_ring [%d]"), i);
2701 				goto fail1;
2702 			}
2703 		}
2704 	} else {
2705 		/* This will be incremented during per pdev ring setup */
2706 		soc->num_reo_dest_rings = 0;
2707 	}
2708 
2709 	entries = wlan_cfg_get_dp_soc_rxdma_err_dst_ring_size(soc_cfg_ctx);
2710 	/* LMAC RxDMA to SW Rings configuration */
2711 	if (!wlan_cfg_per_pdev_lmac_ring(soc_cfg_ctx)) {
2712 		/* Only valid for MCL */
2713 		struct dp_pdev *pdev = soc->pdev_list[0];
2714 
2715 		for (i = 0; i < MAX_RX_MAC_RINGS; i++) {
2716 			if (dp_srng_setup(soc, &pdev->rxdma_err_dst_ring[i],
2717 					  RXDMA_DST, 0, i,
2718 					  entries)) {
2719 				QDF_TRACE(QDF_MODULE_ID_DP,
2720 					  QDF_TRACE_LEVEL_ERROR,
2721 					  FL(RNG_ERR "rxdma_err_dst_ring"));
2722 				goto fail1;
2723 			}
2724 		}
2725 	}
2726 	/* TBD: call dp_rx_init to setup Rx SW descriptors */
2727 
2728 	/* REO reinjection ring */
2729 	entries = wlan_cfg_get_dp_soc_reo_reinject_ring_size(soc_cfg_ctx);
2730 	if (dp_srng_setup(soc, &soc->reo_reinject_ring, REO_REINJECT, 0, 0,
2731 			  entries)) {
2732 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2733 			  FL("dp_srng_setup failed for reo_reinject_ring"));
2734 		goto fail1;
2735 	}
2736 
2737 
2738 	/* Rx release ring */
2739 	if (dp_srng_setup(soc, &soc->rx_rel_ring, WBM2SW_RELEASE, 3, 0,
2740 		wlan_cfg_get_dp_soc_rx_release_ring_size(soc_cfg_ctx))) {
2741 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2742 			  FL("dp_srng_setup failed for rx_rel_ring"));
2743 		goto fail1;
2744 	}
2745 
2746 
2747 	/* Rx exception ring */
2748 	entries = wlan_cfg_get_dp_soc_reo_exception_ring_size(soc_cfg_ctx);
2749 	if (dp_srng_setup(soc, &soc->reo_exception_ring,
2750 			  REO_EXCEPTION, 0, MAX_REO_DEST_RINGS, entries)) {
2751 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2752 			  FL("dp_srng_setup failed for reo_exception_ring"));
2753 		goto fail1;
2754 	}
2755 
2756 
2757 	/* REO command and status rings */
2758 	if (dp_srng_setup(soc, &soc->reo_cmd_ring, REO_CMD, 0, 0,
2759 		wlan_cfg_get_dp_soc_reo_cmd_ring_size(soc_cfg_ctx))) {
2760 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2761 			FL("dp_srng_setup failed for reo_cmd_ring"));
2762 		goto fail1;
2763 	}
2764 
2765 	hal_reo_init_cmd_ring(soc->hal_soc, soc->reo_cmd_ring.hal_srng);
2766 	TAILQ_INIT(&soc->rx.reo_cmd_list);
2767 	qdf_spinlock_create(&soc->rx.reo_cmd_lock);
2768 
2769 	if (dp_srng_setup(soc, &soc->reo_status_ring, REO_STATUS, 0, 0,
2770 		wlan_cfg_get_dp_soc_reo_status_ring_size(soc_cfg_ctx))) {
2771 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2772 			FL("dp_srng_setup failed for reo_status_ring"));
2773 		goto fail1;
2774 	}
2775 
2776 
2777 	/* Reset the cpu ring map if radio is NSS offloaded */
2778 	if (wlan_cfg_get_dp_soc_nss_cfg(soc_cfg_ctx)) {
2779 		dp_soc_reset_cpu_ring_map(soc);
2780 		dp_soc_reset_intr_mask(soc);
2781 	}
2782 
2783 	/* Setup HW REO */
2784 	qdf_mem_zero(&reo_params, sizeof(reo_params));
2785 
2786 	if (wlan_cfg_is_rx_hash_enabled(soc_cfg_ctx)) {
2787 
2788 		/*
2789 		 * Reo ring remap is not required if both radios
2790 		 * are offloaded to NSS
2791 		 */
2792 		if (!dp_reo_remap_config(soc,
2793 					&reo_params.remap1,
2794 					&reo_params.remap2))
2795 			goto out;
2796 
2797 		reo_params.rx_hash_enabled = true;
2798 	}
2799 
2800 	/* setup the global rx defrag waitlist */
2801 	TAILQ_INIT(&soc->rx.defrag.waitlist);
2802 	soc->rx.defrag.timeout_ms =
2803 		wlan_cfg_get_rx_defrag_min_timeout(soc_cfg_ctx);
2804 	soc->rx.flags.defrag_timeout_check =
2805 		wlan_cfg_get_defrag_timeout_check(soc_cfg_ctx);
2806 	qdf_spinlock_create(&soc->rx.defrag.defrag_lock);
2807 
2808 out:
2809 	/*
2810 	 * set the fragment destination ring
2811 	 */
2812 	dp_reo_frag_dst_set(soc, &reo_params.frag_dst_ring);
2813 
2814 	hal_reo_setup(soc->hal_soc, &reo_params);
2815 
2816 	qdf_atomic_set(&soc->cmn_init_done, 1);
2817 	qdf_nbuf_queue_init(&soc->htt_stats.msg);
2818 	return 0;
2819 fail1:
2820 	/*
2821 	 * Cleanup will be done as part of soc_detach, which will
2822 	 * be called on pdev attach failure
2823 	 */
2824 	return QDF_STATUS_E_FAILURE;
2825 }
2826 
2827 static void dp_pdev_detach_wifi3(struct cdp_pdev *txrx_pdev, int force);
2828 
2829 static QDF_STATUS dp_lro_hash_setup(struct dp_soc *soc, struct dp_pdev *pdev)
2830 {
2831 	struct cdp_lro_hash_config lro_hash;
2832 	QDF_STATUS status;
2833 
2834 	if (!wlan_cfg_is_lro_enabled(soc->wlan_cfg_ctx) &&
2835 	    !wlan_cfg_is_gro_enabled(soc->wlan_cfg_ctx) &&
2836 	    !wlan_cfg_is_rx_hash_enabled(soc->wlan_cfg_ctx)) {
2837 		dp_err("LRO, GRO and RX hash disabled");
2838 		return QDF_STATUS_E_FAILURE;
2839 	}
2840 
2841 	qdf_mem_zero(&lro_hash, sizeof(lro_hash));
2842 
2843 	if (wlan_cfg_is_lro_enabled(soc->wlan_cfg_ctx) ||
2844 	    wlan_cfg_is_gro_enabled(soc->wlan_cfg_ctx)) {
2845 		lro_hash.lro_enable = 1;
2846 		lro_hash.tcp_flag = QDF_TCPHDR_ACK;
2847 		lro_hash.tcp_flag_mask = QDF_TCPHDR_FIN | QDF_TCPHDR_SYN |
2848 			 QDF_TCPHDR_RST | QDF_TCPHDR_ACK | QDF_TCPHDR_URG |
2849 			 QDF_TCPHDR_ECE | QDF_TCPHDR_CWR;
2850 	}
2851 
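	/*
	 * Seed the Toeplitz hash keys with random bytes; the target uses
	 * these keys to compute the Rx flow hash that spreads traffic
	 * across REO destination rings and LRO flow tables.
	 */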
2852 	qdf_get_random_bytes(lro_hash.toeplitz_hash_ipv4,
2853 		 (sizeof(lro_hash.toeplitz_hash_ipv4[0]) *
2854 		 LRO_IPV4_SEED_ARR_SZ));
2855 	qdf_get_random_bytes(lro_hash.toeplitz_hash_ipv6,
2856 		 (sizeof(lro_hash.toeplitz_hash_ipv6[0]) *
2857 		 LRO_IPV6_SEED_ARR_SZ));
2858 
2859 	qdf_assert(soc->cdp_soc.ol_ops->lro_hash_config);
2860 
2861 	if (!soc->cdp_soc.ol_ops->lro_hash_config) {
2862 		QDF_BUG(0);
2863 		dp_err("lro_hash_config not configured");
2864 		return QDF_STATUS_E_FAILURE;
2865 	}
2866 
2867 	status = soc->cdp_soc.ol_ops->lro_hash_config(pdev->ctrl_pdev,
2868 						      &lro_hash);
2869 	if (!QDF_IS_STATUS_SUCCESS(status)) {
2870 		dp_err("failed to send lro_hash_config to FW %u", status);
2871 		return status;
2872 	}
2873 
2874 	dp_info("LRO CMD config: lro_enable: 0x%x tcp_flag 0x%x tcp_flag_mask 0x%x",
2875 		lro_hash.lro_enable, lro_hash.tcp_flag,
2876 		lro_hash.tcp_flag_mask);
2877 
2878 	dp_info("toeplitz_hash_ipv4:");
2879 	qdf_trace_hex_dump(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
2880 			   (void *)lro_hash.toeplitz_hash_ipv4,
2881 			   (sizeof(lro_hash.toeplitz_hash_ipv4[0]) *
2882 			   LRO_IPV4_SEED_ARR_SZ));
2883 
2884 	dp_info("toeplitz_hash_ipv6:");
2885 	qdf_trace_hex_dump(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
2886 			   (void *)lro_hash.toeplitz_hash_ipv6,
2887 			   (sizeof(lro_hash.toeplitz_hash_ipv6[0]) *
2888 			   LRO_IPV6_SEED_ARR_SZ));
2889 
2890 	return status;
2891 }
2892 
2893 /*
2894 * dp_rxdma_ring_setup() - configure the RX DMA rings
2895 * @soc: data path SoC handle
2896 * @pdev: Physical device handle
2897 *
2898 * Return: 0 - success, > 0 - failure
2899 */
2900 #ifdef QCA_HOST2FW_RXBUF_RING
2901 static int dp_rxdma_ring_setup(struct dp_soc *soc,
2902 	 struct dp_pdev *pdev)
2903 {
2904 	struct wlan_cfg_dp_pdev_ctxt *pdev_cfg_ctx;
2905 	int max_mac_rings;
2906 	int i;
2907 
2908 	pdev_cfg_ctx = pdev->wlan_cfg_ctx;
2909 	max_mac_rings = wlan_cfg_get_num_mac_rings(pdev_cfg_ctx);
2910 
2911 	for (i = 0; i < max_mac_rings; i++) {
2912 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
2913 			 "%s: pdev_id %d mac_id %d",
2914 			 __func__, pdev->pdev_id, i);
2915 		if (dp_srng_setup(soc, &pdev->rx_mac_buf_ring[i],
2916 			RXDMA_BUF, 1, i,
2917 			wlan_cfg_get_rx_dma_buf_ring_size(pdev_cfg_ctx))) {
2918 			QDF_TRACE(QDF_MODULE_ID_DP,
2919 				 QDF_TRACE_LEVEL_ERROR,
2920 				 FL("failed rx mac ring setup"));
2921 			return QDF_STATUS_E_FAILURE;
2922 		}
2923 	}
2924 	return QDF_STATUS_SUCCESS;
2925 }
2926 #else
2927 static int dp_rxdma_ring_setup(struct dp_soc *soc,
2928 	 struct dp_pdev *pdev)
2929 {
2930 	return QDF_STATUS_SUCCESS;
2931 }
2932 #endif
2933 
2934 /**
2935  * dp_dscp_tid_map_setup(): Initialize the dscp-tid maps
 * @pdev: DP_PDEV handle
2937  *
2938  * Return: void
2939  */
2940 static inline void
2941 dp_dscp_tid_map_setup(struct dp_pdev *pdev)
2942 {
2943 	uint8_t map_id;
2944 	struct dp_soc *soc = pdev->soc;
2945 
2946 	if (!soc)
2947 		return;
2948 
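	/*
	 * Copy the default map into every per-pdev map slot, then program
	 * each HW DSCP-TID map register bank with the same default.
	 */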
2949 	for (map_id = 0; map_id < DP_MAX_TID_MAPS; map_id++) {
2950 		qdf_mem_copy(pdev->dscp_tid_map[map_id],
2951 			     default_dscp_tid_map,
2952 			     sizeof(default_dscp_tid_map));
2953 	}
2954 
2955 	for (map_id = 0; map_id < soc->num_hw_dscp_tid_map; map_id++) {
2956 		hal_tx_set_dscp_tid_map(soc->hal_soc,
2957 					default_dscp_tid_map,
2958 					map_id);
2959 	}
2960 }
2961 
2962 #ifdef IPA_OFFLOAD
2963 /**
2964  * dp_setup_ipa_rx_refill_buf_ring - Setup second Rx refill buffer ring
2965  * @soc: data path instance
2966  * @pdev: core txrx pdev context
2967  *
2968  * Return: QDF_STATUS_SUCCESS: success
2969  *         QDF_STATUS_E_RESOURCES: Error return
2970  */
2971 static int dp_setup_ipa_rx_refill_buf_ring(struct dp_soc *soc,
2972 					   struct dp_pdev *pdev)
2973 {
2974 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
2975 	int entries;
2976 
2977 	soc_cfg_ctx = soc->wlan_cfg_ctx;
2978 	entries = wlan_cfg_get_dp_soc_rxdma_refill_ring_size(soc_cfg_ctx);
2979 
2980 	/* Setup second Rx refill buffer ring */
2981 	if (dp_srng_setup(soc, &pdev->rx_refill_buf_ring2, RXDMA_BUF,
2982 			  IPA_RX_REFILL_BUF_RING_IDX,
2983 			  pdev->pdev_id,
2984 			  entries)) {
2985 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2986 			FL("dp_srng_setup failed second rx refill ring"));
2987 		return QDF_STATUS_E_FAILURE;
2988 	}
2989 	return QDF_STATUS_SUCCESS;
2990 }
2991 
2992 /**
2993  * dp_cleanup_ipa_rx_refill_buf_ring - Cleanup second Rx refill buffer ring
2994  * @soc: data path instance
2995  * @pdev: core txrx pdev context
2996  *
2997  * Return: void
2998  */
2999 static void dp_cleanup_ipa_rx_refill_buf_ring(struct dp_soc *soc,
3000 					      struct dp_pdev *pdev)
3001 {
3002 	dp_srng_cleanup(soc, &pdev->rx_refill_buf_ring2, RXDMA_BUF,
3003 			IPA_RX_REFILL_BUF_RING_IDX);
3004 }
3005 
3006 #else
3007 static int dp_setup_ipa_rx_refill_buf_ring(struct dp_soc *soc,
3008 					   struct dp_pdev *pdev)
3009 {
3010 	return QDF_STATUS_SUCCESS;
3011 }
3012 
3013 static void dp_cleanup_ipa_rx_refill_buf_ring(struct dp_soc *soc,
3014 					      struct dp_pdev *pdev)
3015 {
3016 }
3017 #endif
3018 
3019 #if !defined(DISABLE_MON_CONFIG)
3020 /**
3021  * dp_mon_rings_setup() - Initialize Monitor rings based on target
3022  * @soc: soc handle
3023  * @pdev: physical device handle
3024  *
3025  * Return: nonzero on failure and zero on success
3026  */
3027 static
3028 QDF_STATUS dp_mon_rings_setup(struct dp_soc *soc, struct dp_pdev *pdev)
3029 {
3030 	int mac_id = 0;
3031 	int pdev_id = pdev->pdev_id;
3032 	int entries;
3033 	struct wlan_cfg_dp_pdev_ctxt *pdev_cfg_ctx;
3034 
3035 	pdev_cfg_ctx = pdev->wlan_cfg_ctx;
3036 
3037 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
3038 		int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, pdev_id);
3039 
3040 		if (soc->wlan_cfg_ctx->rxdma1_enable) {
3041 			entries =
3042 			   wlan_cfg_get_dma_mon_buf_ring_size(pdev_cfg_ctx);
3043 			if (dp_srng_setup(soc,
3044 					  &pdev->rxdma_mon_buf_ring[mac_id],
3045 					  RXDMA_MONITOR_BUF, 0, mac_for_pdev,
3046 					  entries)) {
3047 				QDF_TRACE(QDF_MODULE_ID_DP,
3048 					  QDF_TRACE_LEVEL_ERROR,
3049 					  FL(RNG_ERR "rxdma_mon_buf_ring "));
3050 				return QDF_STATUS_E_NOMEM;
3051 			}
3052 
3053 			entries =
3054 			   wlan_cfg_get_dma_mon_dest_ring_size(pdev_cfg_ctx);
3055 			if (dp_srng_setup(soc,
3056 					  &pdev->rxdma_mon_dst_ring[mac_id],
3057 					  RXDMA_MONITOR_DST, 0, mac_for_pdev,
3058 					  entries)) {
3059 				QDF_TRACE(QDF_MODULE_ID_DP,
3060 					  QDF_TRACE_LEVEL_ERROR,
3061 					  FL(RNG_ERR "rxdma_mon_dst_ring"));
3062 				return QDF_STATUS_E_NOMEM;
3063 			}
3064 
3065 			entries =
3066 			    wlan_cfg_get_dma_mon_stat_ring_size(pdev_cfg_ctx);
3067 			if (dp_srng_setup(soc,
3068 					  &pdev->rxdma_mon_status_ring[mac_id],
3069 					  RXDMA_MONITOR_STATUS, 0, mac_for_pdev,
3070 					  entries)) {
3071 				QDF_TRACE(QDF_MODULE_ID_DP,
3072 					  QDF_TRACE_LEVEL_ERROR,
3073 					  FL(RNG_ERR "rxdma_mon_status_ring"));
3074 				return QDF_STATUS_E_NOMEM;
3075 			}
3076 
3077 			entries =
3078 			   wlan_cfg_get_dma_mon_desc_ring_size(pdev_cfg_ctx);
3079 			if (dp_srng_setup(soc,
3080 					  &pdev->rxdma_mon_desc_ring[mac_id],
3081 					  RXDMA_MONITOR_DESC, 0, mac_for_pdev,
3082 					  entries)) {
3083 				QDF_TRACE(QDF_MODULE_ID_DP,
3084 					  QDF_TRACE_LEVEL_ERROR,
3085 					  FL(RNG_ERR "rxdma_mon_desc_ring"));
3086 				return QDF_STATUS_E_NOMEM;
3087 			}
3088 		} else {
3089 			entries =
3090 			   wlan_cfg_get_dma_mon_stat_ring_size(pdev_cfg_ctx);
3091 			if (dp_srng_setup(soc,
3092 					  &pdev->rxdma_mon_status_ring[mac_id],
3093 					  RXDMA_MONITOR_STATUS, 0, mac_for_pdev,
3094 					  entries)) {
3095 				QDF_TRACE(QDF_MODULE_ID_DP,
3096 					  QDF_TRACE_LEVEL_ERROR,
3097 					  FL(RNG_ERR "rxdma_mon_status_ring"));
3098 				return QDF_STATUS_E_NOMEM;
3099 			}
3100 		}
3101 	}
3102 
3103 	return QDF_STATUS_SUCCESS;
3104 }
3105 #else
3106 static
3107 QDF_STATUS dp_mon_rings_setup(struct dp_soc *soc, struct dp_pdev *pdev)
3108 {
3109 	return QDF_STATUS_SUCCESS;
3110 }
3111 #endif
3112 
/*
 * dp_iterate_update_peer_list() - update peer stats on cal client timer
 * @pdev_hdl: pdev handle
 *
 * Return: void
 */
3116 #ifdef ATH_SUPPORT_EXT_STAT
3117 void  dp_iterate_update_peer_list(void *pdev_hdl)
3118 {
3119 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_hdl;
3120 	struct dp_soc *soc = pdev->soc;
3121 	struct dp_vdev *vdev = NULL;
3122 	struct dp_peer *peer = NULL;
3123 
3124 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
3125 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
3126 	DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
3127 		DP_VDEV_ITERATE_PEER_LIST(vdev, peer) {
3128 			dp_cal_client_update_peer_stats(&peer->stats);
3129 		}
3130 	}
3131 	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
3132 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
3133 }
3134 #else
3135 void  dp_iterate_update_peer_list(void *pdev_hdl)
3136 {
3137 }
3138 #endif
3139 
3140 /*
3141 * dp_pdev_attach_wifi3() - attach txrx pdev
* @txrx_soc: Datapath SOC handle
* @ctrl_pdev: Opaque PDEV object
3144 * @htc_handle: HTC handle for host-target interface
3145 * @qdf_osdev: QDF OS device
3146 * @pdev_id: PDEV ID
3147 *
3148 * Return: DP PDEV handle on success, NULL on failure
3149 */
3150 static struct cdp_pdev *dp_pdev_attach_wifi3(struct cdp_soc_t *txrx_soc,
3151 	struct cdp_ctrl_objmgr_pdev *ctrl_pdev,
3152 	HTC_HANDLE htc_handle, qdf_device_t qdf_osdev, uint8_t pdev_id)
3153 {
3154 	int tx_ring_size;
3155 	int tx_comp_ring_size;
3156 	int reo_dst_ring_size;
3157 	int entries;
3158 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
3159 	int nss_cfg;
3160 
3161 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
3162 	struct dp_pdev *pdev = NULL;
3163 
3164 	if (soc->dp_soc_reinit)
3165 		pdev = soc->pdev_list[pdev_id];
3166 	else
3167 		pdev = qdf_mem_malloc(sizeof(*pdev));
3168 
3169 	if (!pdev) {
3170 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3171 			FL("DP PDEV memory allocation failed"));
3172 		goto fail0;
3173 	}
3174 
3175 	/*
3176 	 * Variable to prevent double pdev deinitialization during
	 * radio detach execution, i.e. in the absence of any vdev.
3178 	 */
3179 	pdev->pdev_deinit = 0;
3180 	pdev->invalid_peer = qdf_mem_malloc(sizeof(struct dp_peer));
3181 
3182 	if (!pdev->invalid_peer) {
3183 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3184 			  FL("Invalid peer memory allocation failed"));
3185 		qdf_mem_free(pdev);
3186 		goto fail0;
3187 	}
3188 
3189 	soc_cfg_ctx = soc->wlan_cfg_ctx;
3190 	pdev->wlan_cfg_ctx = wlan_cfg_pdev_attach(soc->ctrl_psoc);
3191 
3192 	if (!pdev->wlan_cfg_ctx) {
3193 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3194 			FL("pdev cfg_attach failed"));
3195 
3196 		qdf_mem_free(pdev->invalid_peer);
3197 		qdf_mem_free(pdev);
3198 		goto fail0;
3199 	}
3200 
3201 	/*
3202 	 * set nss pdev config based on soc config
3203 	 */
3204 	nss_cfg = wlan_cfg_get_dp_soc_nss_cfg(soc_cfg_ctx);
3205 	wlan_cfg_set_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx,
3206 			(nss_cfg & (1 << pdev_id)));
3207 
3208 	pdev->soc = soc;
3209 	pdev->ctrl_pdev = ctrl_pdev;
3210 	pdev->pdev_id = pdev_id;
3211 	soc->pdev_list[pdev_id] = pdev;
3212 
3213 	pdev->lmac_id = wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, pdev_id);
3214 	soc->pdev_count++;
3215 
3216 	TAILQ_INIT(&pdev->vdev_list);
3217 	qdf_spinlock_create(&pdev->vdev_list_lock);
3218 	pdev->vdev_count = 0;
3219 
3220 	qdf_spinlock_create(&pdev->tx_mutex);
3221 	qdf_spinlock_create(&pdev->neighbour_peer_mutex);
3222 	TAILQ_INIT(&pdev->neighbour_peers_list);
3223 	pdev->neighbour_peers_added = false;
3224 	pdev->monitor_configured = false;
3225 
3226 	if (dp_soc_cmn_setup(soc)) {
3227 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3228 			FL("dp_soc_cmn_setup failed"));
3229 		goto fail1;
3230 	}
3231 
3232 	/* Setup per PDEV TCL rings if configured */
3233 	if (wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
3234 		tx_ring_size =
3235 			wlan_cfg_tx_ring_size(soc_cfg_ctx);
3236 		tx_comp_ring_size =
3237 			wlan_cfg_tx_comp_ring_size(soc_cfg_ctx);
3238 
3239 		if (dp_srng_setup(soc, &soc->tcl_data_ring[pdev_id], TCL_DATA,
3240 			pdev_id, pdev_id, tx_ring_size)) {
3241 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3242 				FL("dp_srng_setup failed for tcl_data_ring"));
3243 			goto fail1;
3244 		}
3245 		if (dp_srng_setup(soc, &soc->tx_comp_ring[pdev_id],
3246 			WBM2SW_RELEASE, pdev_id, pdev_id, tx_comp_ring_size)) {
3247 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3248 				FL("dp_srng_setup failed for tx_comp_ring"));
3249 			goto fail1;
3250 		}
3251 		soc->num_tcl_data_rings++;
3252 	}
3253 
3254 	/* Tx specific init */
3255 	if (dp_tx_pdev_attach(pdev)) {
3256 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3257 			FL("dp_tx_pdev_attach failed"));
3258 		goto fail1;
3259 	}
3260 
3261 	reo_dst_ring_size = wlan_cfg_get_reo_dst_ring_size(soc->wlan_cfg_ctx);
3262 	/* Setup per PDEV REO rings if configured */
3263 	if (wlan_cfg_per_pdev_rx_ring(soc_cfg_ctx)) {
3264 		if (dp_srng_setup(soc, &soc->reo_dest_ring[pdev_id], REO_DST,
3265 			pdev_id, pdev_id, reo_dst_ring_size)) {
3266 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				FL("dp_srng_setup failed for reo_dest_ring"));
3268 			goto fail1;
3269 		}
3270 		soc->num_reo_dest_rings++;
3271 
3272 	}
3273 	if (dp_srng_setup(soc, &pdev->rx_refill_buf_ring, RXDMA_BUF, 0, pdev_id,
3274 		wlan_cfg_get_dp_soc_rxdma_refill_ring_size(soc_cfg_ctx))) {
3275 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3276 			 FL("dp_srng_setup failed rx refill ring"));
3277 		goto fail1;
3278 	}
3279 
3280 	if (dp_rxdma_ring_setup(soc, pdev)) {
3281 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3282 			 FL("RXDMA ring config failed"));
3283 		goto fail1;
3284 	}
3285 
3286 	if (dp_mon_rings_setup(soc, pdev)) {
3287 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3288 			  FL("MONITOR rings setup failed"));
3289 		goto fail1;
3290 	}
3291 
3292 	entries = wlan_cfg_get_dp_soc_rxdma_err_dst_ring_size(soc_cfg_ctx);
3293 	if (wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx)) {
3294 		if (dp_srng_setup(soc, &pdev->rxdma_err_dst_ring[0], RXDMA_DST,
3295 				  0, pdev_id,
3296 				  entries)) {
3297 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3298 				  FL(RNG_ERR "rxdma_err_dst_ring"));
3299 			goto fail1;
3300 		}
3301 	}
3302 
3303 	if (dp_setup_ipa_rx_refill_buf_ring(soc, pdev))
3304 		goto fail1;
3305 
3306 	if (dp_ipa_ring_resource_setup(soc, pdev))
3307 		goto fail1;
3308 
3309 	if (dp_ipa_uc_attach(soc, pdev) != QDF_STATUS_SUCCESS) {
3310 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3311 			FL("dp_ipa_uc_attach failed"));
3312 		goto fail1;
3313 	}
3314 
3315 	/* Rx specific init */
3316 	if (dp_rx_pdev_attach(pdev)) {
3317 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3318 			  FL("dp_rx_pdev_attach failed"));
3319 		goto fail1;
3320 	}
3321 
3322 	DP_STATS_INIT(pdev);
3323 
3324 	/* Monitor filter init */
3325 	pdev->mon_filter_mode = MON_FILTER_ALL;
3326 	pdev->fp_mgmt_filter = FILTER_MGMT_ALL;
3327 	pdev->fp_ctrl_filter = FILTER_CTRL_ALL;
3328 	pdev->fp_data_filter = FILTER_DATA_ALL;
3329 	pdev->mo_mgmt_filter = FILTER_MGMT_ALL;
3330 	pdev->mo_ctrl_filter = FILTER_CTRL_ALL;
3331 	pdev->mo_data_filter = FILTER_DATA_ALL;
3332 
3333 	dp_local_peer_id_pool_init(pdev);
3334 
3335 	dp_dscp_tid_map_setup(pdev);
3336 
3337 	/* Rx monitor mode specific init */
3338 	if (dp_rx_pdev_mon_attach(pdev)) {
3339 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
3340 				"dp_rx_pdev_mon_attach failed");
3341 		goto fail1;
3342 	}
3343 
3344 	if (dp_wdi_event_attach(pdev)) {
3345 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
				"dp_wdi_event_attach failed");
3347 		goto fail1;
3348 	}
3349 
3350 	/* set the reo destination during initialization */
3351 	pdev->reo_dest = pdev->pdev_id + 1;
3352 
3353 	/*
3354 	 * initialize ppdu tlv list
3355 	 */
3356 	TAILQ_INIT(&pdev->ppdu_info_list);
3357 	pdev->tlv_count = 0;
3358 	pdev->list_depth = 0;
3359 
3360 	qdf_mem_zero(&pdev->sojourn_stats, sizeof(struct cdp_tx_sojourn_stats));
3361 
3362 	pdev->sojourn_buf = qdf_nbuf_alloc(pdev->soc->osdev,
3363 			      sizeof(struct cdp_tx_sojourn_stats), 0, 4,
3364 			      TRUE);
3365 
	/* initialize cal client timer */
3367 	dp_cal_client_attach(&pdev->cal_client_ctx, pdev, pdev->soc->osdev,
3368 			     &dp_iterate_update_peer_list);
3369 
3370 	return (struct cdp_pdev *)pdev;
3371 
3372 fail1:
3373 	dp_pdev_detach((struct cdp_pdev *)pdev, 0);
3374 
3375 fail0:
3376 	return NULL;
3377 }
3378 
3379 /*
* dp_rxdma_ring_cleanup() - clean up the RX DMA rings
3381 * @soc: data path SoC handle
3382 * @pdev: Physical device handle
3383 *
3384 * Return: void
3385 */
3386 #ifdef QCA_HOST2FW_RXBUF_RING
3387 static void dp_rxdma_ring_cleanup(struct dp_soc *soc,
3388 	 struct dp_pdev *pdev)
3389 {
3390 	int max_mac_rings =
3391 		 wlan_cfg_get_num_mac_rings(pdev->wlan_cfg_ctx);
3392 	int i;
3393 
3394 	max_mac_rings = max_mac_rings < MAX_RX_MAC_RINGS ?
3395 				max_mac_rings : MAX_RX_MAC_RINGS;
3396 	for (i = 0; i < MAX_RX_MAC_RINGS; i++)
3397 		dp_srng_cleanup(soc, &pdev->rx_mac_buf_ring[i],
3398 			 RXDMA_BUF, 1);
3399 
3400 	qdf_timer_free(&soc->mon_reap_timer);
3401 }
3402 #else
3403 static void dp_rxdma_ring_cleanup(struct dp_soc *soc,
3404 	 struct dp_pdev *pdev)
3405 {
3406 }
3407 #endif
3408 
3409 /*
 * dp_neighbour_peers_detach() - Detach neighbour peers (NAC clients)
3411  * @pdev: device object
3412  *
3413  * Return: void
3414  */
3415 static void dp_neighbour_peers_detach(struct dp_pdev *pdev)
3416 {
3417 	struct dp_neighbour_peer *peer = NULL;
3418 	struct dp_neighbour_peer *temp_peer = NULL;
3419 
3420 	TAILQ_FOREACH_SAFE(peer, &pdev->neighbour_peers_list,
3421 			neighbour_peer_list_elem, temp_peer) {
3422 		/* delete this peer from the list */
3423 		TAILQ_REMOVE(&pdev->neighbour_peers_list,
3424 				peer, neighbour_peer_list_elem);
3425 		qdf_mem_free(peer);
3426 	}
3427 
3428 	qdf_spinlock_destroy(&pdev->neighbour_peer_mutex);
3429 }
3430 
3431 /**
3432 * dp_htt_ppdu_stats_detach() - detach stats resources
3433 * @pdev: Datapath PDEV handle
3434 *
3435 * Return: void
3436 */
3437 static void dp_htt_ppdu_stats_detach(struct dp_pdev *pdev)
3438 {
3439 	struct ppdu_info *ppdu_info, *ppdu_info_next;
3440 
3441 	TAILQ_FOREACH_SAFE(ppdu_info, &pdev->ppdu_info_list,
3442 			ppdu_info_list_elem, ppdu_info_next) {
3443 		if (!ppdu_info)
3444 			break;
3445 		qdf_assert_always(ppdu_info->nbuf);
3446 		qdf_nbuf_free(ppdu_info->nbuf);
3447 		qdf_mem_free(ppdu_info);
3448 	}
3449 }
3450 
3451 #if !defined(DISABLE_MON_CONFIG)
3452 
3453 static
3454 void dp_mon_ring_cleanup(struct dp_soc *soc, struct dp_pdev *pdev,
3455 			 int mac_id)
{
	if (soc->wlan_cfg_ctx->rxdma1_enable) {
		dp_srng_cleanup(soc,
				&pdev->rxdma_mon_buf_ring[mac_id],
				RXDMA_MONITOR_BUF, 0);

		dp_srng_cleanup(soc,
				&pdev->rxdma_mon_dst_ring[mac_id],
				RXDMA_MONITOR_DST, 0);

		dp_srng_cleanup(soc,
				&pdev->rxdma_mon_status_ring[mac_id],
				RXDMA_MONITOR_STATUS, 0);

		dp_srng_cleanup(soc,
				&pdev->rxdma_mon_desc_ring[mac_id],
				RXDMA_MONITOR_DESC, 0);

		dp_srng_cleanup(soc,
				&pdev->rxdma_err_dst_ring[mac_id],
				RXDMA_DST, 0);
	} else {
		dp_srng_cleanup(soc,
				&pdev->rxdma_mon_status_ring[mac_id],
				RXDMA_MONITOR_STATUS, 0);

		dp_srng_cleanup(soc,
				&pdev->rxdma_err_dst_ring[mac_id],
				RXDMA_DST, 0);
	}
}
3488 #else
3489 static void dp_mon_ring_cleanup(struct dp_soc *soc, struct dp_pdev *pdev,
3490 				int mac_id)
3491 {
3492 }
3493 #endif
3494 
3495 /**
3496  * dp_mon_ring_deinit() - Placeholder to deinitialize Monitor rings
3497  *
3498  * @soc: soc handle
3499  * @pdev: datapath physical dev handle
3500  * @mac_id: mac number
3501  *
3502  * Return: None
3503  */
3504 static void dp_mon_ring_deinit(struct dp_soc *soc, struct dp_pdev *pdev,
3505 			       int mac_id)
3506 {
3507 }
3508 
3509 /**
3510  * dp_pdev_mem_reset() - Zero txrx pdev memory past the pdev_deinit flag
3511  * @pdev: dp pdev handle
3512  *
3513  * Return: None
3514  */
3515 static void dp_pdev_mem_reset(struct dp_pdev *pdev)
3516 {
3517 	uint16_t len = 0;
3518 	uint8_t *dp_pdev_offset = (uint8_t *)pdev;
3519 
3520 	len = sizeof(struct dp_pdev) -
3521 		offsetof(struct dp_pdev, pdev_deinit) -
3522 		sizeof(pdev->pdev_deinit);
3523 	dp_pdev_offset = dp_pdev_offset +
3524 			 offsetof(struct dp_pdev, pdev_deinit) +
3525 			 sizeof(pdev->pdev_deinit);
3526 
3527 	qdf_mem_zero(dp_pdev_offset, len);
3528 }
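
/*
 * Illustrative layout sketch for dp_pdev_mem_reset() (the real field order
 * and member types live in dp_types.h; only pdev_deinit is taken from this
 * file, the surrounding placeholders are assumptions):
 *
 *	struct dp_pdev {
 *		...			fields here survive the reset
 *		pdev_deinit;		the guard flag itself also survives
 *		...			everything from here on is zeroed
 *	};
 *
 * Any pdev state that must survive a deinit/reinit cycle therefore has to
 * be declared before the pdev_deinit member.
 */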
3529 
3530 /**
3531  * dp_pdev_deinit() - Deinit txrx pdev
3532  * @txrx_pdev: Datapath PDEV handle
3533  * @force: Force deinit
3534  *
3535  * Return: None
3536  */
3537 static void dp_pdev_deinit(struct cdp_pdev *txrx_pdev, int force)
3538 {
3539 	struct dp_pdev *pdev = (struct dp_pdev *)txrx_pdev;
3540 	struct dp_soc *soc = pdev->soc;
3541 	qdf_nbuf_t curr_nbuf, next_nbuf;
3542 	int mac_id;
3543 
3544 	/*
3545 	 * Prevent double pdev deinitialization during radio detach
3546 	 * execution, i.e. in the absence of any vdev
3547 	 */
3548 	if (pdev->pdev_deinit)
3549 		return;
3550 
3551 	pdev->pdev_deinit = 1;
3552 
3553 	dp_wdi_event_detach(pdev);
3554 
3555 	dp_tx_pdev_detach(pdev);
3556 
3557 	if (wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
3558 		dp_srng_deinit(soc, &soc->tcl_data_ring[pdev->pdev_id],
3559 			       TCL_DATA, pdev->pdev_id);
3560 		dp_srng_deinit(soc, &soc->tx_comp_ring[pdev->pdev_id],
3561 			       WBM2SW_RELEASE, pdev->pdev_id);
3562 	}
3563 
3564 	dp_pktlogmod_exit(pdev);
3565 
3566 	dp_rx_pdev_detach(pdev);
3567 	dp_rx_pdev_mon_detach(pdev);
3568 	dp_neighbour_peers_detach(pdev);
3569 	qdf_spinlock_destroy(&pdev->tx_mutex);
3570 	qdf_spinlock_destroy(&pdev->vdev_list_lock);
3571 
3572 	dp_ipa_uc_detach(soc, pdev);
3573 
3574 	dp_cleanup_ipa_rx_refill_buf_ring(soc, pdev);
3575 
3576 	/* Cleanup per PDEV REO rings if configured */
3577 	if (wlan_cfg_per_pdev_rx_ring(soc->wlan_cfg_ctx)) {
3578 		dp_srng_deinit(soc, &soc->reo_dest_ring[pdev->pdev_id],
3579 			       REO_DST, pdev->pdev_id);
3580 	}
3581 
3582 	dp_srng_deinit(soc, &pdev->rx_refill_buf_ring, RXDMA_BUF, 0);
3583 
3584 	dp_rxdma_ring_cleanup(soc, pdev);
3585 
3586 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
3587 		dp_mon_ring_deinit(soc, pdev, mac_id);
3588 		dp_srng_deinit(soc, &pdev->rxdma_err_dst_ring[mac_id],
3589 			       RXDMA_DST, 0);
3590 	}
3591 
3592 	curr_nbuf = pdev->invalid_peer_head_msdu;
3593 	while (curr_nbuf) {
3594 		next_nbuf = qdf_nbuf_next(curr_nbuf);
3595 		qdf_nbuf_free(curr_nbuf);
3596 		curr_nbuf = next_nbuf;
3597 	}
3598 	pdev->invalid_peer_head_msdu = NULL;
3599 	pdev->invalid_peer_tail_msdu = NULL;
3600 
3601 	dp_htt_ppdu_stats_detach(pdev);
3602 
3603 	qdf_nbuf_free(pdev->sojourn_buf);
3604 
3605 	dp_cal_client_detach(&pdev->cal_client_ctx);
3606 
3607 	soc->pdev_count--;
3608 	wlan_cfg_pdev_detach(pdev->wlan_cfg_ctx);
3609 	qdf_mem_free(pdev->invalid_peer);
3610 	qdf_mem_free(pdev->dp_txrx_handle);
3611 	dp_pdev_mem_reset(pdev);
3612 }
3613 
3614 /**
3615  * dp_pdev_deinit_wifi3() - Deinit txrx pdev
3616  * @txrx_pdev: Datapath PDEV handle
3617  * @force: Force deinit
3618  *
3619  * Return: None
3620  */
3621 static void dp_pdev_deinit_wifi3(struct cdp_pdev *txrx_pdev, int force)
3622 {
3623 	dp_pdev_deinit(txrx_pdev, force);
3624 }
3625 
3626 /*
3627  * dp_pdev_detach() - Complete rest of pdev detach
3628  * @txrx_pdev: Datapath PDEV handle
3629  * @force: Force deinit
3630  *
3631  * Return: None
3632  */
3633 static void dp_pdev_detach(struct cdp_pdev *txrx_pdev, int force)
3634 {
3635 	struct dp_pdev *pdev = (struct dp_pdev *)txrx_pdev;
3636 	struct dp_soc *soc = pdev->soc;
3637 	int mac_id;
3638 
3639 	if (wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
3640 		dp_srng_cleanup(soc, &soc->tcl_data_ring[pdev->pdev_id],
3641 				TCL_DATA, pdev->pdev_id);
3642 		dp_srng_cleanup(soc, &soc->tx_comp_ring[pdev->pdev_id],
3643 				WBM2SW_RELEASE, pdev->pdev_id);
3644 	}
3645 
3646 	dp_mon_link_free(pdev);
3647 
3648 	/* Cleanup per PDEV REO rings if configured */
3649 	if (wlan_cfg_per_pdev_rx_ring(soc->wlan_cfg_ctx)) {
3650 		dp_srng_cleanup(soc, &soc->reo_dest_ring[pdev->pdev_id],
3651 				REO_DST, pdev->pdev_id);
3652 	}
3653 
3654 	dp_srng_cleanup(soc, &pdev->rx_refill_buf_ring, RXDMA_BUF, 0);
3655 
3656 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
3657 		dp_mon_ring_cleanup(soc, pdev, mac_id);
3658 		dp_srng_cleanup(soc, &pdev->rxdma_err_dst_ring[mac_id],
3659 				RXDMA_DST, 0);
3660 	}
3661 
3662 	soc->pdev_list[pdev->pdev_id] = NULL;
3663 	qdf_mem_free(pdev);
3664 }
3665 
3666 /*
3667  * dp_pdev_detach_wifi3() - detach txrx pdev
3668  * @txrx_pdev: Datapath PDEV handle
3669  * @force: Force detach
3670  *
3671  * Return: None
3672  */
3673 static void dp_pdev_detach_wifi3(struct cdp_pdev *txrx_pdev, int force)
3674 {
3675 	struct dp_pdev *pdev = (struct dp_pdev *)txrx_pdev;
3676 	struct dp_soc *soc = pdev->soc;
3677 
3678 	if (soc->dp_soc_reinit) {
3679 		dp_pdev_detach(txrx_pdev, force);
3680 	} else {
3681 		dp_pdev_deinit(txrx_pdev, force);
3682 		dp_pdev_detach(txrx_pdev, force);
3683 	}
3684 }
3685 
3686 /*
3687  * dp_reo_desc_freelist_destroy() - Flush REO descriptors from deferred freelist
3688  * @soc: DP SOC handle
3689  */
3690 static inline void dp_reo_desc_freelist_destroy(struct dp_soc *soc)
3691 {
3692 	struct reo_desc_list_node *desc;
3693 	struct dp_rx_tid *rx_tid;
3694 
3695 	qdf_spin_lock_bh(&soc->reo_desc_freelist_lock);
3696 	while (qdf_list_remove_front(&soc->reo_desc_freelist,
3697 		(qdf_list_node_t **)&desc) == QDF_STATUS_SUCCESS) {
3698 		rx_tid = &desc->rx_tid;
3699 		qdf_mem_unmap_nbytes_single(soc->osdev,
3700 			rx_tid->hw_qdesc_paddr,
3701 			QDF_DMA_BIDIRECTIONAL,
3702 			rx_tid->hw_qdesc_alloc_size);
3703 		qdf_mem_free(rx_tid->hw_qdesc_vaddr_unaligned);
3704 		qdf_mem_free(desc);
3705 	}
3706 	qdf_spin_unlock_bh(&soc->reo_desc_freelist_lock);
3707 	qdf_list_destroy(&soc->reo_desc_freelist);
3708 	qdf_spinlock_destroy(&soc->reo_desc_freelist_lock);
3709 }
3710 
3711 /**
3712  * dp_soc_mem_reset() - Zero DP SOC memory past the dp_soc_reinit flag
3713  * @soc: DP handle
3714  *
3715  * Return: None
3716  */
3717 static void dp_soc_mem_reset(struct dp_soc *soc)
3718 {
3719 	uint16_t len = 0;
3720 	uint8_t *dp_soc_offset = (uint8_t *)soc;
3721 
3722 	len = sizeof(struct dp_soc) -
3723 		offsetof(struct dp_soc, dp_soc_reinit) -
3724 		sizeof(soc->dp_soc_reinit);
3725 	dp_soc_offset = dp_soc_offset +
3726 			offsetof(struct dp_soc, dp_soc_reinit) +
3727 			sizeof(soc->dp_soc_reinit);
3728 
3729 	qdf_mem_zero(dp_soc_offset, len);
3730 }
3731 
3732 /**
3733  * dp_soc_deinit() - Deinitialize txrx SOC
3734  * @txrx_soc: Opaque DP SOC handle
3735  *
3736  * Return: None
3737  */
3738 static void dp_soc_deinit(void *txrx_soc)
3739 {
3740 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
3741 	int i;
3742 
3743 	qdf_atomic_set(&soc->cmn_init_done, 0);
3744 
3745 	for (i = 0; i < MAX_PDEV_CNT; i++) {
3746 		if (soc->pdev_list[i])
3747 			dp_pdev_deinit((struct cdp_pdev *)
3748 					soc->pdev_list[i], 1);
3749 	}
3750 
3751 	qdf_flush_work(&soc->htt_stats.work);
3752 	qdf_disable_work(&soc->htt_stats.work);
3753 
3754 	/* Free pending htt stats messages */
3755 	qdf_nbuf_queue_free(&soc->htt_stats.msg);
3756 
3757 	dp_reo_cmdlist_destroy(soc);
3758 
3759 	dp_peer_find_detach(soc);
3760 
3761 	/* Free the ring memories */
3762 	/* Common rings */
3763 	dp_srng_deinit(soc, &soc->wbm_desc_rel_ring, SW2WBM_RELEASE, 0);
3764 
3765 	/* Tx data rings */
3766 	if (!wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
3767 		for (i = 0; i < soc->num_tcl_data_rings; i++) {
3768 			dp_srng_deinit(soc, &soc->tcl_data_ring[i],
3769 				       TCL_DATA, i);
3770 			dp_srng_deinit(soc, &soc->tx_comp_ring[i],
3771 				       WBM2SW_RELEASE, i);
3772 		}
3773 	}
3774 
3775 	/* TCL command and status rings */
3776 	dp_srng_deinit(soc, &soc->tcl_cmd_ring, TCL_CMD, 0);
3777 	dp_srng_deinit(soc, &soc->tcl_status_ring, TCL_STATUS, 0);
3778 
3779 	/* Rx data rings */
3780 	if (!wlan_cfg_per_pdev_rx_ring(soc->wlan_cfg_ctx)) {
3781 		soc->num_reo_dest_rings =
3782 			wlan_cfg_num_reo_dest_rings(soc->wlan_cfg_ctx);
3783 		for (i = 0; i < soc->num_reo_dest_rings; i++) {
3784 			/* TODO: Get number of rings and ring sizes
3785 			 * from wlan_cfg
3786 			 */
3787 			dp_srng_deinit(soc, &soc->reo_dest_ring[i],
3788 				       REO_DST, i);
3789 		}
3790 	}
3791 	/* REO reinjection ring */
3792 	dp_srng_deinit(soc, &soc->reo_reinject_ring, REO_REINJECT, 0);
3793 
3794 	/* Rx release ring */
3795 	dp_srng_deinit(soc, &soc->rx_rel_ring, WBM2SW_RELEASE, 0);
3796 
3797 	/* Rx exception ring */
3798 	/* TODO: Better to store ring_type and ring_num in
3799 	 * dp_srng during setup
3800 	 */
3801 	dp_srng_deinit(soc, &soc->reo_exception_ring, REO_EXCEPTION, 0);
3802 
3803 	/* REO command and status rings */
3804 	dp_srng_deinit(soc, &soc->reo_cmd_ring, REO_CMD, 0);
3805 	dp_srng_deinit(soc, &soc->reo_status_ring, REO_STATUS, 0);
3806 
3807 	dp_soc_wds_detach(soc);
3808 
3809 	qdf_spinlock_destroy(&soc->peer_ref_mutex);
3810 	qdf_spinlock_destroy(&soc->htt_stats.lock);
3811 
3812 	htt_soc_htc_dealloc(soc->htt_handle);
3813 
3814 	qdf_spinlock_destroy(&soc->rx.defrag.defrag_lock);
3815 
3816 	dp_reo_cmdlist_destroy(soc);
3817 	qdf_spinlock_destroy(&soc->rx.reo_cmd_lock);
3818 	dp_reo_desc_freelist_destroy(soc);
3819 
3820 	qdf_spinlock_destroy(&soc->ast_lock);
3821 
3822 	dp_soc_mem_reset(soc);
3823 }
3824 
3825 /**
3826  * dp_soc_deinit_wifi3() - Deinitialize txrx SOC
3827  * @txrx_soc: Opaque DP SOC handle
3828  *
3829  * Return: None
3830  */
3831 static void dp_soc_deinit_wifi3(void *txrx_soc)
3832 {
3833 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
3834 
3835 	soc->dp_soc_reinit = 1;
3836 	dp_soc_deinit(txrx_soc);
3837 }
3838 
3839 /*
3840  * dp_soc_detach() - Detach rest of txrx SOC
3841  * @txrx_soc: DP SOC handle, struct cdp_soc_t is first element of struct dp_soc.
3842  *
3843  * Return: None
3844  */
3845 static void dp_soc_detach(void *txrx_soc)
3846 {
3847 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
3848 	int i;
3849 
3850 	qdf_atomic_set(&soc->cmn_init_done, 0);
3851 
3852 	/* TBD: Call Tx and Rx cleanup functions to free buffers and
3853 	 * SW descriptors
3854 	 */
3855 
3856 	for (i = 0; i < MAX_PDEV_CNT; i++) {
3857 		if (soc->pdev_list[i])
3858 			dp_pdev_detach((struct cdp_pdev *)
3859 					     soc->pdev_list[i], 1);
3860 	}
3861 
3862 	/* Free the ring memories */
3863 	/* Common rings */
3864 	dp_srng_cleanup(soc, &soc->wbm_desc_rel_ring, SW2WBM_RELEASE, 0);
3865 
3866 	dp_tx_soc_detach(soc);
3867 
3868 	/* Tx data rings */
3869 	if (!wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
3870 		for (i = 0; i < soc->num_tcl_data_rings; i++) {
3871 			dp_srng_cleanup(soc, &soc->tcl_data_ring[i],
3872 				TCL_DATA, i);
3873 			dp_srng_cleanup(soc, &soc->tx_comp_ring[i],
3874 				WBM2SW_RELEASE, i);
3875 		}
3876 	}
3877 
3878 	/* TCL command and status rings */
3879 	dp_srng_cleanup(soc, &soc->tcl_cmd_ring, TCL_CMD, 0);
3880 	dp_srng_cleanup(soc, &soc->tcl_status_ring, TCL_STATUS, 0);
3881 
3882 	/* Rx data rings */
3883 	if (!wlan_cfg_per_pdev_rx_ring(soc->wlan_cfg_ctx)) {
3884 		soc->num_reo_dest_rings =
3885 			wlan_cfg_num_reo_dest_rings(soc->wlan_cfg_ctx);
3886 		for (i = 0; i < soc->num_reo_dest_rings; i++) {
3887 			/* TODO: Get number of rings and ring sizes
3888 			 * from wlan_cfg
3889 			 */
3890 			dp_srng_cleanup(soc, &soc->reo_dest_ring[i],
3891 				REO_DST, i);
3892 		}
3893 	}
3894 	/* REO reinjection ring */
3895 	dp_srng_cleanup(soc, &soc->reo_reinject_ring, REO_REINJECT, 0);
3896 
3897 	/* Rx release ring */
3898 	dp_srng_cleanup(soc, &soc->rx_rel_ring, WBM2SW_RELEASE, 0);
3899 
3900 	/* Rx exception ring */
3901 	/* TODO: Better to store ring_type and ring_num in
3902 	 * dp_srng during setup
3903 	 */
3904 	dp_srng_cleanup(soc, &soc->reo_exception_ring, REO_EXCEPTION, 0);
3905 
3906 	/* REO command and status rings */
3907 	dp_srng_cleanup(soc, &soc->reo_cmd_ring, REO_CMD, 0);
3908 	dp_srng_cleanup(soc, &soc->reo_status_ring, REO_STATUS, 0);
3909 	dp_hw_link_desc_pool_cleanup(soc);
3910 
3911 	htt_soc_detach(soc->htt_handle);
3912 	soc->dp_soc_reinit = 0;
3913 
3914 	wlan_cfg_soc_detach(soc->wlan_cfg_ctx);
3915 
3916 	qdf_mem_free(soc);
3917 }
3918 
3919 /*
3920  * dp_soc_detach_wifi3() - Detach txrx SOC
3921  * @txrx_soc: DP SOC handle, struct cdp_soc_t is first element of struct dp_soc.
3922  *
3923  * Return: None
3924  */
3925 static void dp_soc_detach_wifi3(void *txrx_soc)
3926 {
3927 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
3928 
3929 	if (soc->dp_soc_reinit) {
3930 		dp_soc_detach(txrx_soc);
3931 	} else {
3932 		dp_soc_deinit(txrx_soc);
3933 		dp_soc_detach(txrx_soc);
3934 	}
3935 
3936 }
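
/*
 * SOC teardown is split into two phases: dp_soc_deinit() releases runtime
 * SW state (ring state, locks, work queues), while dp_soc_detach() frees
 * the ring memory and the SOC object itself. The dp_soc_reinit flag set in
 * dp_soc_deinit_wifi3() selects the detach-only path above when deinit has
 * already been done separately. An illustrative call order (not taken from
 * this file) for a target-recovery style flow:
 *
 *	dp_soc_deinit_wifi3(soc);	// target down: drop SW state only
 *	dp_soc_init(soc, htc, hif);	// target back: reuse allocations
 *	...
 *	dp_soc_detach_wifi3(soc);	// final teardown: free the memory
 */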
3937 
3938 #if !defined(DISABLE_MON_CONFIG)
3939 /**
3940  * dp_mon_htt_srng_setup() - Prepare HTT messages for Monitor rings
3941  * @soc: soc handle
3942  * @pdev: physical device handle
3943  * @mac_id: ring number
3944  * @mac_for_pdev: mac_id
3945  *
3946  * Return: non-zero for failure, zero for success
3947  */
3948 static QDF_STATUS dp_mon_htt_srng_setup(struct dp_soc *soc,
3949 					struct dp_pdev *pdev,
3950 					int mac_id,
3951 					int mac_for_pdev)
3952 {
3953 	QDF_STATUS status = QDF_STATUS_SUCCESS;
3954 
3955 	if (soc->wlan_cfg_ctx->rxdma1_enable) {
3956 		status = htt_srng_setup(soc->htt_handle, mac_for_pdev,
3957 					pdev->rxdma_mon_buf_ring[mac_id]
3958 					.hal_srng,
3959 					RXDMA_MONITOR_BUF);
3960 
3961 		if (status != QDF_STATUS_SUCCESS) {
3962 			dp_err("Failed to send htt srng setup message for Rxdma mon buf ring");
3963 			return status;
3964 		}
3965 
3966 		status = htt_srng_setup(soc->htt_handle, mac_for_pdev,
3967 					pdev->rxdma_mon_dst_ring[mac_id]
3968 					.hal_srng,
3969 					RXDMA_MONITOR_DST);
3970 
3971 		if (status != QDF_STATUS_SUCCESS) {
3972 			dp_err("Failed to send htt srng setup message for Rxdma mon dst ring");
3973 			return status;
3974 		}
3975 
3976 		status = htt_srng_setup(soc->htt_handle, mac_for_pdev,
3977 					pdev->rxdma_mon_status_ring[mac_id]
3978 					.hal_srng,
3979 					RXDMA_MONITOR_STATUS);
3980 
3981 		if (status != QDF_STATUS_SUCCESS) {
3982 			dp_err("Failed to send htt srng setup message for Rxdma mon status ring");
3983 			return status;
3984 		}
3985 
3986 		status = htt_srng_setup(soc->htt_handle, mac_for_pdev,
3987 					pdev->rxdma_mon_desc_ring[mac_id]
3988 					.hal_srng,
3989 					RXDMA_MONITOR_DESC);
3990 
3991 		if (status != QDF_STATUS_SUCCESS) {
3992 			dp_err("Failed to send htt srng message for Rxdma mon desc ring");
3993 			return status;
3994 		}
3995 	} else {
3996 		status = htt_srng_setup(soc->htt_handle, mac_for_pdev,
3997 					pdev->rxdma_mon_status_ring[mac_id]
3998 					.hal_srng,
3999 					RXDMA_MONITOR_STATUS);
4000 
4001 		if (status != QDF_STATUS_SUCCESS) {
4002 			dp_err("Failed to send htt srng setup message for Rxdma mon status ring");
4003 			return status;
4004 		}
4005 	}
4006 
4007 	return status;
4008 
4009 }
4010 #else
4011 static QDF_STATUS dp_mon_htt_srng_setup(struct dp_soc *soc,
4012 					struct dp_pdev *pdev,
4013 					int mac_id,
4014 					int mac_for_pdev)
4015 {
4016 	return QDF_STATUS_SUCCESS;
4017 }
4018 #endif
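
/*
 * Note on the rxdma1_enable split above: targets with a dedicated monitor
 * RXDMA engine expose the full monitor ring set (BUF, DST, STATUS and
 * DESC), so all four rings are advertised to the FW via htt_srng_setup();
 * targets without it deliver monitor/PPDU status only through the status
 * ring, which is the single ring configured in the else path.
 */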
4019 
4020 /*
4021  * dp_rxdma_ring_config() - configure the RX DMA rings
4022  *
4023  * This function is used to configure the MAC rings.
4024  * On MCL, the host provides buffers to the FW via the Host2FW ring;
4025  * the FW then refills (copies) them into the MAC ring and updates
4026  * the ring index in a register.
4027  *
4028  * @soc: data path SoC handle
4029  *
4030  * Return: zero on success, non-zero on failure
4031  */
4032 #ifdef QCA_HOST2FW_RXBUF_RING
4033 static QDF_STATUS dp_rxdma_ring_config(struct dp_soc *soc)
4034 {
4035 	int i;
4036 	QDF_STATUS status = QDF_STATUS_SUCCESS;
4037 	for (i = 0; i < MAX_PDEV_CNT; i++) {
4038 		struct dp_pdev *pdev = soc->pdev_list[i];
4039 
4040 		if (pdev) {
4041 			int mac_id;
4042 			bool dbs_enable = 0;
4043 			int max_mac_rings =
4044 				 wlan_cfg_get_num_mac_rings
4045 				(pdev->wlan_cfg_ctx);
4046 
4047 			htt_srng_setup(soc->htt_handle, 0,
4048 				 pdev->rx_refill_buf_ring.hal_srng,
4049 				 RXDMA_BUF);
4050 
4051 			if (pdev->rx_refill_buf_ring2.hal_srng)
4052 				htt_srng_setup(soc->htt_handle, 0,
4053 					pdev->rx_refill_buf_ring2.hal_srng,
4054 					RXDMA_BUF);
4055 
4056 			if (soc->cdp_soc.ol_ops->
4057 				is_hw_dbs_2x2_capable) {
4058 				dbs_enable = soc->cdp_soc.ol_ops->
4059 					is_hw_dbs_2x2_capable(soc->ctrl_psoc);
4060 			}
4061 
4062 			if (dbs_enable) {
4063 				QDF_TRACE(QDF_MODULE_ID_TXRX,
4064 				QDF_TRACE_LEVEL_ERROR,
4065 				FL("DBS enabled max_mac_rings %d"),
4066 					 max_mac_rings);
4067 			} else {
4068 				max_mac_rings = 1;
4069 				QDF_TRACE(QDF_MODULE_ID_TXRX,
4070 					 QDF_TRACE_LEVEL_ERROR,
4071 					 FL("DBS disabled, max_mac_rings %d"),
4072 					 max_mac_rings);
4073 			}
4074 
4075 			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
4076 					 FL("pdev_id %d max_mac_rings %d"),
4077 					 pdev->pdev_id, max_mac_rings);
4078 
4079 			for (mac_id = 0; mac_id < max_mac_rings; mac_id++) {
4080 				int mac_for_pdev = dp_get_mac_id_for_pdev(
4081 							mac_id, pdev->pdev_id);
4082 
4083 				QDF_TRACE(QDF_MODULE_ID_TXRX,
4084 					 QDF_TRACE_LEVEL_ERROR,
4085 					 FL("mac_id %d"), mac_for_pdev);
4086 
4087 				htt_srng_setup(soc->htt_handle, mac_for_pdev,
4088 					 pdev->rx_mac_buf_ring[mac_id]
4089 						.hal_srng,
4090 					 RXDMA_BUF);
4091 				htt_srng_setup(soc->htt_handle, mac_for_pdev,
4092 					pdev->rxdma_err_dst_ring[mac_id]
4093 						.hal_srng,
4094 					RXDMA_DST);
4095 
4096 				/* Configure monitor mode rings */
4097 				status = dp_mon_htt_srng_setup(soc, pdev,
4098 							       mac_id,
4099 							       mac_for_pdev);
4100 				if (status != QDF_STATUS_SUCCESS) {
4101 					dp_err("Failed to send htt monitor messages to target");
4102 					return status;
4103 				}
4104 
4105 			}
4106 		}
4107 	}
4108 
4109 	/*
4110 	 * Timer to reap rxdma status rings.
4111 	 * Needed until we enable ppdu end interrupts
4112 	 */
4113 	qdf_timer_init(soc->osdev, &soc->mon_reap_timer,
4114 			dp_service_mon_rings, (void *)soc,
4115 			QDF_TIMER_TYPE_WAKE_APPS);
4116 	soc->reap_timer_init = 1;
4117 	return status;
4118 }
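
/*
 * The mon_reap_timer armed above pairs with the qdf_timer_free() call in
 * dp_rxdma_ring_cleanup(); both exist only in the QCA_HOST2FW_RXBUF_RING
 * build, where the host polls the RXDMA status rings until PPDU-end
 * interrupts are enabled.
 */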
4119 #else
4120 /* This is only for WIN */
4121 static QDF_STATUS dp_rxdma_ring_config(struct dp_soc *soc)
4122 {
4123 	int i;
4124 	int mac_id;
4125 	QDF_STATUS status = QDF_STATUS_SUCCESS;
4126 
4127 	for (i = 0; i < MAX_PDEV_CNT; i++) {
4128 		struct dp_pdev *pdev = soc->pdev_list[i];
4129 
4130 		if (pdev == NULL)
4131 			continue;
4132 
4133 		for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
4134 			int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, i);
4135 
4136 			htt_srng_setup(soc->htt_handle, mac_for_pdev,
4137 				pdev->rx_refill_buf_ring.hal_srng, RXDMA_BUF);
4138 #ifndef DISABLE_MON_CONFIG
4139 			htt_srng_setup(soc->htt_handle, mac_for_pdev,
4140 				pdev->rxdma_mon_buf_ring[mac_id].hal_srng,
4141 				RXDMA_MONITOR_BUF);
4142 			htt_srng_setup(soc->htt_handle, mac_for_pdev,
4143 				pdev->rxdma_mon_dst_ring[mac_id].hal_srng,
4144 				RXDMA_MONITOR_DST);
4145 			htt_srng_setup(soc->htt_handle, mac_for_pdev,
4146 				pdev->rxdma_mon_status_ring[mac_id].hal_srng,
4147 				RXDMA_MONITOR_STATUS);
4148 			htt_srng_setup(soc->htt_handle, mac_for_pdev,
4149 				pdev->rxdma_mon_desc_ring[mac_id].hal_srng,
4150 				RXDMA_MONITOR_DESC);
4151 #endif
4152 			htt_srng_setup(soc->htt_handle, mac_for_pdev,
4153 				pdev->rxdma_err_dst_ring[mac_id].hal_srng,
4154 				RXDMA_DST);
4155 		}
4156 	}
4157 	return status;
4158 }
4159 #endif
4160 
4161 /*
4162  * dp_soc_attach_target_wifi3() - SOC initialization in the target
4163  * @cdp_soc: Opaque Datapath SOC handle
4164  *
4165  * Return: zero on success, non-zero on failure
4166  */
4167 static QDF_STATUS
4168 dp_soc_attach_target_wifi3(struct cdp_soc_t *cdp_soc)
4169 {
4170 	struct dp_soc *soc = (struct dp_soc *)cdp_soc;
4171 	QDF_STATUS status = QDF_STATUS_SUCCESS;
4172 
4173 	htt_soc_attach_target(soc->htt_handle);
4174 
4175 	status = dp_rxdma_ring_config(soc);
4176 	if (status != QDF_STATUS_SUCCESS) {
4177 		dp_err("Failed to send htt srng setup messages to target");
4178 		return status;
4179 	}
4180 
4181 	DP_STATS_INIT(soc);
4182 
4183 	/* initialize work queue for stats processing */
4184 	qdf_create_work(0, &soc->htt_stats.work, htt_t2h_stats_handler, soc);
4185 
4186 	return QDF_STATUS_SUCCESS;
4187 }
4188 
4189 /*
4190  * dp_soc_get_nss_cfg_wifi3() - SOC get nss config
4191  * @cdp_soc: Datapath SOC handle
4192  */
4193 static int dp_soc_get_nss_cfg_wifi3(struct cdp_soc_t *cdp_soc)
4194 {
4195 	struct dp_soc *dsoc = (struct dp_soc *)cdp_soc;
4196 	return wlan_cfg_get_dp_soc_nss_cfg(dsoc->wlan_cfg_ctx);
4197 }
4198 /*
4199  * dp_soc_set_nss_cfg_wifi3() - SOC set nss config
4200  * @cdp_soc: Datapath SOC handle
4201  * @config: nss config (dp_nss_cfg_default/dbdc/dbtc)
4202  */
4203 static void dp_soc_set_nss_cfg_wifi3(struct cdp_soc_t *cdp_soc, int config)
4204 {
4205 	struct dp_soc *dsoc = (struct dp_soc *)cdp_soc;
4206 	struct wlan_cfg_dp_soc_ctxt *wlan_cfg_ctx = dsoc->wlan_cfg_ctx;
4207 
4208 	wlan_cfg_set_dp_soc_nss_cfg(wlan_cfg_ctx, config);
4209 
4210 	/*
4211 	 * TODO: masked out based on the per offloaded radio
4212 	 */
4213 	switch (config) {
4214 	case dp_nss_cfg_default:
4215 		break;
4216 	case dp_nss_cfg_dbdc:
4217 	case dp_nss_cfg_dbtc:
4218 		wlan_cfg_set_num_tx_desc_pool(wlan_cfg_ctx, 0);
4219 		wlan_cfg_set_num_tx_ext_desc_pool(wlan_cfg_ctx, 0);
4220 		wlan_cfg_set_num_tx_desc(wlan_cfg_ctx, 0);
4221 		wlan_cfg_set_num_tx_ext_desc(wlan_cfg_ctx, 0);
4222 		break;
4223 	default:
4224 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
4225 			  "Invalid offload config %d", config);
4226 	}
4227 
4228 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
4229 		  FL("nss-wifi<0> nss config is enabled"));
4230 }
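
/*
 * For the dbdc/dbtc cases above, the host Tx descriptor pools are sized
 * to zero on the assumption that Tx descriptors for NSS-offloaded radios
 * are managed by the offload path rather than the host - the same
 * reasoning that disables host Rx hash-based steering for NSS-enabled
 * radios later in this file.
 */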
4231 /*
4232  * dp_vdev_attach_wifi3() - attach txrx vdev
4233  * @txrx_pdev: Datapath PDEV handle
4234  * @vdev_mac_addr: MAC address of the virtual interface
4235  * @vdev_id: VDEV Id
4236  * @op_mode: VDEV operating mode
4237  *
4238  * Return: DP VDEV handle on success, NULL on failure
4239 */
4240 static struct cdp_vdev *dp_vdev_attach_wifi3(struct cdp_pdev *txrx_pdev,
4241 	uint8_t *vdev_mac_addr, uint8_t vdev_id, enum wlan_op_mode op_mode)
4242 {
4243 	struct dp_pdev *pdev = (struct dp_pdev *)txrx_pdev;
4244 	struct dp_soc *soc = pdev->soc;
4245 	struct dp_vdev *vdev = qdf_mem_malloc(sizeof(*vdev));
4246 
4247 	if (!vdev) {
4248 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
4249 			FL("DP VDEV memory allocation failed"));
4250 		goto fail0;
4251 	}
4252 
4253 	vdev->pdev = pdev;
4254 	vdev->vdev_id = vdev_id;
4255 	vdev->opmode = op_mode;
4256 	vdev->osdev = soc->osdev;
4257 
4258 	vdev->osif_rx = NULL;
4259 	vdev->osif_rsim_rx_decap = NULL;
4260 	vdev->osif_get_key = NULL;
4261 	vdev->osif_rx_mon = NULL;
4262 	vdev->osif_tx_free_ext = NULL;
4263 	vdev->osif_vdev = NULL;
4264 
4265 	vdev->delete.pending = 0;
4266 	vdev->safemode = 0;
4267 	vdev->drop_unenc = 1;
4268 	vdev->sec_type = cdp_sec_type_none;
4269 #ifdef notyet
4270 	vdev->filters_num = 0;
4271 #endif
4272 
4273 	qdf_mem_copy(
4274 		&vdev->mac_addr.raw[0], vdev_mac_addr, OL_TXRX_MAC_ADDR_LEN);
4275 
4276 	/* TODO: Initialize default HTT meta data that will be used in
4277 	 * TCL descriptors for packets transmitted from this VDEV
4278 	 */
4279 
4280 	TAILQ_INIT(&vdev->peer_list);
4281 
4282 	if ((soc->intr_mode == DP_INTR_POLL) &&
4283 	    wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx) != 0) {
4284 		if ((pdev->vdev_count == 0) ||
4285 		    (wlan_op_mode_monitor == vdev->opmode))
4286 			qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
4287 	}
4288 
4289 	if (wlan_op_mode_monitor == vdev->opmode) {
4290 		pdev->monitor_vdev = vdev;
4291 		return (struct cdp_vdev *)vdev;
4292 	}
4293 
4294 	vdev->tx_encap_type = wlan_cfg_pkt_type(soc->wlan_cfg_ctx);
4295 	vdev->rx_decap_type = wlan_cfg_pkt_type(soc->wlan_cfg_ctx);
4296 	vdev->dscp_tid_map_id = 0;
4297 	vdev->mcast_enhancement_en = 0;
4298 	vdev->raw_mode_war = wlan_cfg_get_raw_mode_war(soc->wlan_cfg_ctx);
4299 
4300 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
4301 	/* add this vdev into the pdev's list */
4302 	TAILQ_INSERT_TAIL(&pdev->vdev_list, vdev, vdev_list_elem);
4303 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
4304 	pdev->vdev_count++;
4305 
4306 	dp_tx_vdev_attach(vdev);
4307 
4308 	if (pdev->vdev_count == 1)
4309 		dp_lro_hash_setup(soc, pdev);
4310 
4311 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
4312 		"Created vdev %pK (%pM)", vdev, vdev->mac_addr.raw);
4313 	DP_STATS_INIT(vdev);
4314 
4315 	if (wlan_op_mode_sta == vdev->opmode)
4316 		dp_peer_create_wifi3((struct cdp_vdev *)vdev,
4317 							vdev->mac_addr.raw,
4318 							NULL);
4319 
4320 	return (struct cdp_vdev *)vdev;
4321 
4322 fail0:
4323 	return NULL;
4324 }
4325 
4326 /**
4327  * dp_vdev_register_wifi3() - Register VDEV operations from osif layer
4328  * @vdev_handle: Datapath VDEV handle
4329  * @osif_vdev: OSIF vdev handle
4330  * @ctrl_vdev: UMAC vdev handle
4331  * @txrx_ops: Tx and Rx operations
4332  *
4333  * Return: None
4334  */
4335 static void dp_vdev_register_wifi3(struct cdp_vdev *vdev_handle,
4336 	void *osif_vdev, struct cdp_ctrl_objmgr_vdev *ctrl_vdev,
4337 	struct ol_txrx_ops *txrx_ops)
4338 {
4339 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
4340 	vdev->osif_vdev = osif_vdev;
4341 	vdev->ctrl_vdev = ctrl_vdev;
4342 	vdev->osif_rx = txrx_ops->rx.rx;
4343 	vdev->osif_rx_stack = txrx_ops->rx.rx_stack;
4344 	vdev->osif_rsim_rx_decap = txrx_ops->rx.rsim_rx_decap;
4345 	vdev->osif_get_key = txrx_ops->get_key;
4346 	vdev->osif_rx_mon = txrx_ops->rx.mon;
4347 	vdev->osif_tx_free_ext = txrx_ops->tx.tx_free_ext;
4348 #ifdef notyet
4349 #if ATH_SUPPORT_WAPI
4350 	vdev->osif_check_wai = txrx_ops->rx.wai_check;
4351 #endif
4352 #endif
4353 #ifdef UMAC_SUPPORT_PROXY_ARP
4354 	vdev->osif_proxy_arp = txrx_ops->proxy_arp;
4355 #endif
4356 	vdev->me_convert = txrx_ops->me_convert;
4357 
4358 	/* TODO: Enable the following once Tx code is integrated */
4359 	if (vdev->mesh_vdev)
4360 		txrx_ops->tx.tx = dp_tx_send_mesh;
4361 	else
4362 		txrx_ops->tx.tx = dp_tx_send;
4363 
4364 	txrx_ops->tx.tx_exception = dp_tx_send_exception;
4365 
4366 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW,
4367 		"DP Vdev Register success");
4368 }
4369 
4370 /**
4371  * dp_vdev_flush_peers() - Forcibly flush peers of vdev
4372  * @vdev: Datapath VDEV handle
4373  *
4374  * Return: void
4375  */
4376 static void dp_vdev_flush_peers(struct dp_vdev *vdev)
4377 {
4378 	struct dp_pdev *pdev = vdev->pdev;
4379 	struct dp_soc *soc = pdev->soc;
4380 	struct dp_peer *peer;
4381 	uint16_t *peer_ids;
4382 	uint8_t i = 0, j = 0;
4383 
4384 	peer_ids = qdf_mem_malloc(soc->max_peers * sizeof(peer_ids[0]));
4385 	if (!peer_ids) {
4386 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
4387 			"DP alloc failure - unable to flush peers");
4388 		return;
4389 	}
4390 
4391 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
4392 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
4393 		for (i = 0; i < MAX_NUM_PEER_ID_PER_PEER; i++)
4394 			if (peer->peer_ids[i] != HTT_INVALID_PEER)
4395 				if (j < soc->max_peers)
4396 					peer_ids[j++] = peer->peer_ids[i];
4397 	}
4398 	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
4399 
4400 	for (i = 0; i < j ; i++) {
4401 		peer = dp_peer_find_by_id(soc, peer_ids[i]);
4402 		if (peer) {
4403 			dp_info("peer: %pM is getting flushed",
4404 				peer->mac_addr.raw);
4405 			dp_peer_delete_wifi3(peer, 0);
4406 			/*
4407 			 * we need to call dp_peer_unref_del_find_by_id()
4408 			 * to remove additional ref count incremented
4409 			 * by dp_peer_find_by_id() call.
4410 			 *
4411 			 * Hold the ref count while executing
4412 			 * dp_peer_delete_wifi3() call.
4413 			 *
4414 			 */
4415 			dp_peer_unref_del_find_by_id(peer);
4416 			dp_rx_peer_unmap_handler(soc, peer_ids[i],
4417 						 vdev->vdev_id,
4418 						 peer->mac_addr.raw, 0);
4419 		}
4420 	}
4421 
4422 	qdf_mem_free(peer_ids);
4423 
4424 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
4425 		FL("Flushed peers for vdev object %pK "), vdev);
4426 }
4427 
4428 /*
4429  * dp_vdev_detach_wifi3() - Detach txrx vdev
4430  * @txrx_vdev:		Datapath VDEV handle
4431  * @callback:		Callback OL_IF on completion of detach
4432  * @cb_context:	Callback context
4433  *
4434  */
4435 static void dp_vdev_detach_wifi3(struct cdp_vdev *vdev_handle,
4436 	ol_txrx_vdev_delete_cb callback, void *cb_context)
4437 {
4438 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
4439 	struct dp_pdev *pdev = vdev->pdev;
4440 	struct dp_soc *soc = pdev->soc;
4441 	struct dp_neighbour_peer *peer = NULL;
4442 	struct dp_neighbour_peer *temp_peer = NULL;
4443 
4444 	/* preconditions */
4445 	qdf_assert(vdev);
4446 
4447 	if (wlan_op_mode_monitor == vdev->opmode)
4448 		goto free_vdev;
4449 
4450 	if (wlan_op_mode_sta == vdev->opmode)
4451 		dp_peer_delete_wifi3(vdev->vap_bss_peer, 0);
4452 
4453 	/*
4454 	 * If Target is hung, flush all peers before detaching vdev
4455 	 * this will free all references held due to missing
4456 	 * unmap commands from Target
4457 	 */
4458 	if ((hif_get_target_status(soc->hif_handle) == TARGET_STATUS_RESET) ||
4459 	    !hif_is_target_ready(HIF_GET_SOFTC(soc->hif_handle)))
4460 		dp_vdev_flush_peers(vdev);
4461 
4462 	/*
4463 	 * Use peer_ref_mutex while accessing peer_list, in case
4464 	 * a peer is in the process of being removed from the list.
4465 	 */
4466 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
4467 	/* check that the vdev has no peers allocated */
4468 	if (!TAILQ_EMPTY(&vdev->peer_list)) {
4469 		/* debug print - will be removed later */
4470 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_WARN,
4471 			FL("not deleting vdev object %pK (%pM)"
4472 			" until deletion finishes for all its peers"),
4473 			vdev, vdev->mac_addr.raw);
4474 		/* indicate that the vdev needs to be deleted */
4475 		vdev->delete.pending = 1;
4476 		vdev->delete.callback = callback;
4477 		vdev->delete.context = cb_context;
4478 		qdf_spin_unlock_bh(&soc->peer_ref_mutex);
4479 		return;
4480 	}
4481 	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
4482 
4483 	qdf_spin_lock_bh(&pdev->neighbour_peer_mutex);
4484 	if (!soc->hw_nac_monitor_support) {
4485 		TAILQ_FOREACH(peer, &pdev->neighbour_peers_list,
4486 			      neighbour_peer_list_elem) {
4487 			QDF_ASSERT(peer->vdev != vdev);
4488 		}
4489 	} else {
4490 		TAILQ_FOREACH_SAFE(peer, &pdev->neighbour_peers_list,
4491 				   neighbour_peer_list_elem, temp_peer) {
4492 			if (peer->vdev == vdev) {
4493 				TAILQ_REMOVE(&pdev->neighbour_peers_list, peer,
4494 					     neighbour_peer_list_elem);
4495 				qdf_mem_free(peer);
4496 			}
4497 		}
4498 	}
4499 	qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex);
4500 
4501 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
4502 	dp_tx_vdev_detach(vdev);
4503 	/* remove the vdev from its parent pdev's list */
4504 	TAILQ_REMOVE(&pdev->vdev_list, vdev, vdev_list_elem);
4505 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
4506 		FL("deleting vdev object %pK (%pM)"), vdev, vdev->mac_addr.raw);
4507 
4508 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
4509 free_vdev:
4510 	qdf_mem_free(vdev);
4511 
4512 	if (callback)
4513 		callback(cb_context);
4514 }
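
/*
 * Deferred vdev delete: when peers are still attached, the function above
 * only records the callback in vdev->delete and returns. The actual free
 * then happens in dp_delete_pending_vdev(), invoked from
 * dp_peer_unref_delete() once the last peer of the vdev is released.
 */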
4515 
4516 /*
4517  * dp_peer_delete_ast_entries(): Delete all AST entries for a peer
4518  * @soc: datapath soc handle
4519  * @peer: datapath peer handle
4520  *
4521  * Delete the AST entries belonging to a peer
4522  */
4523 #ifdef FEATURE_AST
4524 static inline void dp_peer_delete_ast_entries(struct dp_soc *soc,
4525 					      struct dp_peer *peer)
4526 {
4527 	struct dp_ast_entry *ast_entry, *temp_ast_entry;
4528 
4529 	qdf_spin_lock_bh(&soc->ast_lock);
4530 	DP_PEER_ITERATE_ASE_LIST(peer, ast_entry, temp_ast_entry)
4531 		dp_peer_del_ast(soc, ast_entry);
4532 
4533 	peer->self_ast_entry = NULL;
4534 	qdf_spin_unlock_bh(&soc->ast_lock);
4535 }
4536 #else
4537 static inline void dp_peer_delete_ast_entries(struct dp_soc *soc,
4538 					      struct dp_peer *peer)
4539 {
4540 }
4541 #endif
4542 
4543 #if ATH_SUPPORT_WRAP
4544 static inline struct dp_peer *dp_peer_can_reuse(struct dp_vdev *vdev,
4545 						uint8_t *peer_mac_addr)
4546 {
4547 	struct dp_peer *peer;
4548 
4549 	peer = dp_peer_find_hash_find(vdev->pdev->soc, peer_mac_addr,
4550 				      0, vdev->vdev_id);
4551 	if (!peer)
4552 		return NULL;
4553 
4554 	if (peer->bss_peer)
4555 		return peer;
4556 
4557 	dp_peer_unref_delete(peer);
4558 	return NULL;
4559 }
4560 #else
4561 static inline struct dp_peer *dp_peer_can_reuse(struct dp_vdev *vdev,
4562 						uint8_t *peer_mac_addr)
4563 {
4564 	struct dp_peer *peer;
4565 
4566 	peer = dp_peer_find_hash_find(vdev->pdev->soc, peer_mac_addr,
4567 				      0, vdev->vdev_id);
4568 	if (!peer)
4569 		return NULL;
4570 
4571 	if (peer->bss_peer && (peer->vdev->vdev_id == vdev->vdev_id))
4572 		return peer;
4573 
4574 	dp_peer_unref_delete(peer);
4575 	return NULL;
4576 }
4577 #endif
4578 
4579 #ifdef FEATURE_AST
4580 static inline void dp_peer_ast_handle_roam_del(struct dp_soc *soc,
4581 					       uint8_t *peer_mac_addr)
4582 {
4583 	struct dp_ast_entry *ast_entry;
4584 
4585 	qdf_spin_lock_bh(&soc->ast_lock);
4586 	ast_entry = dp_peer_ast_hash_find_soc(soc, peer_mac_addr);
4587 
4588 	if (ast_entry && ast_entry->next_hop &&
4589 	    !ast_entry->delete_in_progress)
4590 		dp_peer_del_ast(soc, ast_entry);
4591 
4592 	qdf_spin_unlock_bh(&soc->ast_lock);
4593 }
4594 #endif
4595 
4596 /*
4597  * dp_peer_create_wifi3() - attach txrx peer
4598  * @vdev_handle: Datapath VDEV handle
4599  * @peer_mac_addr: Peer MAC address
4600  *
4601  * Return: DP peer handle on success, NULL on failure
4602  */
4603 static void *dp_peer_create_wifi3(struct cdp_vdev *vdev_handle,
4604 		uint8_t *peer_mac_addr, struct cdp_ctrl_objmgr_peer *ctrl_peer)
4605 {
4606 	struct dp_peer *peer;
4607 	int i;
4608 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
4609 	struct dp_pdev *pdev;
4610 	struct dp_soc *soc;
4611 	enum cdp_txrx_ast_entry_type ast_type = CDP_TXRX_AST_TYPE_STATIC;
4612 
4613 	/* preconditions */
4614 	qdf_assert(vdev);
4615 	qdf_assert(peer_mac_addr);
4616 
4617 	pdev = vdev->pdev;
4618 	soc = pdev->soc;
4619 
4620 	/*
4621 	 * If a peer entry with given MAC address already exists,
4622 	 * reuse the peer and reset the state of peer.
4623 	 */
4624 	peer = dp_peer_can_reuse(vdev, peer_mac_addr);
4625 
4626 	if (peer) {
4627 		qdf_atomic_init(&peer->is_default_route_set);
4628 		dp_peer_cleanup(vdev, peer);
4629 
4630 		peer->delete_in_progress = false;
4631 
4632 		dp_peer_delete_ast_entries(soc, peer);
4633 
4634 		if ((vdev->opmode == wlan_op_mode_sta) &&
4635 		    !qdf_mem_cmp(peer_mac_addr, &vdev->mac_addr.raw[0],
4636 		     DP_MAC_ADDR_LEN)) {
4637 			ast_type = CDP_TXRX_AST_TYPE_SELF;
4638 		}
4639 
4640 		dp_peer_add_ast(soc, peer, peer_mac_addr, ast_type, 0);
4641 
4642 		/*
4643 		 * Control path maintains a node count which is incremented
4644 		 * for every new peer create command. Since no new peer is
4645 		 * being created here and the earlier reference is reused, a
4646 		 * peer_unref_delete event is sent to the control path to
4647 		 * bring that count back down.
4648 		*/
4649 		if (soc->cdp_soc.ol_ops->peer_unref_delete) {
4650 			soc->cdp_soc.ol_ops->peer_unref_delete(pdev->ctrl_pdev,
4651 				peer->mac_addr.raw, vdev->mac_addr.raw,
4652 				vdev->opmode, peer->ctrl_peer, ctrl_peer);
4653 		}
4654 		peer->ctrl_peer = ctrl_peer;
4655 
4656 		dp_local_peer_id_alloc(pdev, peer);
4657 		DP_STATS_INIT(peer);
4658 
4659 		return (void *)peer;
4660 	} else {
4661 		/*
4662 		 * When a STA roams from RPTR AP to ROOT AP and vice versa, we
4663 		 * need to remove the AST entry which was earlier added as a WDS
4664 		 * entry.
4665 		 * If an AST entry exists, but no peer entry exists with a given
4666 		 * MAC addresses, we could deduce it as a WDS entry
4667 		 */
4668 		dp_peer_ast_handle_roam_del(soc, peer_mac_addr);
4669 	}
4670 
4671 #ifdef notyet
4672 	peer = (struct dp_peer *)qdf_mempool_alloc(soc->osdev,
4673 		soc->mempool_ol_ath_peer);
4674 #else
4675 	peer = (struct dp_peer *)qdf_mem_malloc(sizeof(*peer));
4676 #endif
4677 
4678 	if (!peer)
4679 		return NULL; /* failure */
4680 
4681 	qdf_mem_zero(peer, sizeof(struct dp_peer));
4682 
4683 	TAILQ_INIT(&peer->ast_entry_list);
4684 
4685 	/* store provided params */
4686 	peer->vdev = vdev;
4687 	peer->ctrl_peer = ctrl_peer;
4688 
4689 	if ((vdev->opmode == wlan_op_mode_sta) &&
4690 	    !qdf_mem_cmp(peer_mac_addr, &vdev->mac_addr.raw[0],
4691 			 DP_MAC_ADDR_LEN)) {
4692 		ast_type = CDP_TXRX_AST_TYPE_SELF;
4693 	}
4694 
4695 	dp_peer_add_ast(soc, peer, peer_mac_addr, ast_type, 0);
4696 
4697 	qdf_spinlock_create(&peer->peer_info_lock);
4698 
4699 	qdf_mem_copy(
4700 		&peer->mac_addr.raw[0], peer_mac_addr, OL_TXRX_MAC_ADDR_LEN);
4701 
4702 	/* TODO: See if rx_opt_proc is really required */
4703 	peer->rx_opt_proc = soc->rx_opt_proc;
4704 
4705 	/* initialize the peer_id */
4706 	for (i = 0; i < MAX_NUM_PEER_ID_PER_PEER; i++)
4707 		peer->peer_ids[i] = HTT_INVALID_PEER;
4708 
4709 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
4710 
4711 	qdf_atomic_init(&peer->ref_cnt);
4712 
4713 	/* keep one reference for attach */
4714 	qdf_atomic_inc(&peer->ref_cnt);
4715 
4716 	/* add this peer into the vdev's list */
4717 	if (wlan_op_mode_sta == vdev->opmode)
4718 		TAILQ_INSERT_HEAD(&vdev->peer_list, peer, peer_list_elem);
4719 	else
4720 		TAILQ_INSERT_TAIL(&vdev->peer_list, peer, peer_list_elem);
4721 
4722 	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
4723 
4724 	/* TODO: See if hash based search is required */
4725 	dp_peer_find_hash_add(soc, peer);
4726 
4727 	/* Initialize the peer state */
4728 	peer->state = OL_TXRX_PEER_STATE_DISC;
4729 
4730 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
4731 		"vdev %pK created peer %pK (%pM) ref_cnt: %d",
4732 		vdev, peer, peer->mac_addr.raw,
4733 		qdf_atomic_read(&peer->ref_cnt));
4734 	/*
4735 	 * Mark as bss_peer when the peer's MAC address matches the vdev's
4736 	 */
4737 	if (memcmp(peer->mac_addr.raw, vdev->mac_addr.raw, DP_MAC_ADDR_LEN) == 0) {
4738 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
4739 			"vdev bss_peer!!!!");
4740 		peer->bss_peer = 1;
4741 		vdev->vap_bss_peer = peer;
4742 	}
4743 	for (i = 0; i < DP_MAX_TIDS; i++)
4744 		qdf_spinlock_create(&peer->rx_tid[i].tid_lock);
4745 
4746 	dp_local_peer_id_alloc(pdev, peer);
4747 	DP_STATS_INIT(peer);
4748 	return (void *)peer;
4749 }
4750 
4751 /*
4752  * dp_vdev_get_default_reo_hash() - get reo dest ring and hash values for a vdev
4753  * @vdev: Datapath VDEV handle
4754  * @reo_dest: pointer to default reo_dest ring for vdev to be populated
4755  * @hash_based: pointer to hash value (enabled/disabled) to be populated
4756  *
4757  * Return: None
4758  */
4759 static
4760 void dp_vdev_get_default_reo_hash(struct dp_vdev *vdev,
4761 				  enum cdp_host_reo_dest_ring *reo_dest,
4762 				  bool *hash_based)
4763 {
4764 	struct dp_soc *soc;
4765 	struct dp_pdev *pdev;
4766 
4767 	pdev = vdev->pdev;
4768 	soc = pdev->soc;
4769 	/*
4770 	 * hash based steering is disabled for Radios which are offloaded
4771 	 * to NSS
4772 	 */
4773 	if (!wlan_cfg_get_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx))
4774 		*hash_based = wlan_cfg_is_rx_hash_enabled(soc->wlan_cfg_ctx);
4775 
4776 	/*
4777 	 * Below line of code will ensure the proper reo_dest ring is chosen
4778 	 * for cases where toeplitz hash cannot be generated (ex: non TCP/UDP)
4779 	 */
4780 	*reo_dest = pdev->reo_dest;
4781 }
4782 
4783 #ifdef IPA_OFFLOAD
4784 /*
4785  * dp_peer_setup_get_reo_hash() - get reo dest ring and hash values for a peer
4786  * @vdev: Datapath VDEV handle
4787  * @reo_dest: pointer to default reo_dest ring for vdev to be populated
4788  * @hash_based: pointer to hash value (enabled/disabled) to be populated
4789  *
4790  * If IPA is enabled in ini, for SAP mode, disable hash based
4791  * steering, use the default reo_dest ring for RX. Use config values for other modes.
4792  * Return: None
4793  */
4794 static void dp_peer_setup_get_reo_hash(struct dp_vdev *vdev,
4795 				       enum cdp_host_reo_dest_ring *reo_dest,
4796 				       bool *hash_based)
4797 {
4798 	struct dp_soc *soc;
4799 	struct dp_pdev *pdev;
4800 
4801 	pdev = vdev->pdev;
4802 	soc = pdev->soc;
4803 
4804 	dp_vdev_get_default_reo_hash(vdev, reo_dest, hash_based);
4805 
4806 	/*
4807 	 * If IPA is enabled, disable hash-based flow steering and set
4808 	 * reo_dest_ring_4 as the REO ring to receive packets on.
4809 	 * IPA is configured to reap reo_dest_ring_4.
4810 	 *
4811 	 * Note - REO DST indexes are from 0 - 3, while cdp_host_reo_dest_ring
4812 	 * value enum value is from 1 - 4.
4813 	 * Hence, *reo_dest = IPA_REO_DEST_RING_IDX + 1
4814 	 */
4815 	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) {
4816 		if (vdev->opmode == wlan_op_mode_ap) {
4817 			*reo_dest = IPA_REO_DEST_RING_IDX + 1;
4818 			*hash_based = 0;
4819 		}
4820 	}
4821 }
4822 
4823 #else
4824 
4825 /*
4826  * dp_peer_setup_get_reo_hash() - get reo dest ring and hash values for a peer
4827  * @vdev: Datapath VDEV handle
4828  * @reo_dest: pointer to default reo_dest ring for vdev to be populated
4829  * @hash_based: pointer to hash value (enabled/disabled) to be populated
4830  *
4831  * Use system config values for hash based steering.
4832  * Return: None
4833  */
4834 
4835 static void dp_peer_setup_get_reo_hash(struct dp_vdev *vdev,
4836 				       enum cdp_host_reo_dest_ring *reo_dest,
4837 				       bool *hash_based)
4838 {
4839 	dp_vdev_get_default_reo_hash(vdev, reo_dest, hash_based);
4840 }
4841 #endif /* IPA_OFFLOAD */
4842 
4843 /*
4844  * dp_peer_setup_wifi3() - initialize the peer
4845  * @vdev_hdl: virtual device object
4846  * @peer_hdl: Peer object
4847  *
4848  * Return: void
4849  */
4850 static void dp_peer_setup_wifi3(struct cdp_vdev *vdev_hdl, void *peer_hdl)
4851 {
4852 	struct dp_peer *peer = (struct dp_peer *)peer_hdl;
4853 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_hdl;
4854 	struct dp_pdev *pdev;
4855 	struct dp_soc *soc;
4856 	bool hash_based = 0;
4857 	enum cdp_host_reo_dest_ring reo_dest;
4858 
4859 	/* preconditions */
4860 	qdf_assert(vdev);
4861 	qdf_assert(peer);
4862 
4863 	pdev = vdev->pdev;
4864 	soc = pdev->soc;
4865 
4866 	peer->last_assoc_rcvd = 0;
4867 	peer->last_disassoc_rcvd = 0;
4868 	peer->last_deauth_rcvd = 0;
4869 
4870 	dp_peer_setup_get_reo_hash(vdev, &reo_dest, &hash_based);
4871 
4872 	dp_info("pdev: %d vdev: %d opmode: %u hash-based-steering: %d default-reo_dest: %u",
4873 		pdev->pdev_id, vdev->vdev_id,
4874 		vdev->opmode, hash_based, reo_dest);
4875 
4876 
4877 	/*
4878 	 * There are corner cases where AD1 = AD2 = "VAP's address", i.e.
4879 	 * both devices have the same MAC address. In these cases we
4880 	 * want such packets to be processed by the NULL-queue handler,
4881 	 * which is the REO2TCL ring. For this reason we should not set
4882 	 * up REO queues and a default route for the bss_peer.
4883 	 */
4884 	if (peer->bss_peer && vdev->opmode == wlan_op_mode_ap)
4885 		return;
4886 
4887 	if (soc->cdp_soc.ol_ops->peer_set_default_routing) {
4888 		/* TODO: Check the destination ring number to be passed to FW */
4889 		soc->cdp_soc.ol_ops->peer_set_default_routing(
4890 				pdev->ctrl_pdev, peer->mac_addr.raw,
4891 				peer->vdev->vdev_id, hash_based, reo_dest);
4892 	}
4893 
4894 	qdf_atomic_set(&peer->is_default_route_set, 1);
4895 
4896 	dp_peer_rx_init(pdev, peer);
4897 	return;
4898 }
4899 
4900 /*
4901  * dp_set_vdev_tx_encap_type() - set the encap type of the vdev
4902  * @vdev_handle: virtual device object
4903  * @val: packet type (enum htt_cmn_pkt_type)
4904  *
4905  * Return: void
4906  */
4907 static void dp_set_vdev_tx_encap_type(struct cdp_vdev *vdev_handle,
4908 	 enum htt_cmn_pkt_type val)
4909 {
4910 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
4911 	vdev->tx_encap_type = val;
4912 }
4913 
4914 /*
4915  * dp_set_vdev_rx_decap_type() - set the decap type of the vdev
4916  * @vdev_handle: virtual device object
4917  * @val: packet type (enum htt_cmn_pkt_type)
4918  *
4919  * Return: void
4920  */
4921 static void dp_set_vdev_rx_decap_type(struct cdp_vdev *vdev_handle,
4922 	 enum htt_cmn_pkt_type val)
4923 {
4924 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
4925 	vdev->rx_decap_type = val;
4926 }
4927 
4928 /*
4929  * dp_set_ba_aging_timeout() - set ba aging timeout per AC
4930  * @txrx_soc: cdp soc handle
4931  * @ac: Access category
4932  * @value: timeout value in millisec
4933  *
4934  * Return: void
4935  */
4936 static void dp_set_ba_aging_timeout(struct cdp_soc_t *txrx_soc,
4937 				    uint8_t ac, uint32_t value)
4938 {
4939 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
4940 
4941 	hal_set_ba_aging_timeout(soc->hal_soc, ac, value);
4942 }
4943 
4944 /*
4945  * dp_get_ba_aging_timeout() - get ba aging timeout per AC
4946  * @txrx_soc: cdp soc handle
4947  * @ac: access category
4948  * @value: timeout value in millisec
4949  *
4950  * Return: void
4951  */
4952 static void dp_get_ba_aging_timeout(struct cdp_soc_t *txrx_soc,
4953 				    uint8_t ac, uint32_t *value)
4954 {
4955 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
4956 
4957 	hal_get_ba_aging_timeout(soc->hal_soc, ac, value);
4958 }
4959 
4960 /*
4961  * dp_set_pdev_reo_dest() - set the reo destination ring for this pdev
4962  * @pdev_handle: physical device object
4963  * @val: reo destination ring index (1 - 4)
4964  *
4965  * Return: void
4966  */
4967 static void dp_set_pdev_reo_dest(struct cdp_pdev *pdev_handle,
4968 	 enum cdp_host_reo_dest_ring val)
4969 {
4970 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
4971 
4972 	if (pdev)
4973 		pdev->reo_dest = val;
4974 }
4975 
4976 /*
4977  * dp_get_pdev_reo_dest() - get the reo destination for this pdev
4978  * @pdev_handle: physical device object
4979  *
4980  * Return: reo destination ring index
4981  */
4982 static enum cdp_host_reo_dest_ring
4983 dp_get_pdev_reo_dest(struct cdp_pdev *pdev_handle)
4984 {
4985 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
4986 
4987 	if (pdev)
4988 		return pdev->reo_dest;
4989 	else
4990 		return cdp_host_reo_dest_ring_unknown;
4991 }
4992 
4993 /*
4994  * dp_set_filter_neighbour_peers() - set filter neighbour peers for smart mesh
4995  * @pdev_handle: device object
4996  * @val: value to be set
4997  *
4998  * Return: 0
4999  */
5000 static int dp_set_filter_neighbour_peers(struct cdp_pdev *pdev_handle,
5001 	 uint32_t val)
5002 {
5003 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
5004 
5005 	/* Enable/Disable smart mesh filtering. This flag will be checked
5006 	 * during rx processing to check if packets are from NAC clients.
5007 	 */
5008 	pdev->filter_neighbour_peers = val;
5009 	return 0;
5010 }
5011 
5012 /*
5013  * dp_update_filter_neighbour_peers() - set neighbour peer (NAC client)
5014  * address for smart mesh filtering
5015  * @vdev_handle: virtual device object
5016  * @cmd: Add/Del command
5017  * @macaddr: nac client mac address
5018  *
5019  * Return: 1 on success, 0 on failure
5020  */
5021 static int dp_update_filter_neighbour_peers(struct cdp_vdev *vdev_handle,
5022 					    uint32_t cmd, uint8_t *macaddr)
5023 {
5024 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
5025 	struct dp_pdev *pdev = vdev->pdev;
5026 	struct dp_neighbour_peer *peer = NULL;
5027 
5028 	if (!macaddr)
5029 		goto fail0;
5030 
5031 	/* Store address of NAC (neighbour peer) which will be checked
5032 	 * against TA of received packets.
5033 	 */
5034 	if (cmd == DP_NAC_PARAM_ADD) {
5035 		peer = (struct dp_neighbour_peer *) qdf_mem_malloc(
5036 				sizeof(*peer));
5037 
5038 		if (!peer) {
5039 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
5040 				FL("DP neighbour peer node memory allocation failed"));
5041 			goto fail0;
5042 		}
5043 
5044 		qdf_mem_copy(&peer->neighbour_peers_macaddr.raw[0],
5045 			macaddr, DP_MAC_ADDR_LEN);
5046 		peer->vdev = vdev;
5047 
5048 		qdf_spin_lock_bh(&pdev->neighbour_peer_mutex);
5049 
5050 		/* add this neighbour peer into the list */
5051 		TAILQ_INSERT_TAIL(&pdev->neighbour_peers_list, peer,
5052 				neighbour_peer_list_elem);
5053 		qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex);
5054 
5055 		/* first neighbour */
5056 		if (!pdev->neighbour_peers_added) {
5057 			pdev->neighbour_peers_added = true;
5058 			dp_ppdu_ring_cfg(pdev);
5059 		}
5060 		return 1;
5061 
5062 	} else if (cmd == DP_NAC_PARAM_DEL) {
5063 		qdf_spin_lock_bh(&pdev->neighbour_peer_mutex);
5064 		TAILQ_FOREACH(peer, &pdev->neighbour_peers_list,
5065 				neighbour_peer_list_elem) {
5066 			if (!qdf_mem_cmp(&peer->neighbour_peers_macaddr.raw[0],
5067 				macaddr, DP_MAC_ADDR_LEN)) {
5068 				/* delete this peer from the list */
5069 				TAILQ_REMOVE(&pdev->neighbour_peers_list,
5070 					peer, neighbour_peer_list_elem);
5071 				qdf_mem_free(peer);
5072 				break;
5073 			}
5074 		}
5075 		/* last neighbour deleted */
5076 		if (TAILQ_EMPTY(&pdev->neighbour_peers_list)) {
5077 			pdev->neighbour_peers_added = false;
5078 			dp_ppdu_ring_cfg(pdev);
5079 		}
5080 
5081 		qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex);
5082 
5083 		if (!pdev->mcopy_mode && !pdev->neighbour_peers_added &&
5084 		    !pdev->enhanced_stats_en)
5085 			dp_ppdu_ring_reset(pdev);
5086 		return 1;
5087 
5088 	}
5089 
5090 fail0:
5091 	return 0;
5092 }
5093 
5094 /*
5095  * dp_get_sec_type() - Get the security type
5096  * @peer:		Datapath peer handle
5097  * @sec_idx:    Security id (mcast, ucast)
5098  *
5099  * Return: Security type
5100  */
5101 static int dp_get_sec_type(struct cdp_peer *peer, uint8_t sec_idx)
5102 {
5103 	struct dp_peer *dpeer = (struct dp_peer *)peer;
5104 
5105 	return dpeer->security[sec_idx].sec_type;
5106 }
5107 
5108 /*
5109  * dp_peer_authorize() - authorize txrx peer
5110  * @peer_handle:		Datapath peer handle
5111  * @authorize: nonzero to mark the peer authorized, zero otherwise
5112  *
5113  */
5114 static void dp_peer_authorize(struct cdp_peer *peer_handle, uint32_t authorize)
5115 {
5116 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
5117 	struct dp_soc *soc;
5118 
5119 	if (peer != NULL) {
5120 		soc = peer->vdev->pdev->soc;
5121 		qdf_spin_lock_bh(&soc->peer_ref_mutex);
5122 		peer->authorize = authorize ? 1 : 0;
5123 		qdf_spin_unlock_bh(&soc->peer_ref_mutex);
5124 	}
5125 }
5126 
5127 static void dp_reset_and_release_peer_mem(struct dp_soc *soc,
5128 					  struct dp_pdev *pdev,
5129 					  struct dp_peer *peer,
5130 					  uint32_t vdev_id)
5131 {
5132 	struct dp_vdev *vdev = NULL;
5133 	struct dp_peer *bss_peer = NULL;
5134 	uint8_t *m_addr = NULL;
5135 
5136 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
5137 	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
5138 		if (vdev->vdev_id == vdev_id)
5139 			break;
5140 	}
5141 	if (!vdev) {
5142 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
5143 			  "vdev is NULL");
5144 	} else {
5145 		if (vdev->vap_bss_peer == peer)
5146 			vdev->vap_bss_peer = NULL;
5147 		m_addr = peer->mac_addr.raw;
5148 		if (soc->cdp_soc.ol_ops->peer_unref_delete)
5149 			soc->cdp_soc.ol_ops->peer_unref_delete(pdev->ctrl_pdev,
5150 				m_addr, vdev->mac_addr.raw, vdev->opmode,
5151 				peer->ctrl_peer, NULL);
5152 
5153 		if (vdev && vdev->vap_bss_peer) {
5154 			bss_peer = vdev->vap_bss_peer;
5155 			DP_UPDATE_STATS(vdev, peer);
5156 		}
5157 	}
5158 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
5159 
5160 	/*
5161 	 * Peer AST list hast to be empty here
5162 	 * Peer AST list has to be empty here
5163 	DP_AST_ASSERT(TAILQ_EMPTY(&peer->ast_entry_list));
5164 
5165 	qdf_mem_free(peer);
5166 }
5167 
5168 /**
5169  * dp_delete_pending_vdev() - check and process vdev delete
5170  * @pdev: DP specific pdev pointer
5171  * @vdev: DP specific vdev pointer
5172  * @vdev_id: vdev id corresponding to vdev
5173  *
5174  * This API does following:
5175  * 1) It releases tx flow pools buffers as vdev is
5176  *    going down and no peers are associated.
5177  * 2) It also detaches vdev before cleaning vdev (struct dp_vdev) memory
5178  */
5179 static void dp_delete_pending_vdev(struct dp_pdev *pdev, struct dp_vdev *vdev,
5180 				   uint8_t vdev_id)
5181 {
5182 	ol_txrx_vdev_delete_cb vdev_delete_cb = NULL;
5183 	void *vdev_delete_context = NULL;
5184 
5185 	vdev_delete_cb = vdev->delete.callback;
5186 	vdev_delete_context = vdev->delete.context;
5187 
5188 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
5189 		  FL("deleting vdev object %pK (%pM)- its last peer is done"),
5190 		  vdev, vdev->mac_addr.raw);
5191 	/* all peers are gone, go ahead and delete it */
5192 	dp_tx_flow_pool_unmap_handler(pdev, vdev_id,
5193 			FLOW_TYPE_VDEV, vdev_id);
5194 	dp_tx_vdev_detach(vdev);
5195 
5196 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
5197 	TAILQ_REMOVE(&pdev->vdev_list, vdev, vdev_list_elem);
5198 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
5199 
5200 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
5201 		  FL("deleting vdev object %pK (%pM)"),
5202 		  vdev, vdev->mac_addr.raw);
5203 	qdf_mem_free(vdev);
5204 	vdev = NULL;
5205 
5206 	if (vdev_delete_cb)
5207 		vdev_delete_cb(vdev_delete_context);
5208 }
5209 
5210 /*
5211  * dp_peer_unref_delete() - unref and delete peer
5212  * @peer_handle:		Datapath peer handle
5213  *
5214  */
5215 void dp_peer_unref_delete(void *peer_handle)
5216 {
5217 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
5218 	struct dp_vdev *vdev = peer->vdev;
5219 	struct dp_pdev *pdev = vdev->pdev;
5220 	struct dp_soc *soc = pdev->soc;
5221 	struct dp_peer *tmppeer;
5222 	int found = 0;
5223 	uint16_t peer_id;
5224 	uint16_t vdev_id;
5225 	bool delete_vdev;
5226 
5227 	/*
5228 	 * Hold the lock all the way from checking if the peer ref count
5229 	 * is zero until the peer references are removed from the hash
5230 	 * table and vdev list (if the peer ref count is zero).
5231 	 * This protects against a new HL tx operation starting to use the
5232 	 * peer object just after this function concludes it's done being used.
5233 	 * Furthermore, the lock needs to be held while checking whether the
5234 	 * vdev's list of peers is empty, to make sure that list is not modified
5235 	 * concurrently with the empty check.
5236 	 */
5237 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
5238 	if (qdf_atomic_dec_and_test(&peer->ref_cnt)) {
5239 		peer_id = peer->peer_ids[0];
5240 		vdev_id = vdev->vdev_id;
5241 
5242 		/*
5243 		 * Make sure that the reference to the peer in
5244 		 * peer object map is removed
5245 		 */
5246 		if (peer_id != HTT_INVALID_PEER)
5247 			soc->peer_id_to_obj_map[peer_id] = NULL;
5248 
5249 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
5250 			"Deleting peer %pK (%pM)", peer, peer->mac_addr.raw);
5251 
5252 		/* remove the reference to the peer from the hash table */
5253 		dp_peer_find_hash_remove(soc, peer);
5254 
5255 		qdf_spin_lock_bh(&soc->ast_lock);
5256 		if (peer->self_ast_entry) {
5257 			dp_peer_del_ast(soc, peer->self_ast_entry);
5258 			peer->self_ast_entry = NULL;
5259 		}
5260 		qdf_spin_unlock_bh(&soc->ast_lock);
5261 
5262 		TAILQ_FOREACH(tmppeer, &peer->vdev->peer_list, peer_list_elem) {
5263 			if (tmppeer == peer) {
5264 				found = 1;
5265 				break;
5266 			}
5267 		}
5268 
5269 		if (found) {
5270 			TAILQ_REMOVE(&peer->vdev->peer_list, peer,
5271 				peer_list_elem);
5272 		} else {
5273 			/*Ignoring the remove operation as peer not found*/
5274 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
5275 				  "peer:%pK not found in vdev:%pK peerlist:%pK",
5276 				  peer, vdev, &peer->vdev->peer_list);
5277 		}
5278 
5279 		/* cleanup the peer data */
5280 		dp_peer_cleanup(vdev, peer);
5281 
5282 		/* check whether the parent vdev has no peers left */
5283 		if (TAILQ_EMPTY(&vdev->peer_list)) {
5284 			/*
5285 			 * capture vdev delete pending flag's status
5286 			 * while holding peer_ref_mutex lock
5287 			 */
5288 			delete_vdev = vdev->delete.pending;
5289 			/*
5290 			 * Now that there are no references to the peer, we can
5291 			 * release the peer reference lock.
5292 			 */
5293 			qdf_spin_unlock_bh(&soc->peer_ref_mutex);
5294 			/*
5295 			 * Check if the parent vdev was waiting for its peers
5296 			 * to be deleted, in order for it to be deleted too.
5297 			 */
5298 			if (delete_vdev)
5299 				dp_delete_pending_vdev(pdev, vdev, vdev_id);
5300 		} else {
5301 			qdf_spin_unlock_bh(&soc->peer_ref_mutex);
5302 		}
5303 		dp_reset_and_release_peer_mem(soc, pdev, peer, vdev_id);
5304 
5305 	} else {
5306 		qdf_spin_unlock_bh(&soc->peer_ref_mutex);
5307 	}
5308 }
5309 
5310 /*
 * dp_peer_delete_wifi3() - Delete txrx peer
5312  * @peer_handle: Datapath peer handle
5313  * @bitmap: bitmap indicating special handling of request.
5314  *
5315  */
5316 static void dp_peer_delete_wifi3(void *peer_handle, uint32_t bitmap)
5317 {
5318 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
5319 
5320 	/* redirect the peer's rx delivery function to point to a
5321 	 * discard func
5322 	 */
5323 
5324 	peer->rx_opt_proc = dp_rx_discard;
5325 
	/* Do not set ctrl_peer to NULL for connected sta peers.
5327 	 * We need ctrl_peer to release the reference during dp
5328 	 * peer free. This reference was held for
5329 	 * obj_mgr peer during the creation of dp peer.
5330 	 */
5331 	if (!(peer->vdev && (peer->vdev->opmode != wlan_op_mode_sta) &&
5332 	      !peer->bss_peer))
5333 		peer->ctrl_peer = NULL;
5334 
5335 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
5336 		FL("peer %pK (%pM)"),  peer, peer->mac_addr.raw);
5337 
5338 	dp_local_peer_id_free(peer->vdev->pdev, peer);
5339 	qdf_spinlock_destroy(&peer->peer_info_lock);
5340 
5341 	/*
5342 	 * Remove the reference added during peer_attach.
5343 	 * The peer will still be left allocated until the
5344 	 * PEER_UNMAP message arrives to remove the other
5345 	 * reference, added by the PEER_MAP message.
5346 	 */
5347 	dp_peer_unref_delete(peer_handle);
5348 }
5349 
5350 /*
 * dp_get_vdev_mac_addr_wifi3() - Get vdev MAC address
 * @pvdev: Datapath vdev handle
 *
 * Return: pointer to the vdev's raw MAC address
5354  */
5355 static uint8 *dp_get_vdev_mac_addr_wifi3(struct cdp_vdev *pvdev)
5356 {
5357 	struct dp_vdev *vdev = (struct dp_vdev *)pvdev;
5358 	return vdev->mac_addr.raw;
5359 }
5360 
5361 /*
 * dp_vdev_set_wds() - Enable/disable WDS on the vdev
 * @vdev_handle: DP VDEV handle
 * @val: value to be set
 *
 * Return: 0 on success
5367  */
5368 static int dp_vdev_set_wds(void *vdev_handle, uint32_t val)
5369 {
5370 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
5371 
5372 	vdev->wds_enabled = val;
5373 	return 0;
5374 }
5375 
5376 /*
 * dp_get_vdev_from_vdev_id_wifi3() - Get vdev from vdev_id
 * @dev: Datapath pdev handle
 * @vdev_id: vdev id
 *
 * Return: matching vdev handle, or NULL if not found
5380  */
5381 static struct cdp_vdev *dp_get_vdev_from_vdev_id_wifi3(struct cdp_pdev *dev,
5382 						uint8_t vdev_id)
5383 {
5384 	struct dp_pdev *pdev = (struct dp_pdev *)dev;
5385 	struct dp_vdev *vdev = NULL;
5386 
5387 	if (qdf_unlikely(!pdev))
5388 		return NULL;
5389 
5390 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
5391 	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
5392 		if (vdev->vdev_id == vdev_id)
5393 			break;
5394 	}
5395 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
5396 
5397 	return (struct cdp_vdev *)vdev;
5398 }
5399 
5400 /*
5401  * dp_get_mon_vdev_from_pdev_wifi3() - Get vdev handle of monitor mode
5402  * @dev: PDEV handle
5403  *
5404  * Return: VDEV handle of monitor mode
5405  */
5406 
5407 static struct cdp_vdev *dp_get_mon_vdev_from_pdev_wifi3(struct cdp_pdev *dev)
5408 {
5409 	struct dp_pdev *pdev = (struct dp_pdev *)dev;
5410 
5411 	if (qdf_unlikely(!pdev))
5412 		return NULL;
5413 
5414 	return (struct cdp_vdev *)pdev->monitor_vdev;
5415 }
5416 
5417 static int dp_get_opmode(struct cdp_vdev *vdev_handle)
5418 {
5419 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
5420 
5421 	return vdev->opmode;
5422 }
5423 
5424 static
5425 void dp_get_os_rx_handles_from_vdev_wifi3(struct cdp_vdev *pvdev,
5426 					  ol_txrx_rx_fp *stack_fn_p,
5427 					  ol_osif_vdev_handle *osif_vdev_p)
5428 {
5429 	struct dp_vdev *vdev = dp_get_dp_vdev_from_cdp_vdev(pvdev);
5430 
5431 	qdf_assert(vdev);
5432 	*stack_fn_p = vdev->osif_rx_stack;
5433 	*osif_vdev_p = vdev->osif_vdev;
5434 }
5435 
5436 static struct cdp_cfg *dp_get_ctrl_pdev_from_vdev_wifi3(struct cdp_vdev *pvdev)
5437 {
5438 	struct dp_vdev *vdev = (struct dp_vdev *)pvdev;
5439 	struct dp_pdev *pdev = vdev->pdev;
5440 
5441 	return (struct cdp_cfg *)pdev->wlan_cfg_ctx;
5442 }
5443 
5444 /**
5445  * dp_monitor_mode_ring_config() - Send the tlv config to fw for monitor buffer
5446  *                                 ring based on target
5447  * @soc: soc handle
5448  * @mac_for_pdev: pdev_id
5449  * @pdev: physical device handle
5450  * @ring_num: mac id
5451  * @htt_tlv_filter: tlv filter
5452  *
5453  * Return: zero on success, non-zero on failure
5454  */
5455 static inline
5456 QDF_STATUS dp_monitor_mode_ring_config(struct dp_soc *soc, uint8_t mac_for_pdev,
5457 				       struct dp_pdev *pdev, uint8_t ring_num,
5458 				       struct htt_rx_ring_tlv_filter htt_tlv_filter)
5459 {
5460 	QDF_STATUS status;
5461 
5462 	if (soc->wlan_cfg_ctx->rxdma1_enable)
5463 		status = htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
5464 					     pdev->rxdma_mon_buf_ring[ring_num]
5465 					     .hal_srng,
5466 					     RXDMA_MONITOR_BUF, RX_BUFFER_SIZE,
5467 					     &htt_tlv_filter);
5468 	else
5469 		status = htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
5470 					     pdev->rx_mac_buf_ring[ring_num]
5471 					     .hal_srng,
5472 					     RXDMA_BUF, RX_BUFFER_SIZE,
5473 					     &htt_tlv_filter);
5474 
5475 	return status;
5476 }
5477 
5478 /**
5479  * dp_reset_monitor_mode() - Disable monitor mode
5480  * @pdev_handle: Datapath PDEV handle
5481  *
5482  * Return: 0 on success, not 0 on failure
5483  */
5484 static QDF_STATUS dp_reset_monitor_mode(struct cdp_pdev *pdev_handle)
5485 {
5486 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
5487 	struct htt_rx_ring_tlv_filter htt_tlv_filter;
5488 	struct dp_soc *soc = pdev->soc;
5489 	uint8_t pdev_id;
5490 	int mac_id;
5491 	QDF_STATUS status = QDF_STATUS_SUCCESS;
5492 
5493 	pdev_id = pdev->pdev_id;
5494 	soc = pdev->soc;
5495 
5496 	qdf_spin_lock_bh(&pdev->mon_lock);
5497 
5498 	qdf_mem_set(&(htt_tlv_filter), sizeof(htt_tlv_filter), 0x0);
5499 
5500 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
5501 		int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, pdev_id);
5502 
5503 		status = dp_monitor_mode_ring_config(soc, mac_for_pdev,
5504 						     pdev, mac_id,
5505 						     htt_tlv_filter);
5506 
5507 		if (status != QDF_STATUS_SUCCESS) {
5508 			dp_err("Failed to send tlv filter for monitor mode rings");
5509 			return status;
5510 		}
5511 
5512 		htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
5513 			    pdev->rxdma_mon_status_ring[mac_id].hal_srng,
5514 			    RXDMA_MONITOR_STATUS, RX_BUFFER_SIZE,
5515 			    &htt_tlv_filter);
5516 	}
5517 
5518 	pdev->monitor_vdev = NULL;
5519 	pdev->mcopy_mode = 0;
5520 	pdev->monitor_configured = false;
5521 
5522 	qdf_spin_unlock_bh(&pdev->mon_lock);
5523 
5524 	return QDF_STATUS_SUCCESS;
5525 }
5526 
5527 /**
5528  * dp_set_nac() - set peer_nac
5529  * @peer_handle: Datapath PEER handle
5530  *
5531  * Return: void
5532  */
5533 static void dp_set_nac(struct cdp_peer *peer_handle)
5534 {
5535 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
5536 
5537 	peer->nac = 1;
5538 }
5539 
5540 /**
5541  * dp_get_tx_pending() - read pending tx
5542  * @pdev_handle: Datapath PDEV handle
5543  *
5544  * Return: outstanding tx
5545  */
5546 static int dp_get_tx_pending(struct cdp_pdev *pdev_handle)
5547 {
5548 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
5549 
5550 	return qdf_atomic_read(&pdev->num_tx_outstanding);
5551 }
5552 
5553 /**
5554  * dp_get_peer_mac_from_peer_id() - get peer mac
5555  * @pdev_handle: Datapath PDEV handle
5556  * @peer_id: Peer ID
5557  * @peer_mac: MAC addr of PEER
5558  *
5559  * Return: void
5560  */
5561 static void dp_get_peer_mac_from_peer_id(struct cdp_pdev *pdev_handle,
5562 	uint32_t peer_id, uint8_t *peer_mac)
5563 {
5564 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
5565 	struct dp_peer *peer;
5566 
5567 	if (pdev && peer_mac) {
5568 		peer = dp_peer_find_by_id(pdev->soc, (uint16_t)peer_id);
5569 		if (peer) {
5570 			qdf_mem_copy(peer_mac, peer->mac_addr.raw,
5571 				     DP_MAC_ADDR_LEN);
5572 			dp_peer_unref_del_find_by_id(peer);
5573 		}
5574 	}
5575 }
5576 
5577 /**
5578  * dp_pdev_configure_monitor_rings() - configure monitor rings
 * @pdev: Datapath PDEV handle
 *
 * Return: QDF_STATUS_SUCCESS on success, error status on failure
5582  */
5583 static QDF_STATUS dp_pdev_configure_monitor_rings(struct dp_pdev *pdev)
5584 {
5585 	struct htt_rx_ring_tlv_filter htt_tlv_filter;
5586 	struct dp_soc *soc;
5587 	uint8_t pdev_id;
5588 	int mac_id;
5589 	QDF_STATUS status = QDF_STATUS_SUCCESS;
5590 
5591 	pdev_id = pdev->pdev_id;
5592 	soc = pdev->soc;
5593 
5594 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
5595 		"MODE[%x] FP[%02x|%02x|%02x] MO[%02x|%02x|%02x]",
5596 		pdev->mon_filter_mode, pdev->fp_mgmt_filter,
5597 		pdev->fp_ctrl_filter, pdev->fp_data_filter,
5598 		pdev->mo_mgmt_filter, pdev->mo_ctrl_filter,
5599 		pdev->mo_data_filter);
5600 
5601 	qdf_mem_set(&(htt_tlv_filter), sizeof(htt_tlv_filter), 0x0);
5602 
5603 	htt_tlv_filter.mpdu_start = 1;
5604 	htt_tlv_filter.msdu_start = 1;
5605 	htt_tlv_filter.packet = 1;
5606 	htt_tlv_filter.msdu_end = 1;
5607 	htt_tlv_filter.mpdu_end = 1;
5608 	htt_tlv_filter.packet_header = 1;
5609 	htt_tlv_filter.attention = 1;
5610 	htt_tlv_filter.ppdu_start = 0;
5611 	htt_tlv_filter.ppdu_end = 0;
5612 	htt_tlv_filter.ppdu_end_user_stats = 0;
5613 	htt_tlv_filter.ppdu_end_user_stats_ext = 0;
5614 	htt_tlv_filter.ppdu_end_status_done = 0;
5615 	htt_tlv_filter.header_per_msdu = 1;
5616 	htt_tlv_filter.enable_fp =
5617 		(pdev->mon_filter_mode & MON_FILTER_PASS) ? 1 : 0;
5618 	htt_tlv_filter.enable_md = 0;
5619 	htt_tlv_filter.enable_mo =
5620 		(pdev->mon_filter_mode & MON_FILTER_OTHER) ? 1 : 0;
5621 	htt_tlv_filter.fp_mgmt_filter = pdev->fp_mgmt_filter;
5622 	htt_tlv_filter.fp_ctrl_filter = pdev->fp_ctrl_filter;
5623 	if (pdev->mcopy_mode)
5624 		htt_tlv_filter.fp_data_filter = 0;
5625 	else
5626 		htt_tlv_filter.fp_data_filter = pdev->fp_data_filter;
5627 	htt_tlv_filter.mo_mgmt_filter = pdev->mo_mgmt_filter;
5628 	htt_tlv_filter.mo_ctrl_filter = pdev->mo_ctrl_filter;
5629 	htt_tlv_filter.mo_data_filter = pdev->mo_data_filter;
5630 
5631 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
5632 		int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, pdev_id);
5633 
5634 		status = dp_monitor_mode_ring_config(soc, mac_for_pdev,
5635 						     pdev, mac_id,
5636 						     htt_tlv_filter);
5637 
5638 		if (status != QDF_STATUS_SUCCESS) {
5639 			dp_err("Failed to send tlv filter for monitor mode rings");
5640 			return status;
5641 		}
5642 	}
5643 
5644 	qdf_mem_set(&(htt_tlv_filter), sizeof(htt_tlv_filter), 0x0);
5645 
5646 	htt_tlv_filter.mpdu_start = 1;
5647 	htt_tlv_filter.msdu_start = 0;
5648 	htt_tlv_filter.packet = 0;
5649 	htt_tlv_filter.msdu_end = 0;
5650 	htt_tlv_filter.mpdu_end = 0;
5651 	htt_tlv_filter.attention = 0;
5652 	htt_tlv_filter.ppdu_start = 1;
5653 	htt_tlv_filter.ppdu_end = 1;
5654 	htt_tlv_filter.ppdu_end_user_stats = 1;
5655 	htt_tlv_filter.ppdu_end_user_stats_ext = 1;
5656 	htt_tlv_filter.ppdu_end_status_done = 1;
5657 	htt_tlv_filter.enable_fp = 1;
5658 	htt_tlv_filter.enable_md = 0;
5659 	htt_tlv_filter.enable_mo = 1;
5660 	if (pdev->mcopy_mode) {
5661 		htt_tlv_filter.packet_header = 1;
5662 	}
5663 	htt_tlv_filter.fp_mgmt_filter = FILTER_MGMT_ALL;
5664 	htt_tlv_filter.fp_ctrl_filter = FILTER_CTRL_ALL;
5665 	htt_tlv_filter.fp_data_filter = FILTER_DATA_ALL;
5666 	htt_tlv_filter.mo_mgmt_filter = FILTER_MGMT_ALL;
5667 	htt_tlv_filter.mo_ctrl_filter = FILTER_CTRL_ALL;
5668 	htt_tlv_filter.mo_data_filter = FILTER_DATA_ALL;
5669 
5670 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
5671 		int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id,
5672 						pdev->pdev_id);
5673 
5674 		htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
5675 			pdev->rxdma_mon_status_ring[mac_id].hal_srng,
5676 			RXDMA_MONITOR_STATUS, RX_BUFFER_SIZE, &htt_tlv_filter);
5677 	}
5678 
5679 	return status;
5680 }
5681 
5682 /**
5683  * dp_vdev_set_monitor_mode() - Set DP VDEV to monitor mode
5684  * @vdev_handle: Datapath VDEV handle
 * @smart_monitor: Flag to denote if it is smart monitor mode
5686  *
5687  * Return: 0 on success, not 0 on failure
5688  */
5689 static QDF_STATUS dp_vdev_set_monitor_mode(struct cdp_vdev *vdev_handle,
5690 					   uint8_t smart_monitor)
5691 {
5692 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
5693 	struct dp_pdev *pdev;
5694 
5695 	qdf_assert(vdev);
5696 
5697 	pdev = vdev->pdev;
5698 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_WARN,
5699 		  "pdev=%pK, pdev_id=%d, soc=%pK vdev=%pK\n",
5700 		  pdev, pdev->pdev_id, pdev->soc, vdev);
5701 
	/* Check if current pdev's monitor_vdev exists */
5703 	if (pdev->monitor_configured) {
5704 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
5705 			  "monitor vap already created vdev=%pK\n", vdev);
5706 		qdf_assert(vdev);
5707 		return QDF_STATUS_E_RESOURCES;
5708 	}
5709 
5710 	pdev->monitor_vdev = vdev;
5711 	pdev->monitor_configured = true;
5712 
5713 	/* If smart monitor mode, do not configure monitor ring */
5714 	if (smart_monitor)
5715 		return QDF_STATUS_SUCCESS;
5716 
5717 	return dp_pdev_configure_monitor_rings(pdev);
5718 }
5719 
5720 /**
5721  * dp_pdev_set_advance_monitor_filter() - Set DP PDEV monitor filter
5722  * @pdev_handle: Datapath PDEV handle
5723  * @filter_val: Flag to select Filter for monitor mode
5724  * Return: 0 on success, not 0 on failure
5725  */
5726 static QDF_STATUS
5727 dp_pdev_set_advance_monitor_filter(struct cdp_pdev *pdev_handle,
5728 				   struct cdp_monitor_filter *filter_val)
5729 {
	/* Many monitor VAPs can exist in a system but only one can be up at
	 * any time
	 */
5733 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
5734 	struct dp_vdev *vdev = pdev->monitor_vdev;
5735 	struct htt_rx_ring_tlv_filter htt_tlv_filter;
5736 	struct dp_soc *soc;
5737 	uint8_t pdev_id;
5738 	int mac_id;
5739 	QDF_STATUS status = QDF_STATUS_SUCCESS;
5740 
5741 	pdev_id = pdev->pdev_id;
5742 	soc = pdev->soc;
5743 
5744 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_WARN,
5745 		"pdev=%pK, pdev_id=%d, soc=%pK vdev=%pK",
5746 		pdev, pdev_id, soc, vdev);
5747 
	/* Check if current pdev's monitor_vdev exists */
5749 	if (!pdev->monitor_vdev) {
5750 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
5751 			"vdev=%pK", vdev);
5752 		qdf_assert(vdev);
5753 	}
5754 
5755 	/* update filter mode, type in pdev structure */
5756 	pdev->mon_filter_mode = filter_val->mode;
5757 	pdev->fp_mgmt_filter = filter_val->fp_mgmt;
5758 	pdev->fp_ctrl_filter = filter_val->fp_ctrl;
5759 	pdev->fp_data_filter = filter_val->fp_data;
5760 	pdev->mo_mgmt_filter = filter_val->mo_mgmt;
5761 	pdev->mo_ctrl_filter = filter_val->mo_ctrl;
5762 	pdev->mo_data_filter = filter_val->mo_data;
5763 
5764 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
5765 		"MODE[%x] FP[%02x|%02x|%02x] MO[%02x|%02x|%02x]",
5766 		pdev->mon_filter_mode, pdev->fp_mgmt_filter,
5767 		pdev->fp_ctrl_filter, pdev->fp_data_filter,
5768 		pdev->mo_mgmt_filter, pdev->mo_ctrl_filter,
5769 		pdev->mo_data_filter);
5770 
5771 	qdf_mem_set(&(htt_tlv_filter), sizeof(htt_tlv_filter), 0x0);
5772 
5773 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
5774 		int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, pdev_id);
5775 
5776 		status = dp_monitor_mode_ring_config(soc, mac_for_pdev,
5777 						     pdev, mac_id,
5778 						     htt_tlv_filter);
5779 
5780 		if (status != QDF_STATUS_SUCCESS) {
5781 			dp_err("Failed to send tlv filter for monitor mode rings");
5782 			return status;
5783 		}
5784 
5785 		htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
5786 			pdev->rxdma_mon_status_ring[mac_id].hal_srng,
5787 			RXDMA_MONITOR_STATUS, RX_BUFFER_SIZE, &htt_tlv_filter);
5788 	}
5789 
5790 	htt_tlv_filter.mpdu_start = 1;
5791 	htt_tlv_filter.msdu_start = 1;
5792 	htt_tlv_filter.packet = 1;
5793 	htt_tlv_filter.msdu_end = 1;
5794 	htt_tlv_filter.mpdu_end = 1;
5795 	htt_tlv_filter.packet_header = 1;
5796 	htt_tlv_filter.attention = 1;
5797 	htt_tlv_filter.ppdu_start = 0;
5798 	htt_tlv_filter.ppdu_end = 0;
5799 	htt_tlv_filter.ppdu_end_user_stats = 0;
5800 	htt_tlv_filter.ppdu_end_user_stats_ext = 0;
5801 	htt_tlv_filter.ppdu_end_status_done = 0;
5802 	htt_tlv_filter.header_per_msdu = 1;
5803 	htt_tlv_filter.enable_fp =
5804 		(pdev->mon_filter_mode & MON_FILTER_PASS) ? 1 : 0;
5805 	htt_tlv_filter.enable_md = 0;
5806 	htt_tlv_filter.enable_mo =
5807 		(pdev->mon_filter_mode & MON_FILTER_OTHER) ? 1 : 0;
5808 	htt_tlv_filter.fp_mgmt_filter = pdev->fp_mgmt_filter;
5809 	htt_tlv_filter.fp_ctrl_filter = pdev->fp_ctrl_filter;
5810 	if (pdev->mcopy_mode)
5811 		htt_tlv_filter.fp_data_filter = 0;
5812 	else
5813 		htt_tlv_filter.fp_data_filter = pdev->fp_data_filter;
5814 	htt_tlv_filter.mo_mgmt_filter = pdev->mo_mgmt_filter;
5815 	htt_tlv_filter.mo_ctrl_filter = pdev->mo_ctrl_filter;
5816 	htt_tlv_filter.mo_data_filter = pdev->mo_data_filter;
5817 
5818 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
5819 		int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, pdev_id);
5820 
5821 		status = dp_monitor_mode_ring_config(soc, mac_for_pdev,
5822 						     pdev, mac_id,
5823 						     htt_tlv_filter);
5824 
5825 		if (status != QDF_STATUS_SUCCESS) {
5826 			dp_err("Failed to send tlv filter for monitor mode rings");
5827 			return status;
5828 		}
5829 	}
5830 
5831 	qdf_mem_set(&(htt_tlv_filter), sizeof(htt_tlv_filter), 0x0);
5832 
5833 	htt_tlv_filter.mpdu_start = 1;
5834 	htt_tlv_filter.msdu_start = 0;
5835 	htt_tlv_filter.packet = 0;
5836 	htt_tlv_filter.msdu_end = 0;
5837 	htt_tlv_filter.mpdu_end = 0;
5838 	htt_tlv_filter.attention = 0;
5839 	htt_tlv_filter.ppdu_start = 1;
5840 	htt_tlv_filter.ppdu_end = 1;
5841 	htt_tlv_filter.ppdu_end_user_stats = 1;
5842 	htt_tlv_filter.ppdu_end_user_stats_ext = 1;
5843 	htt_tlv_filter.ppdu_end_status_done = 1;
5844 	htt_tlv_filter.enable_fp = 1;
5845 	htt_tlv_filter.enable_md = 0;
5846 	htt_tlv_filter.enable_mo = 1;
5847 	if (pdev->mcopy_mode) {
5848 		htt_tlv_filter.packet_header = 1;
5849 	}
5850 	htt_tlv_filter.fp_mgmt_filter = FILTER_MGMT_ALL;
5851 	htt_tlv_filter.fp_ctrl_filter = FILTER_CTRL_ALL;
5852 	htt_tlv_filter.fp_data_filter = FILTER_DATA_ALL;
5853 	htt_tlv_filter.mo_mgmt_filter = FILTER_MGMT_ALL;
5854 	htt_tlv_filter.mo_ctrl_filter = FILTER_CTRL_ALL;
5855 	htt_tlv_filter.mo_data_filter = FILTER_DATA_ALL;
5856 
5857 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
5858 		int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id,
5859 						pdev->pdev_id);
5860 
5861 		htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
5862 			pdev->rxdma_mon_status_ring[mac_id].hal_srng,
5863 			RXDMA_MONITOR_STATUS, RX_BUFFER_SIZE, &htt_tlv_filter);
5864 	}
5865 
5866 	return QDF_STATUS_SUCCESS;
5867 }
5868 
5869 /**
5870  * dp_get_pdev_id_frm_pdev() - get pdev_id
5871  * @pdev_handle: Datapath PDEV handle
5872  *
5873  * Return: pdev_id
5874  */
5875 static
5876 uint8_t dp_get_pdev_id_frm_pdev(struct cdp_pdev *pdev_handle)
5877 {
5878 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
5879 
5880 	return pdev->pdev_id;
5881 }
5882 
5883 /**
5884  * dp_pdev_set_chan_noise_floor() - set channel noise floor
5885  * @pdev_handle: Datapath PDEV handle
5886  * @chan_noise_floor: Channel Noise Floor
5887  *
5888  * Return: void
5889  */
5890 static
5891 void dp_pdev_set_chan_noise_floor(struct cdp_pdev *pdev_handle,
5892 				  int16_t chan_noise_floor)
5893 {
5894 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
5895 
5896 	pdev->chan_noise_floor = chan_noise_floor;
5897 }
5898 
5899 /**
5900  * dp_vdev_get_filter_ucast_data() - get DP VDEV monitor ucast filter
5901  * @vdev_handle: Datapath VDEV handle
5902  * Return: true on ucast filter flag set
5903  */
5904 static bool dp_vdev_get_filter_ucast_data(struct cdp_vdev *vdev_handle)
5905 {
5906 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
5907 	struct dp_pdev *pdev;
5908 
5909 	pdev = vdev->pdev;
5910 
5911 	if ((pdev->fp_data_filter & FILTER_DATA_UCAST) ||
5912 	    (pdev->mo_data_filter & FILTER_DATA_UCAST))
5913 		return true;
5914 
5915 	return false;
5916 }
5917 
5918 /**
5919  * dp_vdev_get_filter_mcast_data() - get DP VDEV monitor mcast filter
5920  * @vdev_handle: Datapath VDEV handle
5921  * Return: true on mcast filter flag set
5922  */
5923 static bool dp_vdev_get_filter_mcast_data(struct cdp_vdev *vdev_handle)
5924 {
5925 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
5926 	struct dp_pdev *pdev;
5927 
5928 	pdev = vdev->pdev;
5929 
5930 	if ((pdev->fp_data_filter & FILTER_DATA_MCAST) ||
5931 	    (pdev->mo_data_filter & FILTER_DATA_MCAST))
5932 		return true;
5933 
5934 	return false;
5935 }
5936 
5937 /**
5938  * dp_vdev_get_filter_non_data() - get DP VDEV monitor non_data filter
5939  * @vdev_handle: Datapath VDEV handle
5940  * Return: true on non data filter flag set
5941  */
5942 static bool dp_vdev_get_filter_non_data(struct cdp_vdev *vdev_handle)
5943 {
5944 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
5945 	struct dp_pdev *pdev;
5946 
5947 	pdev = vdev->pdev;
5948 
5949 	if ((pdev->fp_mgmt_filter & FILTER_MGMT_ALL) ||
5950 	    (pdev->mo_mgmt_filter & FILTER_MGMT_ALL)) {
5951 		if ((pdev->fp_ctrl_filter & FILTER_CTRL_ALL) ||
5952 		    (pdev->mo_ctrl_filter & FILTER_CTRL_ALL)) {
5953 			return true;
5954 		}
5955 	}
5956 
5957 	return false;
5958 }
5959 
5960 #ifdef MESH_MODE_SUPPORT
5961 void dp_peer_set_mesh_mode(struct cdp_vdev *vdev_hdl, uint32_t val)
5962 {
5963 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_hdl;
5964 
5965 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
5966 		FL("val %d"), val);
5967 	vdev->mesh_vdev = val;
5968 }
5969 
5970 /*
5971  * dp_peer_set_mesh_rx_filter() - to set the mesh rx filter
5972  * @vdev_hdl: virtual device object
5973  * @val: value to be set
5974  *
5975  * Return: void
5976  */
5977 void dp_peer_set_mesh_rx_filter(struct cdp_vdev *vdev_hdl, uint32_t val)
5978 {
5979 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_hdl;
5980 
5981 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
5982 		FL("val %d"), val);
5983 	vdev->mesh_rx_filter = val;
5984 }
5985 #endif
5986 
5987 /*
 * dp_aggregate_pdev_ctrl_frames_stats() - function to aggregate peer stats
 * Current scope is bar received count
 *
 * @pdev: DP_PDEV handle
5992  *
5993  * Return: void
5994  */
5995 #define STATS_PROC_TIMEOUT        (HZ/1000)
5996 
5997 static void
5998 dp_aggregate_pdev_ctrl_frames_stats(struct dp_pdev *pdev)
5999 {
6000 	struct dp_vdev *vdev;
6001 	struct dp_peer *peer;
6002 	uint32_t waitcnt;
6003 
6004 	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
6005 		TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
6006 			if (!peer) {
6007 				QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
6008 					FL("DP Invalid Peer refernce"));
6009 				return;
6010 			}
6011 
6012 			if (peer->delete_in_progress) {
6013 				QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
6014 					FL("DP Peer deletion in progress"));
6015 				continue;
6016 			}
6017 			qdf_atomic_inc(&peer->ref_cnt);
6018 			waitcnt = 0;
6019 			dp_peer_rxtid_stats(peer, dp_rx_bar_stats_cb, pdev);
6020 			while (!(qdf_atomic_read(&(pdev->stats_cmd_complete)))
6021 				&& waitcnt < 10) {
6022 				schedule_timeout_interruptible(
6023 						STATS_PROC_TIMEOUT);
6024 				waitcnt++;
6025 			}
6026 			qdf_atomic_set(&(pdev->stats_cmd_complete), 0);
6027 			dp_peer_unref_delete(peer);
6028 		}
6029 	}
6030 }
6031 
6032 /**
6033  * dp_rx_bar_stats_cb(): BAR received stats callback
6034  * @soc: SOC handle
6035  * @cb_ctxt: Call back context
6036  * @reo_status: Reo status
6037  *
6038  * return: void
6039  */
6040 void dp_rx_bar_stats_cb(struct dp_soc *soc, void *cb_ctxt,
6041 	union hal_reo_status *reo_status)
6042 {
6043 	struct dp_pdev *pdev = (struct dp_pdev *)cb_ctxt;
6044 	struct hal_reo_queue_status *queue_status = &(reo_status->queue_status);
6045 
6046 	if (!qdf_atomic_read(&soc->cmn_init_done))
6047 		return;
6048 
6049 	if (queue_status->header.status != HAL_REO_CMD_SUCCESS) {
		DP_TRACE_STATS(FATAL, "REO stats failure %d\n",
6051 			queue_status->header.status);
6052 		qdf_atomic_set(&(pdev->stats_cmd_complete), 1);
6053 		return;
6054 	}
6055 
6056 	pdev->stats.rx.bar_recv_cnt += queue_status->bar_rcvd_cnt;
6057 	qdf_atomic_set(&(pdev->stats_cmd_complete), 1);
6058 
6059 }
6060 
6061 /**
6062  * dp_aggregate_vdev_stats(): Consolidate stats at VDEV level
6063  * @vdev: DP VDEV handle
6064  *
6065  * return: void
6066  */
6067 void dp_aggregate_vdev_stats(struct dp_vdev *vdev,
6068 			     struct cdp_vdev_stats *vdev_stats)
6069 {
6070 	struct dp_peer *peer = NULL;
6071 	struct dp_soc *soc = NULL;
6072 
6073 	if (!vdev || !vdev->pdev)
6074 		return;
6075 
6076 	soc = vdev->pdev->soc;
6077 
6078 	qdf_mem_copy(vdev_stats, &vdev->stats, sizeof(vdev->stats));
6079 
6080 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
6081 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem)
6082 		dp_update_vdev_stats(vdev_stats, peer);
6083 	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
6084 
6085 #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
6086 	dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, vdev->pdev->soc,
6087 			     vdev_stats, vdev->vdev_id,
6088 			     UPDATE_VDEV_STATS, vdev->pdev->pdev_id);
6089 #endif
6090 }
6091 
6092 /**
6093  * dp_aggregate_pdev_stats(): Consolidate stats at PDEV level
6094  * @pdev: DP PDEV handle
6095  *
6096  * return: void
6097  */
6098 static inline void dp_aggregate_pdev_stats(struct dp_pdev *pdev)
6099 {
6100 	struct dp_vdev *vdev = NULL;
6101 	struct cdp_vdev_stats *vdev_stats =
6102 			qdf_mem_malloc(sizeof(struct cdp_vdev_stats));
6103 
6104 	if (!vdev_stats) {
6105 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
6106 			  "DP alloc failure - unable to get alloc vdev stats");
6107 		return;
6108 	}
6109 
6110 	qdf_mem_set(&(pdev->stats.tx), sizeof(pdev->stats.tx), 0x0);
6111 	qdf_mem_set(&(pdev->stats.rx), sizeof(pdev->stats.rx), 0x0);
6112 	qdf_mem_set(&(pdev->stats.tx_i), sizeof(pdev->stats.tx_i), 0x0);
6113 
6114 	if (pdev->mcopy_mode)
6115 		DP_UPDATE_STATS(pdev, pdev->invalid_peer);
6116 
6117 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
6118 	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
6119 
6120 		dp_aggregate_vdev_stats(vdev, vdev_stats);
6121 		dp_update_pdev_stats(pdev, vdev_stats);
6122 		dp_update_pdev_ingress_stats(pdev, vdev);
6123 	}
6124 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
6125 	qdf_mem_free(vdev_stats);
6126 
6127 #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
6128 	dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, pdev->soc, &pdev->stats,
6129 			     pdev->pdev_id, UPDATE_PDEV_STATS, pdev->pdev_id);
6130 #endif
6131 }
6132 
6133 /**
6134  * dp_vdev_getstats() - get vdev packet level stats
6135  * @vdev_handle: Datapath VDEV handle
6136  * @stats: cdp network device stats structure
6137  *
6138  * Return: void
6139  */
6140 static void dp_vdev_getstats(void *vdev_handle,
6141 		struct cdp_dev_stats *stats)
6142 {
6143 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
6144 	struct cdp_vdev_stats *vdev_stats =
6145 			qdf_mem_malloc(sizeof(struct cdp_vdev_stats));
6146 
6147 	if (!vdev_stats) {
6148 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
6149 			  "DP alloc failure - unable to get alloc vdev stats");
6150 		return;
6151 	}
6152 
6153 	dp_aggregate_vdev_stats(vdev, vdev_stats);
6154 
6155 	stats->tx_packets = vdev_stats->tx_i.rcvd.num;
6156 	stats->tx_bytes = vdev_stats->tx_i.rcvd.bytes;
6157 
6158 	stats->tx_errors = vdev_stats->tx.tx_failed +
6159 		vdev_stats->tx_i.dropped.dropped_pkt.num;
6160 	stats->tx_dropped = stats->tx_errors;
6161 
6162 	stats->rx_packets = vdev_stats->rx.unicast.num +
6163 		vdev_stats->rx.multicast.num +
6164 		vdev_stats->rx.bcast.num;
6165 	stats->rx_bytes = vdev_stats->rx.unicast.bytes +
6166 		vdev_stats->rx.multicast.bytes +
6167 		vdev_stats->rx.bcast.bytes;
6168 
6169 }
6170 
6171 
6172 /**
6173  * dp_pdev_getstats() - get pdev packet level stats
6174  * @pdev_handle: Datapath PDEV handle
6175  * @stats: cdp network device stats structure
6176  *
6177  * Return: void
6178  */
6179 static void dp_pdev_getstats(void *pdev_handle,
6180 		struct cdp_dev_stats *stats)
6181 {
6182 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
6183 
6184 	dp_aggregate_pdev_stats(pdev);
6185 
6186 	stats->tx_packets = pdev->stats.tx_i.rcvd.num;
6187 	stats->tx_bytes = pdev->stats.tx_i.rcvd.bytes;
6188 
6189 	stats->tx_errors = pdev->stats.tx.tx_failed +
6190 		pdev->stats.tx_i.dropped.dropped_pkt.num;
6191 	stats->tx_dropped = stats->tx_errors;
6192 
6193 	stats->rx_packets = pdev->stats.rx.unicast.num +
6194 		pdev->stats.rx.multicast.num +
6195 		pdev->stats.rx.bcast.num;
6196 	stats->rx_bytes = pdev->stats.rx.unicast.bytes +
6197 		pdev->stats.rx.multicast.bytes +
6198 		pdev->stats.rx.bcast.bytes;
6199 }
6200 
6201 /**
6202  * dp_get_device_stats() - get interface level packet stats
6203  * @handle: device handle
6204  * @stats: cdp network device stats structure
6205  * @type: device type pdev/vdev
6206  *
6207  * Return: void
6208  */
6209 static void dp_get_device_stats(void *handle,
6210 		struct cdp_dev_stats *stats, uint8_t type)
6211 {
6212 	switch (type) {
6213 	case UPDATE_VDEV_STATS:
6214 		dp_vdev_getstats(handle, stats);
6215 		break;
6216 	case UPDATE_PDEV_STATS:
6217 		dp_pdev_getstats(handle, stats);
6218 		break;
6219 	default:
6220 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
6221 			"apstats cannot be updated for this input "
6222 			"type %d", type);
6223 		break;
6224 	}
6225 
6226 }
6227 
6228 
6229 /**
6230  * dp_print_pdev_tx_stats(): Print Pdev level TX stats
6231  * @pdev: DP_PDEV Handle
6232  *
6233  * Return:void
6234  */
6235 static inline void
6236 dp_print_pdev_tx_stats(struct dp_pdev *pdev)
6237 {
6238 	uint8_t index = 0;
6239 
6240 	DP_PRINT_STATS("PDEV Tx Stats:\n");
6241 	DP_PRINT_STATS("Received From Stack:");
6242 	DP_PRINT_STATS("	Packets = %d",
6243 			pdev->stats.tx_i.rcvd.num);
6244 	DP_PRINT_STATS("	Bytes = %llu",
6245 			pdev->stats.tx_i.rcvd.bytes);
6246 	DP_PRINT_STATS("Processed:");
6247 	DP_PRINT_STATS("	Packets = %d",
6248 			pdev->stats.tx_i.processed.num);
6249 	DP_PRINT_STATS("	Bytes = %llu",
6250 			pdev->stats.tx_i.processed.bytes);
6251 	DP_PRINT_STATS("Total Completions:");
6252 	DP_PRINT_STATS("	Packets = %u",
6253 			pdev->stats.tx.comp_pkt.num);
6254 	DP_PRINT_STATS("	Bytes = %llu",
6255 			pdev->stats.tx.comp_pkt.bytes);
6256 	DP_PRINT_STATS("Successful Completions:");
6257 	DP_PRINT_STATS("	Packets = %u",
6258 			pdev->stats.tx.tx_success.num);
6259 	DP_PRINT_STATS("	Bytes = %llu",
6260 			pdev->stats.tx.tx_success.bytes);
6261 	DP_PRINT_STATS("Dropped:");
6262 	DP_PRINT_STATS("	Total = %d",
6263 			pdev->stats.tx_i.dropped.dropped_pkt.num);
6264 	DP_PRINT_STATS("	Dma_map_error = %d",
6265 			pdev->stats.tx_i.dropped.dma_error);
6266 	DP_PRINT_STATS("	Ring Full = %d",
6267 			pdev->stats.tx_i.dropped.ring_full);
6268 	DP_PRINT_STATS("	Descriptor Not available = %d",
6269 			pdev->stats.tx_i.dropped.desc_na.num);
6270 	DP_PRINT_STATS("	HW enqueue failed= %d",
6271 			pdev->stats.tx_i.dropped.enqueue_fail);
6272 	DP_PRINT_STATS("	Resources Full = %d",
6273 			pdev->stats.tx_i.dropped.res_full);
6274 	DP_PRINT_STATS("	FW removed Pkts = %u",
6275 		       pdev->stats.tx.dropped.fw_rem.num);
6276 	DP_PRINT_STATS("	FW removed bytes= %llu",
6277 		       pdev->stats.tx.dropped.fw_rem.bytes);
6278 	DP_PRINT_STATS("	FW removed transmitted = %d",
6279 			pdev->stats.tx.dropped.fw_rem_tx);
6280 	DP_PRINT_STATS("	FW removed untransmitted = %d",
6281 			pdev->stats.tx.dropped.fw_rem_notx);
6282 	DP_PRINT_STATS("	FW removed untransmitted fw_reason1 = %d",
6283 			pdev->stats.tx.dropped.fw_reason1);
6284 	DP_PRINT_STATS("	FW removed untransmitted fw_reason2 = %d",
6285 			pdev->stats.tx.dropped.fw_reason2);
6286 	DP_PRINT_STATS("	FW removed untransmitted fw_reason3 = %d",
6287 			pdev->stats.tx.dropped.fw_reason3);
6288 	DP_PRINT_STATS("	Aged Out from msdu/mpdu queues = %d",
6289 			pdev->stats.tx.dropped.age_out);
6290 	DP_PRINT_STATS("	headroom insufficient = %d",
6291 			pdev->stats.tx_i.dropped.headroom_insufficient);
6292 	DP_PRINT_STATS("	Multicast:");
6293 	DP_PRINT_STATS("	Packets: %u",
6294 		       pdev->stats.tx.mcast.num);
6295 	DP_PRINT_STATS("	Bytes: %llu",
6296 		       pdev->stats.tx.mcast.bytes);
6297 	DP_PRINT_STATS("Scatter Gather:");
6298 	DP_PRINT_STATS("	Packets = %d",
6299 			pdev->stats.tx_i.sg.sg_pkt.num);
6300 	DP_PRINT_STATS("	Bytes = %llu",
6301 			pdev->stats.tx_i.sg.sg_pkt.bytes);
6302 	DP_PRINT_STATS("	Dropped By Host = %d",
6303 			pdev->stats.tx_i.sg.dropped_host.num);
6304 	DP_PRINT_STATS("	Dropped By Target = %d",
6305 			pdev->stats.tx_i.sg.dropped_target);
6306 	DP_PRINT_STATS("TSO:");
6307 	DP_PRINT_STATS("	Number of Segments = %d",
6308 			pdev->stats.tx_i.tso.num_seg);
6309 	DP_PRINT_STATS("	Packets = %d",
6310 			pdev->stats.tx_i.tso.tso_pkt.num);
6311 	DP_PRINT_STATS("	Bytes = %llu",
6312 			pdev->stats.tx_i.tso.tso_pkt.bytes);
6313 	DP_PRINT_STATS("	Dropped By Host = %d",
6314 			pdev->stats.tx_i.tso.dropped_host.num);
6315 	DP_PRINT_STATS("Mcast Enhancement:");
6316 	DP_PRINT_STATS("	Packets = %d",
6317 			pdev->stats.tx_i.mcast_en.mcast_pkt.num);
6318 	DP_PRINT_STATS("	Bytes = %llu",
6319 			pdev->stats.tx_i.mcast_en.mcast_pkt.bytes);
6320 	DP_PRINT_STATS("	Dropped: Map Errors = %d",
6321 			pdev->stats.tx_i.mcast_en.dropped_map_error);
6322 	DP_PRINT_STATS("	Dropped: Self Mac = %d",
6323 			pdev->stats.tx_i.mcast_en.dropped_self_mac);
6324 	DP_PRINT_STATS("	Dropped: Send Fail = %d",
6325 			pdev->stats.tx_i.mcast_en.dropped_send_fail);
6326 	DP_PRINT_STATS("	Unicast sent = %d",
6327 			pdev->stats.tx_i.mcast_en.ucast);
6328 	DP_PRINT_STATS("Raw:");
6329 	DP_PRINT_STATS("	Packets = %d",
6330 			pdev->stats.tx_i.raw.raw_pkt.num);
6331 	DP_PRINT_STATS("	Bytes = %llu",
6332 			pdev->stats.tx_i.raw.raw_pkt.bytes);
6333 	DP_PRINT_STATS("	DMA map error = %d",
6334 			pdev->stats.tx_i.raw.dma_map_error);
6335 	DP_PRINT_STATS("Reinjected:");
6336 	DP_PRINT_STATS("	Packets = %d",
6337 			pdev->stats.tx_i.reinject_pkts.num);
6338 	DP_PRINT_STATS("	Bytes = %llu\n",
6339 			pdev->stats.tx_i.reinject_pkts.bytes);
6340 	DP_PRINT_STATS("Inspected:");
6341 	DP_PRINT_STATS("	Packets = %d",
6342 			pdev->stats.tx_i.inspect_pkts.num);
6343 	DP_PRINT_STATS("	Bytes = %llu",
6344 			pdev->stats.tx_i.inspect_pkts.bytes);
6345 	DP_PRINT_STATS("Nawds Multicast:");
6346 	DP_PRINT_STATS("	Packets = %d",
6347 			pdev->stats.tx_i.nawds_mcast.num);
6348 	DP_PRINT_STATS("	Bytes = %llu",
6349 			pdev->stats.tx_i.nawds_mcast.bytes);
6350 	DP_PRINT_STATS("CCE Classified:");
6351 	DP_PRINT_STATS("	CCE Classified Packets: %u",
6352 			pdev->stats.tx_i.cce_classified);
6353 	DP_PRINT_STATS("	RAW CCE Classified Packets: %u",
6354 			pdev->stats.tx_i.cce_classified_raw);
6355 	DP_PRINT_STATS("Mesh stats:");
6356 	DP_PRINT_STATS("	frames to firmware: %u",
6357 			pdev->stats.tx_i.mesh.exception_fw);
6358 	DP_PRINT_STATS("	completions from fw: %u",
6359 			pdev->stats.tx_i.mesh.completion_fw);
6360 	DP_PRINT_STATS("PPDU stats counter");
6361 	for (index = 0; index < CDP_PPDU_STATS_MAX_TAG; index++) {
6362 		DP_PRINT_STATS("	Tag[%d] = %llu", index,
6363 				pdev->stats.ppdu_stats_counter[index]);
6364 	}
6365 
6366 }
6367 
6368 /**
6369  * dp_print_pdev_rx_stats(): Print Pdev level RX stats
6370  * @pdev: DP_PDEV Handle
6371  *
6372  * Return: void
6373  */
6374 static inline void
6375 dp_print_pdev_rx_stats(struct dp_pdev *pdev)
6376 {
6377 	DP_PRINT_STATS("PDEV Rx Stats:\n");
6378 	DP_PRINT_STATS("Received From HW (Per Rx Ring):");
6379 	DP_PRINT_STATS("	Packets = %d %d %d %d",
6380 			pdev->stats.rx.rcvd_reo[0].num,
6381 			pdev->stats.rx.rcvd_reo[1].num,
6382 			pdev->stats.rx.rcvd_reo[2].num,
6383 			pdev->stats.rx.rcvd_reo[3].num);
6384 	DP_PRINT_STATS("	Bytes = %llu %llu %llu %llu",
6385 			pdev->stats.rx.rcvd_reo[0].bytes,
6386 			pdev->stats.rx.rcvd_reo[1].bytes,
6387 			pdev->stats.rx.rcvd_reo[2].bytes,
6388 			pdev->stats.rx.rcvd_reo[3].bytes);
6389 	DP_PRINT_STATS("Replenished:");
6390 	DP_PRINT_STATS("	Packets = %d",
6391 			pdev->stats.replenish.pkts.num);
6392 	DP_PRINT_STATS("	Bytes = %llu",
6393 			pdev->stats.replenish.pkts.bytes);
6394 	DP_PRINT_STATS("	Buffers Added To Freelist = %d",
6395 			pdev->stats.buf_freelist);
6396 	DP_PRINT_STATS("	Low threshold intr = %d",
6397 			pdev->stats.replenish.low_thresh_intrs);
6398 	DP_PRINT_STATS("Dropped:");
6399 	DP_PRINT_STATS("	msdu_not_done = %d",
6400 			pdev->stats.dropped.msdu_not_done);
6401 	DP_PRINT_STATS("        mon_rx_drop = %d",
6402 			pdev->stats.dropped.mon_rx_drop);
6403 	DP_PRINT_STATS("        mec_drop = %d",
6404 		       pdev->stats.rx.mec_drop.num);
6405 	DP_PRINT_STATS("	Bytes = %llu",
6406 		       pdev->stats.rx.mec_drop.bytes);
6407 	DP_PRINT_STATS("Sent To Stack:");
6408 	DP_PRINT_STATS("	Packets = %d",
6409 			pdev->stats.rx.to_stack.num);
6410 	DP_PRINT_STATS("	Bytes = %llu",
6411 			pdev->stats.rx.to_stack.bytes);
6412 	DP_PRINT_STATS("Multicast/Broadcast:");
6413 	DP_PRINT_STATS("	Packets = %d",
6414 			pdev->stats.rx.multicast.num);
6415 	DP_PRINT_STATS("	Bytes = %llu",
6416 			pdev->stats.rx.multicast.bytes);
6417 	DP_PRINT_STATS("Errors:");
6418 	DP_PRINT_STATS("	Rxdma Ring Un-inititalized = %d",
6419 			pdev->stats.replenish.rxdma_err);
6420 	DP_PRINT_STATS("	Desc Alloc Failed: = %d",
6421 			pdev->stats.err.desc_alloc_fail);
6422 	DP_PRINT_STATS("	IP checksum error = %d",
6423 		       pdev->stats.err.ip_csum_err);
6424 	DP_PRINT_STATS("	TCP/UDP checksum error = %d",
6425 		       pdev->stats.err.tcp_udp_csum_err);
6426 
6427 	/* Get bar_recv_cnt */
6428 	dp_aggregate_pdev_ctrl_frames_stats(pdev);
6429 	DP_PRINT_STATS("BAR Received Count: = %d",
6430 			pdev->stats.rx.bar_recv_cnt);
6431 
6432 }
6433 
6434 /**
6435  * dp_print_pdev_rx_mon_stats(): Print Pdev level RX monitor stats
6436  * @pdev: DP_PDEV Handle
6437  *
6438  * Return: void
6439  */
6440 static inline void
6441 dp_print_pdev_rx_mon_stats(struct dp_pdev *pdev)
6442 {
6443 	struct cdp_pdev_mon_stats *rx_mon_stats;
6444 
6445 	rx_mon_stats = &pdev->rx_mon_stats;
6446 
6447 	DP_PRINT_STATS("PDEV Rx Monitor Stats:\n");
6448 
6449 	dp_rx_mon_print_dbg_ppdu_stats(rx_mon_stats);
6450 
6451 	DP_PRINT_STATS("status_ppdu_done_cnt = %d",
6452 		       rx_mon_stats->status_ppdu_done);
6453 	DP_PRINT_STATS("dest_ppdu_done_cnt = %d",
6454 		       rx_mon_stats->dest_ppdu_done);
6455 	DP_PRINT_STATS("dest_mpdu_done_cnt = %d",
6456 		       rx_mon_stats->dest_mpdu_done);
6457 	DP_PRINT_STATS("dest_mpdu_drop_cnt = %d",
6458 		       rx_mon_stats->dest_mpdu_drop);
6459 	DP_PRINT_STATS("dup_mon_linkdesc_cnt = %d",
6460 		       rx_mon_stats->dup_mon_linkdesc_cnt);
6461 	DP_PRINT_STATS("dup_mon_buf_cnt = %d",
6462 		       rx_mon_stats->dup_mon_buf_cnt);
6463 }
6464 
6465 /**
 * dp_print_soc_tx_stats(): Print SOC level Tx stats
 * @soc: DP_SOC Handle
6468  *
6469  * Return: void
6470  */
6471 static inline void
6472 dp_print_soc_tx_stats(struct dp_soc *soc)
6473 {
6474 	uint8_t desc_pool_id;
6475 	soc->stats.tx.desc_in_use = 0;
6476 
6477 	DP_PRINT_STATS("SOC Tx Stats:\n");
6478 
6479 	for (desc_pool_id = 0;
6480 	     desc_pool_id < wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
6481 	     desc_pool_id++)
6482 		soc->stats.tx.desc_in_use +=
6483 			soc->tx_desc[desc_pool_id].num_allocated;
6484 
6485 	DP_PRINT_STATS("Tx Descriptors In Use = %d",
6486 			soc->stats.tx.desc_in_use);
6487 	DP_PRINT_STATS("Tx Invalid peer:");
6488 	DP_PRINT_STATS("	Packets = %d",
6489 			soc->stats.tx.tx_invalid_peer.num);
6490 	DP_PRINT_STATS("	Bytes = %llu",
6491 			soc->stats.tx.tx_invalid_peer.bytes);
6492 	DP_PRINT_STATS("Packets dropped due to TCL ring full = %d %d %d",
6493 			soc->stats.tx.tcl_ring_full[0],
6494 			soc->stats.tx.tcl_ring_full[1],
6495 			soc->stats.tx.tcl_ring_full[2]);
6496 
6497 }
6498 /**
 * dp_print_soc_rx_stats(): Print SOC level Rx stats
6500  * @soc: DP_SOC Handle
6501  *
6502  * Return:void
6503  */
6504 static inline void
6505 dp_print_soc_rx_stats(struct dp_soc *soc)
6506 {
6507 	uint32_t i;
6508 	char reo_error[DP_REO_ERR_LENGTH];
6509 	char rxdma_error[DP_RXDMA_ERR_LENGTH];
6510 	uint8_t index = 0;
6511 
6512 	DP_PRINT_STATS("SOC Rx Stats:\n");
6513 	DP_PRINT_STATS("Fragmented packets: %u",
6514 		       soc->stats.rx.rx_frags);
6515 	DP_PRINT_STATS("Reo reinjected packets: %u",
6516 		       soc->stats.rx.reo_reinject);
6517 	DP_PRINT_STATS("Errors:\n");
6518 	DP_PRINT_STATS("Rx Decrypt Errors = %d",
6519 			(soc->stats.rx.err.rxdma_error[HAL_RXDMA_ERR_DECRYPT] +
6520 			soc->stats.rx.err.rxdma_error[HAL_RXDMA_ERR_TKIP_MIC]));
6521 	DP_PRINT_STATS("Invalid RBM = %d",
6522 			soc->stats.rx.err.invalid_rbm);
6523 	DP_PRINT_STATS("Invalid Vdev = %d",
6524 			soc->stats.rx.err.invalid_vdev);
6525 	DP_PRINT_STATS("Invalid Pdev = %d",
6526 			soc->stats.rx.err.invalid_pdev);
6527 	DP_PRINT_STATS("Invalid Peer = %d",
6528 			soc->stats.rx.err.rx_invalid_peer.num);
6529 	DP_PRINT_STATS("HAL Ring Access Fail = %d",
6530 			soc->stats.rx.err.hal_ring_access_fail);
6531 	DP_PRINT_STATS("RX frags: %d", soc->stats.rx.rx_frags);
6532 	DP_PRINT_STATS("RX frag wait: %d", soc->stats.rx.rx_frag_wait);
6533 	DP_PRINT_STATS("RX frag err: %d", soc->stats.rx.rx_frag_err);
6534 	DP_PRINT_STATS("RX HP out_of_sync: %d", soc->stats.rx.hp_oos);
6535 	DP_PRINT_STATS("RX DUP DESC: %d",
6536 		       soc->stats.rx.err.hal_reo_dest_dup);
6537 	DP_PRINT_STATS("RX REL DUP DESC: %d",
6538 		       soc->stats.rx.err.hal_wbm_rel_dup);
6539 
6540 	for (i = 0; i < HAL_RXDMA_ERR_MAX; i++) {
6541 		index += qdf_snprint(&rxdma_error[index],
6542 				DP_RXDMA_ERR_LENGTH - index,
6543 				" %d", soc->stats.rx.err.rxdma_error[i]);
6544 	}
6545 	DP_PRINT_STATS("RXDMA Error (0-31):%s",
6546 			rxdma_error);
6547 
6548 	index = 0;
6549 	for (i = 0; i < HAL_REO_ERR_MAX; i++) {
6550 		index += qdf_snprint(&reo_error[index],
6551 				DP_REO_ERR_LENGTH - index,
6552 				" %d", soc->stats.rx.err.reo_error[i]);
6553 	}
6554 	DP_PRINT_STATS("REO Error(0-14):%s",
6555 			reo_error);
6556 }
6557 
6558 /**
 * dp_srng_get_str_from_hal_ring_type() - Return string name for a ring
 * @ring_type: Ring type
6561  *
6562  * Return: char const pointer
6563  */
6564 static inline const
6565 char *dp_srng_get_str_from_hal_ring_type(enum hal_ring_type ring_type)
6566 {
6567 	switch (ring_type) {
6568 	case REO_DST:
6569 		return "Reo_dst";
6570 	case REO_EXCEPTION:
6571 		return "Reo_exception";
6572 	case REO_CMD:
6573 		return "Reo_cmd";
6574 	case REO_REINJECT:
6575 		return "Reo_reinject";
6576 	case REO_STATUS:
6577 		return "Reo_status";
6578 	case WBM2SW_RELEASE:
6579 		return "wbm2sw_release";
6580 	case TCL_DATA:
6581 		return "tcl_data";
6582 	case TCL_CMD:
6583 		return "tcl_cmd";
6584 	case TCL_STATUS:
6585 		return "tcl_status";
6586 	case SW2WBM_RELEASE:
6587 		return "sw2wbm_release";
6588 	case RXDMA_BUF:
6589 		return "Rxdma_buf";
6590 	case RXDMA_DST:
6591 		return "Rxdma_dst";
6592 	case RXDMA_MONITOR_BUF:
6593 		return "Rxdma_monitor_buf";
6594 	case RXDMA_MONITOR_DESC:
6595 		return "Rxdma_monitor_desc";
6596 	case RXDMA_MONITOR_STATUS:
6597 		return "Rxdma_monitor_status";
6598 	default:
6599 		dp_err("Invalid ring type");
6600 		break;
6601 	}
6602 	return "Invalid";
6603 }
6604 
6605 /**
6606  * dp_print_ring_stat_from_hal(): Print hal level ring stats
6607  * @soc: DP_SOC handle
6608  * @srng: DP_SRNG handle
 * @ring_type: srng src/dst ring
6611  *
6612  * Return: void
6613  */
6614 static void
6615 dp_print_ring_stat_from_hal(struct dp_soc *soc,  struct dp_srng *srng,
6616 			    enum hal_ring_type ring_type)
6617 {
6618 	uint32_t tailp;
6619 	uint32_t headp;
6620 	int32_t hw_headp = -1;
6621 	int32_t hw_tailp = -1;
6622 	const char *ring_name;
6623 	struct hal_soc *hal_soc = (struct hal_soc *)soc->hal_soc;
6624 
6625 	if (soc && srng && srng->hal_srng) {
6626 		ring_name = dp_srng_get_str_from_hal_ring_type(ring_type);
6627 
6628 		hal_get_sw_hptp(soc->hal_soc, srng->hal_srng, &tailp, &headp);
6629 
6630 		DP_PRINT_STATS("%s:SW:Head pointer = %d Tail Pointer = %d\n",
6631 			       ring_name, headp, tailp);
6632 
6633 		hal_get_hw_hptp(hal_soc, srng->hal_srng, &hw_headp,
6634 				&hw_tailp, ring_type);
6635 
6636 		DP_PRINT_STATS("%s:HW:Head pointer = %d Tail Pointer = %d\n",
6637 			       ring_name, hw_headp, hw_tailp);
6638 	}
6639 
6640 }
6641 
6642 /**
6643  * dp_print_mon_ring_stats_from_hal() - Print stat for monitor rings based
6644  *					on target
6645  * @pdev: physical device handle
6646  * @mac_id: mac id
6647  *
6648  * Return: void
6649  */
6650 static inline
6651 void dp_print_mon_ring_stat_from_hal(struct dp_pdev *pdev, uint8_t mac_id)
6652 {
6653 	if (pdev->soc->wlan_cfg_ctx->rxdma1_enable) {
6654 		dp_print_ring_stat_from_hal(pdev->soc,
6655 					    &pdev->rxdma_mon_buf_ring[mac_id],
6656 					    RXDMA_MONITOR_BUF);
6657 		dp_print_ring_stat_from_hal(pdev->soc,
6658 					    &pdev->rxdma_mon_dst_ring[mac_id],
6659 					    RXDMA_MONITOR_DST);
6660 		dp_print_ring_stat_from_hal(pdev->soc,
6661 					    &pdev->rxdma_mon_desc_ring[mac_id],
6662 					    RXDMA_MONITOR_DESC);
6663 	}
6664 
6665 	dp_print_ring_stat_from_hal(pdev->soc,
6666 				    &pdev->rxdma_mon_status_ring[mac_id],
6667 				    RXDMA_MONITOR_STATUS);
6668 }
6669 
6670 /**
6671  * dp_print_ring_stats(): Print tail and head pointer
6672  * @pdev: DP_PDEV handle
6673  *
6674  * Return:void
6675  */
6676 static inline void
6677 dp_print_ring_stats(struct dp_pdev *pdev)
6678 {
6679 	uint32_t i;
6680 	int mac_id;
6681 
6682 	dp_print_ring_stat_from_hal(pdev->soc,
6683 				    &pdev->soc->reo_exception_ring,
6684 				    REO_EXCEPTION);
6685 	dp_print_ring_stat_from_hal(pdev->soc,
6686 				    &pdev->soc->reo_reinject_ring,
6687 				    REO_REINJECT);
6688 	dp_print_ring_stat_from_hal(pdev->soc,
6689 				    &pdev->soc->reo_cmd_ring,
6690 				    REO_CMD);
6691 	dp_print_ring_stat_from_hal(pdev->soc,
6692 				    &pdev->soc->reo_status_ring,
6693 				    REO_STATUS);
6694 	dp_print_ring_stat_from_hal(pdev->soc,
6695 				    &pdev->soc->rx_rel_ring,
6696 				    WBM2SW_RELEASE);
6697 	dp_print_ring_stat_from_hal(pdev->soc,
6698 				    &pdev->soc->tcl_cmd_ring,
6699 				    TCL_CMD);
6700 	dp_print_ring_stat_from_hal(pdev->soc,
6701 				    &pdev->soc->tcl_status_ring,
6702 				    TCL_STATUS);
6703 	dp_print_ring_stat_from_hal(pdev->soc,
6704 				    &pdev->soc->wbm_desc_rel_ring,
6705 				    SW2WBM_RELEASE);
6706 	for (i = 0; i < MAX_REO_DEST_RINGS; i++)
6707 		dp_print_ring_stat_from_hal(pdev->soc,
6708 					    &pdev->soc->reo_dest_ring[i],
6709 					    REO_DST);
6710 
6711 	for (i = 0; i < pdev->soc->num_tcl_data_rings; i++)
6712 		dp_print_ring_stat_from_hal(pdev->soc,
6713 					    &pdev->soc->tcl_data_ring[i],
6714 					    TCL_DATA);
6715 	for (i = 0; i < MAX_TCL_DATA_RINGS; i++)
6716 		dp_print_ring_stat_from_hal(pdev->soc,
6717 					    &pdev->soc->tx_comp_ring[i],
6718 					    WBM2SW_RELEASE);
6719 
6720 	dp_print_ring_stat_from_hal(pdev->soc,
6721 				    &pdev->rx_refill_buf_ring,
6722 				    RXDMA_BUF);
6723 
6724 	dp_print_ring_stat_from_hal(pdev->soc,
6725 				    &pdev->rx_refill_buf_ring2,
6726 				    RXDMA_BUF);
6727 
6728 	for (i = 0; i < MAX_RX_MAC_RINGS; i++)
6729 		dp_print_ring_stat_from_hal(pdev->soc,
6730 					    &pdev->rx_mac_buf_ring[i],
6731 					    RXDMA_BUF);
6732 
6733 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++)
6734 		dp_print_mon_ring_stat_from_hal(pdev, mac_id);
6735 
6736 	for (i = 0; i < NUM_RXDMA_RINGS_PER_PDEV; i++)
6737 		dp_print_ring_stat_from_hal(pdev->soc,
6738 					    &pdev->rxdma_err_dst_ring[i],
6739 					    RXDMA_DST);
6740 
6741 }
6742 
6743 /**
6744  * dp_txrx_host_stats_clr(): Reinitialize the txrx stats
6745  * @vdev: DP_VDEV handle
6746  *
6747  * Return:void
6748  */
6749 static inline void
6750 dp_txrx_host_stats_clr(struct dp_vdev *vdev)
6751 {
6752 	struct dp_peer *peer = NULL;
6753 
6754 	if (!vdev || !vdev->pdev)
6755 		return;
6756 
6757 	DP_STATS_CLR(vdev->pdev);
6758 	DP_STATS_CLR(vdev->pdev->soc);
6759 	DP_STATS_CLR(vdev);
6760 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
6761 		if (!peer)
6762 			return;
6763 		DP_STATS_CLR(peer);
6764 
6765 #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
6766 		dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, vdev->pdev->soc,
6767 				     &peer->stats,  peer->peer_ids[0],
6768 				     UPDATE_PEER_STATS, vdev->pdev->pdev_id);
6769 #endif
6770 	}
6771 
6772 #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
6773 	dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, vdev->pdev->soc,
6774 			     &vdev->stats,  vdev->vdev_id,
6775 			     UPDATE_VDEV_STATS, vdev->pdev->pdev_id);
6776 #endif
6777 }
6778 
6779 /**
6780  * dp_print_common_rates_info(): Print common rate for tx or rx
6781  * @pkt_type_array: rate type array contains rate info
6782  *
6783  * Return:void
6784  */
6785 static inline void
6786 dp_print_common_rates_info(struct cdp_pkt_type *pkt_type_array)
6787 {
6788 	uint8_t mcs, pkt_type;
6789 
6790 	for (pkt_type = 0; pkt_type < DOT11_MAX; pkt_type++) {
6791 		for (mcs = 0; mcs < MAX_MCS; mcs++) {
6792 			if (!dp_rate_string[pkt_type][mcs].valid)
6793 				continue;
6794 
6795 			DP_PRINT_STATS("	%s = %d",
6796 				       dp_rate_string[pkt_type][mcs].mcs_type,
6797 				       pkt_type_array[pkt_type].mcs_count[mcs]);
6798 		}
6799 
6800 		DP_PRINT_STATS("\n");
6801 	}
6802 }
6803 
6804 /**
6805  * dp_print_rx_rates(): Print Rx rate stats
6806  * @vdev: DP_VDEV handle
6807  *
6808  * Return:void
6809  */
6810 static inline void
6811 dp_print_rx_rates(struct dp_vdev *vdev)
6812 {
6813 	struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;
6814 	uint8_t i;
6815 	uint8_t index = 0;
6816 	char nss[DP_NSS_LENGTH];
6817 
6818 	DP_PRINT_STATS("Rx Rate Info:\n");
6819 	dp_print_common_rates_info(pdev->stats.rx.pkt_type);
6820 
6821 
6822 	index = 0;
6823 	for (i = 0; i < SS_COUNT; i++) {
6824 		index += qdf_snprint(&nss[index], DP_NSS_LENGTH - index,
6825 				" %d", pdev->stats.rx.nss[i]);
6826 	}
6827 	DP_PRINT_STATS("NSS(1-8) = %s",
6828 			nss);
6829 
6830 	DP_PRINT_STATS("SGI ="
6831 			" 0.8us %d,"
6832 			" 0.4us %d,"
6833 			" 1.6us %d,"
6834 			" 3.2us %d,",
6835 			pdev->stats.rx.sgi_count[0],
6836 			pdev->stats.rx.sgi_count[1],
6837 			pdev->stats.rx.sgi_count[2],
6838 			pdev->stats.rx.sgi_count[3]);
6839 	DP_PRINT_STATS("BW Counts = 20MHZ %d, 40MHZ %d, 80MHZ %d, 160MHZ %d",
6840 			pdev->stats.rx.bw[0], pdev->stats.rx.bw[1],
6841 			pdev->stats.rx.bw[2], pdev->stats.rx.bw[3]);
6842 	DP_PRINT_STATS("Reception Type ="
6843 			" SU: %d,"
6844 			" MU_MIMO:%d,"
6845 			" MU_OFDMA:%d,"
6846 			" MU_OFDMA_MIMO:%d\n",
6847 			pdev->stats.rx.reception_type[0],
6848 			pdev->stats.rx.reception_type[1],
6849 			pdev->stats.rx.reception_type[2],
6850 			pdev->stats.rx.reception_type[3]);
6851 	DP_PRINT_STATS("Aggregation:\n");
6852 	DP_PRINT_STATS("Number of Msdu's Part of Ampdus = %d",
6853 			pdev->stats.rx.ampdu_cnt);
6854 	DP_PRINT_STATS("Number of Msdu's With No Mpdu Level Aggregation : %d",
6855 			pdev->stats.rx.non_ampdu_cnt);
6856 	DP_PRINT_STATS("Number of Msdu's Part of Amsdu: %d",
6857 			pdev->stats.rx.amsdu_cnt);
6858 	DP_PRINT_STATS("Number of Msdu's With No Msdu Level Aggregation: %d",
6859 			pdev->stats.rx.non_amsdu_cnt);
6860 }
6861 
6862 /**
6863  * dp_print_tx_rates(): Print tx rates
6864  * @vdev: DP_VDEV handle
6865  *
6866  * Return:void
6867  */
6868 static inline void
6869 dp_print_tx_rates(struct dp_vdev *vdev)
6870 {
6871 	struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;
6872 	uint8_t index;
6873 	char nss[DP_NSS_LENGTH];
6874 	int nss_index;
6875 
6876 	DP_PRINT_STATS("Tx Rate Info:\n");
6877 	dp_print_common_rates_info(pdev->stats.tx.pkt_type);
6878 
6879 	DP_PRINT_STATS("SGI ="
6880 			" 0.8us %d"
6881 			" 0.4us %d"
6882 			" 1.6us %d"
6883 			" 3.2us %d",
6884 			pdev->stats.tx.sgi_count[0],
6885 			pdev->stats.tx.sgi_count[1],
6886 			pdev->stats.tx.sgi_count[2],
6887 			pdev->stats.tx.sgi_count[3]);
6888 
6889 	DP_PRINT_STATS("BW Counts = 20MHZ %d, 40MHZ %d, 80MHZ %d, 160MHZ %d",
6890 			pdev->stats.tx.bw[0], pdev->stats.tx.bw[1],
6891 			pdev->stats.tx.bw[2], pdev->stats.tx.bw[3]);
6892 
6893 	index = 0;
6894 	for (nss_index = 0; nss_index < SS_COUNT; nss_index++) {
6895 		index += qdf_snprint(&nss[index], DP_NSS_LENGTH - index,
6896 				" %d", pdev->stats.tx.nss[nss_index]);
6897 	}
6898 
6899 	DP_PRINT_STATS("NSS(1-8) = %s", nss);
6900 	DP_PRINT_STATS("OFDMA = %d", pdev->stats.tx.ofdma);
6901 	DP_PRINT_STATS("STBC = %d", pdev->stats.tx.stbc);
6902 	DP_PRINT_STATS("LDPC = %d", pdev->stats.tx.ldpc);
6903 	DP_PRINT_STATS("Retries = %d", pdev->stats.tx.retries);
6904 	DP_PRINT_STATS("Last ack rssi = %d\n", pdev->stats.tx.last_ack_rssi);
6905 
6906 	DP_PRINT_STATS("Aggregation:\n");
6907 	DP_PRINT_STATS("Number of Msdu's Part of Amsdu = %d",
6908 			pdev->stats.tx.amsdu_cnt);
6909 	DP_PRINT_STATS("Number of Msdu's With No Msdu Level Aggregation = %d",
6910 			pdev->stats.tx.non_amsdu_cnt);
6911 }
6912 
6913 /**
 * dp_print_peer_stats(): Print peer stats
6915  * @peer: DP_PEER handle
6916  *
6917  * return void
6918  */
6919 static inline void dp_print_peer_stats(struct dp_peer *peer)
6920 {
6921 	uint8_t i;
6922 	uint32_t index;
6923 	char nss[DP_NSS_LENGTH];
6924 	DP_PRINT_STATS("Node Tx Stats:\n");
6925 	DP_PRINT_STATS("Total Packet Completions = %d",
6926 			peer->stats.tx.comp_pkt.num);
6927 	DP_PRINT_STATS("Total Bytes Completions = %llu",
6928 			peer->stats.tx.comp_pkt.bytes);
6929 	DP_PRINT_STATS("Success Packets = %d",
6930 			peer->stats.tx.tx_success.num);
6931 	DP_PRINT_STATS("Success Bytes = %llu",
6932 			peer->stats.tx.tx_success.bytes);
6933 	DP_PRINT_STATS("Unicast Success Packets = %d",
6934 			peer->stats.tx.ucast.num);
6935 	DP_PRINT_STATS("Unicast Success Bytes = %llu",
6936 			peer->stats.tx.ucast.bytes);
6937 	DP_PRINT_STATS("Multicast Success Packets = %d",
6938 			peer->stats.tx.mcast.num);
6939 	DP_PRINT_STATS("Multicast Success Bytes = %llu",
6940 			peer->stats.tx.mcast.bytes);
6941 	DP_PRINT_STATS("Broadcast Success Packets = %d",
6942 			peer->stats.tx.bcast.num);
6943 	DP_PRINT_STATS("Broadcast Success Bytes = %llu",
6944 			peer->stats.tx.bcast.bytes);
6945 	DP_PRINT_STATS("Packets Failed = %d",
6946 			peer->stats.tx.tx_failed);
6947 	DP_PRINT_STATS("Packets In OFDMA = %d",
6948 			peer->stats.tx.ofdma);
6949 	DP_PRINT_STATS("Packets In STBC = %d",
6950 			peer->stats.tx.stbc);
6951 	DP_PRINT_STATS("Packets In LDPC = %d",
6952 			peer->stats.tx.ldpc);
6953 	DP_PRINT_STATS("Packet Retries = %d",
6954 			peer->stats.tx.retries);
6955 	DP_PRINT_STATS("MSDU's Part of AMSDU = %d",
6956 			peer->stats.tx.amsdu_cnt);
6957 	DP_PRINT_STATS("Last Packet RSSI = %d",
6958 			peer->stats.tx.last_ack_rssi);
6959 	DP_PRINT_STATS("Dropped At FW: Removed Pkts = %u",
6960 		       peer->stats.tx.dropped.fw_rem.num);
6961 	DP_PRINT_STATS("Dropped At FW: Removed bytes = %llu",
6962 		       peer->stats.tx.dropped.fw_rem.bytes);
6963 	DP_PRINT_STATS("Dropped At FW: Removed transmitted = %d",
6964 			peer->stats.tx.dropped.fw_rem_tx);
6965 	DP_PRINT_STATS("Dropped At FW: Removed Untransmitted = %d",
6966 			peer->stats.tx.dropped.fw_rem_notx);
6967 	DP_PRINT_STATS("Dropped : Age Out = %d",
6968 			peer->stats.tx.dropped.age_out);
6969 	DP_PRINT_STATS("NAWDS : ");
6970 	DP_PRINT_STATS("	Nawds multicast Drop Tx Packet = %d",
6971 			peer->stats.tx.nawds_mcast_drop);
6972 	DP_PRINT_STATS("	Nawds multicast  Tx Packet Count = %d",
6973 			peer->stats.tx.nawds_mcast.num);
6974 	DP_PRINT_STATS("	Nawds multicast  Tx Packet Bytes = %llu",
6975 			peer->stats.tx.nawds_mcast.bytes);
6976 
6977 	DP_PRINT_STATS("Rate Info:");
6978 	dp_print_common_rates_info(peer->stats.tx.pkt_type);
6979 
6980 
6981 	DP_PRINT_STATS("SGI = "
6982 			" 0.8us %d"
6983 			" 0.4us %d"
6984 			" 1.6us %d"
6985 			" 3.2us %d",
6986 			peer->stats.tx.sgi_count[0],
6987 			peer->stats.tx.sgi_count[1],
6988 			peer->stats.tx.sgi_count[2],
6989 			peer->stats.tx.sgi_count[3]);
6990 	DP_PRINT_STATS("Excess Retries per AC ");
6991 	DP_PRINT_STATS("	 Best effort = %d",
6992 			peer->stats.tx.excess_retries_per_ac[0]);
6993 	DP_PRINT_STATS("	 Background= %d",
6994 			peer->stats.tx.excess_retries_per_ac[1]);
6995 	DP_PRINT_STATS("	 Video = %d",
6996 			peer->stats.tx.excess_retries_per_ac[2]);
6997 	DP_PRINT_STATS("	 Voice = %d",
6998 			peer->stats.tx.excess_retries_per_ac[3]);
6999 	DP_PRINT_STATS("BW Counts = 20MHZ %d 40MHZ %d 80MHZ %d 160MHZ %d\n",
7000 			peer->stats.tx.bw[0], peer->stats.tx.bw[1],
7001 			peer->stats.tx.bw[2], peer->stats.tx.bw[3]);
7002 
7003 	index = 0;
7004 	for (i = 0; i < SS_COUNT; i++) {
7005 		index += qdf_snprint(&nss[index], DP_NSS_LENGTH - index,
7006 				" %d", peer->stats.tx.nss[i]);
7007 	}
7008 	DP_PRINT_STATS("NSS(1-8) = %s",
7009 			nss);
7010 
7011 	DP_PRINT_STATS("Aggregation:");
7012 	DP_PRINT_STATS("	Number of Msdu's Part of Amsdu = %d",
7013 			peer->stats.tx.amsdu_cnt);
7014 	DP_PRINT_STATS("	Number of Msdu's With No Msdu Level Aggregation = %d\n",
7015 			peer->stats.tx.non_amsdu_cnt);
7016 
7017 	DP_PRINT_STATS("Bytes and Packets transmitted  in last one sec:");
7018 	DP_PRINT_STATS("	Bytes transmitted in last sec: %d",
7019 		       peer->stats.tx.tx_byte_rate);
7020 	DP_PRINT_STATS("	Data transmitted in last sec: %d",
7021 		       peer->stats.tx.tx_data_rate);
7022 
7023 	DP_PRINT_STATS("Node Rx Stats:");
7024 	DP_PRINT_STATS("Packets Sent To Stack = %d",
7025 			peer->stats.rx.to_stack.num);
7026 	DP_PRINT_STATS("Bytes Sent To Stack = %llu",
7027 			peer->stats.rx.to_stack.bytes);
7028 	for (i = 0; i <  CDP_MAX_RX_RINGS; i++) {
7029 		DP_PRINT_STATS("Ring Id = %d", i);
7030 		DP_PRINT_STATS("	Packets Received = %d",
7031 				peer->stats.rx.rcvd_reo[i].num);
7032 		DP_PRINT_STATS("	Bytes Received = %llu",
7033 				peer->stats.rx.rcvd_reo[i].bytes);
7034 	}
7035 	DP_PRINT_STATS("Multicast Packets Received = %d",
7036 			peer->stats.rx.multicast.num);
7037 	DP_PRINT_STATS("Multicast Bytes Received = %llu",
7038 			peer->stats.rx.multicast.bytes);
7039 	DP_PRINT_STATS("Broadcast Packets Received = %d",
7040 			peer->stats.rx.bcast.num);
7041 	DP_PRINT_STATS("Broadcast Bytes Received = %llu",
7042 			peer->stats.rx.bcast.bytes);
7043 	DP_PRINT_STATS("Intra BSS Packets Received = %d",
7044 			peer->stats.rx.intra_bss.pkts.num);
7045 	DP_PRINT_STATS("Intra BSS Bytes Received = %llu",
7046 			peer->stats.rx.intra_bss.pkts.bytes);
7047 	DP_PRINT_STATS("Raw Packets Received = %d",
7048 			peer->stats.rx.raw.num);
7049 	DP_PRINT_STATS("Raw Bytes Received = %llu",
7050 			peer->stats.rx.raw.bytes);
7051 	DP_PRINT_STATS("Errors: MIC Errors = %d",
7052 			peer->stats.rx.err.mic_err);
7053 	DP_PRINT_STATS("Errors: Decryption Errors = %d",
7054 			peer->stats.rx.err.decrypt_err);
7055 	DP_PRINT_STATS("Msdu's Received With No Mpdu Level Aggregation = %d",
7056 			peer->stats.rx.non_ampdu_cnt);
7057 	DP_PRINT_STATS("Msdu's Received As Part of Ampdu = %d",
7058 			peer->stats.rx.ampdu_cnt);
7059 	DP_PRINT_STATS("Msdu's Received Not Part of Amsdu's = %d",
7060 			peer->stats.rx.non_amsdu_cnt);
7061 	DP_PRINT_STATS("MSDUs Received As Part of Amsdu = %d",
7062 			peer->stats.rx.amsdu_cnt);
7063 	DP_PRINT_STATS("NAWDS : ");
7064 	DP_PRINT_STATS("	Nawds multicast Drop Rx Packet = %d",
7065 			peer->stats.rx.nawds_mcast_drop);
7066 	DP_PRINT_STATS("SGI ="
7067 			" 0.8us %d"
7068 			" 0.4us %d"
7069 			" 1.6us %d"
7070 			" 3.2us %d",
7071 			peer->stats.rx.sgi_count[0],
7072 			peer->stats.rx.sgi_count[1],
7073 			peer->stats.rx.sgi_count[2],
7074 			peer->stats.rx.sgi_count[3]);
7075 	DP_PRINT_STATS("BW Counts = 20MHZ %d 40MHZ %d 80MHZ %d 160MHZ %d",
7076 			peer->stats.rx.bw[0], peer->stats.rx.bw[1],
7077 			peer->stats.rx.bw[2], peer->stats.rx.bw[3]);
7078 	DP_PRINT_STATS("Reception Type ="
7079 			" SU %d,"
7080 			" MU_MIMO %d,"
7081 			" MU_OFDMA %d,"
7082 			" MU_OFDMA_MIMO %d",
7083 			peer->stats.rx.reception_type[0],
7084 			peer->stats.rx.reception_type[1],
7085 			peer->stats.rx.reception_type[2],
7086 			peer->stats.rx.reception_type[3]);
7087 
7088 	dp_print_common_rates_info(peer->stats.rx.pkt_type);
7089 
7090 	index = 0;
7091 	for (i = 0; i < SS_COUNT; i++) {
7092 		index += qdf_snprint(&nss[index], DP_NSS_LENGTH - index,
7093 				" %d", peer->stats.rx.nss[i]);
7094 	}
7095 	DP_PRINT_STATS("NSS(1-8) = %s",
7096 			nss);
7097 
7098 	DP_PRINT_STATS("Aggregation:");
7099 	DP_PRINT_STATS("	Msdu's Part of Ampdu = %d",
7100 			peer->stats.rx.ampdu_cnt);
7101 	DP_PRINT_STATS("	Msdu's With No Mpdu Level Aggregation = %d",
7102 			peer->stats.rx.non_ampdu_cnt);
7103 	DP_PRINT_STATS("	Msdu's Part of Amsdu = %d",
7104 			peer->stats.rx.amsdu_cnt);
7105 	DP_PRINT_STATS("	Msdu's With No Msdu Level Aggregation = %d",
7106 			peer->stats.rx.non_amsdu_cnt);
7107 
7108 	DP_PRINT_STATS("Bytes and Packets received in last one sec:");
7109 	DP_PRINT_STATS("	Bytes received in last sec: %d",
7110 		       peer->stats.rx.rx_byte_rate);
7111 	DP_PRINT_STATS("	Data received in last sec: %d",
7112 		       peer->stats.rx.rx_data_rate);
7113 }
7114 
7115 /*
7116  * dp_get_host_peer_stats()- function to print peer stats
7117  * @pdev_handle: DP_PDEV handle
7118  * @mac_addr: mac address of the peer
7119  *
7120  * Return: void
7121  */
7122 static void
7123 dp_get_host_peer_stats(struct cdp_pdev *pdev_handle, char *mac_addr)
7124 {
7125 	struct dp_peer *peer;
7126 	uint8_t local_id;
7127 
7128 	if (!mac_addr) {
7129 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
7130 			  "Invalid MAC address\n");
7131 		return;
7132 	}
7133 
7134 	peer = (struct dp_peer *)dp_find_peer_by_addr(pdev_handle, mac_addr,
7135 			&local_id);
7136 
7137 	if (!peer) {
7138 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
7139 			  "%s: Invalid peer\n", __func__);
7140 		return;
7141 	}
7142 
7143 	dp_print_peer_stats(peer);
7144 	dp_peer_rxtid_stats(peer, dp_rx_tid_stats_cb, NULL);
7145 }
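
/*
 * Illustrative usage sketch (hypothetical caller, not part of this file):
 * control-path code reaches dp_get_host_peer_stats() with the target
 * peer's MAC address; the address bytes below are examples only.
 *
 *	char peer_mac[QDF_MAC_ADDR_SIZE] = {0x00, 0x03, 0x7f,
 *					    0x12, 0x34, 0x56};
 *
 *	dp_get_host_peer_stats(pdev_handle, peer_mac);
 *	// dumps node Tx/Rx stats, then per-TID rx reorder stats via
 *	// dp_peer_rxtid_stats()
 */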
7146 
7147 /**
7148  * dp_print_soc_cfg_params()- Dump soc wlan config parameters
7149  * @soc: DP SoC handle
7150  *
7151  * Return: void
7152  */
7153 static void
7154 dp_print_soc_cfg_params(struct dp_soc *soc)
7155 {
7156 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
7157 	uint8_t index = 0, i = 0;
7158 	char ring_mask[DP_MAX_INT_CONTEXTS_STRING_LENGTH];
7159 	int num_of_int_contexts;
7160 
7161 	if (!soc) {
7162 		dp_err("Context is null");
7163 		return;
7164 	}
7165 
7166 	soc_cfg_ctx = soc->wlan_cfg_ctx;
7167 
7168 	if (!soc_cfg_ctx) {
7169 		dp_err("Context is null");
7170 		return;
7171 	}
7172 
7173 	num_of_int_contexts =
7174 			wlan_cfg_get_num_contexts(soc_cfg_ctx);
7175 
7176 	DP_TRACE_STATS(DEBUG, "No. of interrupt contexts: %u",
7177 		       soc_cfg_ctx->num_int_ctxts);
7178 	DP_TRACE_STATS(DEBUG, "Max clients: %u",
7179 		       soc_cfg_ctx->max_clients);
7180 	DP_TRACE_STATS(DEBUG, "Max alloc size: %u ",
7181 		       soc_cfg_ctx->max_alloc_size);
7182 	DP_TRACE_STATS(DEBUG, "Per pdev tx ring: %u ",
7183 		       soc_cfg_ctx->per_pdev_tx_ring);
7184 	DP_TRACE_STATS(DEBUG, "Num tcl data rings: %u ",
7185 		       soc_cfg_ctx->num_tcl_data_rings);
7186 	DP_TRACE_STATS(DEBUG, "Per pdev rx ring: %u ",
7187 		       soc_cfg_ctx->per_pdev_rx_ring);
7188 	DP_TRACE_STATS(DEBUG, "Per pdev lmac ring: %u ",
7189 		       soc_cfg_ctx->per_pdev_lmac_ring);
7190 	DP_TRACE_STATS(DEBUG, "Num of reo dest rings: %u ",
7191 		       soc_cfg_ctx->num_reo_dest_rings);
7192 	DP_TRACE_STATS(DEBUG, "Num tx desc pool: %u ",
7193 		       soc_cfg_ctx->num_tx_desc_pool);
7194 	DP_TRACE_STATS(DEBUG, "Num tx ext desc pool: %u ",
7195 		       soc_cfg_ctx->num_tx_ext_desc_pool);
7196 	DP_TRACE_STATS(DEBUG, "Num tx desc: %u ",
7197 		       soc_cfg_ctx->num_tx_desc);
7198 	DP_TRACE_STATS(DEBUG, "Num tx ext desc: %u ",
7199 		       soc_cfg_ctx->num_tx_ext_desc);
7200 	DP_TRACE_STATS(DEBUG, "Htt packet type: %u ",
7201 		       soc_cfg_ctx->htt_packet_type);
7202 	DP_TRACE_STATS(DEBUG, "Max peer_ids: %u ",
7203 		       soc_cfg_ctx->max_peer_id);
7204 	DP_TRACE_STATS(DEBUG, "Tx ring size: %u ",
7205 		       soc_cfg_ctx->tx_ring_size);
7206 	DP_TRACE_STATS(DEBUG, "Tx comp ring size: %u ",
7207 		       soc_cfg_ctx->tx_comp_ring_size);
7208 	DP_TRACE_STATS(DEBUG, "Tx comp ring size nss: %u ",
7209 		       soc_cfg_ctx->tx_comp_ring_size_nss);
7210 	DP_TRACE_STATS(DEBUG, "Int batch threshold tx: %u ",
7211 		       soc_cfg_ctx->int_batch_threshold_tx);
7212 	DP_TRACE_STATS(DEBUG, "Int timer threshold tx: %u ",
7213 		       soc_cfg_ctx->int_timer_threshold_tx);
7214 	DP_TRACE_STATS(DEBUG, "Int batch threshold rx: %u ",
7215 		       soc_cfg_ctx->int_batch_threshold_rx);
7216 	DP_TRACE_STATS(DEBUG, "Int timer threshold rx: %u ",
7217 		       soc_cfg_ctx->int_timer_threshold_rx);
7218 	DP_TRACE_STATS(DEBUG, "Int batch threshold other: %u ",
7219 		       soc_cfg_ctx->int_batch_threshold_other);
7220 	DP_TRACE_STATS(DEBUG, "Int timer threshold other: %u ",
7221 		       soc_cfg_ctx->int_timer_threshold_other);
7222 
7223 	for (i = 0; i < num_of_int_contexts; i++) {
7224 		index += qdf_snprint(&ring_mask[index],
7225 				     DP_MAX_INT_CONTEXTS_STRING_LENGTH - index,
7226 				     " %d",
7227 				     soc_cfg_ctx->int_tx_ring_mask[i]);
7228 	}
7229 
7230 	DP_TRACE_STATS(DEBUG, "Tx ring mask (0-%d):%s",
7231 		       num_of_int_contexts, ring_mask);
7232 
7233 	index = 0;
7234 	for (i = 0; i < num_of_int_contexts; i++) {
7235 		index += qdf_snprint(&ring_mask[index],
7236 				     DP_MAX_INT_CONTEXTS_STRING_LENGTH - index,
7237 				     " %d",
7238 				     soc_cfg_ctx->int_rx_ring_mask[i]);
7239 	}
7240 
7241 	DP_TRACE_STATS(DEBUG, "Rx ring mask (0-%d):%s",
7242 		       num_of_int_contexts, ring_mask);
7243 
7244 	index = 0;
7245 	for (i = 0; i < num_of_int_contexts; i++) {
7246 		index += qdf_snprint(&ring_mask[index],
7247 				     DP_MAX_INT_CONTEXTS_STRING_LENGTH - index,
7248 				     " %d",
7249 				     soc_cfg_ctx->int_rx_mon_ring_mask[i]);
7250 	}
7251 
7252 	DP_TRACE_STATS(DEBUG, "Rx mon ring mask (0-%d):%s",
7253 		       num_of_int_contexts, ring_mask);
7254 
7255 	index = 0;
7256 	for (i = 0; i < num_of_int_contexts; i++) {
7257 		index += qdf_snprint(&ring_mask[index],
7258 				     DP_MAX_INT_CONTEXTS_STRING_LENGTH - index,
7259 				     " %d",
7260 				     soc_cfg_ctx->int_rx_err_ring_mask[i]);
7261 	}
7262 
7263 	DP_TRACE_STATS(DEBUG, "Rx err ring mask (0-%d):%s",
7264 		       num_of_int_contexts, ring_mask);
7265 
7266 	index = 0;
7267 	for (i = 0; i < num_of_int_contexts; i++) {
7268 		index += qdf_snprint(&ring_mask[index],
7269 				     DP_MAX_INT_CONTEXTS_STRING_LENGTH - index,
7270 				     " %d",
7271 				     soc_cfg_ctx->int_rx_wbm_rel_ring_mask[i]);
7272 	}
7273 
7274 	DP_TRACE_STATS(DEBUG, "Rx wbm rel ring mask (0-%d):%s",
7275 		       num_of_int_contexts, ring_mask);
7276 
7277 	index = 0;
7278 	for (i = 0; i < num_of_int_contexts; i++) {
7279 		index += qdf_snprint(&ring_mask[index],
7280 				     DP_MAX_INT_CONTEXTS_STRING_LENGTH - index,
7281 				     " %d",
7282 				     soc_cfg_ctx->int_reo_status_ring_mask[i]);
7283 	}
7284 
7285 	DP_TRACE_STATS(DEBUG, "Reo ring mask (0-%d):%s",
7286 		       num_of_int_contexts, ring_mask);
7287 
7288 	index = 0;
7289 	for (i = 0; i < num_of_int_contexts; i++) {
7290 		index += qdf_snprint(&ring_mask[index],
7291 				     DP_MAX_INT_CONTEXTS_STRING_LENGTH - index,
7292 				     " %d",
7293 				     soc_cfg_ctx->int_rxdma2host_ring_mask[i]);
7294 	}
7295 
7296 	DP_TRACE_STATS(DEBUG, "Rxdma2host ring mask (0-%d):%s",
7297 		       num_of_int_contexts, ring_mask);
7298 
7299 	index = 0;
7300 	for (i = 0; i < num_of_int_contexts; i++) {
7301 		index += qdf_snprint(&ring_mask[index],
7302 				     DP_MAX_INT_CONTEXTS_STRING_LENGTH - index,
7303 				     " %d",
7304 				     soc_cfg_ctx->int_host2rxdma_ring_mask[i]);
7305 	}
7306 
7307 	DP_TRACE_STATS(DEBUG, "Host2rxdma ring mask (0-%d):%s",
7308 		       num_of_int_contexts, ring_mask);
7309 
7310 	DP_TRACE_STATS(DEBUG, "Rx hash: %u ",
7311 		       soc_cfg_ctx->rx_hash);
7312 	DP_TRACE_STATS(DEBUG, "Tso enabled: %u ",
7313 		       soc_cfg_ctx->tso_enabled);
7314 	DP_TRACE_STATS(DEBUG, "Lro enabled: %u ",
7315 		       soc_cfg_ctx->lro_enabled);
7316 	DP_TRACE_STATS(DEBUG, "Sg enabled: %u ",
7317 		       soc_cfg_ctx->sg_enabled);
7318 	DP_TRACE_STATS(DEBUG, "Gro enabled: %u ",
7319 		       soc_cfg_ctx->gro_enabled);
7320 	DP_TRACE_STATS(DEBUG, "rawmode enabled: %u ",
7321 		       soc_cfg_ctx->rawmode_enabled);
7322 	DP_TRACE_STATS(DEBUG, "peer flow ctrl enabled: %u ",
7323 		       soc_cfg_ctx->peer_flow_ctrl_enabled);
7324 	DP_TRACE_STATS(DEBUG, "napi enabled: %u ",
7325 		       soc_cfg_ctx->napi_enabled);
7326 	DP_TRACE_STATS(DEBUG, "Tcp Udp checksum offload: %u ",
7327 		       soc_cfg_ctx->tcp_udp_checksumoffload);
7328 	DP_TRACE_STATS(DEBUG, "Defrag timeout check: %u ",
7329 		       soc_cfg_ctx->defrag_timeout_check);
7330 	DP_TRACE_STATS(DEBUG, "Rx defrag min timeout: %u ",
7331 		       soc_cfg_ctx->rx_defrag_min_timeout);
7332 	DP_TRACE_STATS(DEBUG, "WBM release ring: %u ",
7333 		       soc_cfg_ctx->wbm_release_ring);
7334 	DP_TRACE_STATS(DEBUG, "TCL CMD ring: %u ",
7335 		       soc_cfg_ctx->tcl_cmd_ring);
7336 	DP_TRACE_STATS(DEBUG, "TCL Status ring: %u ",
7337 		       soc_cfg_ctx->tcl_status_ring);
7338 	DP_TRACE_STATS(DEBUG, "REO Reinject ring: %u ",
7339 		       soc_cfg_ctx->reo_reinject_ring);
7340 	DP_TRACE_STATS(DEBUG, "RX release ring: %u ",
7341 		       soc_cfg_ctx->rx_release_ring);
7342 	DP_TRACE_STATS(DEBUG, "REO Exception ring: %u ",
7343 		       soc_cfg_ctx->reo_exception_ring);
7344 	DP_TRACE_STATS(DEBUG, "REO CMD ring: %u ",
7345 		       soc_cfg_ctx->reo_cmd_ring);
7346 	DP_TRACE_STATS(DEBUG, "REO STATUS ring: %u ",
7347 		       soc_cfg_ctx->reo_status_ring);
7348 	DP_TRACE_STATS(DEBUG, "RXDMA refill ring: %u ",
7349 		       soc_cfg_ctx->rxdma_refill_ring);
7350 	DP_TRACE_STATS(DEBUG, "RXDMA err dst ring: %u ",
7351 		       soc_cfg_ctx->rxdma_err_dst_ring);
7352 }
7353 
7354 /**
7355  * dp_print_pdev_cfg_params() - Print the pdev cfg parameters
7356  * @pdev: DP pdev handle
7357  *
7358  * Return: void
7359  */
7360 static void
7361 dp_print_pdev_cfg_params(struct dp_pdev *pdev)
7362 {
7363 	struct wlan_cfg_dp_pdev_ctxt *pdev_cfg_ctx;
7364 
7365 	if (!pdev) {
7366 		dp_err("Context is null");
7367 		return;
7368 	}
7369 
7370 	pdev_cfg_ctx = pdev->wlan_cfg_ctx;
7371 
7372 	if (!pdev_cfg_ctx) {
7373 		dp_err("Context is null");
7374 		return;
7375 	}
7376 
7377 	DP_TRACE_STATS(DEBUG, "Rx dma buf ring size: %d ",
7378 		       pdev_cfg_ctx->rx_dma_buf_ring_size);
7379 	DP_TRACE_STATS(DEBUG, "DMA Mon buf ring size: %d ",
7380 		       pdev_cfg_ctx->dma_mon_buf_ring_size);
7381 	DP_TRACE_STATS(DEBUG, "DMA Mon dest ring size: %d ",
7382 		       pdev_cfg_ctx->dma_mon_dest_ring_size);
7383 	DP_TRACE_STATS(DEBUG, "DMA Mon status ring size: %d ",
7384 		       pdev_cfg_ctx->dma_mon_status_ring_size);
7385 	DP_TRACE_STATS(DEBUG, "Rxdma monitor desc ring: %d",
7386 		       pdev_cfg_ctx->rxdma_monitor_desc_ring);
7387 	DP_TRACE_STATS(DEBUG, "Num mac rings: %d ",
7388 		       pdev_cfg_ctx->num_mac_rings);
7389 }
7390 
7391 /**
7392  * dp_txrx_stats_help() - Helper function for Txrx_Stats
7393  *
7394  * Return: None
7395  */
7396 static void dp_txrx_stats_help(void)
7397 {
7398 	dp_info("Command: iwpriv wlan0 txrx_stats <stats_option> <mac_id>");
7399 	dp_info("stats_option:");
7400 	dp_info("  1 -- HTT Tx Statistics");
7401 	dp_info("  2 -- HTT Rx Statistics");
7402 	dp_info("  3 -- HTT Tx HW Queue Statistics");
7403 	dp_info("  4 -- HTT Tx HW Sched Statistics");
7404 	dp_info("  5 -- HTT Error Statistics");
7405 	dp_info("  6 -- HTT TQM Statistics");
7406 	dp_info("  7 -- HTT TQM CMDQ Statistics");
7407 	dp_info("  8 -- HTT TX_DE_CMN Statistics");
7408 	dp_info("  9 -- HTT Tx Rate Statistics");
7409 	dp_info(" 10 -- HTT Rx Rate Statistics");
7410 	dp_info(" 11 -- HTT Peer Statistics");
7411 	dp_info(" 12 -- HTT Tx SelfGen Statistics");
7412 	dp_info(" 13 -- HTT Tx MU HWQ Statistics");
7413 	dp_info(" 14 -- HTT RING_IF_INFO Statistics");
7414 	dp_info(" 15 -- HTT SRNG Statistics");
7415 	dp_info(" 16 -- HTT SFM Info Statistics");
7416 	dp_info(" 17 -- HTT PDEV_TX_MU_MIMO_SCHED INFO Statistics");
7417 	dp_info(" 18 -- HTT Peer List Details");
7418 	dp_info(" 20 -- Clear Host Statistics");
7419 	dp_info(" 21 -- Host Rx Rate Statistics");
7420 	dp_info(" 22 -- Host Tx Rate Statistics");
7421 	dp_info(" 23 -- Host Tx Statistics");
7422 	dp_info(" 24 -- Host Rx Statistics");
7423 	dp_info(" 25 -- Host AST Statistics");
7424 	dp_info(" 26 -- Host SRNG PTR Statistics");
7425 	dp_info(" 27 -- Host Mon Statistics");
7426 	dp_info(" 28 -- Host REO Queue Statistics");
7427 	dp_info(" 29 -- Host Soc cfg param Statistics");
7428 	dp_info(" 30 -- Host pdev cfg param Statistics");
7429 }
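
/*
 * Example (illustrative): given the help text above, host Tx statistics
 * (option 23) for mac_id 0 would be requested as
 *
 *	iwpriv wlan0 txrx_stats 23 0
 *
 * where "wlan0" is a placeholder interface name. Per dp_stats_mapping_table,
 * options 1-18 are forwarded to FW as HTT stats requests while 20-30 are
 * served from host-side counters.
 */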
7430 
7431 /**
7432  * dp_print_host_stats()- Function to print the stats aggregated at host
7433  * @vdev_handle: DP_VDEV handle
7434  * @req: host stats request
7435  *
7436  * Return: 0 on success; prints an error message on failure
7437  */
7438 static int
7439 dp_print_host_stats(struct cdp_vdev *vdev_handle,
7440 		    struct cdp_txrx_stats_req *req)
7441 {
7442 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
7443 	struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;
7444 	enum cdp_host_txrx_stats type =
7445 			dp_stats_mapping_table[req->stats][STATS_HOST];
7446 
7447 	dp_aggregate_pdev_stats(pdev);
7448 
7449 	switch (type) {
7450 	case TXRX_CLEAR_STATS:
7451 		dp_txrx_host_stats_clr(vdev);
7452 		break;
7453 	case TXRX_RX_RATE_STATS:
7454 		dp_print_rx_rates(vdev);
7455 		break;
7456 	case TXRX_TX_RATE_STATS:
7457 		dp_print_tx_rates(vdev);
7458 		break;
7459 	case TXRX_TX_HOST_STATS:
7460 		dp_print_pdev_tx_stats(pdev);
7461 		dp_print_soc_tx_stats(pdev->soc);
7462 		break;
7463 	case TXRX_RX_HOST_STATS:
7464 		dp_print_pdev_rx_stats(pdev);
7465 		dp_print_soc_rx_stats(pdev->soc);
7466 		break;
7467 	case TXRX_AST_STATS:
7468 		dp_print_ast_stats(pdev->soc);
7469 		dp_print_peer_table(vdev);
7470 		break;
7471 	case TXRX_SRNG_PTR_STATS:
7472 		dp_print_ring_stats(pdev);
7473 		break;
7474 	case TXRX_RX_MON_STATS:
7475 		dp_print_pdev_rx_mon_stats(pdev);
7476 		break;
7477 	case TXRX_REO_QUEUE_STATS:
7478 		dp_get_host_peer_stats((struct cdp_pdev *)pdev, req->peer_addr);
7479 		break;
7480 	case TXRX_SOC_CFG_PARAMS:
7481 		dp_print_soc_cfg_params(pdev->soc);
7482 		break;
7483 	case TXRX_PDEV_CFG_PARAMS:
7484 		dp_print_pdev_cfg_params(pdev);
7485 		break;
7486 	default:
7487 		dp_info("Wrong Input For TxRx Host Stats");
7488 		dp_txrx_stats_help();
7489 		break;
7490 	}
7491 	return 0;
7492 }
7493 
7494 /*
7495  * dp_ppdu_ring_reset()- Reset PPDU Stats ring
7496  * @pdev: DP_PDEV handle
7497  *
7498  * Return: void
7499  */
7500 static void
7501 dp_ppdu_ring_reset(struct dp_pdev *pdev)
7502 {
7503 	struct htt_rx_ring_tlv_filter htt_tlv_filter;
7504 	int mac_id;
7505 
7506 	qdf_mem_set(&(htt_tlv_filter), sizeof(htt_tlv_filter), 0x0);
7507 
7508 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
7509 		int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id,
7510 							pdev->pdev_id);
7511 
7512 		htt_h2t_rx_ring_cfg(pdev->soc->htt_handle, mac_for_pdev,
7513 			pdev->rxdma_mon_status_ring[mac_id].hal_srng,
7514 			RXDMA_MONITOR_STATUS, RX_BUFFER_SIZE, &htt_tlv_filter);
7515 	}
7516 }
7517 
7518 /*
7519  * dp_ppdu_ring_cfg()- Configure PPDU Stats ring
7520  * @pdev: DP_PDEV handle
7521  *
7522  * Return: void
7523  */
7524 static void
7525 dp_ppdu_ring_cfg(struct dp_pdev *pdev)
7526 {
7527 	struct htt_rx_ring_tlv_filter htt_tlv_filter = {0};
7528 	int mac_id;
7529 
7530 	htt_tlv_filter.mpdu_start = 1;
7531 	htt_tlv_filter.msdu_start = 0;
7532 	htt_tlv_filter.packet = 0;
7533 	htt_tlv_filter.msdu_end = 0;
7534 	htt_tlv_filter.mpdu_end = 0;
7535 	htt_tlv_filter.attention = 0;
7536 	htt_tlv_filter.ppdu_start = 1;
7537 	htt_tlv_filter.ppdu_end = 1;
7538 	htt_tlv_filter.ppdu_end_user_stats = 1;
7539 	htt_tlv_filter.ppdu_end_user_stats_ext = 1;
7540 	htt_tlv_filter.ppdu_end_status_done = 1;
7541 	htt_tlv_filter.enable_fp = 1;
7542 	htt_tlv_filter.enable_md = 0;
7543 	if (pdev->neighbour_peers_added &&
7544 	    pdev->soc->hw_nac_monitor_support) {
7545 		htt_tlv_filter.enable_md = 1;
7546 		htt_tlv_filter.packet_header = 1;
7547 	}
7548 	if (pdev->mcopy_mode) {
7549 		htt_tlv_filter.packet_header = 1;
7550 		htt_tlv_filter.enable_mo = 1;
7551 	}
7552 	htt_tlv_filter.fp_mgmt_filter = FILTER_MGMT_ALL;
7553 	htt_tlv_filter.fp_ctrl_filter = FILTER_CTRL_ALL;
7554 	htt_tlv_filter.fp_data_filter = FILTER_DATA_ALL;
7555 	htt_tlv_filter.mo_mgmt_filter = FILTER_MGMT_ALL;
7556 	htt_tlv_filter.mo_ctrl_filter = FILTER_CTRL_ALL;
7557 	htt_tlv_filter.mo_data_filter = FILTER_DATA_ALL;
7558 	if (pdev->neighbour_peers_added &&
7559 	    pdev->soc->hw_nac_monitor_support)
7560 		htt_tlv_filter.md_data_filter = FILTER_DATA_ALL;
7561 
7562 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
7563 		int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id,
7564 						pdev->pdev_id);
7565 
7566 		htt_h2t_rx_ring_cfg(pdev->soc->htt_handle, mac_for_pdev,
7567 			pdev->rxdma_mon_status_ring[mac_id].hal_srng,
7568 			RXDMA_MONITOR_STATUS, RX_BUFFER_SIZE, &htt_tlv_filter);
7569 	}
7570 }
7571 
7572 /*
7573  * is_ppdu_txrx_capture_enabled() - API to check whether the pktlog,
7574  *                              tx_sniffer and m_copy modes are all off.
7575  * @pdev: dp pdev handle.
7576  *
7577  * Return: true when none of these capture modes is enabled
7578  */
7579 static inline bool is_ppdu_txrx_capture_enabled(struct dp_pdev *pdev)
7580 {
7581 	if (!pdev->pktlog_ppdu_stats && !pdev->tx_sniffer_enable &&
7582 	    !pdev->mcopy_mode)
7583 		return true;
7584 	else
7585 		return false;
7586 }
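
/*
 * Behavior sketch for the helper above (illustrative): despite its name, it
 * returns true only when every ppdu capture consumer is off:
 *
 *	pktlog_ppdu_stats  tx_sniffer_enable  mcopy_mode  ->  result
 *	        0                  0               0          true
 *	        1                  0               0          false
 *	        0                  1               0          false
 *
 * Callers use it to decide whether PPDU stats delivery to FW can be
 * reconfigured without disturbing an active pktlog/sniffer/m_copy session.
 */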
7587 
7588 /*
7589  * dp_set_bpr_enable() - API to enable/disable bpr feature
7590  * @pdev_handle: DP_PDEV handle.
7591  * @val: Provided value.
7592  *
7593  * Return: QDF_STATUS
7594  */
7595 static QDF_STATUS
7596 dp_set_bpr_enable(struct cdp_pdev *pdev_handle, int val)
7597 {
7598 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
7599 
7600 	switch (val) {
7601 	case CDP_BPR_DISABLE:
7602 		pdev->bpr_enable = CDP_BPR_DISABLE;
7603 		if (!pdev->pktlog_ppdu_stats && !pdev->enhanced_stats_en &&
7604 		    !pdev->tx_sniffer_enable && !pdev->mcopy_mode) {
7605 			dp_h2t_cfg_stats_msg_send(pdev, 0, pdev->pdev_id);
7606 		} else if (pdev->enhanced_stats_en &&
7607 			   !pdev->tx_sniffer_enable && !pdev->mcopy_mode &&
7608 			   !pdev->pktlog_ppdu_stats) {
7609 			dp_h2t_cfg_stats_msg_send(pdev,
7610 						  DP_PPDU_STATS_CFG_ENH_STATS,
7611 						  pdev->pdev_id);
7612 		}
7613 		break;
7614 	case CDP_BPR_ENABLE:
7615 		pdev->bpr_enable = CDP_BPR_ENABLE;
7616 		if (!pdev->enhanced_stats_en && !pdev->tx_sniffer_enable &&
7617 		    !pdev->mcopy_mode && !pdev->pktlog_ppdu_stats) {
7618 			dp_h2t_cfg_stats_msg_send(pdev,
7619 						  DP_PPDU_STATS_CFG_BPR,
7620 						  pdev->pdev_id);
7621 		} else if (pdev->enhanced_stats_en &&
7622 			   !pdev->tx_sniffer_enable && !pdev->mcopy_mode &&
7623 			   !pdev->pktlog_ppdu_stats) {
7624 			dp_h2t_cfg_stats_msg_send(pdev,
7625 						  DP_PPDU_STATS_CFG_BPR_ENH,
7626 						  pdev->pdev_id);
7627 		} else if (pdev->pktlog_ppdu_stats) {
7628 			dp_h2t_cfg_stats_msg_send(pdev,
7629 						  DP_PPDU_STATS_CFG_BPR_PKTLOG,
7630 						  pdev->pdev_id);
7631 		}
7632 		break;
7633 	default:
7634 		break;
7635 	}
7636 
7637 	return QDF_STATUS_SUCCESS;
7638 }
7639 
7640 /*
7641  * dp_config_debug_sniffer()- API to enable/disable debug sniffer
7642  * @pdev_handle: DP_PDEV handle
7643  * @val: user provided value
7644  *
7645  * Return: 0 for success. nonzero for failure.
7646  */
7647 static QDF_STATUS
7648 dp_config_debug_sniffer(struct cdp_pdev *pdev_handle, int val)
7649 {
7650 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
7651 	QDF_STATUS status = QDF_STATUS_SUCCESS;
7652 
7653 	if (pdev->mcopy_mode)
7654 		dp_reset_monitor_mode(pdev_handle);
7655 
7656 	switch (val) {
7657 	case 0:
7658 		pdev->tx_sniffer_enable = 0;
7659 		pdev->mcopy_mode = 0;
7660 		pdev->monitor_configured = false;
7661 
7662 		if (!pdev->pktlog_ppdu_stats && !pdev->enhanced_stats_en &&
7663 		    !pdev->bpr_enable) {
7664 			dp_h2t_cfg_stats_msg_send(pdev, 0, pdev->pdev_id);
7665 			dp_ppdu_ring_reset(pdev);
7666 		} else if (pdev->enhanced_stats_en && !pdev->bpr_enable) {
7667 			dp_h2t_cfg_stats_msg_send(pdev,
7668 				DP_PPDU_STATS_CFG_ENH_STATS, pdev->pdev_id);
7669 		} else if (!pdev->enhanced_stats_en && pdev->bpr_enable) {
7670 			dp_h2t_cfg_stats_msg_send(pdev,
7671 						  DP_PPDU_STATS_CFG_BPR_ENH,
7672 						  pdev->pdev_id);
7673 		} else {
7674 			dp_h2t_cfg_stats_msg_send(pdev,
7675 						  DP_PPDU_STATS_CFG_BPR,
7676 						  pdev->pdev_id);
7677 		}
7678 		break;
7679 
7680 	case 1:
7681 		pdev->tx_sniffer_enable = 1;
7682 		pdev->mcopy_mode = 0;
7683 		pdev->monitor_configured = false;
7684 
7685 		if (!pdev->pktlog_ppdu_stats)
7686 			dp_h2t_cfg_stats_msg_send(pdev,
7687 				DP_PPDU_STATS_CFG_SNIFFER, pdev->pdev_id);
7688 		break;
7689 	case 2:
7690 		if (pdev->monitor_vdev) {
7691 			status = QDF_STATUS_E_RESOURCES;
7692 			break;
7693 		}
7694 
7695 		pdev->mcopy_mode = 1;
7696 		dp_pdev_configure_monitor_rings(pdev);
7697 		pdev->monitor_configured = true;
7698 		pdev->tx_sniffer_enable = 0;
7699 
7700 		if (!pdev->pktlog_ppdu_stats)
7701 			dp_h2t_cfg_stats_msg_send(pdev,
7702 				DP_PPDU_STATS_CFG_SNIFFER, pdev->pdev_id);
7703 		break;
7704 	default:
7705 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
7706 			"Invalid value");
7707 		break;
7708 	}
7709 	return status;
7710 }
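
/*
 * Illustrative sketch (hypothetical caller, not part of this file): val
 * selects the mode handled by the switch above (0 = disable, 1 = tx
 * sniffer, 2 = m_copy).
 *
 *	if (dp_config_debug_sniffer(pdev_handle, 2) != QDF_STATUS_SUCCESS)
 *		dp_err("m_copy enable failed; monitor vdev already active?");
 */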
7711 
7712 /*
7713  * dp_enable_enhanced_stats()- API to enable enhanced statistics
7714  * @pdev_handle: DP_PDEV handle
7715  *
7716  * Return: void
7717  */
7718 static void
7719 dp_enable_enhanced_stats(struct cdp_pdev *pdev_handle)
7720 {
7721 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
7722 
7723 	if (pdev->enhanced_stats_en == 0)
7724 		dp_cal_client_timer_start(pdev->cal_client_ctx);
7725 
7726 	pdev->enhanced_stats_en = 1;
7727 
7728 	if (!pdev->mcopy_mode && !pdev->neighbour_peers_added &&
7729 	    !pdev->monitor_vdev)
7730 		dp_ppdu_ring_cfg(pdev);
7731 
7732 	if (is_ppdu_txrx_capture_enabled(pdev) && !pdev->bpr_enable) {
7733 		dp_h2t_cfg_stats_msg_send(pdev, DP_PPDU_STATS_CFG_ENH_STATS, pdev->pdev_id);
7734 	} else if (is_ppdu_txrx_capture_enabled(pdev) && pdev->bpr_enable) {
7735 		dp_h2t_cfg_stats_msg_send(pdev,
7736 					  DP_PPDU_STATS_CFG_BPR_ENH,
7737 					  pdev->pdev_id);
7738 	}
7739 }
7740 
7741 /*
7742  * dp_disable_enhanced_stats()- API to disable enhanced statistics
7743  * @pdev_handle: DP_PDEV handle
7744  *
7745  * Return: void
7746  */
7747 static void
7748 dp_disable_enhanced_stats(struct cdp_pdev *pdev_handle)
7749 {
7750 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
7751 
7752 	if (pdev->enhanced_stats_en == 1)
7753 		dp_cal_client_timer_stop(pdev->cal_client_ctx);
7754 
7755 	pdev->enhanced_stats_en = 0;
7756 
7757 	if (is_ppdu_txrx_capture_enabled(pdev) && !pdev->bpr_enable) {
7758 		dp_h2t_cfg_stats_msg_send(pdev, 0, pdev->pdev_id);
7759 	} else if (is_ppdu_txrx_capture_enabled(pdev) && pdev->bpr_enable) {
7760 		dp_h2t_cfg_stats_msg_send(pdev,
7761 					  DP_PPDU_STATS_CFG_BPR,
7762 					  pdev->pdev_id);
7763 	}
7764 
7765 	if (!pdev->mcopy_mode && !pdev->neighbour_peers_added &&
7766 	    !pdev->monitor_vdev)
7767 		dp_ppdu_ring_reset(pdev);
7768 }
7769 
7770 /*
7771  * dp_get_fw_peer_stats()- function to print peer stats
7772  * @pdev_handle: DP_PDEV handle
7773  * @mac_addr: mac address of the peer
7774  * @cap: Type of htt stats requested
7775  *
7776  * Currently supports only MAC-ID-based requests:
7777  *	1: HTT_PEER_STATS_REQ_MODE_NO_QUERY
7778  *	2: HTT_PEER_STATS_REQ_MODE_QUERY_TQM
7779  *	3: HTT_PEER_STATS_REQ_MODE_FLUSH_TQM
7780  *
7781  * Return: void
7782  */
7783 static void
7784 dp_get_fw_peer_stats(struct cdp_pdev *pdev_handle, uint8_t *mac_addr,
7785 		uint32_t cap)
7786 {
7787 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
7788 	int i;
7789 	uint32_t config_param0 = 0;
7790 	uint32_t config_param1 = 0;
7791 	uint32_t config_param2 = 0;
7792 	uint32_t config_param3 = 0;
7793 
7794 	HTT_DBG_EXT_STATS_PEER_INFO_IS_MAC_ADDR_SET(config_param0, 1);
7795 	config_param0 |= (1 << (cap + 1));
7796 
7797 	for (i = 0; i < HTT_PEER_STATS_MAX_TLV; i++) {
7798 		config_param1 |= (1 << i);
7799 	}
7800 
7801 	config_param2 |= (mac_addr[0] & 0x000000ff);
7802 	config_param2 |= ((mac_addr[1] << 8) & 0x0000ff00);
7803 	config_param2 |= ((mac_addr[2] << 16) & 0x00ff0000);
7804 	config_param2 |= ((mac_addr[3] << 24) & 0xff000000);
7805 
7806 	config_param3 |= (mac_addr[4] & 0x000000ff);
7807 	config_param3 |= ((mac_addr[5] << 8) & 0x0000ff00);
7808 
7809 	dp_h2t_ext_stats_msg_send(pdev, HTT_DBG_EXT_STATS_PEER_INFO,
7810 			config_param0, config_param1, config_param2,
7811 			config_param3, 0, 0, 0);
7812 
7813 }
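
/*
 * Worked example (illustrative) of the packing above: for peer MAC
 * aa:bb:cc:dd:ee:ff the bytes land little-endian in the config words,
 *
 *	config_param2 = 0xddccbbaa;	// mac[3]..mac[0]
 *	config_param3 = 0x0000ffee;	// mac[5], mac[4]
 *
 * while cap = 2 (HTT_PEER_STATS_REQ_MODE_QUERY_TQM) sets bit (cap + 1) = 3
 * of config_param0 on top of the IS_MAC_ADDR flag.
 */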
7814 
7815 /* This struct definition will be removed from here
7816  * once it gets added to the FW headers */
7817 struct httstats_cmd_req {
7818 	uint32_t	config_param0;
7819 	uint32_t	config_param1;
7820 	uint32_t	config_param2;
7821 	uint32_t	config_param3;
7822 	int		cookie;
7823 	u_int8_t	stats_id;
7824 };
7825 
7826 /*
7827  * dp_get_htt_stats: function to process the htt stats request
7828  * @pdev_handle: DP pdev handle
7829  * @data: pointer to request data
7830  * @data_len: length for request data
7831  *
7832  * return: void
7833  */
7834 static void
7835 dp_get_htt_stats(struct cdp_pdev *pdev_handle, void *data, uint32_t data_len)
7836 {
7837 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
7838 	struct httstats_cmd_req *req = (struct httstats_cmd_req *)data;
7839 
7840 	QDF_ASSERT(data_len == sizeof(struct httstats_cmd_req));
7841 	dp_h2t_ext_stats_msg_send(pdev, req->stats_id,
7842 				req->config_param0, req->config_param1,
7843 				req->config_param2, req->config_param3,
7844 				req->cookie, 0, 0);
7845 }
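
/*
 * Illustrative sketch (hypothetical caller): the opaque data blob must be a
 * fully populated httstats_cmd_req. Assuming stats_id 6 is the HTT TQM
 * stats id in the FW enum:
 *
 *	struct httstats_cmd_req req = {0};
 *
 *	req.stats_id = 6;		// HTT TQM statistics (assumed id)
 *	req.config_param1 = 0xFFFFFFFF;	// request all TLVs in bmask word 0
 *	dp_get_htt_stats(pdev_handle, &req, sizeof(req));
 */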
7846 
7847 /*
7848  * dp_set_pdev_param: function to set parameters in pdev
7849  * @pdev_handle: DP pdev handle
7850  * @param: parameter type to be set
7851  * @val: value of parameter to be set
7852  *
7853  * Return: 0 for success. nonzero for failure.
7854  */
7855 static QDF_STATUS dp_set_pdev_param(struct cdp_pdev *pdev_handle,
7856 				    enum cdp_pdev_param_type param,
7857 				    uint8_t val)
7858 {
7859 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
7860 	switch (param) {
7861 	case CDP_CONFIG_DEBUG_SNIFFER:
7862 		return dp_config_debug_sniffer(pdev_handle, val);
7863 	case CDP_CONFIG_BPR_ENABLE:
7864 		return dp_set_bpr_enable(pdev_handle, val);
7865 	case CDP_CONFIG_PRIMARY_RADIO:
7866 		pdev->is_primary = val;
7867 		break;
7868 	default:
7869 		return QDF_STATUS_E_INVAL;
7870 	}
7871 	return QDF_STATUS_SUCCESS;
7872 }
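
/*
 * Illustrative sketch (hypothetical caller): marking a radio as primary via
 * the generic pdev-param path dispatched above.
 *
 *	QDF_STATUS status;
 *
 *	status = dp_set_pdev_param(pdev_handle, CDP_CONFIG_PRIMARY_RADIO, 1);
 *	if (QDF_IS_STATUS_ERROR(status))
 *		dp_err("pdev param not supported");
 */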
7873 
7874 /*
7875  * dp_get_vdev_param: function to get parameters from vdev
7876  * @param: parameter type to get value
7877  *
7878  * Return: parameter value
7879  */
7880 static uint32_t dp_get_vdev_param(struct cdp_vdev *vdev_handle,
7881 				  enum cdp_vdev_param_type param)
7882 {
7883 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
7884 	uint32_t val;
7885 
7886 	switch (param) {
7887 	case CDP_ENABLE_WDS:
7888 		val = vdev->wds_enabled;
7889 		break;
7890 	case CDP_ENABLE_MEC:
7891 		val = vdev->mec_enabled;
7892 		break;
7893 	case CDP_ENABLE_DA_WAR:
7894 		val = vdev->da_war_enabled;
7895 		break;
7896 	default:
7897 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
7898 			  "param value %d is wrong\n",
7899 			  param);
7900 		val = -1;
7901 		break;
7902 	}
7903 
7904 	return val;
7905 }
7906 
7907 /*
7908  * dp_set_vdev_param: function to set parameters in vdev
7909  * @param: parameter type to be set
7910  * @val: value of parameter to be set
7911  *
7912  * return: void
7913  */
7914 static void dp_set_vdev_param(struct cdp_vdev *vdev_handle,
7915 		enum cdp_vdev_param_type param, uint32_t val)
7916 {
7917 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
7918 	switch (param) {
7919 	case CDP_ENABLE_WDS:
7920 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
7921 			  "wds_enable %d for vdev(%p) id(%d)\n",
7922 			  val, vdev, vdev->vdev_id);
7923 		vdev->wds_enabled = val;
7924 		break;
7925 	case CDP_ENABLE_MEC:
7926 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
7927 			  "mec_enable %d for vdev(%p) id(%d)\n",
7928 			  val, vdev, vdev->vdev_id);
7929 		vdev->mec_enabled = val;
7930 		break;
7931 	case CDP_ENABLE_DA_WAR:
7932 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
7933 			  "da_war_enable %d for vdev(%p) id(%d)\n",
7934 			  val, vdev, vdev->vdev_id);
7935 		vdev->da_war_enabled = val;
7936 		break;
7937 	case CDP_ENABLE_NAWDS:
7938 		vdev->nawds_enabled = val;
7939 		break;
7940 	case CDP_ENABLE_MCAST_EN:
7941 		vdev->mcast_enhancement_en = val;
7942 		break;
7943 	case CDP_ENABLE_PROXYSTA:
7944 		vdev->proxysta_vdev = val;
7945 		break;
7946 	case CDP_UPDATE_TDLS_FLAGS:
7947 		vdev->tdls_link_connected = val;
7948 		break;
7949 	case CDP_CFG_WDS_AGING_TIMER:
7950 		if (val == 0)
7951 			qdf_timer_stop(&vdev->pdev->soc->ast_aging_timer);
7952 		else if (val != vdev->wds_aging_timer_val)
7953 			qdf_timer_mod(&vdev->pdev->soc->ast_aging_timer, val);
7954 
7955 		vdev->wds_aging_timer_val = val;
7956 		break;
7957 	case CDP_ENABLE_AP_BRIDGE:
7958 		if (wlan_op_mode_sta != vdev->opmode)
7959 			vdev->ap_bridge_enabled = val;
7960 		else
7961 			vdev->ap_bridge_enabled = false;
7962 		break;
7963 	case CDP_ENABLE_CIPHER:
7964 		vdev->sec_type = val;
7965 		break;
7966 	case CDP_ENABLE_QWRAP_ISOLATION:
7967 		vdev->isolation_vdev = val;
7968 		break;
7969 	default:
7970 		break;
7971 	}
7972 
7973 	dp_tx_vdev_update_search_flags(vdev);
7974 }
7975 
7976 /**
7977  * dp_peer_set_nawds: set nawds bit in peer
7978  * @peer_handle: pointer to peer
7979  * @value: enable/disable nawds
7980  *
7981  * return: void
7982  */
7983 static void dp_peer_set_nawds(struct cdp_peer *peer_handle, uint8_t value)
7984 {
7985 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
7986 	peer->nawds_enabled = value;
7987 }
7988 
7989 /*
7990  * dp_set_vdev_dscp_tid_map_wifi3(): Update Map ID selected for particular vdev
7991  * @vdev_handle: DP_VDEV handle
7992  * @map_id:ID of map that needs to be updated
7993  *
7994  * Return: void
7995  */
7996 static void dp_set_vdev_dscp_tid_map_wifi3(struct cdp_vdev *vdev_handle,
7997 		uint8_t map_id)
7998 {
7999 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
8000 	vdev->dscp_tid_map_id = map_id;
8001 	return;
8002 }
8003 
8004 /* dp_txrx_get_pdev_stats - Returns cdp_pdev_stats
8005  * @pdev_handle: DP pdev handle
8006  *
8007  * return : cdp_pdev_stats pointer
8008  */
8009 static struct cdp_pdev_stats*
8010 dp_txrx_get_pdev_stats(struct cdp_pdev *pdev_handle)
8011 {
8012 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
8013 
8014 	dp_aggregate_pdev_stats(pdev);
8015 
8016 	return &pdev->stats;
8017 }
8018 
8019 /* dp_txrx_get_peer_stats - will return cdp_peer_stats
8020  * @peer_handle: DP_PEER handle
8021  *
8022  * return : cdp_peer_stats pointer
8023  */
8024 static struct cdp_peer_stats*
8025 		dp_txrx_get_peer_stats(struct cdp_peer *peer_handle)
8026 {
8027 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
8028 
8029 	qdf_assert(peer);
8030 
8031 	return &peer->stats;
8032 }
8033 
8034 /* dp_txrx_reset_peer_stats - reset cdp_peer_stats for particular peer
8035  * @peer_handle: DP_PEER handle
8036  *
8037  * return : void
8038  */
8039 static void dp_txrx_reset_peer_stats(struct cdp_peer *peer_handle)
8040 {
8041 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
8042 
8043 	qdf_assert(peer);
8044 
8045 	qdf_mem_set(&peer->stats, sizeof(peer->stats), 0);
8046 }
8047 
8048 /* dp_txrx_get_vdev_stats - Update buffer with cdp_vdev_stats
8049  * @vdev_handle: DP_VDEV handle
8050  * @buf: buffer for vdev stats
8051  *
8052  * return : int
8053  */
8054 static int  dp_txrx_get_vdev_stats(struct cdp_vdev *vdev_handle, void *buf,
8055 				   bool is_aggregate)
8056 {
8057 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
8058 	struct cdp_vdev_stats *vdev_stats = (struct cdp_vdev_stats *)buf;
8059 
8060 	if (is_aggregate)
8061 		dp_aggregate_vdev_stats(vdev, buf);
8062 	else
8063 		qdf_mem_copy(vdev_stats, &vdev->stats, sizeof(vdev->stats));
8064 
8065 	return 0;
8066 }
8067 
8068 /*
8069  * dp_get_total_per(): get total packet error rate (PER)
8070  * @pdev_handle: DP_PDEV handle
8071  *
8072  * Return: % error rate using retries per packet and success packets
8073  */
8074 static int dp_get_total_per(struct cdp_pdev *pdev_handle)
8075 {
8076 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
8077 
8078 	dp_aggregate_pdev_stats(pdev);
8079 	if ((pdev->stats.tx.tx_success.num + pdev->stats.tx.retries) == 0)
8080 		return 0;
8081 	return ((pdev->stats.tx.retries * 100) /
8082 		((pdev->stats.tx.tx_success.num) + (pdev->stats.tx.retries)));
8083 }
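
/*
 * Worked example (illustrative) of the PER formula above: with
 * tx_success.num = 900 and retries = 100,
 *
 *	PER = (100 * 100) / (900 + 100) = 10
 *
 * i.e. a 10% error rate; the integer division truncates any fraction, and
 * the early return avoids dividing by zero when nothing was sent.
 */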
8084 
8085 /*
8086  * dp_txrx_stats_publish(): publish pdev stats into a buffer
8087  * @pdev_handle: DP_PDEV handle
8088  * @buf: to hold pdev_stats
8089  *
8090  * Return: int
8091  */
8092 static int
8093 dp_txrx_stats_publish(struct cdp_pdev *pdev_handle, void *buf)
8094 {
8095 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
8096 	struct cdp_pdev_stats *buffer = (struct cdp_pdev_stats *) buf;
8097 	struct cdp_txrx_stats_req req = {0,};
8098 
8099 	dp_aggregate_pdev_stats(pdev);
8100 	req.stats = (enum cdp_stats)HTT_DBG_EXT_STATS_PDEV_TX;
8101 	req.cookie_val = 1;
8102 	dp_h2t_ext_stats_msg_send(pdev, req.stats, req.param0,
8103 				req.param1, req.param2, req.param3, 0,
8104 				req.cookie_val, 0);
8105 
8106 	msleep(DP_MAX_SLEEP_TIME);
8107 
8108 	req.stats = (enum cdp_stats)HTT_DBG_EXT_STATS_PDEV_RX;
8109 	req.cookie_val = 1;
8110 	dp_h2t_ext_stats_msg_send(pdev, req.stats, req.param0,
8111 				req.param1, req.param2, req.param3, 0,
8112 				req.cookie_val, 0);
8113 
8114 	msleep(DP_MAX_SLEEP_TIME);
8115 	qdf_mem_copy(buffer, &pdev->stats, sizeof(pdev->stats));
8116 
8117 	return TXRX_STATS_LEVEL;
8118 }
8119 
8120 /**
8121  * dp_set_pdev_dscp_tid_map_wifi3(): update dscp tid map in pdev
8122  * @pdev: DP_PDEV handle
8123  * @map_id: ID of map that needs to be updated
8124  * @tos: index value in map
8125  * @tid: tid value passed by the user
8126  *
8127  * Return: void
8128  */
8129 static void dp_set_pdev_dscp_tid_map_wifi3(struct cdp_pdev *pdev_handle,
8130 		uint8_t map_id, uint8_t tos, uint8_t tid)
8131 {
8132 	uint8_t dscp;
8133 	struct dp_pdev *pdev = (struct dp_pdev *) pdev_handle;
8134 	struct dp_soc *soc = pdev->soc;
8135 
8136 	if (!soc)
8137 		return;
8138 
8139 	dscp = (tos >> DP_IP_DSCP_SHIFT) & DP_IP_DSCP_MASK;
8140 	pdev->dscp_tid_map[map_id][dscp] = tid;
8141 
8142 	if (map_id < soc->num_hw_dscp_tid_map)
8143 		hal_tx_update_dscp_tid(soc->hal_soc, tid,
8144 				       map_id, dscp);
8145 	return;
8146 }
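
/*
 * Worked example (illustrative, assuming the usual DP_IP_DSCP_SHIFT = 2 and
 * DP_IP_DSCP_MASK = 0x3f definitions): an EF-marked frame with tos = 0xb8
 * yields
 *
 *	dscp = (0xb8 >> 2) & 0x3f = 46
 *
 * so dscp_tid_map[map_id][46] takes the caller's tid, and the HW copy is
 * refreshed only for map ids backed by hardware registers.
 */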
8147 
8148 /**
8149  * dp_hmmc_tid_override_en_wifi3(): Function to enable hmmc tid override.
8150  * @pdev_handle: pdev handle
8151  * @val: hmmc-dscp flag value
8152  *
8153  * Return: void
8154  */
8155 static void dp_hmmc_tid_override_en_wifi3(struct cdp_pdev *pdev_handle,
8156 					  bool val)
8157 {
8158 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
8159 
8160 	pdev->hmmc_tid_override_en = val;
8161 }
8162 
8163 /**
8164  * dp_set_hmmc_tid_val_wifi3(): Function to set hmmc tid value.
8165  * @pdev_handle: pdev handle
8166  * @tid: tid value
8167  *
8168  * Return: void
8169  */
8170 static void dp_set_hmmc_tid_val_wifi3(struct cdp_pdev *pdev_handle,
8171 				      uint8_t tid)
8172 {
8173 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
8174 
8175 	pdev->hmmc_tid = tid;
8176 }
8177 
8178 /**
8179  * dp_fw_stats_process(): Process TxRX FW stats request
8180  * @vdev_handle: DP VDEV handle
8181  * @req: stats request
8182  *
8183  * return: int
8184  */
8185 static int dp_fw_stats_process(struct cdp_vdev *vdev_handle,
8186 		struct cdp_txrx_stats_req *req)
8187 {
8188 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
8189 	struct dp_pdev *pdev = NULL;
8190 	uint32_t stats = req->stats;
8191 	uint8_t mac_id = req->mac_id;
8192 
8193 	if (!vdev) {
8194 		DP_TRACE(NONE, "VDEV not found");
8195 		return 1;
8196 	}
8197 	pdev = vdev->pdev;
8198 
8199 	/*
8200 	 * For the HTT_DBG_EXT_STATS_RESET command, the FW needs param0
8201 	 * through param3 configured according to the rule below:
8202 	 *
8203 	 * PARAM:
8204 	 *   - config_param0 : start_offset (stats type)
8205 	 *   - config_param1 : stats bmask from start offset
8206 	 *   - config_param2 : stats bmask from start offset + 32
8207 	 *   - config_param3 : stats bmask from start offset + 64
8208 	 */
8209 	if (req->stats == CDP_TXRX_STATS_0) {
8210 		req->param0 = HTT_DBG_EXT_STATS_PDEV_TX;
8211 		req->param1 = 0xFFFFFFFF;
8212 		req->param2 = 0xFFFFFFFF;
8213 		req->param3 = 0xFFFFFFFF;
8214 	} else if (req->stats == (uint8_t)HTT_DBG_EXT_STATS_PDEV_TX_MU) {
8215 		req->param0 = HTT_DBG_EXT_STATS_SET_VDEV_MASK(vdev->vdev_id);
8216 	}
8217 
8218 	return dp_h2t_ext_stats_msg_send(pdev, stats, req->param0,
8219 				req->param1, req->param2, req->param3,
8220 				0, 0, mac_id);
8221 }
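
/*
 * Illustrative sketch: for the CDP_TXRX_STATS_0 (reset) case above, the
 * request leaves this function carrying
 *
 *	req->param0 = HTT_DBG_EXT_STATS_PDEV_TX;  // start offset (stats type)
 *	req->param1 = 0xFFFFFFFF;                 // bmask bits [31:0]
 *	req->param2 = 0xFFFFFFFF;                 // bmask bits [63:32]
 *	req->param3 = 0xFFFFFFFF;                 // bmask bits [95:64]
 *
 * which asks FW to reset every stat from the PDEV_TX offset onward.
 */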
8222 
8223 /**
8224  * dp_txrx_stats_request - function to map to firmware and host stats
8225  * @vdev: virtual handle
8226  * @req: stats request
8227  *
8228  * Return: QDF_STATUS
8229  */
8230 static
8231 QDF_STATUS dp_txrx_stats_request(struct cdp_vdev *vdev,
8232 				 struct cdp_txrx_stats_req *req)
8233 {
8234 	int host_stats;
8235 	int fw_stats;
8236 	enum cdp_stats stats;
8237 	int num_stats;
8238 
8239 	if (!vdev || !req) {
8240 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
8241 				"Invalid vdev/req instance");
8242 		return QDF_STATUS_E_INVAL;
8243 	}
8244 
8245 	stats = req->stats;
8246 	if (stats >= CDP_TXRX_MAX_STATS)
8247 		return QDF_STATUS_E_INVAL;
8248 
8249 	/*
8250 	 * DP_CURR_FW_STATS_AVAIL: number of FW stats currently available;
8251 	 *			has to be updated whenever new FW HTT stats are added
8252 	 */
8253 	if (stats > CDP_TXRX_STATS_HTT_MAX)
8254 		stats = stats + DP_CURR_FW_STATS_AVAIL - DP_HTT_DBG_EXT_STATS_MAX;
8255 
8256 	num_stats  = QDF_ARRAY_SIZE(dp_stats_mapping_table);
8257 
8258 	if (stats >= num_stats) {
8259 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
8260 			  "%s: Invalid stats option: %d", __func__, stats);
8261 		return QDF_STATUS_E_INVAL;
8262 	}
8263 
8264 	req->stats = stats;
8265 	fw_stats = dp_stats_mapping_table[stats][STATS_FW];
8266 	host_stats = dp_stats_mapping_table[stats][STATS_HOST];
8267 
8268 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
8269 		 "stats: %u fw_stats_type: %d host_stats: %d",
8270 		  stats, fw_stats, host_stats);
8271 
8272 	if (fw_stats != TXRX_FW_STATS_INVALID) {
8273 		/* update request with FW stats type */
8274 		req->stats = fw_stats;
8275 		return dp_fw_stats_process(vdev, req);
8276 	}
8277 
8278 	if ((host_stats != TXRX_HOST_STATS_INVALID) &&
8279 			(host_stats <= TXRX_HOST_STATS_MAX))
8280 		return dp_print_host_stats(vdev, req);
8281 	else
8282 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
8283 				"Wrong Input for TxRx Stats");
8284 
8285 	return QDF_STATUS_SUCCESS;
8286 }
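
/*
 * Illustrative sketch (hypothetical caller): a minimal request routed by the
 * dispatcher above; ids beyond CDP_TXRX_STATS_HTT_MAX are first compressed
 * by (DP_CURR_FW_STATS_AVAIL - DP_HTT_DBG_EXT_STATS_MAX) before the
 * dp_stats_mapping_table lookup.
 *
 *	struct cdp_txrx_stats_req req = {0};
 *
 *	req.stats = CDP_TXRX_STATS_1;	// maps to an FW HTT stat
 *	req.mac_id = 0;
 *	if (dp_txrx_stats_request(vdev_handle, &req) != QDF_STATUS_SUCCESS)
 *		dp_err("stats request rejected");
 */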
8287 
8288 /*
8289  * dp_print_napi_stats(): NAPI stats
8290  * @soc - soc handle
8291  */
8292 static void dp_print_napi_stats(struct dp_soc *soc)
8293 {
8294 	hif_print_napi_stats(soc->hif_handle);
8295 }
8296 
8297 /*
8298  * dp_print_per_ring_stats(): Packet count per ring
8299  * @soc - soc handle
8300  */
8301 static void dp_print_per_ring_stats(struct dp_soc *soc)
8302 {
8303 	uint8_t ring;
8304 	uint16_t core;
8305 	uint64_t total_packets;
8306 
8307 	DP_TRACE_STATS(INFO_HIGH, "Reo packets per ring:");
8308 	for (ring = 0; ring < MAX_REO_DEST_RINGS; ring++) {
8309 		total_packets = 0;
8310 		DP_TRACE_STATS(INFO_HIGH,
8311 			       "Packets on ring %u:", ring);
8312 		for (core = 0; core < NR_CPUS; core++) {
8313 			DP_TRACE_STATS(INFO_HIGH,
8314 				       "Packets arriving on core %u: %llu",
8315 				       core,
8316 				       soc->stats.rx.ring_packets[core][ring]);
8317 			total_packets += soc->stats.rx.ring_packets[core][ring];
8318 		}
8319 		DP_TRACE_STATS(INFO_HIGH,
8320 			       "Total packets on ring %u: %llu",
8321 			       ring, total_packets);
8322 	}
8323 }
8324 
8325 /*
8326  * dp_txrx_path_stats() - Function to dump Tx/Rx path statistics
8327  * @soc - soc handle
8328  *
8329  * return: none
8330  */
8331 static void dp_txrx_path_stats(struct dp_soc *soc)
8332 {
8333 	uint8_t error_code;
8334 	uint8_t loop_pdev;
8335 	struct dp_pdev *pdev;
8336 	uint8_t i;
8337 
8338 	if (!soc) {
8339 		DP_TRACE(ERROR, "%s: Invalid access",
8340 			 __func__);
8341 		return;
8342 	}
8343 
8344 	for (loop_pdev = 0; loop_pdev < soc->pdev_count; loop_pdev++) {
8345 
8346 		pdev = soc->pdev_list[loop_pdev];
8347 		dp_aggregate_pdev_stats(pdev);
8348 		DP_TRACE_STATS(INFO_HIGH, "Tx path Statistics:");
8349 		DP_TRACE_STATS(INFO_HIGH, "from stack: %u msdus (%llu bytes)",
8350 			       pdev->stats.tx_i.rcvd.num,
8351 			       pdev->stats.tx_i.rcvd.bytes);
8352 		DP_TRACE_STATS(INFO_HIGH,
8353 			       "processed from host: %u msdus (%llu bytes)",
8354 			       pdev->stats.tx_i.processed.num,
8355 			       pdev->stats.tx_i.processed.bytes);
8356 		DP_TRACE_STATS(INFO_HIGH,
8357 			       "successfully transmitted: %u msdus (%llu bytes)",
8358 			       pdev->stats.tx.tx_success.num,
8359 			       pdev->stats.tx.tx_success.bytes);
8360 
8361 		DP_TRACE_STATS(INFO_HIGH, "Dropped in host:");
8362 		DP_TRACE_STATS(INFO_HIGH, "Total packets dropped: %u,",
8363 			       pdev->stats.tx_i.dropped.dropped_pkt.num);
8364 		DP_TRACE_STATS(INFO_HIGH, "Descriptor not available: %u",
8365 			       pdev->stats.tx_i.dropped.desc_na.num);
8366 		DP_TRACE_STATS(INFO_HIGH, "Ring full: %u",
8367 			       pdev->stats.tx_i.dropped.ring_full);
8368 		DP_TRACE_STATS(INFO_HIGH, "Enqueue fail: %u",
8369 			       pdev->stats.tx_i.dropped.enqueue_fail);
8370 		DP_TRACE_STATS(INFO_HIGH, "DMA Error: %u",
8371 			       pdev->stats.tx_i.dropped.dma_error);
8372 
8373 		DP_TRACE_STATS(INFO_HIGH, "Dropped in hardware:");
8374 		DP_TRACE_STATS(INFO_HIGH, "total packets dropped: %u",
8375 			       pdev->stats.tx.tx_failed);
8376 		DP_TRACE_STATS(INFO_HIGH, "mpdu age out: %u",
8377 			       pdev->stats.tx.dropped.age_out);
8378 		DP_TRACE_STATS(INFO_HIGH, "firmware removed packets: %u",
8379 			       pdev->stats.tx.dropped.fw_rem.num);
8380 		DP_TRACE_STATS(INFO_HIGH, "firmware removed bytes: %llu",
8381 			       pdev->stats.tx.dropped.fw_rem.bytes);
8382 		DP_TRACE_STATS(INFO_HIGH, "firmware removed tx: %u",
8383 			       pdev->stats.tx.dropped.fw_rem_tx);
8384 		DP_TRACE_STATS(INFO_HIGH, "firmware removed notx: %u",
8385 			       pdev->stats.tx.dropped.fw_rem_notx);
8386 		DP_TRACE_STATS(INFO_HIGH, "Invalid peer on tx path: %u",
8387 			       pdev->soc->stats.tx.tx_invalid_peer.num);
8388 
8389 		DP_TRACE_STATS(INFO_HIGH, "Tx packets sent per interrupt:");
8390 		DP_TRACE_STATS(INFO_HIGH, "Single Packet: %u",
8391 			       pdev->stats.tx_comp_histogram.pkts_1);
8392 		DP_TRACE_STATS(INFO_HIGH, "2-20 Packets:  %u",
8393 			       pdev->stats.tx_comp_histogram.pkts_2_20);
8394 		DP_TRACE_STATS(INFO_HIGH, "21-40 Packets: %u",
8395 			       pdev->stats.tx_comp_histogram.pkts_21_40);
8396 		DP_TRACE_STATS(INFO_HIGH, "41-60 Packets: %u",
8397 			       pdev->stats.tx_comp_histogram.pkts_41_60);
8398 		DP_TRACE_STATS(INFO_HIGH, "61-80 Packets: %u",
8399 			       pdev->stats.tx_comp_histogram.pkts_61_80);
8400 		DP_TRACE_STATS(INFO_HIGH, "81-100 Packets: %u",
8401 			       pdev->stats.tx_comp_histogram.pkts_81_100);
8402 		DP_TRACE_STATS(INFO_HIGH, "101-200 Packets: %u",
8403 			       pdev->stats.tx_comp_histogram.pkts_101_200);
8404 		DP_TRACE_STATS(INFO_HIGH, "   201+ Packets: %u",
8405 			       pdev->stats.tx_comp_histogram.pkts_201_plus);
8406 
8407 		DP_TRACE_STATS(INFO_HIGH, "Rx path statistics");
8408 
8409 		DP_TRACE_STATS(INFO_HIGH,
8410 			       "delivered %u msdus ( %llu bytes),",
8411 			       pdev->stats.rx.to_stack.num,
8412 			       pdev->stats.rx.to_stack.bytes);
8413 		for (i = 0; i <  CDP_MAX_RX_RINGS; i++)
8414 			DP_TRACE_STATS(INFO_HIGH,
8415 				       "received on reo[%d] %u msdus( %llu bytes),",
8416 				       i, pdev->stats.rx.rcvd_reo[i].num,
8417 				       pdev->stats.rx.rcvd_reo[i].bytes);
8418 		DP_TRACE_STATS(INFO_HIGH,
8419 			       "intra-bss packets %u msdus ( %llu bytes),",
8420 			       pdev->stats.rx.intra_bss.pkts.num,
8421 			       pdev->stats.rx.intra_bss.pkts.bytes);
8422 		DP_TRACE_STATS(INFO_HIGH,
8423 			       "intra-bss fails %u msdus ( %llu bytes),",
8424 			       pdev->stats.rx.intra_bss.fail.num,
8425 			       pdev->stats.rx.intra_bss.fail.bytes);
8426 		DP_TRACE_STATS(INFO_HIGH,
8427 			       "raw packets %u msdus ( %llu bytes),",
8428 			       pdev->stats.rx.raw.num,
8429 			       pdev->stats.rx.raw.bytes);
8430 		DP_TRACE_STATS(INFO_HIGH, "dropped: mic errors %u msdus",
8431 			       pdev->stats.rx.err.mic_err);
8432 		DP_TRACE_STATS(INFO_HIGH, "Invalid peer on rx path: %u",
8433 			       pdev->soc->stats.rx.err.rx_invalid_peer.num);
8434 
8435 		DP_TRACE_STATS(INFO_HIGH, "Reo Statistics");
8436 		DP_TRACE_STATS(INFO_HIGH, "rbm error: %u msdus",
8437 			       pdev->soc->stats.rx.err.invalid_rbm);
8438 		DP_TRACE_STATS(INFO_HIGH, "hal ring access fail: %u msdus",
8439 			       pdev->soc->stats.rx.err.hal_ring_access_fail);
8440 
8441 		for (error_code = 0; error_code < HAL_REO_ERR_MAX;
8442 				error_code++) {
8443 			if (!pdev->soc->stats.rx.err.reo_error[error_code])
8444 				continue;
8445 			DP_TRACE_STATS(INFO_HIGH,
8446 				       "Reo error number (%u): %u msdus",
8447 				       error_code,
8448 				       pdev->soc->stats.rx.err
8449 				       .reo_error[error_code]);
8450 		}
8451 
8452 		for (error_code = 0; error_code < HAL_RXDMA_ERR_MAX;
8453 				error_code++) {
8454 			if (!pdev->soc->stats.rx.err.rxdma_error[error_code])
8455 				continue;
8456 			DP_TRACE_STATS(INFO_HIGH,
8457 				       "Rxdma error number (%u): %u msdus",
8458 				       error_code,
8459 				       pdev->soc->stats.rx.err
8460 				       .rxdma_error[error_code]);
8461 		}
8462 
8463 		DP_TRACE_STATS(INFO_HIGH, "Rx packets reaped per interrupt:");
8464 		DP_TRACE_STATS(INFO_HIGH, "Single Packet: %u",
8465 			       pdev->stats.rx_ind_histogram.pkts_1);
8466 		DP_TRACE_STATS(INFO_HIGH, "2-20 Packets:  %u",
8467 			       pdev->stats.rx_ind_histogram.pkts_2_20);
8468 		DP_TRACE_STATS(INFO_HIGH, "21-40 Packets: %u",
8469 			       pdev->stats.rx_ind_histogram.pkts_21_40);
8470 		DP_TRACE_STATS(INFO_HIGH, "41-60 Packets: %u",
8471 			       pdev->stats.rx_ind_histogram.pkts_41_60);
8472 		DP_TRACE_STATS(INFO_HIGH, "61-80 Packets: %u",
8473 			       pdev->stats.rx_ind_histogram.pkts_61_80);
8474 		DP_TRACE_STATS(INFO_HIGH, "81-100 Packets: %u",
8475 			       pdev->stats.rx_ind_histogram.pkts_81_100);
8476 		DP_TRACE_STATS(INFO_HIGH, "101-200 Packets: %u",
8477 			       pdev->stats.rx_ind_histogram.pkts_101_200);
8478 		DP_TRACE_STATS(INFO_HIGH, "   201+ Packets: %u",
8479 			       pdev->stats.rx_ind_histogram.pkts_201_plus);
8480 
8481 		DP_TRACE_STATS(INFO_HIGH, "%s: tso_enable: %u lro_enable: %u rx_hash: %u napi_enable: %u",
8482 			       __func__,
8483 			       pdev->soc->wlan_cfg_ctx
8484 			       ->tso_enabled,
8485 			       pdev->soc->wlan_cfg_ctx
8486 			       ->lro_enabled,
8487 			       pdev->soc->wlan_cfg_ctx
8488 			       ->rx_hash,
8489 			       pdev->soc->wlan_cfg_ctx
8490 			       ->napi_enabled);
8491 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
8492 		DP_TRACE_STATS(INFO_HIGH, "%s: Tx flow stop queue: %u tx flow start queue offset: %u",
8493 			       __func__,
8494 			       pdev->soc->wlan_cfg_ctx
8495 			       ->tx_flow_stop_queue_threshold,
8496 			       pdev->soc->wlan_cfg_ctx
8497 			       ->tx_flow_start_queue_offset);
8498 #endif
8499 	}
8500 }
8501 
8502 /*
8503  * dp_txrx_dump_stats() - Dump statistics
8504  * @value - Statistics option
8505  */
8506 static QDF_STATUS dp_txrx_dump_stats(void *psoc, uint16_t value,
8507 				     enum qdf_stats_verbosity_level level)
8508 {
8509 	struct dp_soc *soc =
8510 		(struct dp_soc *)psoc;
8511 	QDF_STATUS status = QDF_STATUS_SUCCESS;
8512 
8513 	if (!soc) {
8514 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
8515 			"%s: soc is NULL", __func__);
8516 		return QDF_STATUS_E_INVAL;
8517 	}
8518 
8519 	switch (value) {
8520 	case CDP_TXRX_PATH_STATS:
8521 		dp_txrx_path_stats(soc);
8522 		break;
8523 
8524 	case CDP_RX_RING_STATS:
8525 		dp_print_per_ring_stats(soc);
8526 		break;
8527 
8528 	case CDP_TXRX_TSO_STATS:
8529 		/* TODO: NOT IMPLEMENTED */
8530 		break;
8531 
8532 	case CDP_DUMP_TX_FLOW_POOL_INFO:
8533 		cdp_dump_flow_pool_info((struct cdp_soc_t *)soc);
8534 		break;
8535 
8536 	case CDP_DP_NAPI_STATS:
8537 		dp_print_napi_stats(soc);
8538 		break;
8539 
8540 	case CDP_TXRX_DESC_STATS:
8541 		/* TODO: NOT IMPLEMENTED */
8542 		break;
8543 
8544 	default:
8545 		status = QDF_STATUS_E_INVAL;
8546 		break;
8547 	}
8548 
8549 	return status;
8550 
8551 }
8552 
8553 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
8554 /**
8555  * dp_update_flow_control_parameters() - API to store datapath
8556  *                            config parameters
8557  * @soc: soc handle
8558  * @params: ini parameter handle
8559  *
8560  * Return: void
8561  */
8562 static inline
8563 void dp_update_flow_control_parameters(struct dp_soc *soc,
8564 				struct cdp_config_params *params)
8565 {
8566 	soc->wlan_cfg_ctx->tx_flow_stop_queue_threshold =
8567 					params->tx_flow_stop_queue_threshold;
8568 	soc->wlan_cfg_ctx->tx_flow_start_queue_offset =
8569 					params->tx_flow_start_queue_offset;
8570 }
8571 #else
8572 static inline
8573 void dp_update_flow_control_parameters(struct dp_soc *soc,
8574 				struct cdp_config_params *params)
8575 {
8576 }
8577 #endif
8578 
8579 /**
8580  * dp_update_config_parameters() - API to store datapath
8581  *                            config parameters
8582  * @psoc: soc handle
8583  * @params: ini parameter handle
8584  *
8585  * Return: status
8586  */
8587 static
8588 QDF_STATUS dp_update_config_parameters(struct cdp_soc *psoc,
8589 				struct cdp_config_params *params)
8590 {
8591 	struct dp_soc *soc = (struct dp_soc *)psoc;
8592 
8593 	if (!(soc)) {
8594 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
8595 				"%s: Invalid handle", __func__);
8596 		return QDF_STATUS_E_INVAL;
8597 	}
8598 
8599 	soc->wlan_cfg_ctx->tso_enabled = params->tso_enable;
8600 	soc->wlan_cfg_ctx->lro_enabled = params->lro_enable;
8601 	soc->wlan_cfg_ctx->rx_hash = params->flow_steering_enable;
8602 	soc->wlan_cfg_ctx->tcp_udp_checksumoffload =
8603 				params->tcp_udp_checksumoffload;
8604 	soc->wlan_cfg_ctx->napi_enabled = params->napi_enable;
8605 	soc->wlan_cfg_ctx->ipa_enabled = params->ipa_enable;
8606 	soc->wlan_cfg_ctx->gro_enabled = params->gro_enable;
8607 
8608 	dp_update_flow_control_parameters(soc, params);
8609 
8610 	return QDF_STATUS_SUCCESS;
8611 }
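
/*
 * Illustrative sketch (hypothetical caller): the attach path hands the
 * ini-derived knobs down in one cdp_config_params block, which the function
 * above copies into wlan_cfg_ctx (plus the flow-control fields when
 * QCA_LL_TX_FLOW_CONTROL_V2 is compiled in).
 *
 *	struct cdp_config_params params = {0};
 *
 *	params.tso_enable = 1;
 *	params.napi_enable = 1;
 *	params.tcp_udp_checksumoffload = 1;
 *	dp_update_config_parameters((struct cdp_soc *)soc, &params);
 */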
8612 
8613 /**
8614  * dp_txrx_set_wds_rx_policy() - API to set the WDS rx filter
8615  *                            policy for a vdev
8616  * @vdev_handle - datapath vdev handle
8617  * @val - WDS rx policy bitmap
8618  *
8619  * Return: void
8620  */
8621 #ifdef WDS_VENDOR_EXTENSION
8622 void
8623 dp_txrx_set_wds_rx_policy(
8624 		struct cdp_vdev *vdev_handle,
8625 		u_int32_t val)
8626 {
8627 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
8628 	struct dp_peer *peer;
8629 	if (vdev->opmode == wlan_op_mode_ap) {
8630 		/* for ap, set it on bss_peer */
8631 		TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
8632 			if (peer->bss_peer) {
8633 				peer->wds_ecm.wds_rx_filter = 1;
8634 				peer->wds_ecm.wds_rx_ucast_4addr = (val & WDS_POLICY_RX_UCAST_4ADDR) ? 1:0;
8635 				peer->wds_ecm.wds_rx_mcast_4addr = (val & WDS_POLICY_RX_MCAST_4ADDR) ? 1:0;
8636 				break;
8637 			}
8638 		}
8639 	} else if (vdev->opmode == wlan_op_mode_sta) {
8640 		peer = TAILQ_FIRST(&vdev->peer_list);
8641 		peer->wds_ecm.wds_rx_filter = 1;
8642 		peer->wds_ecm.wds_rx_ucast_4addr = (val & WDS_POLICY_RX_UCAST_4ADDR) ? 1:0;
8643 		peer->wds_ecm.wds_rx_mcast_4addr = (val & WDS_POLICY_RX_MCAST_4ADDR) ? 1:0;
8644 	}
8645 }
8646 
8647 /**
8648  * dp_txrx_peer_wds_tx_policy_update() - API to set tx wds policy
8649  *
8650  * @peer_handle - datapath peer handle
8651  * @wds_tx_ucast: policy for unicast transmission
8652  * @wds_tx_mcast: policy for multicast transmission
8653  *
8654  * Return: void
8655  */
8656 void
8657 dp_txrx_peer_wds_tx_policy_update(struct cdp_peer *peer_handle,
8658 		int wds_tx_ucast, int wds_tx_mcast)
8659 {
8660 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
8661 	if (wds_tx_ucast || wds_tx_mcast) {
8662 		peer->wds_enabled = 1;
8663 		peer->wds_ecm.wds_tx_ucast_4addr = wds_tx_ucast;
8664 		peer->wds_ecm.wds_tx_mcast_4addr = wds_tx_mcast;
8665 	} else {
8666 		peer->wds_enabled = 0;
8667 		peer->wds_ecm.wds_tx_ucast_4addr = 0;
8668 		peer->wds_ecm.wds_tx_mcast_4addr = 0;
8669 	}
8670 
8671 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
8672 		  FL("Policy Update set to: peer->wds_enabled %d "
8673 		     "peer->wds_ecm.wds_tx_ucast_4addr %d "
8674 		     "peer->wds_ecm.wds_tx_mcast_4addr %d"),
8675 		  peer->wds_enabled, peer->wds_ecm.wds_tx_ucast_4addr,
8676 		  peer->wds_ecm.wds_tx_mcast_4addr);
8679 }
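
/*
 * Illustrative usage sketch (not driver code): any non-zero policy
 * marks the peer WDS-enabled; passing 0 for both disables WDS again:
 *
 *	dp_txrx_peer_wds_tx_policy_update(peer_handle, 1, 1);
 *	...
 *	dp_txrx_peer_wds_tx_policy_update(peer_handle, 0, 0);
 */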
8680 #endif
8681 
8682 static struct cdp_wds_ops dp_ops_wds = {
8683 	.vdev_set_wds = dp_vdev_set_wds,
8684 #ifdef WDS_VENDOR_EXTENSION
8685 	.txrx_set_wds_rx_policy = dp_txrx_set_wds_rx_policy,
8686 	.txrx_wds_peer_tx_policy_update = dp_txrx_peer_wds_tx_policy_update,
8687 #endif
8688 };
8689 
8690 /**
8691  * dp_txrx_data_tx_cb_set() - set the callback for non standard tx
8692  * @vdev_handle: datapath vdev handle
8693  * @callback: callback function
8694  * @ctxt: callback context
8695  *
 * Return: void
8696  */
8697 static void
8698 dp_txrx_data_tx_cb_set(struct cdp_vdev *vdev_handle,
8699 		       ol_txrx_data_tx_cb callback, void *ctxt)
8700 {
8701 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
8702 
8703 	vdev->tx_non_std_data_callback.func = callback;
8704 	vdev->tx_non_std_data_callback.ctxt = ctxt;
8705 }
8706 
8707 /**
8708  * dp_pdev_get_dp_txrx_handle() - get dp handle from pdev
8709  * @pdev_hdl: datapath pdev handle
8710  *
8711  * Return: opaque pointer to dp txrx handle
8712  */
8713 static void *dp_pdev_get_dp_txrx_handle(struct cdp_pdev *pdev_hdl)
8714 {
8715 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_hdl;
8716 
8717 	return pdev->dp_txrx_handle;
8718 }
8719 
8720 /**
8721  * dp_pdev_set_dp_txrx_handle() - set dp handle in pdev
8722  * @pdev_hdl: datapath pdev handle
8723  * @dp_txrx_hdl: opaque pointer for dp_txrx_handle
8724  *
8725  * Return: void
8726  */
8727 static void
8728 dp_pdev_set_dp_txrx_handle(struct cdp_pdev *pdev_hdl, void *dp_txrx_hdl)
8729 {
8730 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_hdl;
8731 
8732 	pdev->dp_txrx_handle = dp_txrx_hdl;
8733 }
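
/*
 * Illustrative usage sketch (not driver code): the handle is an opaque
 * cookie owned by the caller; DP only stores and returns it. Here
 * `my_ctx` is a hypothetical caller-side context structure:
 *
 *	dp_pdev_set_dp_txrx_handle(pdev_hdl, my_ctx);
 *	...
 *	struct my_ctx *ctx = dp_pdev_get_dp_txrx_handle(pdev_hdl);
 */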
8734 
8735 /**
8736  * dp_soc_get_dp_txrx_handle() - get context for external-dp from dp soc
8737  * @soc_handle: datapath soc handle
8738  *
8739  * Return: opaque pointer to external dp (non-core DP)
8740  */
8741 static void *dp_soc_get_dp_txrx_handle(struct cdp_soc *soc_handle)
8742 {
8743 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
8744 
8745 	return soc->external_txrx_handle;
8746 }
8747 
8748 /**
8749  * dp_soc_set_dp_txrx_handle() - set external dp handle in soc
8750  * @soc_handle: datapath soc handle
8751  * @txrx_handle: opaque pointer to external dp (non-core DP)
8752  *
8753  * Return: void
8754  */
8755 static void
8756 dp_soc_set_dp_txrx_handle(struct cdp_soc *soc_handle, void *txrx_handle)
8757 {
8758 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
8759 
8760 	soc->external_txrx_handle = txrx_handle;
8761 }
8762 
8763 /**
8764  * dp_get_cfg_capabilities() - get dp capabilities
8765  * @soc_handle: datapath soc handle
8766  * @dp_caps: enum for dp capabilities
8767  *
8768  * Return: bool to determine if dp caps is enabled
8769  */
8770 static bool
8771 dp_get_cfg_capabilities(struct cdp_soc_t *soc_handle,
8772 			enum cdp_capabilities dp_caps)
8773 {
8774 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
8775 
8776 	return wlan_cfg_get_dp_caps(soc->wlan_cfg_ctx, dp_caps);
8777 }
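
/*
 * Illustrative usage sketch (not driver code): callers test one
 * enum cdp_capabilities value at a time; the member shown here is only
 * an assumed example:
 *
 *	if (dp_get_cfg_capabilities(soc_handle, CDP_CFG_DP_TSO))
 *		... enable the TSO transmit path ...
 */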
8778 
8779 #ifdef FEATURE_AST
8780 static void dp_peer_teardown_wifi3(struct cdp_vdev *vdev_hdl, void *peer_hdl)
8781 {
8782 	struct dp_vdev *vdev = (struct dp_vdev *) vdev_hdl;
8783 	struct dp_peer *peer = (struct dp_peer *) peer_hdl;
8784 	struct dp_soc *soc = (struct dp_soc *) vdev->pdev->soc;
8785 
8786 	/*
8787 	 * For a BSS peer, a new peer is not created on alloc_node if a
8788 	 * peer with the same address already exists; instead the refcnt
8789 	 * of the existing peer is increased. Correspondingly, in the
8790 	 * delete path only the refcnt is decreased, and the peer is freed
8791 	 * only once all references are released. So delete_in_progress
8792 	 * should not be set for the bss_peer unless only 2 references
8793 	 * remain (the peer map reference and the peer hash table reference).
8794 	 */
8795 	if (peer->bss_peer && (qdf_atomic_read(&peer->ref_cnt) > 2))
8796 		return;
8798 
8799 	peer->delete_in_progress = true;
8800 	dp_peer_delete_ast_entries(soc, peer);
8801 }
8802 #endif
8803 
8804 #ifdef ATH_SUPPORT_NAC_RSSI
8805 /**
8806  * dp_vdev_get_neighbour_rssi() - Get stored RSSI for a configured NAC
8807  * @vdev_hdl: DP vdev handle
 * @mac_addr: neighbour peer mac address
8808  * @rssi: pointer to hold the rssi value
8809  *
8810  * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_FAILURE otherwise
8811  */
8812 static QDF_STATUS dp_vdev_get_neighbour_rssi(struct cdp_vdev *vdev_hdl,
8813 					      char *mac_addr,
8814 					      uint8_t *rssi)
8815 {
8816 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_hdl;
8817 	struct dp_pdev *pdev = vdev->pdev;
8818 	struct dp_neighbour_peer *peer = NULL;
8819 	QDF_STATUS status = QDF_STATUS_E_FAILURE;
8820 
8821 	*rssi = 0;
8822 	qdf_spin_lock_bh(&pdev->neighbour_peer_mutex);
8823 	TAILQ_FOREACH(peer, &pdev->neighbour_peers_list,
8824 		      neighbour_peer_list_elem) {
8825 		if (qdf_mem_cmp(&peer->neighbour_peers_macaddr.raw[0],
8826 				mac_addr, DP_MAC_ADDR_LEN) == 0) {
8827 			*rssi = peer->rssi;
8828 			status = QDF_STATUS_SUCCESS;
8829 			break;
8830 		}
8831 	}
8832 	qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex);
8833 	return status;
8834 }
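
/*
 * Illustrative usage sketch (not driver code): *rssi is zeroed when no
 * entry matches, so the status must be checked; report_rssi() is a
 * hypothetical consumer:
 *
 *	uint8_t rssi;
 *
 *	if (dp_vdev_get_neighbour_rssi(vdev_hdl, mac, &rssi) ==
 *	    QDF_STATUS_SUCCESS)
 *		report_rssi(mac, rssi);
 */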
8835 
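/**
 * dp_config_for_nac_rssi() - configure NAC RSSI measurement for a client
 * @vdev_handle: DP vdev handle
 * @cmd: add/del command for the neighbour entry
 * @bssid: BSSID to be configured in FW for RSSI measurement
 * @client_macaddr: neighbour client mac address
 * @chan_num: channel number (unused in the host path below)
 *
 * Return: QDF_STATUS
 */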
8836 static QDF_STATUS dp_config_for_nac_rssi(struct cdp_vdev *vdev_handle,
8837 		enum cdp_nac_param_cmd cmd, char *bssid, char *client_macaddr,
8838 		uint8_t chan_num)
8839 {
8841 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
8842 	struct dp_pdev *pdev = vdev->pdev;
8843 	struct dp_soc *soc = pdev->soc;
8844 
8845 	pdev->nac_rssi_filtering = 1;
8846 	/* Store address of NAC (neighbour peer) which will be checked
8847 	 * against TA of received packets.
8848 	 */
8850 	if (cmd == CDP_NAC_PARAM_ADD) {
8851 		dp_update_filter_neighbour_peers(vdev_handle, DP_NAC_PARAM_ADD,
8852 						 client_macaddr);
8853 	} else if (cmd == CDP_NAC_PARAM_DEL) {
8854 		dp_update_filter_neighbour_peers(vdev_handle,
8855 						 DP_NAC_PARAM_DEL,
8856 						 client_macaddr);
8857 	}
8858 
8859 	if (soc->cdp_soc.ol_ops->config_bssid_in_fw_for_nac_rssi)
8860 		soc->cdp_soc.ol_ops->config_bssid_in_fw_for_nac_rssi
8861 			((void *)vdev->pdev->ctrl_pdev,
8862 			 vdev->vdev_id, cmd, bssid);
8863 
8864 	return QDF_STATUS_SUCCESS;
8865 }
8866 #endif
8867 
8868 /**
8869  * dp_enable_peer_based_pktlog() - Set Flag for peer based filtering
8870  * for pktlog
8871  * @txrx_pdev_handle: cdp_pdev handle
 * @mac_addr: mac address of the peer whose traffic is filtered
8872  * @enb_dsb: Enable or disable peer based filtering
8873  *
8874  * Return: QDF_STATUS
8875  */
8876 static int
8877 dp_enable_peer_based_pktlog(
8878 	struct cdp_pdev *txrx_pdev_handle,
8879 	char *mac_addr, uint8_t enb_dsb)
8880 {
8881 	struct dp_peer *peer;
8882 	uint8_t local_id;
8883 	struct dp_pdev *pdev = (struct dp_pdev *)txrx_pdev_handle;
8884 
8885 	peer = (struct dp_peer *)dp_find_peer_by_addr(txrx_pdev_handle,
8886 			mac_addr, &local_id);
8887 
8888 	if (!peer) {
8889 		dp_err("Invalid Peer");
8890 		return QDF_STATUS_E_FAILURE;
8891 	}
8892 
8893 	peer->peer_based_pktlog_filter = enb_dsb;
8894 	pdev->dp_peer_based_pktlog = enb_dsb;
8895 
8896 	return QDF_STATUS_SUCCESS;
8897 }
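
/*
 * Illustrative usage sketch (not driver code): enable peer-based pktlog
 * filtering for one peer; a later call with enb_dsb = 0 and the same
 * MAC disables it:
 *
 *	if (dp_enable_peer_based_pktlog(pdev_hdl, peer_mac, 1) !=
 *	    QDF_STATUS_SUCCESS)
 *		dp_err("peer not found for pktlog filter");
 */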
8898 
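/**
 * dp_peer_map_attach_wifi3() - store peer map parameters and attach the
 * peer find data structures
 * @soc_hdl: datapath soc handle
 * @max_peers: maximum number of peers supported
 * @peer_map_unmap_v2: flag whether FW uses the peer map/unmap v2 format
 *
 * Return: QDF_STATUS
 */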
8899 static QDF_STATUS dp_peer_map_attach_wifi3(struct cdp_soc_t *soc_hdl,
8900 					   uint32_t max_peers,
8901 					   bool peer_map_unmap_v2)
8902 {
8903 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
8904 
8905 	soc->max_peers = max_peers;
8906 
8907 	qdf_print("%s max_peers %u\n", __func__, max_peers);
8908 
8909 	if (dp_peer_find_attach(soc))
8910 		return QDF_STATUS_E_FAILURE;
8911 
8912 	soc->is_peer_map_unmap_v2 = peer_map_unmap_v2;
8913 
8914 	return QDF_STATUS_SUCCESS;
8915 }
8916 
8917 /**
8918  * dp_pdev_set_ctrl_pdev() - set ctrl pdev handle in dp pdev
8919  * @dp_pdev: dp pdev handle
8920  * @ctrl_pdev: UMAC ctrl pdev handle
8921  *
8922  * Return: void
8923  */
8924 static void dp_pdev_set_ctrl_pdev(struct cdp_pdev *dp_pdev,
8925 				  struct cdp_ctrl_objmgr_pdev *ctrl_pdev)
8926 {
8927 	struct dp_pdev *pdev = (struct dp_pdev *)dp_pdev;
8928 
8929 	pdev->ctrl_pdev = ctrl_pdev;
8930 }
8931 
8932 /*
8933  * dp_get_cfg() - get dp cfg
8934  * @soc: cdp soc handle
8935  * @cfg: cfg enum
8936  *
8937  * Return: cfg value
8938  */
8939 static uint32_t dp_get_cfg(void *soc, enum cdp_dp_cfg cfg)
8940 {
8941 	struct dp_soc *dpsoc = (struct dp_soc *)soc;
8942 	uint32_t value = 0;
8943 
8944 	switch (cfg) {
8945 	case cfg_dp_enable_data_stall:
8946 		value = dpsoc->wlan_cfg_ctx->enable_data_stall_detection;
8947 		break;
8948 	case cfg_dp_enable_ip_tcp_udp_checksum_offload:
8949 		value = dpsoc->wlan_cfg_ctx->tcp_udp_checksumoffload;
8950 		break;
8951 	case cfg_dp_tso_enable:
8952 		value = dpsoc->wlan_cfg_ctx->tso_enabled;
8953 		break;
8954 	case cfg_dp_lro_enable:
8955 		value = dpsoc->wlan_cfg_ctx->lro_enabled;
8956 		break;
8957 	case cfg_dp_gro_enable:
8958 		value = dpsoc->wlan_cfg_ctx->gro_enabled;
8959 		break;
8960 	case cfg_dp_tx_flow_start_queue_offset:
8961 		value = dpsoc->wlan_cfg_ctx->tx_flow_start_queue_offset;
8962 		break;
8963 	case cfg_dp_tx_flow_stop_queue_threshold:
8964 		value = dpsoc->wlan_cfg_ctx->tx_flow_stop_queue_threshold;
8965 		break;
8966 	case cfg_dp_disable_intra_bss_fwd:
8967 		value = dpsoc->wlan_cfg_ctx->disable_intra_bss_fwd;
8968 		break;
8969 	default:
8970 		value = 0;
8971 	}
8972 
8973 	return value;
8974 }
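
/*
 * Illustrative usage sketch (not driver code): dp_get_cfg() gives
 * external modules read access to individual wlan_cfg fields without
 * exposing the structure itself:
 *
 *	uint32_t fwd_disabled;
 *
 *	fwd_disabled = dp_get_cfg(soc, cfg_dp_disable_intra_bss_fwd);
 */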
8975 
8976 static struct cdp_cmn_ops dp_ops_cmn = {
8977 	.txrx_soc_attach_target = dp_soc_attach_target_wifi3,
8978 	.txrx_vdev_attach = dp_vdev_attach_wifi3,
8979 	.txrx_vdev_detach = dp_vdev_detach_wifi3,
8980 	.txrx_pdev_attach = dp_pdev_attach_wifi3,
8981 	.txrx_pdev_detach = dp_pdev_detach_wifi3,
8982 	.txrx_pdev_deinit = dp_pdev_deinit_wifi3,
8983 	.txrx_peer_create = dp_peer_create_wifi3,
8984 	.txrx_peer_setup = dp_peer_setup_wifi3,
8985 #ifdef FEATURE_AST
8986 	.txrx_peer_teardown = dp_peer_teardown_wifi3,
8987 #else
8988 	.txrx_peer_teardown = NULL,
8989 #endif
8990 	.txrx_peer_add_ast = dp_peer_add_ast_wifi3,
8991 	.txrx_peer_update_ast = dp_peer_update_ast_wifi3,
8992 	.txrx_peer_get_ast_info_by_soc = dp_peer_get_ast_info_by_soc_wifi3,
8993 	.txrx_peer_get_ast_info_by_pdev =
8994 		dp_peer_get_ast_info_by_pdevid_wifi3,
8995 	.txrx_peer_ast_delete_by_soc =
8996 		dp_peer_ast_entry_del_by_soc,
8997 	.txrx_peer_ast_delete_by_pdev =
8998 		dp_peer_ast_entry_del_by_pdev,
8999 	.txrx_peer_delete = dp_peer_delete_wifi3,
9000 	.txrx_vdev_register = dp_vdev_register_wifi3,
9001 	.txrx_soc_detach = dp_soc_detach_wifi3,
9002 	.txrx_soc_deinit = dp_soc_deinit_wifi3,
9003 	.txrx_soc_init = dp_soc_init_wifi3,
9004 	.txrx_tso_soc_attach = dp_tso_soc_attach,
9005 	.txrx_tso_soc_detach = dp_tso_soc_detach,
9006 	.txrx_get_vdev_mac_addr = dp_get_vdev_mac_addr_wifi3,
9007 	.txrx_get_vdev_from_vdev_id = dp_get_vdev_from_vdev_id_wifi3,
9008 	.txrx_get_mon_vdev_from_pdev = dp_get_mon_vdev_from_pdev_wifi3,
9009 	.txrx_get_ctrl_pdev_from_vdev = dp_get_ctrl_pdev_from_vdev_wifi3,
9010 	.txrx_ath_getstats = dp_get_device_stats,
9011 	.addba_requestprocess = dp_addba_requestprocess_wifi3,
9012 	.addba_responsesetup = dp_addba_responsesetup_wifi3,
9013 	.addba_resp_tx_completion = dp_addba_resp_tx_completion_wifi3,
9014 	.delba_process = dp_delba_process_wifi3,
9015 	.set_addba_response = dp_set_addba_response,
9016 	.get_peer_mac_addr_frm_id = dp_get_peer_mac_addr_frm_id,
9017 	.flush_cache_rx_queue = NULL,
9018 	/* TODO: get APIs for dscp-tid need to be added */
9019 	.set_vdev_dscp_tid_map = dp_set_vdev_dscp_tid_map_wifi3,
9020 	.set_pdev_dscp_tid_map = dp_set_pdev_dscp_tid_map_wifi3,
9021 	.hmmc_tid_override_en = dp_hmmc_tid_override_en_wifi3,
9022 	.set_hmmc_tid_val = dp_set_hmmc_tid_val_wifi3,
9023 	.txrx_get_total_per = dp_get_total_per,
9024 	.txrx_stats_request = dp_txrx_stats_request,
9025 	.txrx_set_monitor_mode = dp_vdev_set_monitor_mode,
9026 	.txrx_get_pdev_id_frm_pdev = dp_get_pdev_id_frm_pdev,
9027 	.txrx_get_vow_config_frm_pdev = NULL,
9028 	.txrx_pdev_set_chan_noise_floor = dp_pdev_set_chan_noise_floor,
9029 	.txrx_set_nac = dp_set_nac,
9030 	.txrx_get_tx_pending = dp_get_tx_pending,
9031 	.txrx_set_pdev_tx_capture = dp_config_debug_sniffer,
9032 	.txrx_get_peer_mac_from_peer_id = dp_get_peer_mac_from_peer_id,
9033 	.display_stats = dp_txrx_dump_stats,
9034 	.txrx_soc_set_nss_cfg = dp_soc_set_nss_cfg_wifi3,
9035 	.txrx_soc_get_nss_cfg = dp_soc_get_nss_cfg_wifi3,
9036 	.txrx_intr_attach = dp_soc_interrupt_attach_wrapper,
9037 	.txrx_intr_detach = dp_soc_interrupt_detach,
9038 	.set_pn_check = dp_set_pn_check_wifi3,
9039 	.update_config_parameters = dp_update_config_parameters,
9040 	/* TODO: Add other functions */
9041 	.txrx_data_tx_cb_set = dp_txrx_data_tx_cb_set,
9042 	.get_dp_txrx_handle = dp_pdev_get_dp_txrx_handle,
9043 	.set_dp_txrx_handle = dp_pdev_set_dp_txrx_handle,
9044 	.get_soc_dp_txrx_handle = dp_soc_get_dp_txrx_handle,
9045 	.set_soc_dp_txrx_handle = dp_soc_set_dp_txrx_handle,
9046 	.txrx_set_ba_aging_timeout = dp_set_ba_aging_timeout,
9047 	.txrx_get_ba_aging_timeout = dp_get_ba_aging_timeout,
9048 	.tx_send = dp_tx_send,
9049 	.txrx_peer_reset_ast = dp_wds_reset_ast_wifi3,
9050 	.txrx_peer_reset_ast_table = dp_wds_reset_ast_table_wifi3,
9051 	.txrx_peer_flush_ast_table = dp_wds_flush_ast_table_wifi3,
9052 	.txrx_peer_map_attach = dp_peer_map_attach_wifi3,
9053 	.txrx_pdev_set_ctrl_pdev = dp_pdev_set_ctrl_pdev,
9054 	.txrx_get_os_rx_handles_from_vdev =
9055 					dp_get_os_rx_handles_from_vdev_wifi3,
9056 	.delba_tx_completion = dp_delba_tx_completion_wifi3,
9057 	.get_dp_capabilities = dp_get_cfg_capabilities,
9058 	.txrx_get_cfg = dp_get_cfg,
9059 };
9060 
9061 static struct cdp_ctrl_ops dp_ops_ctrl = {
9062 	.txrx_peer_authorize = dp_peer_authorize,
9063 	.txrx_set_vdev_rx_decap_type = dp_set_vdev_rx_decap_type,
9064 	.txrx_set_tx_encap_type = dp_set_vdev_tx_encap_type,
9065 #ifdef MESH_MODE_SUPPORT
9066 	.txrx_set_mesh_mode = dp_peer_set_mesh_mode,
9067 	.txrx_set_mesh_rx_filter = dp_peer_set_mesh_rx_filter,
9068 #endif
9069 	.txrx_set_vdev_param = dp_set_vdev_param,
9070 	.txrx_peer_set_nawds = dp_peer_set_nawds,
9071 	.txrx_set_pdev_reo_dest = dp_set_pdev_reo_dest,
9072 	.txrx_get_pdev_reo_dest = dp_get_pdev_reo_dest,
9073 	.txrx_set_filter_neighbour_peers = dp_set_filter_neighbour_peers,
9074 	.txrx_update_filter_neighbour_peers =
9075 		dp_update_filter_neighbour_peers,
9076 	.txrx_get_sec_type = dp_get_sec_type,
9077 	/* TODO: Add other functions */
9078 	.txrx_wdi_event_sub = dp_wdi_event_sub,
9079 	.txrx_wdi_event_unsub = dp_wdi_event_unsub,
9080 #ifdef WDI_EVENT_ENABLE
9081 	.txrx_get_pldev = dp_get_pldev,
9082 #endif
9083 	.txrx_set_pdev_param = dp_set_pdev_param,
9084 #ifdef ATH_SUPPORT_NAC_RSSI
9085 	.txrx_vdev_config_for_nac_rssi = dp_config_for_nac_rssi,
9086 	.txrx_vdev_get_neighbour_rssi = dp_vdev_get_neighbour_rssi,
9087 #endif
9088 	.set_key = dp_set_michael_key,
9089 	.txrx_get_vdev_param = dp_get_vdev_param,
9090 	.enable_peer_based_pktlog = dp_enable_peer_based_pktlog,
9091 };
9092 
9093 static struct cdp_me_ops dp_ops_me = {
9094 #ifdef ATH_SUPPORT_IQUE
9095 	.tx_me_alloc_descriptor = dp_tx_me_alloc_descriptor,
9096 	.tx_me_free_descriptor = dp_tx_me_free_descriptor,
9097 	.tx_me_convert_ucast = dp_tx_me_send_convert_ucast,
9098 #endif
9099 	.tx_me_find_ast_entry = NULL,
9100 };
9101 
9102 static struct cdp_mon_ops dp_ops_mon = {
9103 	.txrx_monitor_set_filter_ucast_data = NULL,
9104 	.txrx_monitor_set_filter_mcast_data = NULL,
9105 	.txrx_monitor_set_filter_non_data = NULL,
9106 	.txrx_monitor_get_filter_ucast_data = dp_vdev_get_filter_ucast_data,
9107 	.txrx_monitor_get_filter_mcast_data = dp_vdev_get_filter_mcast_data,
9108 	.txrx_monitor_get_filter_non_data = dp_vdev_get_filter_non_data,
9109 	.txrx_reset_monitor_mode = dp_reset_monitor_mode,
9110 	/* Added support for HK advance filter */
9111 	.txrx_set_advance_monitor_filter = dp_pdev_set_advance_monitor_filter,
9112 };
9113 
9114 static struct cdp_host_stats_ops dp_ops_host_stats = {
9115 	.txrx_per_peer_stats = dp_get_host_peer_stats,
9116 	.get_fw_peer_stats = dp_get_fw_peer_stats,
9117 	.get_htt_stats = dp_get_htt_stats,
9118 	.txrx_enable_enhanced_stats = dp_enable_enhanced_stats,
9119 	.txrx_disable_enhanced_stats = dp_disable_enhanced_stats,
9120 	.txrx_stats_publish = dp_txrx_stats_publish,
9121 	.txrx_get_vdev_stats = dp_txrx_get_vdev_stats,
9122 	.txrx_get_peer_stats = dp_txrx_get_peer_stats,
9123 	.txrx_reset_peer_stats = dp_txrx_reset_peer_stats,
9124 	.txrx_get_pdev_stats = dp_txrx_get_pdev_stats,
9125 	/* TODO */
9126 };
9127 
9128 static struct cdp_raw_ops dp_ops_raw = {
9129 	/* TODO */
9130 };
9131 
9132 #ifdef CONFIG_WIN
9133 static struct cdp_pflow_ops dp_ops_pflow = {
9134 	/* TODO */
9135 };
9136 #endif /* CONFIG_WIN */
9137 
9138 #ifdef FEATURE_RUNTIME_PM
9139 /**
9140  * dp_runtime_suspend() - ensure DP is ready to runtime suspend
9141  * @opaque_pdev: DP pdev context
9142  *
9143  * DP is ready to runtime suspend if there are no pending TX packets.
9144  *
9145  * Return: QDF_STATUS
9146  */
9147 static QDF_STATUS dp_runtime_suspend(struct cdp_pdev *opaque_pdev)
9148 {
9149 	struct dp_pdev *pdev = (struct dp_pdev *)opaque_pdev;
9150 	struct dp_soc *soc = pdev->soc;
9151 
9152 	/* Abort if there are any pending TX packets */
9153 	if (dp_get_tx_pending(opaque_pdev) > 0) {
9154 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
9155 			  FL("Abort suspend due to pending TX packets"));
9156 		return QDF_STATUS_E_AGAIN;
9157 	}
9158 
9159 	if (soc->intr_mode == DP_INTR_POLL)
9160 		qdf_timer_stop(&soc->int_timer);
9161 
9162 	return QDF_STATUS_SUCCESS;
9163 }
9164 
9165 /**
9166  * dp_runtime_resume() - ensure DP is ready to runtime resume
9167  * @opaque_pdev: DP pdev context
9168  *
9169  * Resume DP for runtime PM.
9170  *
9171  * Return: QDF_STATUS
9172  */
9173 static QDF_STATUS dp_runtime_resume(struct cdp_pdev *opaque_pdev)
9174 {
9175 	struct dp_pdev *pdev = (struct dp_pdev *)opaque_pdev;
9176 	struct dp_soc *soc = pdev->soc;
9177 	void *hal_srng;
9178 	int i;
9179 
9180 	if (soc->intr_mode == DP_INTR_POLL)
9181 		qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
9182 
9183 	for (i = 0; i < MAX_TCL_DATA_RINGS; i++) {
9184 		hal_srng = soc->tcl_data_ring[i].hal_srng;
9185 		if (hal_srng) {
9186 			/* We actually only need to acquire the lock */
9187 			hal_srng_access_start(soc->hal_soc, hal_srng);
9188 			/* Update SRC ring head pointer for HW to send
9189 			 * all pending packets */
9190 			hal_srng_access_end(soc->hal_soc, hal_srng);
9191 		}
9192 	}
9193 
9194 	return QDF_STATUS_SUCCESS;
9195 }
9196 #endif /* FEATURE_RUNTIME_PM */
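
/*
 * Illustrative call pattern (a sketch of what a bus/PM layer might do,
 * not driver code): QDF_STATUS_E_AGAIN from dp_runtime_suspend() is a
 * soft failure, so the caller simply retries later; reschedule_suspend()
 * is a hypothetical helper:
 *
 *	if (dp_runtime_suspend(opaque_pdev) == QDF_STATUS_E_AGAIN)
 *		reschedule_suspend();
 *	else
 *		... power down, and call dp_runtime_resume() on wake ...
 */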
9197 
9198 static QDF_STATUS dp_bus_suspend(struct cdp_pdev *opaque_pdev)
9199 {
9200 	struct dp_pdev *pdev = (struct dp_pdev *)opaque_pdev;
9201 	struct dp_soc *soc = pdev->soc;
9202 	int timeout = SUSPEND_DRAIN_WAIT;
9203 	int drain_wait_delay = 50; /* 50 ms */
9204 
9205 	/* Abort if there are any pending TX packets */
9206 	while (dp_get_tx_pending(opaque_pdev) > 0) {
9207 		qdf_sleep(drain_wait_delay);
9208 		if (timeout <= 0) {
9209 			dp_err("TX frames are pending, abort suspend");
9210 			return QDF_STATUS_E_TIMEOUT;
9211 		}
9212 		timeout = timeout - drain_wait_delay;
9213 	}
9215 
9216 	if (soc->intr_mode == DP_INTR_POLL)
9217 		qdf_timer_stop(&soc->int_timer);
9218 
9219 	return QDF_STATUS_SUCCESS;
9220 }
9221 
9222 static QDF_STATUS dp_bus_resume(struct cdp_pdev *opaque_pdev)
9223 {
9224 	struct dp_pdev *pdev = (struct dp_pdev *)opaque_pdev;
9225 	struct dp_soc *soc = pdev->soc;
9226 
9227 	if (soc->intr_mode == DP_INTR_POLL)
9228 		qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
9229 
9230 	return QDF_STATUS_SUCCESS;
9231 }
9232 
9233 #ifndef CONFIG_WIN
9234 static struct cdp_misc_ops dp_ops_misc = {
9235 	.tx_non_std = dp_tx_non_std,
9236 	.get_opmode = dp_get_opmode,
9237 #ifdef FEATURE_RUNTIME_PM
9238 	.runtime_suspend = dp_runtime_suspend,
9239 	.runtime_resume = dp_runtime_resume,
9240 #endif /* FEATURE_RUNTIME_PM */
9241 	.pkt_log_init = dp_pkt_log_init,
9242 	.pkt_log_con_service = dp_pkt_log_con_service,
9243 	.get_num_rx_contexts = dp_get_num_rx_contexts,
9244 };
9245 
9246 static struct cdp_flowctl_ops dp_ops_flowctl = {
9247 	/* WIFI 3.0 DP implement as required. */
9248 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
9249 	.flow_pool_map_handler = dp_tx_flow_pool_map,
9250 	.flow_pool_unmap_handler = dp_tx_flow_pool_unmap,
9251 	.register_pause_cb = dp_txrx_register_pause_cb,
9252 	.dump_flow_pool_info = dp_tx_dump_flow_pool_info,
9253 	.tx_desc_thresh_reached = dp_tx_desc_thresh_reached,
9254 #endif /* QCA_LL_TX_FLOW_CONTROL_V2 */
9255 };
9256 
9257 static struct cdp_lflowctl_ops dp_ops_l_flowctl = {
9258 	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
9259 };
9260 
9261 #ifdef IPA_OFFLOAD
9262 static struct cdp_ipa_ops dp_ops_ipa = {
9263 	.ipa_get_resource = dp_ipa_get_resource,
9264 	.ipa_set_doorbell_paddr = dp_ipa_set_doorbell_paddr,
9265 	.ipa_op_response = dp_ipa_op_response,
9266 	.ipa_register_op_cb = dp_ipa_register_op_cb,
9267 	.ipa_get_stat = dp_ipa_get_stat,
9268 	.ipa_tx_data_frame = dp_tx_send_ipa_data_frame,
9269 	.ipa_enable_autonomy = dp_ipa_enable_autonomy,
9270 	.ipa_disable_autonomy = dp_ipa_disable_autonomy,
9271 	.ipa_setup = dp_ipa_setup,
9272 	.ipa_cleanup = dp_ipa_cleanup,
9273 	.ipa_setup_iface = dp_ipa_setup_iface,
9274 	.ipa_cleanup_iface = dp_ipa_cleanup_iface,
9275 	.ipa_enable_pipes = dp_ipa_enable_pipes,
9276 	.ipa_disable_pipes = dp_ipa_disable_pipes,
9277 	.ipa_set_perf_level = dp_ipa_set_perf_level
9278 };
9279 #endif
9280 
9281 static struct cdp_bus_ops dp_ops_bus = {
9282 	.bus_suspend = dp_bus_suspend,
9283 	.bus_resume = dp_bus_resume
9284 };
9285 
9286 static struct cdp_ocb_ops dp_ops_ocb = {
9287 	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
9288 };
9289 
9290 
9291 static struct cdp_throttle_ops dp_ops_throttle = {
9292 	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
9293 };
9294 
9295 static struct cdp_mob_stats_ops dp_ops_mob_stats = {
9296 	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
9297 };
9298 
9299 static struct cdp_cfg_ops dp_ops_cfg = {
9300 	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
9301 };
9302 
9303 /*
9304  * dp_peer_get_ref_find_by_addr() - find peer by address and take a reference
9305  * @dev: physical device instance
9306  * @peer_mac_addr: peer mac address
9307  * @local_id: local id for the peer
9308  * @debug_id: to track enum peer access
9309  *
9310  * Return: peer instance pointer
9311  */
9312 static inline void *
9313 dp_peer_get_ref_find_by_addr(struct cdp_pdev *dev, uint8_t *peer_mac_addr,
9314 			     uint8_t *local_id,
9315 			     enum peer_debug_id_type debug_id)
9316 {
9317 	struct dp_pdev *pdev = (struct dp_pdev *)dev;
9318 	struct dp_peer *peer;
9319 
9320 	peer = dp_peer_find_hash_find(pdev->soc, peer_mac_addr, 0, DP_VDEV_ALL);
9321 
9322 	if (!peer)
9323 		return NULL;
9324 
9325 	*local_id = peer->local_id;
9326 	DP_TRACE(INFO, "%s: peer %pK id %d", __func__, peer, *local_id);
9327 
9328 	return peer;
9329 }
9330 
9331 /*
9332  * dp_peer_release_ref - release peer ref count
9333  * @peer: peer handle
9334  * @debug_id: to track enum peer access
9335  *
9336  * Return: None
9337  */
9338 static inline
9339 void dp_peer_release_ref(void *peer, enum peer_debug_id_type debug_id)
9340 {
9341 	dp_peer_unref_delete(peer);
9342 }
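
/*
 * Illustrative usage sketch (not driver code): every successful
 * peer_get_ref_by_addr must be balanced by peer_release_ref, or the
 * peer can never be freed. use_peer() is a hypothetical consumer and
 * the debug id shown is only an example value:
 *
 *	peer = dp_peer_get_ref_find_by_addr(dev, mac, &local_id,
 *					    PEER_DEBUG_ID_OL_INTERNAL);
 *	if (peer) {
 *		use_peer(peer);
 *		dp_peer_release_ref(peer, PEER_DEBUG_ID_OL_INTERNAL);
 *	}
 */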
9343 
9344 static struct cdp_peer_ops dp_ops_peer = {
9345 	.register_peer = dp_register_peer,
9346 	.clear_peer = dp_clear_peer,
9347 	.find_peer_by_addr = dp_find_peer_by_addr,
9348 	.find_peer_by_addr_and_vdev = dp_find_peer_by_addr_and_vdev,
9349 	.peer_get_ref_by_addr = dp_peer_get_ref_find_by_addr,
9350 	.peer_release_ref = dp_peer_release_ref,
9351 	.local_peer_id = dp_local_peer_id,
9352 	.peer_find_by_local_id = dp_peer_find_by_local_id,
9353 	.peer_state_update = dp_peer_state_update,
9354 	.get_vdevid = dp_get_vdevid,
9355 	.get_vdev_by_sta_id = dp_get_vdev_by_sta_id,
9356 	.peer_get_peer_mac_addr = dp_peer_get_peer_mac_addr,
9357 	.get_vdev_for_peer = dp_get_vdev_for_peer,
9358 	.get_peer_state = dp_get_peer_state,
9359 };
9360 #endif
9361 
9362 static struct cdp_ops dp_txrx_ops = {
9363 	.cmn_drv_ops = &dp_ops_cmn,
9364 	.ctrl_ops = &dp_ops_ctrl,
9365 	.me_ops = &dp_ops_me,
9366 	.mon_ops = &dp_ops_mon,
9367 	.host_stats_ops = &dp_ops_host_stats,
9368 	.wds_ops = &dp_ops_wds,
9369 	.raw_ops = &dp_ops_raw,
9370 #ifdef CONFIG_WIN
9371 	.pflow_ops = &dp_ops_pflow,
9372 #endif /* CONFIG_WIN */
9373 #ifndef CONFIG_WIN
9374 	.misc_ops = &dp_ops_misc,
9375 	.cfg_ops = &dp_ops_cfg,
9376 	.flowctl_ops = &dp_ops_flowctl,
9377 	.l_flowctl_ops = &dp_ops_l_flowctl,
9378 #ifdef IPA_OFFLOAD
9379 	.ipa_ops = &dp_ops_ipa,
9380 #endif
9381 	.bus_ops = &dp_ops_bus,
9382 	.ocb_ops = &dp_ops_ocb,
9383 	.peer_ops = &dp_ops_peer,
9384 	.throttle_ops = &dp_ops_throttle,
9385 	.mob_stats_ops = &dp_ops_mob_stats,
9386 #endif
9387 };
9388 
9389 /*
9390  * dp_soc_set_txrx_ring_map() - set the default tx ring map per interrupt context
9391  * @soc: DP soc handle
9392  *
9393  * Return: Void
9394  */
9395 static void dp_soc_set_txrx_ring_map(struct dp_soc *soc)
9396 {
9397 	uint32_t i;
9398 	for (i = 0; i < WLAN_CFG_INT_NUM_CONTEXTS; i++) {
9399 		soc->tx_ring_map[i] = dp_cpu_ring_map[DP_NSS_DEFAULT_MAP][i];
9400 	}
9401 }
9402 
9403 #ifdef QCA_WIFI_QCA8074
9404 
9405 #ifndef QCA_MEM_ATTACH_ON_WIFI3
9406 
9407 /**
9408  * dp_soc_attach_wifi3() - Attach txrx SOC
9409  * @ctrl_psoc: Opaque SOC handle from control plane
9410  * @hif_handle: Opaque HIF handle
9411  * @htc_handle: Opaque HTC handle
9412  * @qdf_osdev: QDF device
9413  * @ol_ops: Offload Operations
9414  * @device_id: Device ID
9415  *
9416  * Return: DP SOC handle on success, NULL on failure
9417  */
9418 void *dp_soc_attach_wifi3(void *ctrl_psoc, void *hif_handle,
9419 			  HTC_HANDLE htc_handle, qdf_device_t qdf_osdev,
9420 			  struct ol_if_ops *ol_ops, uint16_t device_id)
9421 {
9422 	struct dp_soc *dp_soc = NULL;
9423 
9424 	dp_soc = dp_soc_attach(ctrl_psoc, htc_handle, qdf_osdev,
9425 			       ol_ops, device_id);
9426 	if (!dp_soc)
9427 		return NULL;
9428 
9429 	if (!dp_soc_init(dp_soc, htc_handle, hif_handle))
9430 		return NULL;
9431 
9432 	return (void *)dp_soc;
9433 }
9434 #else
9435 
9436 /**
9437  * dp_soc_attach_wifi3() - Attach txrx SOC
9438  * @ctrl_psoc: Opaque SOC handle from control plane
9439  * @hif_handle: Opaque HIF handle
9440  * @htc_handle: Opaque HTC handle
9441  * @qdf_osdev: QDF device
9442  * @ol_ops: Offload Operations
9443  * @device_id: Device ID
9444  *
9445  * Return: DP SOC handle on success, NULL on failure
9446  */
9447 void *dp_soc_attach_wifi3(void *ctrl_psoc, void *hif_handle,
9448 			  HTC_HANDLE htc_handle, qdf_device_t qdf_osdev,
9449 			  struct ol_if_ops *ol_ops, uint16_t device_id)
9450 {
9451 	struct dp_soc *dp_soc = NULL;
9452 
9453 	dp_soc = dp_soc_attach(ctrl_psoc, htc_handle, qdf_osdev,
9454 			       ol_ops, device_id);
9455 	return (void *)dp_soc;
9456 }
9457 
9458 #endif
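
/*
 * Illustrative bring-up sequence (a sketch, assuming valid control
 * plane, HIF, HTC and QDF handles): with QCA_MEM_ATTACH_ON_WIFI3 the
 * attach above only allocates, and initialization happens in a later
 * dp_soc_init_wifi3() call; without it, dp_soc_attach_wifi3() performs
 * both steps:
 *
 *	soc = dp_soc_attach_wifi3(ctrl_psoc, hif_handle, htc_handle,
 *				  qdf_osdev, ol_ops, device_id);
 *	if (soc)
 *		soc = dp_soc_init_wifi3(soc, ctrl_psoc, hif_handle,
 *					htc_handle, qdf_osdev, ol_ops,
 *					device_id);
 */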
9459 
9460 /**
9461  * dp_soc_attach() - Attach txrx SOC
9462  * @ctrl_psoc: Opaque SOC handle from control plane
9463  * @htc_handle: Opaque HTC handle
9464  * @qdf_osdev: QDF device
9465  * @ol_ops: Offload Operations
9466  * @device_id: Device ID
9467  *
9468  * Return: DP SOC handle on success, NULL on failure
9469  */
9470 static struct dp_soc *
9471 dp_soc_attach(void *ctrl_psoc, HTC_HANDLE htc_handle, qdf_device_t qdf_osdev,
9472 	      struct ol_if_ops *ol_ops, uint16_t device_id)
9473 {
9475 	struct dp_soc *soc = NULL;
9476 	struct htt_soc *htt_soc = NULL;
9477 
9478 	soc = qdf_mem_malloc(sizeof(*soc));
9479 
9480 	if (!soc) {
9481 		dp_err("DP SOC memory allocation failed");
9482 		goto fail0;
9483 	}
9484 
9486 	soc->device_id = device_id;
9487 	soc->cdp_soc.ops = &dp_txrx_ops;
9488 	soc->cdp_soc.ol_ops = ol_ops;
9489 	soc->ctrl_psoc = ctrl_psoc;
9490 	soc->osdev = qdf_osdev;
9491 	soc->num_hw_dscp_tid_map = HAL_MAX_HW_DSCP_TID_MAPS;
9492 
9493 	soc->wlan_cfg_ctx = wlan_cfg_soc_attach(soc->ctrl_psoc);
9494 	if (!soc->wlan_cfg_ctx) {
9495 		dp_err("wlan_cfg_ctx attach failed");
9496 		goto fail1;
9497 	}
9498 	htt_soc = qdf_mem_malloc(sizeof(*htt_soc));
9499 	if (!htt_soc) {
9500 		dp_err("HTT soc memory allocation failed");
9501 		goto fail1;
9502 	}
9503 	soc->htt_handle = htt_soc;
9504 	htt_soc->dp_soc = soc;
9505 	htt_soc->htc_soc = htc_handle;
9506 
9507 	if (htt_soc_htc_prealloc(htt_soc) != QDF_STATUS_SUCCESS)
9508 		goto fail2;
9509 
9510 	return soc;
9511 fail2:
9512 	qdf_mem_free(htt_soc);
9513 fail1:
9514 	qdf_mem_free(soc);
9515 fail0:
9516 	return NULL;
9517 }
9518 
9519 /**
9520  * dp_soc_init() - Initialize txrx SOC
9521  * @dp_soc: Opaque DP SOC handle
9522  * @dpsoc: Opaque DP SOC handle
9523  * @hif_handle: Opaque HIF handle
9524  *
9525  * Return: DP SOC handle on success, NULL on failure
9526  */
9527 void *dp_soc_init(void *dpsoc, HTC_HANDLE htc_handle, void *hif_handle)
9528 {
9529 	int target_type;
9530 	struct dp_soc *soc = (struct dp_soc *)dpsoc;
9531 	struct htt_soc *htt_soc = (struct htt_soc *)soc->htt_handle;
9532 
9533 	htt_soc->htc_soc = htc_handle;
9534 	soc->hif_handle = hif_handle;
9535 
9536 	soc->hal_soc = hif_get_hal_handle(soc->hif_handle);
9537 	if (!soc->hal_soc)
9538 		return NULL;
9539 
9540 	htt_soc_initialize(soc->htt_handle, soc->ctrl_psoc, htt_soc->htc_soc,
9541 			   soc->hal_soc, soc->osdev);
9542 	target_type = hal_get_target_type(soc->hal_soc);
9543 	switch (target_type) {
9544 	case TARGET_TYPE_QCA6290:
9545 		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
9546 					       REO_DST_RING_SIZE_QCA6290);
9547 		soc->ast_override_support = 1;
9548 		break;
9549 #ifdef QCA_WIFI_QCA6390
9550 	case TARGET_TYPE_QCA6390:
9551 		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
9552 					       REO_DST_RING_SIZE_QCA6290);
9553 		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, true);
9554 		soc->ast_override_support = 1;
9555 		if (con_mode_monitor == QDF_GLOBAL_MONITOR_MODE) {
9556 			int int_ctx;
9557 
9558 			for (int_ctx = 0; int_ctx < WLAN_CFG_INT_NUM_CONTEXTS; int_ctx++) {
9559 				soc->wlan_cfg_ctx->int_rx_ring_mask[int_ctx] = 0;
9560 				soc->wlan_cfg_ctx->int_rxdma2host_ring_mask[int_ctx] = 0;
9561 			}
9562 		}
9563 		soc->wlan_cfg_ctx->rxdma1_enable = 0;
9564 		break;
9565 #endif
9566 	case TARGET_TYPE_QCA8074:
9567 		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
9568 					       REO_DST_RING_SIZE_QCA8074);
9569 		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, true);
9570 		break;
9571 	case TARGET_TYPE_QCA8074V2:
9572 	case TARGET_TYPE_QCA6018:
9573 		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
9574 					       REO_DST_RING_SIZE_QCA8074);
9575 		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, false);
9576 		soc->hw_nac_monitor_support = 1;
9577 		soc->ast_override_support = 1;
9578 		soc->per_tid_basize_max_tid = 8;
9579 		soc->num_hw_dscp_tid_map = HAL_MAX_HW_DSCP_TID_V2_MAPS;
9580 		break;
9581 	default:
9582 		qdf_print("%s: Unknown tgt type %d\n", __func__, target_type);
9583 		qdf_assert_always(0);
9584 		break;
9585 	}
9586 
9587 	wlan_cfg_set_rx_hash(soc->wlan_cfg_ctx,
9588 			     cfg_get(soc->ctrl_psoc, CFG_DP_RX_HASH));
9589 	soc->cce_disable = false;
9590 
9591 	if (soc->cdp_soc.ol_ops->get_dp_cfg_param) {
9592 		int ret = soc->cdp_soc.ol_ops->get_dp_cfg_param(soc->ctrl_psoc,
9593 				CDP_CFG_MAX_PEER_ID);
9594 
9595 		if (ret != -EINVAL) {
9596 			wlan_cfg_set_max_peer_id(soc->wlan_cfg_ctx, ret);
9597 		}
9598 
9599 		ret = soc->cdp_soc.ol_ops->get_dp_cfg_param(soc->ctrl_psoc,
9600 				CDP_CFG_CCE_DISABLE);
9601 		if (ret == 1)
9602 			soc->cce_disable = true;
9603 	}
9604 
9605 	qdf_spinlock_create(&soc->peer_ref_mutex);
9606 	qdf_spinlock_create(&soc->ast_lock);
9607 	dp_soc_wds_attach(soc);
9608 
9609 	qdf_spinlock_create(&soc->reo_desc_freelist_lock);
9610 	qdf_list_create(&soc->reo_desc_freelist, REO_DESC_FREELIST_SIZE);
9611 
9612 	/* fill the tx/rx cpu ring map */
9613 	dp_soc_set_txrx_ring_map(soc);
9614 
9615 	qdf_spinlock_create(&soc->htt_stats.lock);
9616 	/* initialize work queue for stats processing */
9617 	qdf_create_work(0, &soc->htt_stats.work, htt_t2h_stats_handler, soc);
9618 
9619 	return soc;
9621 }
9622 
9623 /**
9624  * dp_soc_init_wifi3() - Initialize txrx SOC
9625  * @dp_soc: Opaque DP SOC handle
9626  * @ctrl_psoc: Opaque SOC handle from control plane(Unused)
9627  * @hif_handle: Opaque HIF handle
9628  * @htc_handle: Opaque HTC handle
9629  * @qdf_osdev: QDF device (Unused)
9630  * @ol_ops: Offload Operations (Unused)
9631  * @device_id: Device ID (Unused)
9632  *
9633  * Return: DP SOC handle on success, NULL on failure
9634  */
9635 void *dp_soc_init_wifi3(void *dpsoc, void *ctrl_psoc, void *hif_handle,
9636 			HTC_HANDLE htc_handle, qdf_device_t qdf_osdev,
9637 			struct ol_if_ops *ol_ops, uint16_t device_id)
9638 {
9639 	return dp_soc_init(dpsoc, htc_handle, hif_handle);
9640 }
9641 
9642 #endif
9643 
9644 /*
9645  * dp_get_pdev_for_mac_id() -  Return pdev for mac_id
9646  *
9647  * @soc: handle to DP soc
9648  * @mac_id: MAC id
9649  *
9650  * Return: pdev corresponding to the MAC id
9651  */
9652 void *dp_get_pdev_for_mac_id(struct dp_soc *soc, uint32_t mac_id)
9653 {
9654 	if (wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx))
9655 		return soc->pdev_list[mac_id];
9656 
9657 	/* Typically for MCL, as there is only 1 PDEV */
9658 	return soc->pdev_list[0];
9659 }
9660 
9661 /*
9662  * dp_is_hw_dbs_enable() - Check if DBS is supported, else clamp MAC rings
9663  * @soc:		DP SoC context
9664  * @max_mac_rings:	No of MAC rings; reduced to 1 when DBS is not enabled
9665  *
9666  * Return: None
9667  */
9668 static
9669 void dp_is_hw_dbs_enable(struct dp_soc *soc,
9670 				int *max_mac_rings)
9671 {
9672 	bool dbs_enable = false;
9673 	if (soc->cdp_soc.ol_ops->is_hw_dbs_2x2_capable)
9674 		dbs_enable = soc->cdp_soc.ol_ops->is_hw_dbs_2x2_capable(
9675 							soc->ctrl_psoc);
9676 
9677 	*max_mac_rings = dbs_enable ? *max_mac_rings : 1;
9678 }
9679 
9680 /*
9681 * dp_set_pktlog_wifi3() - enable/disable packet log for a WDI event
9682 * @pdev: Datapath PDEV handle
9683 * @event: which event's notifications are being subscribed to
9684 * @enable: WDI event subscribe or not. (True or False)
9685 *
9686 * Return: 0 on success
9687 */
9688 #ifdef WDI_EVENT_ENABLE
9689 int dp_set_pktlog_wifi3(struct dp_pdev *pdev, uint32_t event,
9690 	bool enable)
9691 {
9692 	struct dp_soc *soc = NULL;
9693 	struct htt_rx_ring_tlv_filter htt_tlv_filter = {0};
9694 	int max_mac_rings = wlan_cfg_get_num_mac_rings
9695 					(pdev->wlan_cfg_ctx);
9696 	uint8_t mac_id = 0;
9697 
9698 	soc = pdev->soc;
9699 	dp_is_hw_dbs_enable(soc, &max_mac_rings);
9700 
9701 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
9702 			FL("Max_mac_rings %d"),
9703 			max_mac_rings);
9704 
9705 	if (enable) {
9706 		switch (event) {
9707 		case WDI_EVENT_RX_DESC:
9708 			if (pdev->monitor_vdev) {
9709 				/* Nothing needs to be done if monitor mode is
9710 				 * enabled
9711 				 */
9712 				return 0;
9713 			}
9714 			if (pdev->rx_pktlog_mode != DP_RX_PKTLOG_FULL) {
9715 				pdev->rx_pktlog_mode = DP_RX_PKTLOG_FULL;
9716 				htt_tlv_filter.mpdu_start = 1;
9717 				htt_tlv_filter.msdu_start = 1;
9718 				htt_tlv_filter.msdu_end = 1;
9719 				htt_tlv_filter.mpdu_end = 1;
9720 				htt_tlv_filter.packet_header = 1;
9721 				htt_tlv_filter.attention = 1;
9722 				htt_tlv_filter.ppdu_start = 1;
9723 				htt_tlv_filter.ppdu_end = 1;
9724 				htt_tlv_filter.ppdu_end_user_stats = 1;
9725 				htt_tlv_filter.ppdu_end_user_stats_ext = 1;
9726 				htt_tlv_filter.ppdu_end_status_done = 1;
9727 				htt_tlv_filter.enable_fp = 1;
9728 				htt_tlv_filter.fp_mgmt_filter = FILTER_MGMT_ALL;
9729 				htt_tlv_filter.fp_ctrl_filter = FILTER_CTRL_ALL;
9730 				htt_tlv_filter.fp_data_filter = FILTER_DATA_ALL;
9731 				htt_tlv_filter.mo_mgmt_filter = FILTER_MGMT_ALL;
9732 				htt_tlv_filter.mo_ctrl_filter = FILTER_CTRL_ALL;
9733 				htt_tlv_filter.mo_data_filter = FILTER_DATA_ALL;
9734 
9735 				for (mac_id = 0; mac_id < max_mac_rings;
9736 								mac_id++) {
9737 					int mac_for_pdev =
9738 						dp_get_mac_id_for_pdev(mac_id,
9739 								pdev->pdev_id);
9740 
9741 					htt_h2t_rx_ring_cfg(soc->htt_handle,
9742 					 mac_for_pdev,
9743 					 pdev->rxdma_mon_status_ring[mac_id]
9744 					 .hal_srng,
9745 					 RXDMA_MONITOR_STATUS,
9746 					 RX_BUFFER_SIZE,
9747 					 &htt_tlv_filter);
9748 
9749 				}
9750 
9751 				if (soc->reap_timer_init)
9752 					qdf_timer_mod(&soc->mon_reap_timer,
9753 					DP_INTR_POLL_TIMER_MS);
9754 			}
9755 			break;
9756 
9757 		case WDI_EVENT_LITE_RX:
9758 			if (pdev->monitor_vdev) {
9759 				/* Nothing needs to be done if monitor mode is
9760 				 * enabled
9761 				 */
9762 				return 0;
9763 			}
9764 
9765 			if (pdev->rx_pktlog_mode != DP_RX_PKTLOG_LITE) {
9766 				pdev->rx_pktlog_mode = DP_RX_PKTLOG_LITE;
9767 
9768 				htt_tlv_filter.ppdu_start = 1;
9769 				htt_tlv_filter.ppdu_end = 1;
9770 				htt_tlv_filter.ppdu_end_user_stats = 1;
9771 				htt_tlv_filter.ppdu_end_user_stats_ext = 1;
9772 				htt_tlv_filter.ppdu_end_status_done = 1;
9773 				htt_tlv_filter.mpdu_start = 1;
9774 				htt_tlv_filter.enable_fp = 1;
9775 				htt_tlv_filter.fp_mgmt_filter = FILTER_MGMT_ALL;
9776 				htt_tlv_filter.fp_ctrl_filter = FILTER_CTRL_ALL;
9777 				htt_tlv_filter.fp_data_filter = FILTER_DATA_ALL;
9778 				htt_tlv_filter.mo_mgmt_filter = FILTER_MGMT_ALL;
9779 				htt_tlv_filter.mo_ctrl_filter = FILTER_CTRL_ALL;
9780 				htt_tlv_filter.mo_data_filter = FILTER_DATA_ALL;
9781 
9782 				for (mac_id = 0; mac_id < max_mac_rings;
9783 								mac_id++) {
9784 					int mac_for_pdev =
9785 						dp_get_mac_id_for_pdev(mac_id,
9786 								pdev->pdev_id);
9787 
9788 					htt_h2t_rx_ring_cfg(soc->htt_handle,
9789 					mac_for_pdev,
9790 					pdev->rxdma_mon_status_ring[mac_id]
9791 					.hal_srng,
9792 					RXDMA_MONITOR_STATUS,
9793 					RX_BUFFER_SIZE_PKTLOG_LITE,
9794 					&htt_tlv_filter);
9795 				}
9796 
9797 				if (soc->reap_timer_init)
9798 					qdf_timer_mod(&soc->mon_reap_timer,
9799 					DP_INTR_POLL_TIMER_MS);
9800 			}
9801 			break;
9802 
9803 		case WDI_EVENT_LITE_T2H:
9804 			if (pdev->monitor_vdev) {
9805 				/* Nothing needs to be done if monitor mode is
9806 				 * enabled
9807 				 */
9808 				return 0;
9809 			}
9810 
9811 			for (mac_id = 0; mac_id < max_mac_rings; mac_id++) {
9812 				int mac_for_pdev = dp_get_mac_id_for_pdev(
9813 							mac_id,	pdev->pdev_id);
9814 
9815 				pdev->pktlog_ppdu_stats = true;
9816 				dp_h2t_cfg_stats_msg_send(pdev,
9817 					DP_PPDU_TXLITE_STATS_BITMASK_CFG,
9818 					mac_for_pdev);
9819 			}
9820 			break;
9821 
9822 		default:
9823 			/* Nothing needs to be done for other pktlog types */
9824 			break;
9825 		}
9826 	} else {
9827 		switch (event) {
9828 		case WDI_EVENT_RX_DESC:
9829 		case WDI_EVENT_LITE_RX:
9830 			if (pdev->monitor_vdev) {
9831 				/* Nothing needs to be done if monitor mode is
9832 				 * enabled
9833 				 */
9834 				return 0;
9835 			}
9836 			if (pdev->rx_pktlog_mode != DP_RX_PKTLOG_DISABLED) {
9837 				pdev->rx_pktlog_mode = DP_RX_PKTLOG_DISABLED;
9838 
9839 				for (mac_id = 0; mac_id < max_mac_rings;
9840 								mac_id++) {
9841 					int mac_for_pdev =
9842 						dp_get_mac_id_for_pdev(mac_id,
9843 								pdev->pdev_id);
9844 
9845 					htt_h2t_rx_ring_cfg(soc->htt_handle,
9846 					  mac_for_pdev,
9847 					  pdev->rxdma_mon_status_ring[mac_id]
9848 					  .hal_srng,
9849 					  RXDMA_MONITOR_STATUS,
9850 					  RX_BUFFER_SIZE,
9851 					  &htt_tlv_filter);
9852 				}
9853 
9854 				if (soc->reap_timer_init)
9855 					qdf_timer_stop(&soc->mon_reap_timer);
9856 			}
9857 			break;
9858 		case WDI_EVENT_LITE_T2H:
9859 			if (pdev->monitor_vdev) {
9860 				/* Nothing needs to be done if monitor mode is
9861 				 * enabled
9862 				 */
9863 				return 0;
9864 			}
9865 			/* To disable HTT_H2T_MSG_TYPE_PPDU_STATS_CFG in the FW,
9866 			 * a value of 0 is passed. Once these macros are defined
9867 			 * in the htt header file, the proper macros will be used.
9868 			 */
9869 			for (mac_id = 0; mac_id < max_mac_rings; mac_id++) {
9870 				int mac_for_pdev =
9871 						dp_get_mac_id_for_pdev(mac_id,
9872 								pdev->pdev_id);
9873 
9874 				pdev->pktlog_ppdu_stats = false;
9875 				if (!pdev->enhanced_stats_en && !pdev->tx_sniffer_enable && !pdev->mcopy_mode) {
9876 					dp_h2t_cfg_stats_msg_send(pdev, 0,
9877 								mac_for_pdev);
9878 				} else if (pdev->tx_sniffer_enable || pdev->mcopy_mode) {
9879 					dp_h2t_cfg_stats_msg_send(pdev, DP_PPDU_STATS_CFG_SNIFFER,
9880 								mac_for_pdev);
9881 				} else if (pdev->enhanced_stats_en) {
9882 					dp_h2t_cfg_stats_msg_send(pdev, DP_PPDU_STATS_CFG_ENH_STATS,
9883 								mac_for_pdev);
9884 				}
9885 			}
9886 
9887 			break;
9888 		default:
9889 			/* Nothing needs to be done for other pktlog types */
9890 			break;
9891 		}
9892 	}
9893 	return 0;
9894 }
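
/*
 * Illustrative usage sketch (not driver code): subscribe to full RX
 * descriptor pktlog on a pdev and later unsubscribe; note the function
 * is a no-op while a monitor vdev is active:
 *
 *	dp_set_pktlog_wifi3(pdev, WDI_EVENT_RX_DESC, true);
 *	...
 *	dp_set_pktlog_wifi3(pdev, WDI_EVENT_RX_DESC, false);
 */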
9895 #endif
9896