xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/dp_main.c (revision dae10a5fbc53d54c53c4ba24fa018ad8b1e7c008)
1 /*
2  * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include <qdf_types.h>
20 #include <qdf_lock.h>
21 #include <qdf_net_types.h>
22 #include <qdf_lro.h>
23 #include <qdf_module.h>
24 #include <hal_hw_headers.h>
25 #include <hal_api.h>
26 #include <hif.h>
27 #include <htt.h>
28 #include <wdi_event.h>
29 #include <queue.h>
30 #include "dp_htt.h"
31 #include "dp_types.h"
32 #include "dp_internal.h"
33 #include "dp_tx.h"
34 #include "dp_tx_desc.h"
35 #include "dp_rx.h"
36 #include <cdp_txrx_handle.h>
37 #include <wlan_cfg.h>
38 #include "cdp_txrx_cmn_struct.h"
39 #include "cdp_txrx_stats_struct.h"
40 #include "cdp_txrx_cmn_reg.h"
41 #include <qdf_util.h>
42 #include "dp_peer.h"
43 #include "dp_rx_mon.h"
44 #include "htt_stats.h"
45 #include "qdf_mem.h"   /* qdf_mem_malloc,free */
46 #include "cfg_ucfg_api.h"
47 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
48 #include "cdp_txrx_flow_ctrl_v2.h"
49 #else
50 static inline void
51 cdp_dump_flow_pool_info(struct cdp_soc_t *soc)
52 {
53 	return;
54 }
55 #endif
56 #include "dp_ipa.h"
57 
58 #include "dp_cal_client_api.h"
59 
60 #ifdef CONFIG_MCL
61 #ifndef REMOVE_PKT_LOG
62 #include <pktlog_ac_api.h>
63 #include <pktlog_ac.h>
64 #endif
65 #endif
66 static void dp_pktlogmod_exit(struct dp_pdev *handle);
67 static void *dp_peer_create_wifi3(struct cdp_vdev *vdev_handle,
68 				uint8_t *peer_mac_addr,
69 				struct cdp_ctrl_objmgr_peer *ctrl_peer);
70 static void dp_peer_delete_wifi3(void *peer_handle, uint32_t bitmap);
71 static void dp_ppdu_ring_reset(struct dp_pdev *pdev);
72 static void dp_ppdu_ring_cfg(struct dp_pdev *pdev);
73 
74 #define DP_INTR_POLL_TIMER_MS	10
75 #define DP_WDS_AGING_TIMER_DEFAULT_MS	120000
76 #define DP_MCS_LENGTH (6*MAX_MCS)
77 #define DP_NSS_LENGTH (6*SS_COUNT)
78 #define DP_RXDMA_ERR_LENGTH (6*HAL_RXDMA_ERR_MAX)
79 #define DP_REO_ERR_LENGTH (6*HAL_REO_ERR_MAX)
80 #define DP_MAX_MCS_STRING_LEN 30
81 #define DP_CURR_FW_STATS_AVAIL 19
82 #define DP_HTT_DBG_EXT_STATS_MAX 256
83 #define DP_MAX_SLEEP_TIME 100
84 
85 #ifdef IPA_OFFLOAD
86 /* Exclude IPA rings from the interrupt context */
87 #define TX_RING_MASK_VAL	0xb
88 #define RX_RING_MASK_VAL	0x7
89 #else
90 #define TX_RING_MASK_VAL	0xF
91 #define RX_RING_MASK_VAL	0xF
92 #endif
93 
94 #define STR_MAXLEN	64
95 
96 #define DP_PPDU_STATS_CFG_ALL 0xFFFF
97 
98 /* PPDU stats mask sent to FW to enable enhanced stats */
99 #define DP_PPDU_STATS_CFG_ENH_STATS 0xE67
100 /* PPDU stats mask sent to FW to support debug sniffer feature */
101 #define DP_PPDU_STATS_CFG_SNIFFER 0x2FFF
102 /* PPDU stats mask sent to FW to support BPR feature*/
103 #define DP_PPDU_STATS_CFG_BPR 0x2000
104 /* PPDU stats mask sent to FW to support BPR and enhanced stats feature */
105 #define DP_PPDU_STATS_CFG_BPR_ENH (DP_PPDU_STATS_CFG_BPR | \
106 				   DP_PPDU_STATS_CFG_ENH_STATS)
107 /* PPDU stats mask sent to FW to support BPR and pcktlog stats feature */
108 #define DP_PPDU_STATS_CFG_BPR_PKTLOG (DP_PPDU_STATS_CFG_BPR | \
109 				      DP_PPDU_TXLITE_STATS_BITMASK_CFG)
110 
111 #define RNG_ERR		"SRNG setup failed for"
112 /**
113  * default_dscp_tid_map - Default DSCP-TID mapping
114  *
115  * DSCP        TID
116  * 000000      0
117  * 001000      1
118  * 010000      2
119  * 011000      3
120  * 100000      4
121  * 101000      5
122  * 110000      6
123  * 111000      7
124  */
125 static uint8_t default_dscp_tid_map[DSCP_TID_MAP_MAX] = {
126 	0, 0, 0, 0, 0, 0, 0, 0,
127 	1, 1, 1, 1, 1, 1, 1, 1,
128 	2, 2, 2, 2, 2, 2, 2, 2,
129 	3, 3, 3, 3, 3, 3, 3, 3,
130 	4, 4, 4, 4, 4, 4, 4, 4,
131 	5, 5, 5, 5, 5, 5, 5, 5,
132 	6, 6, 6, 6, 6, 6, 6, 6,
133 	7, 7, 7, 7, 7, 7, 7, 7,
134 };
135 
136 /*
137  * struct dp_rate_debug
138  *
139  * @mcs_type: print string for a given mcs
140  * @valid: valid mcs rate?
141  */
142 struct dp_rate_debug {
143 	char mcs_type[DP_MAX_MCS_STRING_LEN];
144 	uint8_t valid;
145 };
146 
147 #define MCS_VALID 1
148 #define MCS_INVALID 0
149 
150 static const struct dp_rate_debug dp_rate_string[DOT11_MAX][MAX_MCS] = {
151 
152 	{
153 		{"OFDM 48 Mbps", MCS_VALID},
154 		{"OFDM 24 Mbps", MCS_VALID},
155 		{"OFDM 12 Mbps", MCS_VALID},
156 		{"OFDM 6 Mbps ", MCS_VALID},
157 		{"OFDM 54 Mbps", MCS_VALID},
158 		{"OFDM 36 Mbps", MCS_VALID},
159 		{"OFDM 18 Mbps", MCS_VALID},
160 		{"OFDM 9 Mbps ", MCS_VALID},
161 		{"INVALID ", MCS_INVALID},
162 		{"INVALID ", MCS_INVALID},
163 		{"INVALID ", MCS_INVALID},
164 		{"INVALID ", MCS_INVALID},
165 		{"INVALID ", MCS_VALID},
166 	},
167 	{
168 		{"CCK 11 Mbps Long  ", MCS_VALID},
169 		{"CCK 5.5 Mbps Long ", MCS_VALID},
170 		{"CCK 2 Mbps Long   ", MCS_VALID},
171 		{"CCK 1 Mbps Long   ", MCS_VALID},
172 		{"CCK 11 Mbps Short ", MCS_VALID},
173 		{"CCK 5.5 Mbps Short", MCS_VALID},
174 		{"CCK 2 Mbps Short  ", MCS_VALID},
175 		{"INVALID ", MCS_INVALID},
176 		{"INVALID ", MCS_INVALID},
177 		{"INVALID ", MCS_INVALID},
178 		{"INVALID ", MCS_INVALID},
179 		{"INVALID ", MCS_INVALID},
180 		{"INVALID ", MCS_VALID},
181 	},
182 	{
183 		{"HT MCS 0 (BPSK 1/2)  ", MCS_VALID},
184 		{"HT MCS 1 (QPSK 1/2)  ", MCS_VALID},
185 		{"HT MCS 2 (QPSK 3/4)  ", MCS_VALID},
186 		{"HT MCS 3 (16-QAM 1/2)", MCS_VALID},
187 		{"HT MCS 4 (16-QAM 3/4)", MCS_VALID},
188 		{"HT MCS 5 (64-QAM 2/3)", MCS_VALID},
189 		{"HT MCS 6 (64-QAM 3/4)", MCS_VALID},
190 		{"HT MCS 7 (64-QAM 5/6)", MCS_VALID},
191 		{"INVALID ", MCS_INVALID},
192 		{"INVALID ", MCS_INVALID},
193 		{"INVALID ", MCS_INVALID},
194 		{"INVALID ", MCS_INVALID},
195 		{"INVALID ", MCS_VALID},
196 	},
197 	{
198 		{"VHT MCS 0 (BPSK 1/2)     ", MCS_VALID},
199 		{"VHT MCS 1 (QPSK 1/2)     ", MCS_VALID},
200 		{"VHT MCS 2 (QPSK 3/4)     ", MCS_VALID},
201 		{"VHT MCS 3 (16-QAM 1/2)   ", MCS_VALID},
202 		{"VHT MCS 4 (16-QAM 3/4)   ", MCS_VALID},
203 		{"VHT MCS 5 (64-QAM 2/3)   ", MCS_VALID},
204 		{"VHT MCS 6 (64-QAM 3/4)   ", MCS_VALID},
205 		{"VHT MCS 7 (64-QAM 5/6)   ", MCS_VALID},
206 		{"VHT MCS 8 (256-QAM 3/4)  ", MCS_VALID},
207 		{"VHT MCS 9 (256-QAM 5/6)  ", MCS_VALID},
208 		{"VHT MCS 10 (1024-QAM 3/4)", MCS_VALID},
209 		{"VHT MCS 11 (1024-QAM 5/6)", MCS_VALID},
210 		{"INVALID ", MCS_VALID},
211 	},
212 	{
213 		{"HE MCS 0 (BPSK 1/2)     ", MCS_VALID},
214 		{"HE MCS 1 (QPSK 1/2)     ", MCS_VALID},
215 		{"HE MCS 2 (QPSK 3/4)     ", MCS_VALID},
216 		{"HE MCS 3 (16-QAM 1/2)   ", MCS_VALID},
217 		{"HE MCS 4 (16-QAM 3/4)   ", MCS_VALID},
218 		{"HE MCS 5 (64-QAM 2/3)   ", MCS_VALID},
219 		{"HE MCS 6 (64-QAM 3/4)   ", MCS_VALID},
220 		{"HE MCS 7 (64-QAM 5/6)   ", MCS_VALID},
221 		{"HE MCS 8 (256-QAM 3/4)  ", MCS_VALID},
222 		{"HE MCS 9 (256-QAM 5/6)  ", MCS_VALID},
223 		{"HE MCS 10 (1024-QAM 3/4)", MCS_VALID},
224 		{"HE MCS 11 (1024-QAM 5/6)", MCS_VALID},
225 		{"INVALID ", MCS_VALID},
226 	}
227 };
228 
229 /**
230  * @brief Cpu ring map types
231  */
232 enum dp_cpu_ring_map_types {
233 	DP_DEFAULT_MAP,
234 	DP_NSS_FIRST_RADIO_OFFLOADED_MAP,
235 	DP_NSS_SECOND_RADIO_OFFLOADED_MAP,
236 	DP_NSS_ALL_RADIO_OFFLOADED_MAP,
237 	DP_CPU_RING_MAP_MAX
238 };
239 
240 /**
241  * @brief Cpu to tx ring map
242  */
243 static uint8_t dp_cpu_ring_map[DP_CPU_RING_MAP_MAX][WLAN_CFG_INT_NUM_CONTEXTS] = {
244 	{0x0, 0x1, 0x2, 0x0},
245 	{0x1, 0x2, 0x1, 0x2},
246 	{0x0, 0x2, 0x0, 0x2},
247 	{0x2, 0x2, 0x2, 0x2}
248 };
249 
250 /**
251  * @brief Select the type of statistics
252  */
253 enum dp_stats_type {
254 	STATS_FW = 0,
255 	STATS_HOST = 1,
256 	STATS_TYPE_MAX = 2,
257 };
258 
259 /**
260  * @brief General Firmware statistics options
261  *
262  */
263 enum dp_fw_stats {
264 	TXRX_FW_STATS_INVALID	= -1,
265 };
266 
267 /**
268  * dp_stats_mapping_table - Firmware and Host statistics
269  * currently supported
270  */
271 const int dp_stats_mapping_table[][STATS_TYPE_MAX] = {
272 	{HTT_DBG_EXT_STATS_RESET, TXRX_HOST_STATS_INVALID},
273 	{HTT_DBG_EXT_STATS_PDEV_TX, TXRX_HOST_STATS_INVALID},
274 	{HTT_DBG_EXT_STATS_PDEV_RX, TXRX_HOST_STATS_INVALID},
275 	{HTT_DBG_EXT_STATS_PDEV_TX_HWQ, TXRX_HOST_STATS_INVALID},
276 	{HTT_DBG_EXT_STATS_PDEV_TX_SCHED, TXRX_HOST_STATS_INVALID},
277 	{HTT_DBG_EXT_STATS_PDEV_ERROR, TXRX_HOST_STATS_INVALID},
278 	{HTT_DBG_EXT_STATS_PDEV_TQM, TXRX_HOST_STATS_INVALID},
279 	{HTT_DBG_EXT_STATS_TQM_CMDQ, TXRX_HOST_STATS_INVALID},
280 	{HTT_DBG_EXT_STATS_TX_DE_INFO, TXRX_HOST_STATS_INVALID},
281 	{HTT_DBG_EXT_STATS_PDEV_TX_RATE, TXRX_HOST_STATS_INVALID},
282 	{HTT_DBG_EXT_STATS_PDEV_RX_RATE, TXRX_HOST_STATS_INVALID},
283 	{TXRX_FW_STATS_INVALID, TXRX_HOST_STATS_INVALID},
284 	{HTT_DBG_EXT_STATS_TX_SELFGEN_INFO, TXRX_HOST_STATS_INVALID},
285 	{HTT_DBG_EXT_STATS_TX_MU_HWQ, TXRX_HOST_STATS_INVALID},
286 	{HTT_DBG_EXT_STATS_RING_IF_INFO, TXRX_HOST_STATS_INVALID},
287 	{HTT_DBG_EXT_STATS_SRNG_INFO, TXRX_HOST_STATS_INVALID},
288 	{HTT_DBG_EXT_STATS_SFM_INFO, TXRX_HOST_STATS_INVALID},
289 	{HTT_DBG_EXT_STATS_PDEV_TX_MU, TXRX_HOST_STATS_INVALID},
290 	{HTT_DBG_EXT_STATS_ACTIVE_PEERS_LIST, TXRX_HOST_STATS_INVALID},
291 	/* Last ENUM for HTT FW STATS */
292 	{DP_HTT_DBG_EXT_STATS_MAX, TXRX_HOST_STATS_INVALID},
293 	{TXRX_FW_STATS_INVALID, TXRX_CLEAR_STATS},
294 	{TXRX_FW_STATS_INVALID, TXRX_RX_RATE_STATS},
295 	{TXRX_FW_STATS_INVALID, TXRX_TX_RATE_STATS},
296 	{TXRX_FW_STATS_INVALID, TXRX_TX_HOST_STATS},
297 	{TXRX_FW_STATS_INVALID, TXRX_RX_HOST_STATS},
298 	{TXRX_FW_STATS_INVALID, TXRX_AST_STATS},
299 	{TXRX_FW_STATS_INVALID, TXRX_SRNG_PTR_STATS},
300 	{TXRX_FW_STATS_INVALID, TXRX_RX_MON_STATS},
301 	{TXRX_FW_STATS_INVALID, TXRX_REO_QUEUE_STATS},
302 };
303 
304 /* MCL specific functions */
305 #ifdef CONFIG_MCL
306 /**
307  * dp_soc_get_mon_mask_for_interrupt_mode() - get mon mode mask for intr mode
308  * @soc: pointer to dp_soc handle
309  * @intr_ctx_num: interrupt context number for which mon mask is needed
310  *
311  * For MCL, monitor mode rings are being processed in timer contexts (polled).
312  * This function is returning 0, since in interrupt mode(softirq based RX),
313  * we donot want to process monitor mode rings in a softirq.
314  *
315  * So, in case packet log is enabled for SAP/STA/P2P modes,
316  * regular interrupt processing will not process monitor mode rings. It would be
317  * done in a separate timer context.
318  *
319  * Return: 0
320  */
321 static inline
322 uint32_t dp_soc_get_mon_mask_for_interrupt_mode(struct dp_soc *soc, int intr_ctx_num)
323 {
324 	return 0;
325 }
326 
327 /*
328  * dp_service_mon_rings()- timer to reap monitor rings
329  * reqd as we are not getting ppdu end interrupts
330  * @arg: SoC Handle
331  *
332  * Return:
333  *
334  */
335 static void dp_service_mon_rings(void *arg)
336 {
337 	struct dp_soc *soc = (struct dp_soc *)arg;
338 	int ring = 0, work_done, mac_id;
339 	struct dp_pdev *pdev = NULL;
340 
341 	for  (ring = 0 ; ring < MAX_PDEV_CNT; ring++) {
342 		pdev = soc->pdev_list[ring];
343 		if (!pdev)
344 			continue;
345 		for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
346 			int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id,
347 								pdev->pdev_id);
348 			work_done = dp_mon_process(soc, mac_for_pdev,
349 						   QCA_NAPI_BUDGET);
350 
351 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
352 				  FL("Reaped %d descs from Monitor rings"),
353 				  work_done);
354 		}
355 	}
356 
357 	qdf_timer_mod(&soc->mon_reap_timer, DP_INTR_POLL_TIMER_MS);
358 }
359 
360 #ifndef REMOVE_PKT_LOG
361 /**
362  * dp_pkt_log_init() - API to initialize packet log
363  * @ppdev: physical device handle
364  * @scn: HIF context
365  *
366  * Return: none
367  */
368 void dp_pkt_log_init(struct cdp_pdev *ppdev, void *scn)
369 {
370 	struct dp_pdev *handle = (struct dp_pdev *)ppdev;
371 
372 	if (handle->pkt_log_init) {
373 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
374 			  "%s: Packet log not initialized", __func__);
375 		return;
376 	}
377 
378 	pktlog_sethandle(&handle->pl_dev, scn);
379 	pktlog_set_callback_regtype(PKTLOG_LITE_CALLBACK_REGISTRATION);
380 
381 	if (pktlogmod_init(scn)) {
382 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
383 			  "%s: pktlogmod_init failed", __func__);
384 		handle->pkt_log_init = false;
385 	} else {
386 		handle->pkt_log_init = true;
387 	}
388 }
389 
390 /**
391  * dp_pkt_log_con_service() - connect packet log service
392  * @ppdev: physical device handle
393  * @scn: device context
394  *
395  * Return: none
396  */
397 static void dp_pkt_log_con_service(struct cdp_pdev *ppdev, void *scn)
398 {
399 	struct dp_pdev *pdev = (struct dp_pdev *)ppdev;
400 
401 	dp_pkt_log_init((struct cdp_pdev *)pdev, scn);
402 	pktlog_htc_attach();
403 }
404 
405 /**
406  * dp_pktlogmod_exit() - API to cleanup pktlog info
407  * @handle: Pdev handle
408  *
409  * Return: none
410  */
411 static void dp_pktlogmod_exit(struct dp_pdev *handle)
412 {
413 	void *scn = (void *)handle->soc->hif_handle;
414 
415 	if (!scn) {
416 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
417 			  "%s: Invalid hif(scn) handle", __func__);
418 		return;
419 	}
420 
421 	pktlogmod_exit(scn);
422 	handle->pkt_log_init = false;
423 }
424 #endif
425 #else
426 static void dp_pktlogmod_exit(struct dp_pdev *handle) { }
427 
428 /**
429  * dp_soc_get_mon_mask_for_interrupt_mode() - get mon mode mask for intr mode
430  * @soc: pointer to dp_soc handle
431  * @intr_ctx_num: interrupt context number for which mon mask is needed
432  *
433  * Return: mon mask value
434  */
435 static inline
436 uint32_t dp_soc_get_mon_mask_for_interrupt_mode(struct dp_soc *soc, int intr_ctx_num)
437 {
438 	return wlan_cfg_get_rx_mon_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
439 }
440 #endif
441 
442 /**
443  * dp_get_dp_vdev_from_cdp_vdev() - get dp_vdev from cdp_vdev by type-casting
444  * @cdp_opaque_vdev: pointer to cdp_vdev
445  *
446  * Return: pointer to dp_vdev
447  */
448 static
449 struct dp_vdev * dp_get_dp_vdev_from_cdp_vdev(struct cdp_vdev *cdp_opaque_vdev)
450 {
451 	return (struct dp_vdev *)cdp_opaque_vdev;
452 }
453 
454 
455 static int dp_peer_add_ast_wifi3(struct cdp_soc_t *soc_hdl,
456 					struct cdp_peer *peer_hdl,
457 					uint8_t *mac_addr,
458 					enum cdp_txrx_ast_entry_type type,
459 					uint32_t flags)
460 {
461 
462 	return dp_peer_add_ast((struct dp_soc *)soc_hdl,
463 				(struct dp_peer *)peer_hdl,
464 				mac_addr,
465 				type,
466 				flags);
467 }
468 
469 static void dp_peer_del_ast_wifi3(struct cdp_soc_t *soc_hdl,
470 					 void *ast_entry_hdl)
471 {
472 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
473 	qdf_spin_lock_bh(&soc->ast_lock);
474 	dp_peer_del_ast((struct dp_soc *)soc_hdl,
475 			(struct dp_ast_entry *)ast_entry_hdl);
476 	qdf_spin_unlock_bh(&soc->ast_lock);
477 }
478 
479 
480 static int dp_peer_update_ast_wifi3(struct cdp_soc_t *soc_hdl,
481 						struct cdp_peer *peer_hdl,
482 						uint8_t *wds_macaddr,
483 						uint32_t flags)
484 {
485 	int status = -1;
486 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
487 	struct dp_ast_entry  *ast_entry = NULL;
488 
489 	qdf_spin_lock_bh(&soc->ast_lock);
490 	ast_entry = dp_peer_ast_hash_find(soc, wds_macaddr);
491 
492 	if (ast_entry) {
493 		status = dp_peer_update_ast(soc,
494 					    (struct dp_peer *)peer_hdl,
495 					   ast_entry, flags);
496 	}
497 
498 	qdf_spin_unlock_bh(&soc->ast_lock);
499 
500 	return status;
501 }
502 
503 /*
504  * dp_wds_reset_ast_wifi3() - Reset the is_active param for ast entry
505  * @soc_handle:		Datapath SOC handle
506  * @wds_macaddr:	WDS entry MAC Address
507  * Return: None
508  */
509 static void dp_wds_reset_ast_wifi3(struct cdp_soc_t *soc_hdl,
510 				   uint8_t *wds_macaddr, void *vdev_handle)
511 {
512 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
513 	struct dp_ast_entry *ast_entry = NULL;
514 
515 	qdf_spin_lock_bh(&soc->ast_lock);
516 	ast_entry = dp_peer_ast_hash_find(soc, wds_macaddr);
517 
518 	if (ast_entry) {
519 		if ((ast_entry->type != CDP_TXRX_AST_TYPE_STATIC) &&
520 			(ast_entry->type != CDP_TXRX_AST_TYPE_SELF) &&
521 			(ast_entry->type != CDP_TXRX_AST_TYPE_STA_BSS)) {
522 			ast_entry->is_active = TRUE;
523 		}
524 	}
525 
526 	qdf_spin_unlock_bh(&soc->ast_lock);
527 }
528 
529 /*
530  * dp_wds_reset_ast_table_wifi3() - Reset the is_active param for all ast entry
531  * @soc:		Datapath SOC handle
532  *
533  * Return: None
534  */
535 static void dp_wds_reset_ast_table_wifi3(struct cdp_soc_t  *soc_hdl,
536 					 void *vdev_hdl)
537 {
538 	struct dp_soc *soc = (struct dp_soc *) soc_hdl;
539 	struct dp_pdev *pdev;
540 	struct dp_vdev *vdev;
541 	struct dp_peer *peer;
542 	struct dp_ast_entry *ase, *temp_ase;
543 	int i;
544 
545 	qdf_spin_lock_bh(&soc->ast_lock);
546 
547 	for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) {
548 		pdev = soc->pdev_list[i];
549 		qdf_spin_lock_bh(&pdev->vdev_list_lock);
550 		DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
551 			DP_VDEV_ITERATE_PEER_LIST(vdev, peer) {
552 				DP_PEER_ITERATE_ASE_LIST(peer, ase, temp_ase) {
553 					if ((ase->type ==
554 						CDP_TXRX_AST_TYPE_STATIC) ||
555 						(ase->type ==
556 						CDP_TXRX_AST_TYPE_SELF) ||
557 						(ase->type ==
558 						CDP_TXRX_AST_TYPE_STA_BSS))
559 						continue;
560 					ase->is_active = TRUE;
561 				}
562 			}
563 		}
564 		qdf_spin_unlock_bh(&pdev->vdev_list_lock);
565 	}
566 
567 	qdf_spin_unlock_bh(&soc->ast_lock);
568 }
569 
570 /*
571  * dp_wds_flush_ast_table_wifi3() - Delete all wds and hmwds ast entry
572  * @soc:		Datapath SOC handle
573  *
574  * Return: None
575  */
576 static void dp_wds_flush_ast_table_wifi3(struct cdp_soc_t  *soc_hdl)
577 {
578 	struct dp_soc *soc = (struct dp_soc *) soc_hdl;
579 	struct dp_pdev *pdev;
580 	struct dp_vdev *vdev;
581 	struct dp_peer *peer;
582 	struct dp_ast_entry *ase, *temp_ase;
583 	int i;
584 
585 	qdf_spin_lock_bh(&soc->ast_lock);
586 
587 	for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) {
588 		pdev = soc->pdev_list[i];
589 		qdf_spin_lock_bh(&pdev->vdev_list_lock);
590 		DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
591 			DP_VDEV_ITERATE_PEER_LIST(vdev, peer) {
592 				DP_PEER_ITERATE_ASE_LIST(peer, ase, temp_ase) {
593 					if ((ase->type ==
594 						CDP_TXRX_AST_TYPE_STATIC) ||
595 						(ase->type ==
596 						 CDP_TXRX_AST_TYPE_SELF) ||
597 						(ase->type ==
598 						 CDP_TXRX_AST_TYPE_STA_BSS))
599 						continue;
600 					dp_peer_del_ast(soc, ase);
601 				}
602 			}
603 		}
604 		qdf_spin_unlock_bh(&pdev->vdev_list_lock);
605 	}
606 
607 	qdf_spin_unlock_bh(&soc->ast_lock);
608 }
609 
610 static void *dp_peer_ast_hash_find_wifi3(struct cdp_soc_t *soc_hdl,
611 						uint8_t *ast_mac_addr)
612 {
613 	struct dp_ast_entry *ast_entry;
614 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
615 	qdf_spin_lock_bh(&soc->ast_lock);
616 	ast_entry = dp_peer_ast_hash_find(soc, ast_mac_addr);
617 	qdf_spin_unlock_bh(&soc->ast_lock);
618 	return (void *)ast_entry;
619 }
620 
621 static uint8_t dp_peer_ast_get_pdev_id_wifi3(struct cdp_soc_t *soc_hdl,
622 							void *ast_entry_hdl)
623 {
624 	return dp_peer_ast_get_pdev_id((struct dp_soc *)soc_hdl,
625 					(struct dp_ast_entry *)ast_entry_hdl);
626 }
627 
628 static uint8_t dp_peer_ast_get_next_hop_wifi3(struct cdp_soc_t *soc_hdl,
629 							void *ast_entry_hdl)
630 {
631 	return dp_peer_ast_get_next_hop((struct dp_soc *)soc_hdl,
632 					(struct dp_ast_entry *)ast_entry_hdl);
633 }
634 
635 static void dp_peer_ast_set_type_wifi3(
636 					struct cdp_soc_t *soc_hdl,
637 					void *ast_entry_hdl,
638 					enum cdp_txrx_ast_entry_type type)
639 {
640 	dp_peer_ast_set_type((struct dp_soc *)soc_hdl,
641 				(struct dp_ast_entry *)ast_entry_hdl,
642 				type);
643 }
644 
645 static enum cdp_txrx_ast_entry_type dp_peer_ast_get_type_wifi3(
646 					struct cdp_soc_t *soc_hdl,
647 					void *ast_entry_hdl)
648 {
649 	return ((struct dp_ast_entry *)ast_entry_hdl)->type;
650 }
651 
652 #if defined(FEATURE_AST) && defined(AST_HKV1_WORKAROUND)
653 void dp_peer_ast_set_cp_ctx_wifi3(struct cdp_soc_t *soc_handle,
654 				  void *ast_entry,
655 				  void *cp_ctx)
656 {
657 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
658 
659 	qdf_spin_lock_bh(&soc->ast_lock);
660 	dp_peer_ast_set_cp_ctx(soc,
661 			       (struct dp_ast_entry *)ast_entry, cp_ctx);
662 	qdf_spin_unlock_bh(&soc->ast_lock);
663 }
664 
665 void *dp_peer_ast_get_cp_ctx_wifi3(struct cdp_soc_t *soc_handle,
666 				   void *ast_entry)
667 {
668 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
669 	void *cp_ctx = NULL;
670 
671 	qdf_spin_lock_bh(&soc->ast_lock);
672 	cp_ctx = dp_peer_ast_get_cp_ctx(soc,
673 					(struct dp_ast_entry *)ast_entry);
674 	qdf_spin_unlock_bh(&soc->ast_lock);
675 
676 	return cp_ctx;
677 }
678 
679 bool dp_peer_ast_get_wmi_sent_wifi3(struct cdp_soc_t *soc_handle,
680 				    void *ast_entry)
681 {
682 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
683 	bool wmi_sent = false;
684 
685 	qdf_spin_lock_bh(&soc->ast_lock);
686 	wmi_sent = dp_peer_ast_get_wmi_sent(soc,
687 					    (struct dp_ast_entry *)ast_entry);
688 	qdf_spin_unlock_bh(&soc->ast_lock);
689 
690 	return wmi_sent;
691 }
692 
693 void dp_peer_ast_free_entry_wifi3(struct cdp_soc_t *soc_handle,
694 				  void *ast_entry)
695 {
696 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
697 
698 	qdf_spin_lock_bh(&soc->ast_lock);
699 	dp_peer_ast_free_entry(soc, (struct dp_ast_entry *)ast_entry);
700 	qdf_spin_unlock_bh(&soc->ast_lock);
701 }
702 #endif
703 
704 /**
705  * dp_srng_find_ring_in_mask() - find which ext_group a ring belongs
706  * @ring_num: ring num of the ring being queried
707  * @grp_mask: the grp_mask array for the ring type in question.
708  *
709  * The grp_mask array is indexed by group number and the bit fields correspond
710  * to ring numbers.  We are finding which interrupt group a ring belongs to.
711  *
712  * Return: the index in the grp_mask array with the ring number.
713  * -QDF_STATUS_E_NOENT if no entry is found
714  */
715 static int dp_srng_find_ring_in_mask(int ring_num, int *grp_mask)
716 {
717 	int ext_group_num;
718 	int mask = 1 << ring_num;
719 
720 	for (ext_group_num = 0; ext_group_num < WLAN_CFG_INT_NUM_CONTEXTS;
721 	     ext_group_num++) {
722 		if (mask & grp_mask[ext_group_num])
723 			return ext_group_num;
724 	}
725 
726 	return -QDF_STATUS_E_NOENT;
727 }
728 
729 static int dp_srng_calculate_msi_group(struct dp_soc *soc,
730 				       enum hal_ring_type ring_type,
731 				       int ring_num)
732 {
733 	int *grp_mask;
734 
735 	switch (ring_type) {
736 	case WBM2SW_RELEASE:
737 		/* dp_tx_comp_handler - soc->tx_comp_ring */
738 		if (ring_num < 3)
739 			grp_mask = &soc->wlan_cfg_ctx->int_tx_ring_mask[0];
740 
741 		/* dp_rx_wbm_err_process - soc->rx_rel_ring */
742 		else if (ring_num == 3) {
743 			/* sw treats this as a separate ring type */
744 			grp_mask = &soc->wlan_cfg_ctx->
745 				int_rx_wbm_rel_ring_mask[0];
746 			ring_num = 0;
747 		} else {
748 			qdf_assert(0);
749 			return -QDF_STATUS_E_NOENT;
750 		}
751 	break;
752 
753 	case REO_EXCEPTION:
754 		/* dp_rx_err_process - &soc->reo_exception_ring */
755 		grp_mask = &soc->wlan_cfg_ctx->int_rx_err_ring_mask[0];
756 	break;
757 
758 	case REO_DST:
759 		/* dp_rx_process - soc->reo_dest_ring */
760 		grp_mask = &soc->wlan_cfg_ctx->int_rx_ring_mask[0];
761 	break;
762 
763 	case REO_STATUS:
764 		/* dp_reo_status_ring_handler - soc->reo_status_ring */
765 		grp_mask = &soc->wlan_cfg_ctx->int_reo_status_ring_mask[0];
766 	break;
767 
768 	/* dp_rx_mon_status_srng_process - pdev->rxdma_mon_status_ring*/
769 	case RXDMA_MONITOR_STATUS:
770 	/* dp_rx_mon_dest_process - pdev->rxdma_mon_dst_ring */
771 	case RXDMA_MONITOR_DST:
772 		/* dp_mon_process */
773 		grp_mask = &soc->wlan_cfg_ctx->int_rx_mon_ring_mask[0];
774 	break;
775 	case RXDMA_DST:
776 		/* dp_rxdma_err_process */
777 		grp_mask = &soc->wlan_cfg_ctx->int_rxdma2host_ring_mask[0];
778 	break;
779 
780 	case RXDMA_BUF:
781 		grp_mask = &soc->wlan_cfg_ctx->int_host2rxdma_ring_mask[0];
782 	break;
783 
784 	case RXDMA_MONITOR_BUF:
785 		/* TODO: support low_thresh interrupt */
786 		return -QDF_STATUS_E_NOENT;
787 	break;
788 
789 	case TCL_DATA:
790 	case TCL_CMD:
791 	case REO_CMD:
792 	case SW2WBM_RELEASE:
793 	case WBM_IDLE_LINK:
794 		/* normally empty SW_TO_HW rings */
795 		return -QDF_STATUS_E_NOENT;
796 	break;
797 
798 	case TCL_STATUS:
799 	case REO_REINJECT:
800 		/* misc unused rings */
801 		return -QDF_STATUS_E_NOENT;
802 	break;
803 
804 	case CE_SRC:
805 	case CE_DST:
806 	case CE_DST_STATUS:
807 		/* CE_rings - currently handled by hif */
808 	default:
809 		return -QDF_STATUS_E_NOENT;
810 	break;
811 	}
812 
813 	return dp_srng_find_ring_in_mask(ring_num, grp_mask);
814 }
815 
816 static void dp_srng_msi_setup(struct dp_soc *soc, struct hal_srng_params
817 			      *ring_params, int ring_type, int ring_num)
818 {
819 	int msi_group_number;
820 	int msi_data_count;
821 	int ret;
822 	uint32_t msi_data_start, msi_irq_start, addr_low, addr_high;
823 
824 	ret = pld_get_user_msi_assignment(soc->osdev->dev, "DP",
825 					    &msi_data_count, &msi_data_start,
826 					    &msi_irq_start);
827 
828 	if (ret)
829 		return;
830 
831 	msi_group_number = dp_srng_calculate_msi_group(soc, ring_type,
832 						       ring_num);
833 	if (msi_group_number < 0) {
834 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW,
835 			FL("ring not part of an ext_group; ring_type: %d,ring_num %d"),
836 			ring_type, ring_num);
837 		ring_params->msi_addr = 0;
838 		ring_params->msi_data = 0;
839 		return;
840 	}
841 
842 	if (msi_group_number > msi_data_count) {
843 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_WARN,
844 			FL("2 msi_groups will share an msi; msi_group_num %d"),
845 			msi_group_number);
846 
847 		QDF_ASSERT(0);
848 	}
849 
850 	pld_get_msi_address(soc->osdev->dev, &addr_low, &addr_high);
851 
852 	ring_params->msi_addr = addr_low;
853 	ring_params->msi_addr |= (qdf_dma_addr_t)(((uint64_t)addr_high) << 32);
854 	ring_params->msi_data = (msi_group_number % msi_data_count)
855 		+ msi_data_start;
856 	ring_params->flags |= HAL_SRNG_MSI_INTR;
857 }
858 
859 /**
860  * dp_print_ast_stats() - Dump AST table contents
861  * @soc: Datapath soc handle
862  *
863  * return void
864  */
865 #ifdef FEATURE_AST
866 static void dp_print_ast_stats(struct dp_soc *soc)
867 {
868 	uint8_t i;
869 	uint8_t num_entries = 0;
870 	struct dp_vdev *vdev;
871 	struct dp_pdev *pdev;
872 	struct dp_peer *peer;
873 	struct dp_ast_entry *ase, *tmp_ase;
874 	char type[CDP_TXRX_AST_TYPE_MAX][10] = {
875 			"NONE", "STATIC", "SELF", "WDS", "MEC", "HMWDS", "BSS"};
876 
877 	DP_PRINT_STATS("AST Stats:");
878 	DP_PRINT_STATS("	Entries Added   = %d", soc->stats.ast.added);
879 	DP_PRINT_STATS("	Entries Deleted = %d", soc->stats.ast.deleted);
880 	DP_PRINT_STATS("	Entries Agedout = %d", soc->stats.ast.aged_out);
881 	DP_PRINT_STATS("AST Table:");
882 	for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) {
883 		pdev = soc->pdev_list[i];
884 		qdf_spin_lock_bh(&pdev->vdev_list_lock);
885 		DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
886 			DP_VDEV_ITERATE_PEER_LIST(vdev, peer) {
887 				DP_PEER_ITERATE_ASE_LIST(peer, ase, tmp_ase) {
888 					DP_PRINT_STATS("%6d mac_addr = %pM"
889 							" peer_mac_addr = %pM"
890 							" type = %s"
891 							" next_hop = %d"
892 							" is_active = %d"
893 							" is_bss = %d"
894 							" ast_idx = %d"
895 							" ast_hash = %d"
896 							" pdev_id = %d"
897 							" vdev_id = %d",
898 							++num_entries,
899 							ase->mac_addr.raw,
900 							ase->peer->mac_addr.raw,
901 							type[ase->type],
902 							ase->next_hop,
903 							ase->is_active,
904 							ase->is_bss,
905 							ase->ast_idx,
906 							ase->ast_hash_value,
907 							ase->pdev_id,
908 							ase->vdev_id);
909 				}
910 			}
911 		}
912 		qdf_spin_unlock_bh(&pdev->vdev_list_lock);
913 	}
914 }
915 #else
916 static void dp_print_ast_stats(struct dp_soc *soc)
917 {
918 	DP_PRINT_STATS("AST Stats not available.Enable FEATURE_AST");
919 	return;
920 }
921 #endif
922 
923 static void dp_print_peer_table(struct dp_vdev *vdev)
924 {
925 	struct dp_peer *peer = NULL;
926 
927 	DP_PRINT_STATS("Dumping Peer Table  Stats:");
928 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
929 		if (!peer) {
930 			DP_PRINT_STATS("Invalid Peer");
931 			return;
932 		}
933 		DP_PRINT_STATS("    peer_mac_addr = %pM"
934 			" nawds_enabled = %d"
935 			" bss_peer = %d"
936 			" wapi = %d"
937 			" wds_enabled = %d"
938 			" delete in progress = %d",
939 			peer->mac_addr.raw,
940 			peer->nawds_enabled,
941 			peer->bss_peer,
942 			peer->wapi,
943 			peer->wds_enabled,
944 			peer->delete_in_progress);
945 	}
946 }
947 
948 /*
949  * dp_setup_srng - Internal function to setup SRNG rings used by data path
950  */
951 static int dp_srng_setup(struct dp_soc *soc, struct dp_srng *srng,
952 	int ring_type, int ring_num, int mac_id, uint32_t num_entries)
953 {
954 	void *hal_soc = soc->hal_soc;
955 	uint32_t entry_size = hal_srng_get_entrysize(hal_soc, ring_type);
956 	/* TODO: See if we should get align size from hal */
957 	uint32_t ring_base_align = 8;
958 	struct hal_srng_params ring_params;
959 	uint32_t max_entries = hal_srng_max_entries(hal_soc, ring_type);
960 
961 	/* TODO: Currently hal layer takes care of endianness related settings.
962 	 * See if these settings need to passed from DP layer
963 	 */
964 	ring_params.flags = 0;
965 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW,
966 		  FL("Ring type: %d, num:%d"), ring_type, ring_num);
967 
968 	num_entries = (num_entries > max_entries) ? max_entries : num_entries;
969 	srng->hal_srng = NULL;
970 	srng->alloc_size = (num_entries * entry_size) + ring_base_align - 1;
971 	srng->num_entries = num_entries;
972 	srng->base_vaddr_unaligned = qdf_mem_alloc_consistent(
973 		soc->osdev, soc->osdev->dev, srng->alloc_size,
974 		&(srng->base_paddr_unaligned));
975 
976 	if (!srng->base_vaddr_unaligned) {
977 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
978 			FL("alloc failed - ring_type: %d, ring_num %d"),
979 			ring_type, ring_num);
980 		return QDF_STATUS_E_NOMEM;
981 	}
982 
983 	ring_params.ring_base_vaddr = srng->base_vaddr_unaligned +
984 		((unsigned long)srng->base_vaddr_unaligned % ring_base_align);
985 	ring_params.ring_base_paddr = srng->base_paddr_unaligned +
986 		((unsigned long)(ring_params.ring_base_vaddr) -
987 		(unsigned long)srng->base_vaddr_unaligned);
988 	ring_params.num_entries = num_entries;
989 
990 	if (soc->intr_mode == DP_INTR_MSI) {
991 		dp_srng_msi_setup(soc, &ring_params, ring_type, ring_num);
992 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
993 			  FL("Using MSI for ring_type: %d, ring_num %d"),
994 			  ring_type, ring_num);
995 
996 	} else {
997 		ring_params.msi_data = 0;
998 		ring_params.msi_addr = 0;
999 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
1000 			  FL("Skipping MSI for ring_type: %d, ring_num %d"),
1001 			  ring_type, ring_num);
1002 	}
1003 
1004 	/*
1005 	 * Setup interrupt timer and batch counter thresholds for
1006 	 * interrupt mitigation based on ring type
1007 	 */
1008 	if (ring_type == REO_DST) {
1009 		ring_params.intr_timer_thres_us =
1010 			wlan_cfg_get_int_timer_threshold_rx(soc->wlan_cfg_ctx);
1011 		ring_params.intr_batch_cntr_thres_entries =
1012 			wlan_cfg_get_int_batch_threshold_rx(soc->wlan_cfg_ctx);
1013 	} else if (ring_type == WBM2SW_RELEASE && (ring_num < 3)) {
1014 		ring_params.intr_timer_thres_us =
1015 			wlan_cfg_get_int_timer_threshold_tx(soc->wlan_cfg_ctx);
1016 		ring_params.intr_batch_cntr_thres_entries =
1017 			wlan_cfg_get_int_batch_threshold_tx(soc->wlan_cfg_ctx);
1018 	} else {
1019 		ring_params.intr_timer_thres_us =
1020 			wlan_cfg_get_int_timer_threshold_other(soc->wlan_cfg_ctx);
1021 		ring_params.intr_batch_cntr_thres_entries =
1022 			wlan_cfg_get_int_batch_threshold_other(soc->wlan_cfg_ctx);
1023 	}
1024 
1025 	/* Enable low threshold interrupts for rx buffer rings (regular and
1026 	 * monitor buffer rings.
1027 	 * TODO: See if this is required for any other ring
1028 	 */
1029 	if ((ring_type == RXDMA_BUF) || (ring_type == RXDMA_MONITOR_BUF) ||
1030 		(ring_type == RXDMA_MONITOR_STATUS)) {
1031 		/* TODO: Setting low threshold to 1/8th of ring size
1032 		 * see if this needs to be configurable
1033 		 */
1034 		ring_params.low_threshold = num_entries >> 3;
1035 		ring_params.flags |= HAL_SRNG_LOW_THRES_INTR_ENABLE;
1036 		ring_params.intr_timer_thres_us =
1037 			wlan_cfg_get_int_timer_threshold_rx(soc->wlan_cfg_ctx);
1038 		ring_params.intr_batch_cntr_thres_entries = 0;
1039 	}
1040 
1041 	srng->hal_srng = hal_srng_setup(hal_soc, ring_type, ring_num,
1042 		mac_id, &ring_params);
1043 
1044 	if (!srng->hal_srng) {
1045 		qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
1046 				srng->alloc_size,
1047 				srng->base_vaddr_unaligned,
1048 				srng->base_paddr_unaligned, 0);
1049 	}
1050 
1051 	return 0;
1052 }
1053 
1054 /**
1055  * dp_srng_cleanup - Internal function to cleanup SRNG rings used by data path
1056  * Any buffers allocated and attached to ring entries are expected to be freed
1057  * before calling this function.
1058  */
1059 static void dp_srng_cleanup(struct dp_soc *soc, struct dp_srng *srng,
1060 	int ring_type, int ring_num)
1061 {
1062 	if (!srng->hal_srng) {
1063 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1064 			FL("Ring type: %d, num:%d not setup"),
1065 			ring_type, ring_num);
1066 		return;
1067 	}
1068 
1069 	hal_srng_cleanup(soc->hal_soc, srng->hal_srng);
1070 
1071 	qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
1072 				srng->alloc_size,
1073 				srng->base_vaddr_unaligned,
1074 				srng->base_paddr_unaligned, 0);
1075 	srng->hal_srng = NULL;
1076 }
1077 
1078 /* TODO: Need this interface from HIF */
1079 void *hif_get_hal_handle(void *hif_handle);
1080 
1081 /*
1082  * dp_service_srngs() - Top level interrupt handler for DP Ring interrupts
1083  * @dp_ctx: DP SOC handle
1084  * @budget: Number of frames/descriptors that can be processed in one shot
1085  *
1086  * Return: remaining budget/quota for the soc device
1087  */
1088 static uint32_t dp_service_srngs(void *dp_ctx, uint32_t dp_budget)
1089 {
1090 	struct dp_intr *int_ctx = (struct dp_intr *)dp_ctx;
1091 	struct dp_soc *soc = int_ctx->soc;
1092 	int ring = 0;
1093 	uint32_t work_done  = 0;
1094 	int budget = dp_budget;
1095 	uint8_t tx_mask = int_ctx->tx_ring_mask;
1096 	uint8_t rx_mask = int_ctx->rx_ring_mask;
1097 	uint8_t rx_err_mask = int_ctx->rx_err_ring_mask;
1098 	uint8_t rx_wbm_rel_mask = int_ctx->rx_wbm_rel_ring_mask;
1099 	uint8_t reo_status_mask = int_ctx->reo_status_ring_mask;
1100 	uint32_t remaining_quota = dp_budget;
1101 	struct dp_pdev *pdev = NULL;
1102 	int mac_id;
1103 
1104 	/* Process Tx completion interrupts first to return back buffers */
1105 	while (tx_mask) {
1106 		if (tx_mask & 0x1) {
1107 			work_done = dp_tx_comp_handler(soc,
1108 					soc->tx_comp_ring[ring].hal_srng,
1109 					remaining_quota);
1110 
1111 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
1112 				"tx mask 0x%x ring %d, budget %d, work_done %d",
1113 				tx_mask, ring, budget, work_done);
1114 
1115 			budget -= work_done;
1116 			if (budget <= 0)
1117 				goto budget_done;
1118 
1119 			remaining_quota = budget;
1120 		}
1121 		tx_mask = tx_mask >> 1;
1122 		ring++;
1123 	}
1124 
1125 
1126 	/* Process REO Exception ring interrupt */
1127 	if (rx_err_mask) {
1128 		work_done = dp_rx_err_process(soc,
1129 				soc->reo_exception_ring.hal_srng,
1130 				remaining_quota);
1131 
1132 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
1133 			"REO Exception Ring: work_done %d budget %d",
1134 			work_done, budget);
1135 
1136 		budget -=  work_done;
1137 		if (budget <= 0) {
1138 			goto budget_done;
1139 		}
1140 		remaining_quota = budget;
1141 	}
1142 
1143 	/* Process Rx WBM release ring interrupt */
1144 	if (rx_wbm_rel_mask) {
1145 		work_done = dp_rx_wbm_err_process(soc,
1146 				soc->rx_rel_ring.hal_srng, remaining_quota);
1147 
1148 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
1149 			"WBM Release Ring: work_done %d budget %d",
1150 			work_done, budget);
1151 
1152 		budget -=  work_done;
1153 		if (budget <= 0) {
1154 			goto budget_done;
1155 		}
1156 		remaining_quota = budget;
1157 	}
1158 
1159 	/* Process Rx interrupts */
1160 	if (rx_mask) {
1161 		for (ring = 0; ring < soc->num_reo_dest_rings; ring++) {
1162 			if (rx_mask & (1 << ring)) {
1163 				work_done = dp_rx_process(int_ctx,
1164 					    soc->reo_dest_ring[ring].hal_srng,
1165 					    ring,
1166 					    remaining_quota);
1167 
1168 				QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
1169 					"rx mask 0x%x ring %d, work_done %d budget %d",
1170 					rx_mask, ring, work_done, budget);
1171 
1172 				budget -=  work_done;
1173 				if (budget <= 0)
1174 					goto budget_done;
1175 				remaining_quota = budget;
1176 			}
1177 		}
1178 		for (ring = 0; ring < MAX_RX_MAC_RINGS; ring++) {
1179 			work_done = dp_rxdma_err_process(soc, ring,
1180 						remaining_quota);
1181 			budget -= work_done;
1182 		}
1183 	}
1184 
1185 	if (reo_status_mask)
1186 		dp_reo_status_ring_handler(soc);
1187 
1188 	/* Process LMAC interrupts */
1189 	for  (ring = 0 ; ring < MAX_PDEV_CNT; ring++) {
1190 		pdev = soc->pdev_list[ring];
1191 		if (pdev == NULL)
1192 			continue;
1193 		for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
1194 			int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id,
1195 								pdev->pdev_id);
1196 
1197 			if (int_ctx->rx_mon_ring_mask & (1 << mac_for_pdev)) {
1198 				work_done = dp_mon_process(soc, mac_for_pdev,
1199 						remaining_quota);
1200 				budget -= work_done;
1201 				if (budget <= 0)
1202 					goto budget_done;
1203 				remaining_quota = budget;
1204 			}
1205 
1206 			if (int_ctx->rxdma2host_ring_mask &
1207 					(1 << mac_for_pdev)) {
1208 				work_done = dp_rxdma_err_process(soc,
1209 							mac_for_pdev,
1210 							remaining_quota);
1211 				budget -=  work_done;
1212 				if (budget <= 0)
1213 					goto budget_done;
1214 				remaining_quota = budget;
1215 			}
1216 
1217 			if (int_ctx->host2rxdma_ring_mask &
1218 						(1 << mac_for_pdev)) {
1219 				union dp_rx_desc_list_elem_t *desc_list = NULL;
1220 				union dp_rx_desc_list_elem_t *tail = NULL;
1221 				struct dp_srng *rx_refill_buf_ring =
1222 					&pdev->rx_refill_buf_ring;
1223 
1224 				DP_STATS_INC(pdev, replenish.low_thresh_intrs,
1225 						1);
1226 				dp_rx_buffers_replenish(soc, mac_for_pdev,
1227 					rx_refill_buf_ring,
1228 					&soc->rx_desc_buf[mac_for_pdev], 0,
1229 					&desc_list, &tail);
1230 			}
1231 		}
1232 	}
1233 
1234 	qdf_lro_flush(int_ctx->lro_ctx);
1235 
1236 budget_done:
1237 	return dp_budget - budget;
1238 }
1239 
1240 /* dp_interrupt_timer()- timer poll for interrupts
1241  *
1242  * @arg: SoC Handle
1243  *
1244  * Return:
1245  *
1246  */
1247 static void dp_interrupt_timer(void *arg)
1248 {
1249 	struct dp_soc *soc = (struct dp_soc *) arg;
1250 	int i;
1251 
1252 	if (qdf_atomic_read(&soc->cmn_init_done)) {
1253 		for (i = 0;
1254 			i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++)
1255 			dp_service_srngs(&soc->intr_ctx[i], 0xffff);
1256 
1257 		qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
1258 	}
1259 }
1260 
1261 /*
1262  * dp_soc_interrupt_attach_poll() - Register handlers for DP interrupts
1263  * @txrx_soc: DP SOC handle
1264  *
1265  * Host driver will register for “DP_NUM_INTERRUPT_CONTEXTS” number of NAPI
1266  * contexts. Each NAPI context will have a tx_ring_mask , rx_ring_mask ,and
1267  * rx_monitor_ring mask to indicate the rings that are processed by the handler.
1268  *
1269  * Return: 0 for success. nonzero for failure.
1270  */
1271 static QDF_STATUS dp_soc_attach_poll(void *txrx_soc)
1272 {
1273 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
1274 	int i;
1275 
1276 	soc->intr_mode = DP_INTR_POLL;
1277 
1278 	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
1279 		soc->intr_ctx[i].dp_intr_id = i;
1280 		soc->intr_ctx[i].tx_ring_mask =
1281 			wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, i);
1282 		soc->intr_ctx[i].rx_ring_mask =
1283 			wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, i);
1284 		soc->intr_ctx[i].rx_mon_ring_mask =
1285 			wlan_cfg_get_rx_mon_ring_mask(soc->wlan_cfg_ctx, i);
1286 		soc->intr_ctx[i].rx_err_ring_mask =
1287 			wlan_cfg_get_rx_err_ring_mask(soc->wlan_cfg_ctx, i);
1288 		soc->intr_ctx[i].rx_wbm_rel_ring_mask =
1289 			wlan_cfg_get_rx_wbm_rel_ring_mask(soc->wlan_cfg_ctx, i);
1290 		soc->intr_ctx[i].reo_status_ring_mask =
1291 			wlan_cfg_get_reo_status_ring_mask(soc->wlan_cfg_ctx, i);
1292 		soc->intr_ctx[i].rxdma2host_ring_mask =
1293 			wlan_cfg_get_rxdma2host_ring_mask(soc->wlan_cfg_ctx, i);
1294 		soc->intr_ctx[i].soc = soc;
1295 		soc->intr_ctx[i].lro_ctx = qdf_lro_init();
1296 	}
1297 
1298 	qdf_timer_init(soc->osdev, &soc->int_timer,
1299 			dp_interrupt_timer, (void *)soc,
1300 			QDF_TIMER_TYPE_WAKE_APPS);
1301 
1302 	return QDF_STATUS_SUCCESS;
1303 }
1304 
1305 static QDF_STATUS dp_soc_interrupt_attach(void *txrx_soc);
1306 #if defined(CONFIG_MCL)
1307 extern int con_mode_monitor;
1308 /*
1309  * dp_soc_interrupt_attach_wrapper() - Register handlers for DP interrupts
1310  * @txrx_soc: DP SOC handle
1311  *
1312  * Call the appropriate attach function based on the mode of operation.
1313  * This is a WAR for enabling monitor mode.
1314  *
1315  * Return: 0 for success. nonzero for failure.
1316  */
1317 static QDF_STATUS dp_soc_interrupt_attach_wrapper(void *txrx_soc)
1318 {
1319 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
1320 
1321 	if (!(soc->wlan_cfg_ctx->napi_enabled) ||
1322 	     con_mode_monitor == QDF_GLOBAL_MONITOR_MODE) {
1323 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
1324 				  "%s: Poll mode", __func__);
1325 		return dp_soc_attach_poll(txrx_soc);
1326 	} else {
1327 
1328 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
1329 				  "%s: Interrupt  mode", __func__);
1330 		return dp_soc_interrupt_attach(txrx_soc);
1331 	}
1332 }
1333 #else
1334 #if defined(DP_INTR_POLL_BASED) && DP_INTR_POLL_BASED
1335 static QDF_STATUS dp_soc_interrupt_attach_wrapper(void *txrx_soc)
1336 {
1337 	return dp_soc_attach_poll(txrx_soc);
1338 }
1339 #else
1340 static QDF_STATUS dp_soc_interrupt_attach_wrapper(void *txrx_soc)
1341 {
1342 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
1343 
1344 	if (hif_is_polled_mode_enabled(soc->hif_handle))
1345 		return dp_soc_attach_poll(txrx_soc);
1346 	else
1347 		return dp_soc_interrupt_attach(txrx_soc);
1348 }
1349 #endif
1350 #endif
1351 
1352 static void dp_soc_interrupt_map_calculate_integrated(struct dp_soc *soc,
1353 		int intr_ctx_num, int *irq_id_map, int *num_irq_r)
1354 {
1355 	int j;
1356 	int num_irq = 0;
1357 
1358 	int tx_mask =
1359 		wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
1360 	int rx_mask =
1361 		wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
1362 	int rx_mon_mask =
1363 		wlan_cfg_get_rx_mon_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
1364 	int rx_err_ring_mask = wlan_cfg_get_rx_err_ring_mask(
1365 					soc->wlan_cfg_ctx, intr_ctx_num);
1366 	int rx_wbm_rel_ring_mask = wlan_cfg_get_rx_wbm_rel_ring_mask(
1367 					soc->wlan_cfg_ctx, intr_ctx_num);
1368 	int reo_status_ring_mask = wlan_cfg_get_reo_status_ring_mask(
1369 					soc->wlan_cfg_ctx, intr_ctx_num);
1370 	int rxdma2host_ring_mask = wlan_cfg_get_rxdma2host_ring_mask(
1371 					soc->wlan_cfg_ctx, intr_ctx_num);
1372 	int host2rxdma_ring_mask = wlan_cfg_get_host2rxdma_ring_mask(
1373 					soc->wlan_cfg_ctx, intr_ctx_num);
1374 
1375 	for (j = 0; j < HIF_MAX_GRP_IRQ; j++) {
1376 
1377 		if (tx_mask & (1 << j)) {
1378 			irq_id_map[num_irq++] =
1379 				(wbm2host_tx_completions_ring1 - j);
1380 		}
1381 
1382 		if (rx_mask & (1 << j)) {
1383 			irq_id_map[num_irq++] =
1384 				(reo2host_destination_ring1 - j);
1385 		}
1386 
1387 		if (rxdma2host_ring_mask & (1 << j)) {
1388 			irq_id_map[num_irq++] =
1389 				rxdma2host_destination_ring_mac1 -
1390 				wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, j);
1391 		}
1392 
1393 		if (host2rxdma_ring_mask & (1 << j)) {
1394 			irq_id_map[num_irq++] =
1395 				host2rxdma_host_buf_ring_mac1 -
1396 				wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, j);
1397 		}
1398 
1399 		if (rx_mon_mask & (1 << j)) {
1400 			irq_id_map[num_irq++] =
1401 				ppdu_end_interrupts_mac1 -
1402 				wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, j);
1403 			irq_id_map[num_irq++] =
1404 				rxdma2host_monitor_status_ring_mac1 -
1405 				wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, j);
1406 		}
1407 
1408 		if (rx_wbm_rel_ring_mask & (1 << j))
1409 			irq_id_map[num_irq++] = wbm2host_rx_release;
1410 
1411 		if (rx_err_ring_mask & (1 << j))
1412 			irq_id_map[num_irq++] = reo2host_exception;
1413 
1414 		if (reo_status_ring_mask & (1 << j))
1415 			irq_id_map[num_irq++] = reo2host_status;
1416 
1417 	}
1418 	*num_irq_r = num_irq;
1419 }
1420 
1421 static void dp_soc_interrupt_map_calculate_msi(struct dp_soc *soc,
1422 		int intr_ctx_num, int *irq_id_map, int *num_irq_r,
1423 		int msi_vector_count, int msi_vector_start)
1424 {
1425 	int tx_mask = wlan_cfg_get_tx_ring_mask(
1426 					soc->wlan_cfg_ctx, intr_ctx_num);
1427 	int rx_mask = wlan_cfg_get_rx_ring_mask(
1428 					soc->wlan_cfg_ctx, intr_ctx_num);
1429 	int rx_mon_mask = wlan_cfg_get_rx_mon_ring_mask(
1430 					soc->wlan_cfg_ctx, intr_ctx_num);
1431 	int rx_err_ring_mask = wlan_cfg_get_rx_err_ring_mask(
1432 					soc->wlan_cfg_ctx, intr_ctx_num);
1433 	int rx_wbm_rel_ring_mask = wlan_cfg_get_rx_wbm_rel_ring_mask(
1434 					soc->wlan_cfg_ctx, intr_ctx_num);
1435 	int reo_status_ring_mask = wlan_cfg_get_reo_status_ring_mask(
1436 					soc->wlan_cfg_ctx, intr_ctx_num);
1437 	int rxdma2host_ring_mask = wlan_cfg_get_rxdma2host_ring_mask(
1438 					soc->wlan_cfg_ctx, intr_ctx_num);
1439 
1440 	unsigned int vector =
1441 		(intr_ctx_num % msi_vector_count) + msi_vector_start;
1442 	int num_irq = 0;
1443 
1444 	soc->intr_mode = DP_INTR_MSI;
1445 
1446 	if (tx_mask | rx_mask | rx_mon_mask | rx_err_ring_mask |
1447 	    rx_wbm_rel_ring_mask | reo_status_ring_mask | rxdma2host_ring_mask)
1448 		irq_id_map[num_irq++] =
1449 			pld_get_msi_irq(soc->osdev->dev, vector);
1450 
1451 	*num_irq_r = num_irq;
1452 }
1453 
1454 static void dp_soc_interrupt_map_calculate(struct dp_soc *soc, int intr_ctx_num,
1455 				    int *irq_id_map, int *num_irq)
1456 {
1457 	int msi_vector_count, ret;
1458 	uint32_t msi_base_data, msi_vector_start;
1459 
1460 	ret = pld_get_user_msi_assignment(soc->osdev->dev, "DP",
1461 					    &msi_vector_count,
1462 					    &msi_base_data,
1463 					    &msi_vector_start);
1464 	if (ret)
1465 		return dp_soc_interrupt_map_calculate_integrated(soc,
1466 				intr_ctx_num, irq_id_map, num_irq);
1467 
1468 	else
1469 		dp_soc_interrupt_map_calculate_msi(soc,
1470 				intr_ctx_num, irq_id_map, num_irq,
1471 				msi_vector_count, msi_vector_start);
1472 }
1473 
1474 /*
1475  * dp_soc_interrupt_attach() - Register handlers for DP interrupts
1476  * @txrx_soc: DP SOC handle
1477  *
1478  * Host driver will register for “DP_NUM_INTERRUPT_CONTEXTS” number of NAPI
1479  * contexts. Each NAPI context will have a tx_ring_mask , rx_ring_mask ,and
1480  * rx_monitor_ring mask to indicate the rings that are processed by the handler.
1481  *
1482  * Return: 0 for success. nonzero for failure.
1483  */
1484 static QDF_STATUS dp_soc_interrupt_attach(void *txrx_soc)
1485 {
1486 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
1487 
1488 	int i = 0;
1489 	int num_irq = 0;
1490 
1491 	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
1492 		int ret = 0;
1493 
1494 		/* Map of IRQ ids registered with one interrupt context */
1495 		int irq_id_map[HIF_MAX_GRP_IRQ];
1496 
1497 		int tx_mask =
1498 			wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, i);
1499 		int rx_mask =
1500 			wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, i);
1501 		int rx_mon_mask =
1502 			dp_soc_get_mon_mask_for_interrupt_mode(soc, i);
1503 		int rx_err_ring_mask =
1504 			wlan_cfg_get_rx_err_ring_mask(soc->wlan_cfg_ctx, i);
1505 		int rx_wbm_rel_ring_mask =
1506 			wlan_cfg_get_rx_wbm_rel_ring_mask(soc->wlan_cfg_ctx, i);
1507 		int reo_status_ring_mask =
1508 			wlan_cfg_get_reo_status_ring_mask(soc->wlan_cfg_ctx, i);
1509 		int rxdma2host_ring_mask =
1510 			wlan_cfg_get_rxdma2host_ring_mask(soc->wlan_cfg_ctx, i);
1511 		int host2rxdma_ring_mask =
1512 			wlan_cfg_get_host2rxdma_ring_mask(soc->wlan_cfg_ctx, i);
1513 
1514 
1515 		soc->intr_ctx[i].dp_intr_id = i;
1516 		soc->intr_ctx[i].tx_ring_mask = tx_mask;
1517 		soc->intr_ctx[i].rx_ring_mask = rx_mask;
1518 		soc->intr_ctx[i].rx_mon_ring_mask = rx_mon_mask;
1519 		soc->intr_ctx[i].rx_err_ring_mask = rx_err_ring_mask;
1520 		soc->intr_ctx[i].rxdma2host_ring_mask = rxdma2host_ring_mask;
1521 		soc->intr_ctx[i].host2rxdma_ring_mask = host2rxdma_ring_mask;
1522 		soc->intr_ctx[i].rx_wbm_rel_ring_mask = rx_wbm_rel_ring_mask;
1523 		soc->intr_ctx[i].reo_status_ring_mask = reo_status_ring_mask;
1524 
1525 		soc->intr_ctx[i].soc = soc;
1526 
1527 		num_irq = 0;
1528 
1529 		dp_soc_interrupt_map_calculate(soc, i, &irq_id_map[0],
1530 					       &num_irq);
1531 
1532 		ret = hif_register_ext_group(soc->hif_handle,
1533 				num_irq, irq_id_map, dp_service_srngs,
1534 				&soc->intr_ctx[i], "dp_intr",
1535 				HIF_EXEC_NAPI_TYPE, QCA_NAPI_DEF_SCALE_BIN_SHIFT);
1536 
1537 		if (ret) {
1538 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1539 			FL("failed, ret = %d"), ret);
1540 
1541 			return QDF_STATUS_E_FAILURE;
1542 		}
1543 		soc->intr_ctx[i].lro_ctx = qdf_lro_init();
1544 	}
1545 
1546 	hif_configure_ext_group_interrupts(soc->hif_handle);
1547 
1548 	return QDF_STATUS_SUCCESS;
1549 }
1550 
1551 /*
1552  * dp_soc_interrupt_detach() - Deregister any allocations done for interrupts
1553  * @txrx_soc: DP SOC handle
1554  *
1555  * Return: void
1556  */
1557 static void dp_soc_interrupt_detach(void *txrx_soc)
1558 {
1559 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
1560 	int i;
1561 
1562 	if (soc->intr_mode == DP_INTR_POLL) {
1563 		qdf_timer_stop(&soc->int_timer);
1564 		qdf_timer_free(&soc->int_timer);
1565 	} else {
1566 		hif_deregister_exec_group(soc->hif_handle, "dp_intr");
1567 	}
1568 
1569 	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
1570 		soc->intr_ctx[i].tx_ring_mask = 0;
1571 		soc->intr_ctx[i].rx_ring_mask = 0;
1572 		soc->intr_ctx[i].rx_mon_ring_mask = 0;
1573 		soc->intr_ctx[i].rx_err_ring_mask = 0;
1574 		soc->intr_ctx[i].rx_wbm_rel_ring_mask = 0;
1575 		soc->intr_ctx[i].reo_status_ring_mask = 0;
1576 		soc->intr_ctx[i].rxdma2host_ring_mask = 0;
1577 		soc->intr_ctx[i].host2rxdma_ring_mask = 0;
1578 
1579 		qdf_lro_deinit(soc->intr_ctx[i].lro_ctx);
1580 	}
1581 }
1582 
1583 #define AVG_MAX_MPDUS_PER_TID 128
1584 #define AVG_TIDS_PER_CLIENT 2
1585 #define AVG_FLOWS_PER_TID 2
1586 #define AVG_MSDUS_PER_FLOW 128
1587 #define AVG_MSDUS_PER_MPDU 4
1588 
1589 /*
1590  * Allocate and setup link descriptor pool that will be used by HW for
1591  * various link and queue descriptors and managed by WBM
1592  */
1593 static int dp_hw_link_desc_pool_setup(struct dp_soc *soc)
1594 {
1595 	int link_desc_size = hal_get_link_desc_size(soc->hal_soc);
1596 	int link_desc_align = hal_get_link_desc_align(soc->hal_soc);
1597 	uint32_t max_clients = wlan_cfg_get_max_clients(soc->wlan_cfg_ctx);
1598 	uint32_t num_mpdus_per_link_desc =
1599 		hal_num_mpdus_per_link_desc(soc->hal_soc);
1600 	uint32_t num_msdus_per_link_desc =
1601 		hal_num_msdus_per_link_desc(soc->hal_soc);
1602 	uint32_t num_mpdu_links_per_queue_desc =
1603 		hal_num_mpdu_links_per_queue_desc(soc->hal_soc);
1604 	uint32_t max_alloc_size = wlan_cfg_max_alloc_size(soc->wlan_cfg_ctx);
1605 	uint32_t total_link_descs, total_mem_size;
1606 	uint32_t num_mpdu_link_descs, num_mpdu_queue_descs;
1607 	uint32_t num_tx_msdu_link_descs, num_rx_msdu_link_descs;
1608 	uint32_t num_link_desc_banks;
1609 	uint32_t last_bank_size = 0;
1610 	uint32_t entry_size, num_entries;
1611 	int i;
1612 	uint32_t desc_id = 0;
1613 
1614 	/* Only Tx queue descriptors are allocated from common link descriptor
1615 	 * pool Rx queue descriptors are not included in this because (REO queue
1616 	 * extension descriptors) they are expected to be allocated contiguously
1617 	 * with REO queue descriptors
1618 	 */
1619 	num_mpdu_link_descs = (max_clients * AVG_TIDS_PER_CLIENT *
1620 		AVG_MAX_MPDUS_PER_TID) / num_mpdus_per_link_desc;
1621 
1622 	num_mpdu_queue_descs = num_mpdu_link_descs /
1623 		num_mpdu_links_per_queue_desc;
1624 
1625 	num_tx_msdu_link_descs = (max_clients * AVG_TIDS_PER_CLIENT *
1626 		AVG_FLOWS_PER_TID * AVG_MSDUS_PER_FLOW) /
1627 		num_msdus_per_link_desc;
1628 
1629 	num_rx_msdu_link_descs = (max_clients * AVG_TIDS_PER_CLIENT *
1630 		AVG_MAX_MPDUS_PER_TID * AVG_MSDUS_PER_MPDU) / 6;
1631 
1632 	num_entries = num_mpdu_link_descs + num_mpdu_queue_descs +
1633 		num_tx_msdu_link_descs + num_rx_msdu_link_descs;
1634 
1635 	/* Round up to power of 2 */
1636 	total_link_descs = 1;
1637 	while (total_link_descs < num_entries)
1638 		total_link_descs <<= 1;
1639 
1640 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
1641 		FL("total_link_descs: %u, link_desc_size: %d"),
1642 		total_link_descs, link_desc_size);
1643 	total_mem_size =  total_link_descs * link_desc_size;
1644 
1645 	total_mem_size += link_desc_align;
1646 
1647 	if (total_mem_size <= max_alloc_size) {
1648 		num_link_desc_banks = 0;
1649 		last_bank_size = total_mem_size;
1650 	} else {
1651 		num_link_desc_banks = (total_mem_size) /
1652 			(max_alloc_size - link_desc_align);
1653 		last_bank_size = total_mem_size %
1654 			(max_alloc_size - link_desc_align);
1655 	}
1656 
1657 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
1658 		FL("total_mem_size: %d, num_link_desc_banks: %u"),
1659 		total_mem_size, num_link_desc_banks);
1660 
1661 	for (i = 0; i < num_link_desc_banks; i++) {
1662 		soc->link_desc_banks[i].base_vaddr_unaligned =
1663 			qdf_mem_alloc_consistent(soc->osdev, soc->osdev->dev,
1664 			max_alloc_size,
1665 			&(soc->link_desc_banks[i].base_paddr_unaligned));
1666 		soc->link_desc_banks[i].size = max_alloc_size;
1667 
1668 		soc->link_desc_banks[i].base_vaddr = (void *)((unsigned long)(
1669 			soc->link_desc_banks[i].base_vaddr_unaligned) +
1670 			((unsigned long)(
1671 			soc->link_desc_banks[i].base_vaddr_unaligned) %
1672 			link_desc_align));
1673 
1674 		soc->link_desc_banks[i].base_paddr = (unsigned long)(
1675 			soc->link_desc_banks[i].base_paddr_unaligned) +
1676 			((unsigned long)(soc->link_desc_banks[i].base_vaddr) -
1677 			(unsigned long)(
1678 			soc->link_desc_banks[i].base_vaddr_unaligned));
1679 
1680 		if (!soc->link_desc_banks[i].base_vaddr_unaligned) {
1681 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1682 				FL("Link descriptor memory alloc failed"));
1683 			goto fail;
1684 		}
1685 	}
1686 
1687 	if (last_bank_size) {
1688 		/* Allocate last bank in case total memory required is not exact
1689 		 * multiple of max_alloc_size
1690 		 */
1691 		soc->link_desc_banks[i].base_vaddr_unaligned =
1692 			qdf_mem_alloc_consistent(soc->osdev, soc->osdev->dev,
1693 			last_bank_size,
1694 			&(soc->link_desc_banks[i].base_paddr_unaligned));
1695 		soc->link_desc_banks[i].size = last_bank_size;
1696 
1697 		soc->link_desc_banks[i].base_vaddr = (void *)((unsigned long)
1698 			(soc->link_desc_banks[i].base_vaddr_unaligned) +
1699 			((unsigned long)(
1700 			soc->link_desc_banks[i].base_vaddr_unaligned) %
1701 			link_desc_align));
1702 
1703 		soc->link_desc_banks[i].base_paddr =
1704 			(unsigned long)(
1705 			soc->link_desc_banks[i].base_paddr_unaligned) +
1706 			((unsigned long)(soc->link_desc_banks[i].base_vaddr) -
1707 			(unsigned long)(
1708 			soc->link_desc_banks[i].base_vaddr_unaligned));
1709 	}
1710 
1711 
1712 	/* Allocate and setup link descriptor idle list for HW internal use */
1713 	entry_size = hal_srng_get_entrysize(soc->hal_soc, WBM_IDLE_LINK);
1714 	total_mem_size = entry_size * total_link_descs;
1715 
1716 	if (total_mem_size <= max_alloc_size) {
1717 		void *desc;
1718 
1719 		if (dp_srng_setup(soc, &soc->wbm_idle_link_ring,
1720 			WBM_IDLE_LINK, 0, 0, total_link_descs)) {
1721 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1722 				FL("Link desc idle ring setup failed"));
1723 			goto fail;
1724 		}
1725 
1726 		hal_srng_access_start_unlocked(soc->hal_soc,
1727 			soc->wbm_idle_link_ring.hal_srng);
1728 
1729 		for (i = 0; i < MAX_LINK_DESC_BANKS &&
1730 			soc->link_desc_banks[i].base_paddr; i++) {
1731 			uint32_t num_entries = (soc->link_desc_banks[i].size -
1732 				((unsigned long)(
1733 				soc->link_desc_banks[i].base_vaddr) -
1734 				(unsigned long)(
1735 				soc->link_desc_banks[i].base_vaddr_unaligned)))
1736 				/ link_desc_size;
1737 			unsigned long paddr = (unsigned long)(
1738 				soc->link_desc_banks[i].base_paddr);
1739 
1740 			while (num_entries && (desc = hal_srng_src_get_next(
1741 				soc->hal_soc,
1742 				soc->wbm_idle_link_ring.hal_srng))) {
1743 				hal_set_link_desc_addr(desc,
1744 					LINK_DESC_COOKIE(desc_id, i), paddr);
1745 				num_entries--;
1746 				desc_id++;
1747 				paddr += link_desc_size;
1748 			}
1749 		}
1750 		hal_srng_access_end_unlocked(soc->hal_soc,
1751 			soc->wbm_idle_link_ring.hal_srng);
1752 	} else {
1753 		uint32_t num_scatter_bufs;
1754 		uint32_t num_entries_per_buf;
1755 		uint32_t rem_entries;
1756 		uint8_t *scatter_buf_ptr;
1757 		uint16_t scatter_buf_num;
1758 
1759 		soc->wbm_idle_scatter_buf_size =
1760 			hal_idle_list_scatter_buf_size(soc->hal_soc);
1761 		num_entries_per_buf = hal_idle_scatter_buf_num_entries(
1762 			soc->hal_soc, soc->wbm_idle_scatter_buf_size);
1763 		num_scatter_bufs = hal_idle_list_num_scatter_bufs(
1764 					soc->hal_soc, total_mem_size,
1765 					soc->wbm_idle_scatter_buf_size);
1766 
1767 		if (num_scatter_bufs > MAX_IDLE_SCATTER_BUFS) {
1768 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1769 					FL("scatter bufs size out of bounds"));
1770 			goto fail;
1771 		}
1772 
1773 		for (i = 0; i < num_scatter_bufs; i++) {
1774 			soc->wbm_idle_scatter_buf_base_vaddr[i] =
1775 				qdf_mem_alloc_consistent(soc->osdev,
1776 							soc->osdev->dev,
1777 				soc->wbm_idle_scatter_buf_size,
1778 				&(soc->wbm_idle_scatter_buf_base_paddr[i]));
1779 			if (soc->wbm_idle_scatter_buf_base_vaddr[i] == NULL) {
1780 				QDF_TRACE(QDF_MODULE_ID_DP,
1781 						QDF_TRACE_LEVEL_ERROR,
1782 					FL("Scatter list memory alloc failed"));
1783 				goto fail;
1784 			}
1785 		}
1786 
1787 		/* Populate idle list scatter buffers with link descriptor
1788 		 * pointers
1789 		 */
1790 		scatter_buf_num = 0;
1791 		scatter_buf_ptr = (uint8_t *)(
1792 			soc->wbm_idle_scatter_buf_base_vaddr[scatter_buf_num]);
1793 		rem_entries = num_entries_per_buf;
1794 
1795 		for (i = 0; i < MAX_LINK_DESC_BANKS &&
1796 			soc->link_desc_banks[i].base_paddr; i++) {
1797 			uint32_t num_link_descs =
1798 				(soc->link_desc_banks[i].size -
1799 				((unsigned long)(
1800 				soc->link_desc_banks[i].base_vaddr) -
1801 				(unsigned long)(
1802 				soc->link_desc_banks[i].base_vaddr_unaligned)))
1803 				/ link_desc_size;
1804 			unsigned long paddr = (unsigned long)(
1805 				soc->link_desc_banks[i].base_paddr);
1806 
1807 			while (num_link_descs) {
1808 				hal_set_link_desc_addr((void *)scatter_buf_ptr,
1809 					LINK_DESC_COOKIE(desc_id, i), paddr);
1810 				num_link_descs--;
1811 				desc_id++;
1812 				paddr += link_desc_size;
1813 				rem_entries--;
1814 				if (rem_entries) {
1815 					scatter_buf_ptr += entry_size;
1816 				} else {
1817 					rem_entries = num_entries_per_buf;
1818 					scatter_buf_num++;
1819 
1820 					if (scatter_buf_num >= num_scatter_bufs)
1821 						break;
1822 
1823 					scatter_buf_ptr = (uint8_t *)(
1824 						soc->wbm_idle_scatter_buf_base_vaddr[
1825 						scatter_buf_num]);
1826 				}
1827 			}
1828 		}
1829 		/* Setup link descriptor idle list in HW */
1830 		hal_setup_link_idle_list(soc->hal_soc,
1831 			soc->wbm_idle_scatter_buf_base_paddr,
1832 			soc->wbm_idle_scatter_buf_base_vaddr,
1833 			num_scatter_bufs, soc->wbm_idle_scatter_buf_size,
1834 			(uint32_t)(scatter_buf_ptr -
1835 			(uint8_t *)(soc->wbm_idle_scatter_buf_base_vaddr[
1836 			scatter_buf_num-1])), total_link_descs);
1837 	}
1838 	return 0;
1839 
1840 fail:
1841 	if (soc->wbm_idle_link_ring.hal_srng) {
1842 		dp_srng_cleanup(soc, &soc->wbm_idle_link_ring,
1843 				WBM_IDLE_LINK, 0);
1844 	}
1845 
1846 	for (i = 0; i < MAX_IDLE_SCATTER_BUFS; i++) {
1847 		if (soc->wbm_idle_scatter_buf_base_vaddr[i]) {
1848 			qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
1849 				soc->wbm_idle_scatter_buf_size,
1850 				soc->wbm_idle_scatter_buf_base_vaddr[i],
1851 				soc->wbm_idle_scatter_buf_base_paddr[i], 0);
1852 			soc->wbm_idle_scatter_buf_base_vaddr[i] = NULL;
1853 		}
1854 	}
1855 
1856 	for (i = 0; i < MAX_LINK_DESC_BANKS; i++) {
1857 		if (soc->link_desc_banks[i].base_vaddr_unaligned) {
1858 			qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
1859 				soc->link_desc_banks[i].size,
1860 				soc->link_desc_banks[i].base_vaddr_unaligned,
1861 				soc->link_desc_banks[i].base_paddr_unaligned,
1862 				0);
1863 			soc->link_desc_banks[i].base_vaddr_unaligned = NULL;
1864 		}
1865 	}
1866 	return QDF_STATUS_E_FAILURE;
1867 }
1868 
1869 /*
1870  * Free link descriptor pool that was setup HW
1871  */
1872 static void dp_hw_link_desc_pool_cleanup(struct dp_soc *soc)
1873 {
1874 	int i;
1875 
1876 	if (soc->wbm_idle_link_ring.hal_srng) {
1877 		dp_srng_cleanup(soc, &soc->wbm_idle_link_ring,
1878 			WBM_IDLE_LINK, 0);
1879 	}
1880 
1881 	for (i = 0; i < MAX_IDLE_SCATTER_BUFS; i++) {
1882 		if (soc->wbm_idle_scatter_buf_base_vaddr[i]) {
1883 			qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
1884 				soc->wbm_idle_scatter_buf_size,
1885 				soc->wbm_idle_scatter_buf_base_vaddr[i],
1886 				soc->wbm_idle_scatter_buf_base_paddr[i], 0);
1887 			soc->wbm_idle_scatter_buf_base_vaddr[i] = NULL;
1888 		}
1889 	}
1890 
1891 	for (i = 0; i < MAX_LINK_DESC_BANKS; i++) {
1892 		if (soc->link_desc_banks[i].base_vaddr_unaligned) {
1893 			qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
1894 				soc->link_desc_banks[i].size,
1895 				soc->link_desc_banks[i].base_vaddr_unaligned,
1896 				soc->link_desc_banks[i].base_paddr_unaligned,
1897 				0);
1898 			soc->link_desc_banks[i].base_vaddr_unaligned = NULL;
1899 		}
1900 	}
1901 }
1902 
1903 #define REO_DST_RING_SIZE_QCA6290 1024
1904 #ifndef QCA_WIFI_QCA8074_VP
1905 #define REO_DST_RING_SIZE_QCA8074 2048
1906 #else
1907 #define REO_DST_RING_SIZE_QCA8074 8
1908 #endif
1909 
1910 /*
1911  * dp_wds_aging_timer_fn() - Timer callback function for WDS aging
1912  * @soc: Datapath SOC handle
1913  *
1914  * This is a timer function used to age out stale AST nodes from
1915  * AST table
1916  */
1917 #ifdef FEATURE_WDS
1918 static void dp_wds_aging_timer_fn(void *soc_hdl)
1919 {
1920 	struct dp_soc *soc = (struct dp_soc *) soc_hdl;
1921 	struct dp_pdev *pdev;
1922 	struct dp_vdev *vdev;
1923 	struct dp_peer *peer;
1924 	struct dp_ast_entry *ase, *temp_ase;
1925 	int i;
1926 
1927 	qdf_spin_lock_bh(&soc->ast_lock);
1928 
1929 	for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) {
1930 		pdev = soc->pdev_list[i];
1931 		qdf_spin_lock_bh(&pdev->vdev_list_lock);
1932 		DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
1933 			DP_VDEV_ITERATE_PEER_LIST(vdev, peer) {
1934 				DP_PEER_ITERATE_ASE_LIST(peer, ase, temp_ase) {
1935 					/*
1936 					 * Do not expire static ast entries
1937 					 * and HM WDS entries
1938 					 */
1939 					if (ase->type != CDP_TXRX_AST_TYPE_WDS)
1940 						continue;
1941 
1942 					if (ase->is_active) {
1943 						ase->is_active = FALSE;
1944 						continue;
1945 					}
1946 
1947 					DP_STATS_INC(soc, ast.aged_out, 1);
1948 					dp_peer_del_ast(soc, ase);
1949 				}
1950 			}
1951 		}
1952 		qdf_spin_unlock_bh(&pdev->vdev_list_lock);
1953 	}
1954 
1955 	qdf_spin_unlock_bh(&soc->ast_lock);
1956 
1957 	if (qdf_atomic_read(&soc->cmn_init_done))
1958 		qdf_timer_mod(&soc->wds_aging_timer, DP_WDS_AGING_TIMER_DEFAULT_MS);
1959 }
1960 
1961 
1962 /*
1963  * dp_soc_wds_attach() - Setup WDS timer and AST table
1964  * @soc:		Datapath SOC handle
1965  *
1966  * Return: None
1967  */
1968 static void dp_soc_wds_attach(struct dp_soc *soc)
1969 {
1970 	qdf_timer_init(soc->osdev, &soc->wds_aging_timer,
1971 			dp_wds_aging_timer_fn, (void *)soc,
1972 			QDF_TIMER_TYPE_WAKE_APPS);
1973 
1974 	qdf_timer_mod(&soc->wds_aging_timer, DP_WDS_AGING_TIMER_DEFAULT_MS);
1975 }
1976 
1977 /*
1978  * dp_soc_wds_detach() - Detach WDS data structures and timers
1979  * @txrx_soc: DP SOC handle
1980  *
1981  * Return: None
1982  */
1983 static void dp_soc_wds_detach(struct dp_soc *soc)
1984 {
1985 	qdf_timer_stop(&soc->wds_aging_timer);
1986 	qdf_timer_free(&soc->wds_aging_timer);
1987 }
1988 #else
1989 static void dp_soc_wds_attach(struct dp_soc *soc)
1990 {
1991 }
1992 
1993 static void dp_soc_wds_detach(struct dp_soc *soc)
1994 {
1995 }
1996 #endif
1997 
1998 /*
1999  * dp_soc_reset_ring_map() - Reset cpu ring map
2000  * @soc: Datapath soc handler
2001  *
2002  * This api resets the default cpu ring map
2003  */
2004 
2005 static void dp_soc_reset_cpu_ring_map(struct dp_soc *soc)
2006 {
2007 	uint8_t i;
2008 	int nss_config = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
2009 
2010 	for (i = 0; i < WLAN_CFG_INT_NUM_CONTEXTS; i++) {
2011 		if (nss_config == 1) {
2012 			/*
2013 			 * Setting Tx ring map for one nss offloaded radio
2014 			 */
2015 			soc->tx_ring_map[i] = dp_cpu_ring_map[DP_NSS_FIRST_RADIO_OFFLOADED_MAP][i];
2016 		} else if (nss_config == 2) {
2017 			/*
2018 			 * Setting Tx ring for two nss offloaded radios
2019 			 */
2020 			soc->tx_ring_map[i] = dp_cpu_ring_map[DP_NSS_SECOND_RADIO_OFFLOADED_MAP][i];
2021 		} else {
2022 			/*
2023 			 * Setting Tx ring map for all nss offloaded radios
2024 			 */
2025 			soc->tx_ring_map[i] = dp_cpu_ring_map[DP_NSS_ALL_RADIO_OFFLOADED_MAP][i];
2026 		}
2027 	}
2028 }
2029 
2030 /*
2031  * dp_soc_ring_if_nss_offloaded() - find if ring is offloaded to NSS
2032  * @dp_soc - DP soc handle
2033  * @ring_type - ring type
2034  * @ring_num - ring_num
2035  *
2036  * return 0 or 1
2037  */
2038 static uint8_t dp_soc_ring_if_nss_offloaded(struct dp_soc *soc, enum hal_ring_type ring_type, int ring_num)
2039 {
2040 	uint8_t nss_config = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
2041 	uint8_t status = 0;
2042 
2043 	switch (ring_type) {
2044 	case WBM2SW_RELEASE:
2045 	case REO_DST:
2046 	case RXDMA_BUF:
2047 		status = ((nss_config) & (1 << ring_num));
2048 		break;
2049 	default:
2050 		break;
2051 	}
2052 
2053 	return status;
2054 }
2055 
2056 /*
2057  * dp_soc_reset_intr_mask() - reset interrupt mask
2058  * @dp_soc - DP Soc handle
2059  *
2060  * Return: Return void
2061  */
2062 static void dp_soc_reset_intr_mask(struct dp_soc *soc)
2063 {
2064 	uint8_t j;
2065 	int *grp_mask = NULL;
2066 	int group_number, mask, num_ring;
2067 
2068 	/* number of tx ring */
2069 	num_ring = wlan_cfg_num_tcl_data_rings(soc->wlan_cfg_ctx);
2070 
2071 	/*
2072 	 * group mask for tx completion  ring.
2073 	 */
2074 	grp_mask =  &soc->wlan_cfg_ctx->int_tx_ring_mask[0];
2075 
2076 	/* loop and reset the mask for only offloaded ring */
2077 	for (j = 0; j < num_ring; j++) {
2078 		if (!dp_soc_ring_if_nss_offloaded(soc, WBM2SW_RELEASE, j)) {
2079 			continue;
2080 		}
2081 
2082 		/*
2083 		 * Group number corresponding to tx offloaded ring.
2084 		 */
2085 		group_number = dp_srng_find_ring_in_mask(j, grp_mask);
2086 		if (group_number < 0) {
2087 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2088 					FL("ring not part of any group; ring_type: %d,ring_num %d"),
2089 					WBM2SW_RELEASE, j);
2090 			return;
2091 		}
2092 
2093 		/* reset the tx mask for offloaded ring */
2094 		mask = wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, group_number);
2095 		mask &= (~(1 << j));
2096 
2097 		/*
2098 		 * reset the interrupt mask for offloaded ring.
2099 		 */
2100 		wlan_cfg_set_tx_ring_mask(soc->wlan_cfg_ctx, group_number, mask);
2101 	}
2102 
2103 	/* number of rx rings */
2104 	num_ring = wlan_cfg_num_reo_dest_rings(soc->wlan_cfg_ctx);
2105 
2106 	/*
2107 	 * group mask for reo destination ring.
2108 	 */
2109 	grp_mask = &soc->wlan_cfg_ctx->int_rx_ring_mask[0];
2110 
2111 	/* loop and reset the mask for only offloaded ring */
2112 	for (j = 0; j < num_ring; j++) {
2113 		if (!dp_soc_ring_if_nss_offloaded(soc, REO_DST, j)) {
2114 			continue;
2115 		}
2116 
2117 		/*
2118 		 * Group number corresponding to rx offloaded ring.
2119 		 */
2120 		group_number = dp_srng_find_ring_in_mask(j, grp_mask);
2121 		if (group_number < 0) {
2122 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2123 					FL("ring not part of any group; ring_type: %d,ring_num %d"),
2124 					REO_DST, j);
2125 			return;
2126 		}
2127 
2128 		/* set the interrupt mask for offloaded ring */
2129 		mask =  wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, group_number);
2130 		mask &= (~(1 << j));
2131 
2132 		/*
2133 		 * set the interrupt mask to zero for rx offloaded radio.
2134 		 */
2135 		wlan_cfg_set_rx_ring_mask(soc->wlan_cfg_ctx, group_number, mask);
2136 	}
2137 
2138 	/*
2139 	 * group mask for Rx buffer refill ring
2140 	 */
2141 	grp_mask = &soc->wlan_cfg_ctx->int_host2rxdma_ring_mask[0];
2142 
2143 	/* loop and reset the mask for only offloaded ring */
2144 	for (j = 0; j < MAX_PDEV_CNT; j++) {
2145 		if (!dp_soc_ring_if_nss_offloaded(soc, RXDMA_BUF, j)) {
2146 			continue;
2147 		}
2148 
2149 		/*
2150 		 * Group number corresponding to rx offloaded ring.
2151 		 */
2152 		group_number = dp_srng_find_ring_in_mask(j, grp_mask);
2153 		if (group_number < 0) {
2154 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2155 					FL("ring not part of any group; ring_type: %d,ring_num %d"),
2156 					REO_DST, j);
2157 			return;
2158 		}
2159 
2160 		/* set the interrupt mask for offloaded ring */
2161 		mask =  wlan_cfg_get_host2rxdma_ring_mask(soc->wlan_cfg_ctx,
2162 				group_number);
2163 		mask &= (~(1 << j));
2164 
2165 		/*
2166 		 * set the interrupt mask to zero for rx offloaded radio.
2167 		 */
2168 		wlan_cfg_set_host2rxdma_ring_mask(soc->wlan_cfg_ctx,
2169 			group_number, mask);
2170 	}
2171 }
2172 
2173 #ifdef IPA_OFFLOAD
2174 /**
2175  * dp_reo_remap_config() - configure reo remap register value based
2176  *                         nss configuration.
2177  *		based on offload_radio value below remap configuration
2178  *		get applied.
2179  *		0 - both Radios handled by host (remap rings 1, 2, 3 & 4)
2180  *		1 - 1st Radio handled by NSS (remap rings 2, 3 & 4)
2181  *		2 - 2nd Radio handled by NSS (remap rings 1, 2 & 4)
2182  *		3 - both Radios handled by NSS (remap not required)
2183  *		4 - IPA OFFLOAD enabled (remap rings 1,2 & 3)
2184  *
2185  * @remap1: output parameter indicates reo remap 1 register value
2186  * @remap2: output parameter indicates reo remap 2 register value
2187  * Return: bool type, true if remap is configured else false.
2188  */
2189 static bool dp_reo_remap_config(struct dp_soc *soc,
2190 				uint32_t *remap1,
2191 				uint32_t *remap2)
2192 {
2193 
2194 	*remap1 = ((0x1 << 0) | (0x2 << 3) | (0x3 << 6) | (0x1 << 9) |
2195 		(0x2 << 12) | (0x3 << 15) | (0x1 << 18) | (0x2 << 21)) << 8;
2196 
2197 	*remap2 = ((0x3 << 0) | (0x1 << 3) | (0x2 << 6) | (0x3 << 9) |
2198 		(0x1 << 12) | (0x2 << 15) | (0x3 << 18) | (0x1 << 21)) << 8;
2199 
2200 	return true;
2201 }
2202 #else
2203 static bool dp_reo_remap_config(struct dp_soc *soc,
2204 				uint32_t *remap1,
2205 				uint32_t *remap2)
2206 {
2207 	uint8_t offload_radio = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
2208 
2209 	switch (offload_radio) {
2210 	case 0:
2211 		*remap1 = ((0x1 << 0) | (0x2 << 3) | (0x3 << 6) |
2212 			(0x4 << 9) | (0x1 << 12) | (0x2 << 15) |
2213 			(0x3 << 18) | (0x4 << 21)) << 8;
2214 
2215 		*remap2 = ((0x1 << 0) | (0x2 << 3) | (0x3 << 6) |
2216 			(0x4 << 9) | (0x1 << 12) | (0x2 << 15) |
2217 			(0x3 << 18) | (0x4 << 21)) << 8;
2218 		break;
2219 
2220 	case 1:
2221 		*remap1 = ((0x2 << 0) | (0x3 << 3) | (0x4 << 6) |
2222 			(0x2 << 9) | (0x3 << 12) | (0x4 << 15) |
2223 			(0x2 << 18) | (0x3 << 21)) << 8;
2224 
2225 		*remap2 = ((0x4 << 0) | (0x2 << 3) | (0x3 << 6) |
2226 			(0x4 << 9) | (0x2 << 12) | (0x3 << 15) |
2227 			(0x4 << 18) | (0x2 << 21)) << 8;
2228 		break;
2229 
2230 	case 2:
2231 		*remap1 = ((0x1 << 0) | (0x3 << 3) | (0x4 << 6) |
2232 			(0x1 << 9) | (0x3 << 12) | (0x4 << 15) |
2233 			(0x1 << 18) | (0x3 << 21)) << 8;
2234 
2235 		*remap2 = ((0x4 << 0) | (0x1 << 3) | (0x3 << 6) |
2236 			(0x4 << 9) | (0x1 << 12) | (0x3 << 15) |
2237 			(0x4 << 18) | (0x1 << 21)) << 8;
2238 		break;
2239 
2240 	case 3:
2241 		/* return false if both radios are offloaded to NSS */
2242 		return false;
2243 	}
2244 	return true;
2245 }
2246 #endif
2247 
2248 /*
2249  * dp_reo_frag_dst_set() - configure reo register to set the
2250  *                        fragment destination ring
2251  * @soc : Datapath soc
2252  * @frag_dst_ring : output parameter to set fragment destination ring
2253  *
2254  * Based on offload_radio below fragment destination rings is selected
2255  * 0 - TCL
2256  * 1 - SW1
2257  * 2 - SW2
2258  * 3 - SW3
2259  * 4 - SW4
2260  * 5 - Release
2261  * 6 - FW
2262  * 7 - alternate select
2263  *
2264  * return: void
2265  */
2266 static void dp_reo_frag_dst_set(struct dp_soc *soc, uint8_t *frag_dst_ring)
2267 {
2268 	uint8_t offload_radio = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
2269 
2270 	switch (offload_radio) {
2271 	case 0:
2272 		*frag_dst_ring = HAL_SRNG_REO_EXCEPTION;
2273 		break;
2274 	case 3:
2275 		*frag_dst_ring = HAL_SRNG_REO_ALTERNATE_SELECT;
2276 		break;
2277 	default:
2278 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2279 				FL("dp_reo_frag_dst_set invalid offload radio config"));
2280 		break;
2281 	}
2282 }
2283 
2284 /*
2285  * dp_soc_cmn_setup() - Common SoC level initializion
2286  * @soc:		Datapath SOC handle
2287  *
2288  * This is an internal function used to setup common SOC data structures,
2289  * to be called from PDEV attach after receiving HW mode capabilities from FW
2290  */
2291 static int dp_soc_cmn_setup(struct dp_soc *soc)
2292 {
2293 	int i;
2294 	struct hal_reo_params reo_params;
2295 	int tx_ring_size;
2296 	int tx_comp_ring_size;
2297 	int reo_dst_ring_size;
2298 	uint32_t entries;
2299 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
2300 
2301 	if (qdf_atomic_read(&soc->cmn_init_done))
2302 		return 0;
2303 
2304 	if (dp_hw_link_desc_pool_setup(soc))
2305 		goto fail1;
2306 
2307 	soc_cfg_ctx = soc->wlan_cfg_ctx;
2308 	/* Setup SRNG rings */
2309 	/* Common rings */
2310 	if (dp_srng_setup(soc, &soc->wbm_desc_rel_ring, SW2WBM_RELEASE, 0, 0,
2311 		wlan_cfg_get_dp_soc_wbm_release_ring_size(soc_cfg_ctx))) {
2312 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2313 			FL("dp_srng_setup failed for wbm_desc_rel_ring"));
2314 		goto fail1;
2315 	}
2316 
2317 
2318 	soc->num_tcl_data_rings = 0;
2319 	/* Tx data rings */
2320 	if (!wlan_cfg_per_pdev_tx_ring(soc_cfg_ctx)) {
2321 		soc->num_tcl_data_rings =
2322 			wlan_cfg_num_tcl_data_rings(soc_cfg_ctx);
2323 		tx_comp_ring_size =
2324 			wlan_cfg_tx_comp_ring_size(soc_cfg_ctx);
2325 		tx_ring_size =
2326 			wlan_cfg_tx_ring_size(soc_cfg_ctx);
2327 		for (i = 0; i < soc->num_tcl_data_rings; i++) {
2328 			if (dp_srng_setup(soc, &soc->tcl_data_ring[i],
2329 				TCL_DATA, i, 0, tx_ring_size)) {
2330 				QDF_TRACE(QDF_MODULE_ID_DP,
2331 					QDF_TRACE_LEVEL_ERROR,
2332 					FL("dp_srng_setup failed for tcl_data_ring[%d]"), i);
2333 				goto fail1;
2334 			}
2335 			/*
2336 			 * TBD: Set IPA WBM ring size with ini IPA UC tx buffer
2337 			 * count
2338 			 */
2339 			if (dp_srng_setup(soc, &soc->tx_comp_ring[i],
2340 				WBM2SW_RELEASE, i, 0, tx_comp_ring_size)) {
2341 				QDF_TRACE(QDF_MODULE_ID_DP,
2342 					QDF_TRACE_LEVEL_ERROR,
2343 					FL("dp_srng_setup failed for tx_comp_ring[%d]"), i);
2344 				goto fail1;
2345 			}
2346 		}
2347 	} else {
2348 		/* This will be incremented during per pdev ring setup */
2349 		soc->num_tcl_data_rings = 0;
2350 	}
2351 
2352 	if (dp_tx_soc_attach(soc)) {
2353 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2354 				FL("dp_tx_soc_attach failed"));
2355 		goto fail1;
2356 	}
2357 
2358 	entries = wlan_cfg_get_dp_soc_tcl_cmd_ring_size(soc_cfg_ctx);
2359 	/* TCL command and status rings */
2360 	if (dp_srng_setup(soc, &soc->tcl_cmd_ring, TCL_CMD, 0, 0,
2361 			  entries)) {
2362 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2363 			FL("dp_srng_setup failed for tcl_cmd_ring"));
2364 		goto fail1;
2365 	}
2366 
2367 	entries = wlan_cfg_get_dp_soc_tcl_status_ring_size(soc_cfg_ctx);
2368 	if (dp_srng_setup(soc, &soc->tcl_status_ring, TCL_STATUS, 0, 0,
2369 			  entries)) {
2370 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2371 			FL("dp_srng_setup failed for tcl_status_ring"));
2372 		goto fail1;
2373 	}
2374 
2375 	reo_dst_ring_size = wlan_cfg_get_reo_dst_ring_size(soc->wlan_cfg_ctx);
2376 
2377 	/* TBD: call dp_tx_init to setup Tx SW descriptors and MSDU extension
2378 	 * descriptors
2379 	 */
2380 
2381 	/* Rx data rings */
2382 	if (!wlan_cfg_per_pdev_rx_ring(soc_cfg_ctx)) {
2383 		soc->num_reo_dest_rings =
2384 			wlan_cfg_num_reo_dest_rings(soc_cfg_ctx);
2385 		QDF_TRACE(QDF_MODULE_ID_DP,
2386 			QDF_TRACE_LEVEL_INFO,
2387 			FL("num_reo_dest_rings %d"), soc->num_reo_dest_rings);
2388 		for (i = 0; i < soc->num_reo_dest_rings; i++) {
2389 			if (dp_srng_setup(soc, &soc->reo_dest_ring[i], REO_DST,
2390 				i, 0, reo_dst_ring_size)) {
2391 				QDF_TRACE(QDF_MODULE_ID_DP,
2392 					  QDF_TRACE_LEVEL_ERROR,
2393 					  FL(RNG_ERR "reo_dest_ring [%d]"), i);
2394 				goto fail1;
2395 			}
2396 		}
2397 	} else {
2398 		/* This will be incremented during per pdev ring setup */
2399 		soc->num_reo_dest_rings = 0;
2400 	}
2401 
2402 	entries = wlan_cfg_get_dp_soc_rxdma_err_dst_ring_size(soc_cfg_ctx);
2403 	/* LMAC RxDMA to SW Rings configuration */
2404 	if (!wlan_cfg_per_pdev_lmac_ring(soc_cfg_ctx)) {
2405 		/* Only valid for MCL */
2406 		struct dp_pdev *pdev = soc->pdev_list[0];
2407 
2408 		for (i = 0; i < MAX_RX_MAC_RINGS; i++) {
2409 			if (dp_srng_setup(soc, &pdev->rxdma_err_dst_ring[i],
2410 					  RXDMA_DST, 0, i,
2411 					  entries)) {
2412 				QDF_TRACE(QDF_MODULE_ID_DP,
2413 					  QDF_TRACE_LEVEL_ERROR,
2414 					  FL(RNG_ERR "rxdma_err_dst_ring"));
2415 				goto fail1;
2416 			}
2417 		}
2418 	}
2419 	/* TBD: call dp_rx_init to setup Rx SW descriptors */
2420 
2421 	/* REO reinjection ring */
2422 	entries = wlan_cfg_get_dp_soc_reo_reinject_ring_size(soc_cfg_ctx);
2423 	if (dp_srng_setup(soc, &soc->reo_reinject_ring, REO_REINJECT, 0, 0,
2424 			  entries)) {
2425 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2426 			  FL("dp_srng_setup failed for reo_reinject_ring"));
2427 		goto fail1;
2428 	}
2429 
2430 
2431 	/* Rx release ring */
2432 	if (dp_srng_setup(soc, &soc->rx_rel_ring, WBM2SW_RELEASE, 3, 0,
2433 		wlan_cfg_get_dp_soc_rx_release_ring_size(soc_cfg_ctx))) {
2434 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2435 			  FL("dp_srng_setup failed for rx_rel_ring"));
2436 		goto fail1;
2437 	}
2438 
2439 
2440 	/* Rx exception ring */
2441 	entries = wlan_cfg_get_dp_soc_reo_exception_ring_size(soc_cfg_ctx);
2442 	if (dp_srng_setup(soc, &soc->reo_exception_ring,
2443 			  REO_EXCEPTION, 0, MAX_REO_DEST_RINGS, entries)) {
2444 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2445 			  FL("dp_srng_setup failed for reo_exception_ring"));
2446 		goto fail1;
2447 	}
2448 
2449 
2450 	/* REO command and status rings */
2451 	if (dp_srng_setup(soc, &soc->reo_cmd_ring, REO_CMD, 0, 0,
2452 		wlan_cfg_get_dp_soc_reo_cmd_ring_size(soc_cfg_ctx))) {
2453 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2454 			FL("dp_srng_setup failed for reo_cmd_ring"));
2455 		goto fail1;
2456 	}
2457 
2458 	hal_reo_init_cmd_ring(soc->hal_soc, soc->reo_cmd_ring.hal_srng);
2459 	TAILQ_INIT(&soc->rx.reo_cmd_list);
2460 	qdf_spinlock_create(&soc->rx.reo_cmd_lock);
2461 
2462 	if (dp_srng_setup(soc, &soc->reo_status_ring, REO_STATUS, 0, 0,
2463 		wlan_cfg_get_dp_soc_reo_status_ring_size(soc_cfg_ctx))) {
2464 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2465 			FL("dp_srng_setup failed for reo_status_ring"));
2466 		goto fail1;
2467 	}
2468 
2469 	qdf_spinlock_create(&soc->ast_lock);
2470 	dp_soc_wds_attach(soc);
2471 
2472 	/* Reset the cpu ring map if radio is NSS offloaded */
2473 	if (wlan_cfg_get_dp_soc_nss_cfg(soc_cfg_ctx)) {
2474 		dp_soc_reset_cpu_ring_map(soc);
2475 		dp_soc_reset_intr_mask(soc);
2476 	}
2477 
2478 	/* Setup HW REO */
2479 	qdf_mem_zero(&reo_params, sizeof(reo_params));
2480 
2481 	if (wlan_cfg_is_rx_hash_enabled(soc_cfg_ctx)) {
2482 
2483 		/*
2484 		 * Reo ring remap is not required if both radios
2485 		 * are offloaded to NSS
2486 		 */
2487 		if (!dp_reo_remap_config(soc,
2488 					&reo_params.remap1,
2489 					&reo_params.remap2))
2490 			goto out;
2491 
2492 		reo_params.rx_hash_enabled = true;
2493 	}
2494 
2495 	/* setup the global rx defrag waitlist */
2496 	TAILQ_INIT(&soc->rx.defrag.waitlist);
2497 	soc->rx.defrag.timeout_ms =
2498 		wlan_cfg_get_rx_defrag_min_timeout(soc_cfg_ctx);
2499 	soc->rx.flags.defrag_timeout_check =
2500 		wlan_cfg_get_defrag_timeout_check(soc_cfg_ctx);
2501 	qdf_spinlock_create(&soc->rx.defrag.defrag_lock);
2502 
2503 out:
2504 	/*
2505 	 * set the fragment destination ring
2506 	 */
2507 	dp_reo_frag_dst_set(soc, &reo_params.frag_dst_ring);
2508 
2509 	hal_reo_setup(soc->hal_soc, &reo_params);
2510 
2511 	qdf_atomic_set(&soc->cmn_init_done, 1);
2512 	qdf_nbuf_queue_init(&soc->htt_stats.msg);
2513 	return 0;
2514 fail1:
2515 	/*
2516 	 * Cleanup will be done as part of soc_detach, which will
2517 	 * be called on pdev attach failure
2518 	 */
2519 	return QDF_STATUS_E_FAILURE;
2520 }
2521 
2522 static void dp_pdev_detach_wifi3(struct cdp_pdev *txrx_pdev, int force);
2523 
2524 static void dp_lro_hash_setup(struct dp_soc *soc)
2525 {
2526 	struct cdp_lro_hash_config lro_hash;
2527 
2528 	if (!wlan_cfg_is_lro_enabled(soc->wlan_cfg_ctx) &&
2529 		!wlan_cfg_is_rx_hash_enabled(soc->wlan_cfg_ctx)) {
2530 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2531 			 FL("LRO disabled RX hash disabled"));
2532 		return;
2533 	}
2534 
2535 	qdf_mem_zero(&lro_hash, sizeof(lro_hash));
2536 
2537 	if (wlan_cfg_is_lro_enabled(soc->wlan_cfg_ctx)) {
2538 		lro_hash.lro_enable = 1;
2539 		lro_hash.tcp_flag = QDF_TCPHDR_ACK;
2540 		lro_hash.tcp_flag_mask = QDF_TCPHDR_FIN | QDF_TCPHDR_SYN |
2541 			 QDF_TCPHDR_RST | QDF_TCPHDR_ACK | QDF_TCPHDR_URG |
2542 			 QDF_TCPHDR_ECE | QDF_TCPHDR_CWR;
2543 	}
2544 
2545 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW, FL("enabled"));
2546 	qdf_get_random_bytes(lro_hash.toeplitz_hash_ipv4,
2547 		 (sizeof(lro_hash.toeplitz_hash_ipv4[0]) *
2548 		 LRO_IPV4_SEED_ARR_SZ));
2549 	qdf_get_random_bytes(lro_hash.toeplitz_hash_ipv6,
2550 		 (sizeof(lro_hash.toeplitz_hash_ipv6[0]) *
2551 		 LRO_IPV6_SEED_ARR_SZ));
2552 
2553 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW,
2554 		 "lro_hash: lro_enable: 0x%x tcp_flag 0x%x tcp_flag_mask 0x%x",
2555 		 lro_hash.lro_enable, lro_hash.tcp_flag,
2556 		 lro_hash.tcp_flag_mask);
2557 
2558 	qdf_trace_hex_dump(QDF_MODULE_ID_DP,
2559 		 QDF_TRACE_LEVEL_ERROR,
2560 		 (void *)lro_hash.toeplitz_hash_ipv4,
2561 		 (sizeof(lro_hash.toeplitz_hash_ipv4[0]) *
2562 		 LRO_IPV4_SEED_ARR_SZ));
2563 
2564 	qdf_trace_hex_dump(QDF_MODULE_ID_DP,
2565 		 QDF_TRACE_LEVEL_ERROR,
2566 		 (void *)lro_hash.toeplitz_hash_ipv6,
2567 		 (sizeof(lro_hash.toeplitz_hash_ipv6[0]) *
2568 		 LRO_IPV6_SEED_ARR_SZ));
2569 
2570 	qdf_assert(soc->cdp_soc.ol_ops->lro_hash_config);
2571 
2572 	if (soc->cdp_soc.ol_ops->lro_hash_config)
2573 		(void)soc->cdp_soc.ol_ops->lro_hash_config
2574 			(soc->ctrl_psoc, &lro_hash);
2575 }
2576 
2577 /*
2578 * dp_rxdma_ring_setup() - configure the RX DMA rings
2579 * @soc: data path SoC handle
2580 * @pdev: Physical device handle
2581 *
2582 * Return: 0 - success, > 0 - failure
2583 */
2584 #ifdef QCA_HOST2FW_RXBUF_RING
2585 static int dp_rxdma_ring_setup(struct dp_soc *soc,
2586 	 struct dp_pdev *pdev)
2587 {
2588 	struct wlan_cfg_dp_pdev_ctxt *pdev_cfg_ctx;
2589 	int max_mac_rings;
2590 	int i;
2591 
2592 	pdev_cfg_ctx = pdev->wlan_cfg_ctx;
2593 	max_mac_rings = wlan_cfg_get_num_mac_rings(pdev_cfg_ctx);
2594 
2595 	for (i = 0; i < max_mac_rings; i++) {
2596 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
2597 			 "%s: pdev_id %d mac_id %d",
2598 			 __func__, pdev->pdev_id, i);
2599 		if (dp_srng_setup(soc, &pdev->rx_mac_buf_ring[i],
2600 			RXDMA_BUF, 1, i,
2601 			wlan_cfg_get_rx_dma_buf_ring_size(pdev_cfg_ctx))) {
2602 			QDF_TRACE(QDF_MODULE_ID_DP,
2603 				 QDF_TRACE_LEVEL_ERROR,
2604 				 FL("failed rx mac ring setup"));
2605 			return QDF_STATUS_E_FAILURE;
2606 		}
2607 	}
2608 	return QDF_STATUS_SUCCESS;
2609 }
2610 #else
2611 static int dp_rxdma_ring_setup(struct dp_soc *soc,
2612 	 struct dp_pdev *pdev)
2613 {
2614 	return QDF_STATUS_SUCCESS;
2615 }
2616 #endif
2617 
2618 /**
2619  * dp_dscp_tid_map_setup(): Initialize the dscp-tid maps
2620  * @pdev - DP_PDEV handle
2621  *
2622  * Return: void
2623  */
2624 static inline void
2625 dp_dscp_tid_map_setup(struct dp_pdev *pdev)
2626 {
2627 	uint8_t map_id;
2628 	struct dp_soc *soc = pdev->soc;
2629 
2630 	if (!soc)
2631 		return;
2632 
2633 	for (map_id = 0; map_id < DP_MAX_TID_MAPS; map_id++) {
2634 		qdf_mem_copy(pdev->dscp_tid_map[map_id],
2635 			     default_dscp_tid_map,
2636 			     sizeof(default_dscp_tid_map));
2637 	}
2638 
2639 	for (map_id = 0; map_id < soc->num_hw_dscp_tid_map; map_id++) {
2640 		hal_tx_set_dscp_tid_map(soc->hal_soc,
2641 					default_dscp_tid_map,
2642 					map_id);
2643 	}
2644 }
2645 
2646 #ifdef QCA_SUPPORT_SON
2647 /**
2648  * dp_mark_peer_inact(): Update peer inactivity status
2649  * @peer_handle - datapath peer handle
2650  *
2651  * Return: void
2652  */
2653 void dp_mark_peer_inact(void *peer_handle, bool inactive)
2654 {
2655 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
2656 	struct dp_pdev *pdev;
2657 	struct dp_soc *soc;
2658 	bool inactive_old;
2659 
2660 	if (!peer)
2661 		return;
2662 
2663 	pdev = peer->vdev->pdev;
2664 	soc = pdev->soc;
2665 
2666 	inactive_old = peer->peer_bs_inact_flag == 1;
2667 	if (!inactive)
2668 		peer->peer_bs_inact = soc->pdev_bs_inact_reload;
2669 	peer->peer_bs_inact_flag = inactive ? 1 : 0;
2670 
2671 	if (inactive_old != inactive) {
2672 		/**
2673 		 * Note: a node lookup can happen in RX datapath context
2674 		 * when a node changes from inactive to active (at most once
2675 		 * per inactivity timeout threshold)
2676 		 */
2677 		if (soc->cdp_soc.ol_ops->record_act_change) {
2678 			soc->cdp_soc.ol_ops->record_act_change(
2679 					(void *)pdev->ctrl_pdev,
2680 					peer->mac_addr.raw, !inactive);
2681 		}
2682 	}
2683 }
2684 
2685 /**
2686  * dp_txrx_peer_find_inact_timeout_handler(): Inactivity timeout function
2687  *
2688  * Periodically checks the inactivity status
2689  */
2690 static os_timer_func(dp_txrx_peer_find_inact_timeout_handler)
2691 {
2692 	struct dp_pdev *pdev;
2693 	struct dp_vdev *vdev;
2694 	struct dp_peer *peer;
2695 	struct dp_soc *soc;
2696 	int i;
2697 
2698 	OS_GET_TIMER_ARG(soc, struct dp_soc *);
2699 
2700 	qdf_spin_lock(&soc->peer_ref_mutex);
2701 
2702 	for (i = 0; i < soc->pdev_count; i++) {
2703 	pdev = soc->pdev_list[i];
2704 	if (!pdev)
2705 		continue;
2706 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
2707 	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
2708 		if (vdev->opmode != wlan_op_mode_ap)
2709 			continue;
2710 
2711 		TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
2712 			if (!peer->authorize) {
2713 				/**
2714 				 * Inactivity check only interested in
2715 				 * connected node
2716 				 */
2717 				continue;
2718 			}
2719 			if (peer->peer_bs_inact > soc->pdev_bs_inact_reload) {
2720 				/**
2721 				 * This check ensures we do not wait extra long
2722 				 * due to the potential race condition
2723 				 */
2724 				peer->peer_bs_inact = soc->pdev_bs_inact_reload;
2725 			}
2726 			if (peer->peer_bs_inact > 0) {
2727 				/* Do not let it wrap around */
2728 				peer->peer_bs_inact--;
2729 			}
2730 			if (peer->peer_bs_inact == 0)
2731 				dp_mark_peer_inact(peer, true);
2732 		}
2733 	}
2734 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
2735 	}
2736 
2737 	qdf_spin_unlock(&soc->peer_ref_mutex);
2738 	qdf_timer_mod(&soc->pdev_bs_inact_timer,
2739 		      soc->pdev_bs_inact_interval * 1000);
2740 }
2741 
2742 
2743 /**
2744  * dp_free_inact_timer(): free inact timer
2745  * @timer - inact timer handle
2746  *
2747  * Return: bool
2748  */
2749 void dp_free_inact_timer(struct dp_soc *soc)
2750 {
2751 	qdf_timer_free(&soc->pdev_bs_inact_timer);
2752 }
2753 #else
2754 
2755 void dp_mark_peer_inact(void *peer, bool inactive)
2756 {
2757 	return;
2758 }
2759 
2760 void dp_free_inact_timer(struct dp_soc *soc)
2761 {
2762 	return;
2763 }
2764 
2765 #endif
2766 
2767 #ifdef IPA_OFFLOAD
2768 /**
2769  * dp_setup_ipa_rx_refill_buf_ring - Setup second Rx refill buffer ring
2770  * @soc: data path instance
2771  * @pdev: core txrx pdev context
2772  *
2773  * Return: QDF_STATUS_SUCCESS: success
2774  *         QDF_STATUS_E_RESOURCES: Error return
2775  */
2776 static int dp_setup_ipa_rx_refill_buf_ring(struct dp_soc *soc,
2777 					   struct dp_pdev *pdev)
2778 {
2779 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
2780 	int entries;
2781 
2782 	soc_cfg_ctx = soc->wlan_cfg_ctx;
2783 	entries = wlan_cfg_get_dp_soc_rxdma_refill_ring_size(soc_cfg_ctx);
2784 
2785 	/* Setup second Rx refill buffer ring */
2786 	if (dp_srng_setup(soc, &pdev->rx_refill_buf_ring2, RXDMA_BUF,
2787 			  IPA_RX_REFILL_BUF_RING_IDX,
2788 			  pdev->pdev_id,
2789 			  entries)) {
2790 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2791 			FL("dp_srng_setup failed second rx refill ring"));
2792 		return QDF_STATUS_E_FAILURE;
2793 	}
2794 	return QDF_STATUS_SUCCESS;
2795 }
2796 
2797 /**
2798  * dp_cleanup_ipa_rx_refill_buf_ring - Cleanup second Rx refill buffer ring
2799  * @soc: data path instance
2800  * @pdev: core txrx pdev context
2801  *
2802  * Return: void
2803  */
2804 static void dp_cleanup_ipa_rx_refill_buf_ring(struct dp_soc *soc,
2805 					      struct dp_pdev *pdev)
2806 {
2807 	dp_srng_cleanup(soc, &pdev->rx_refill_buf_ring2, RXDMA_BUF,
2808 			IPA_RX_REFILL_BUF_RING_IDX);
2809 }
2810 
2811 #else
2812 static int dp_setup_ipa_rx_refill_buf_ring(struct dp_soc *soc,
2813 					   struct dp_pdev *pdev)
2814 {
2815 	return QDF_STATUS_SUCCESS;
2816 }
2817 
2818 static void dp_cleanup_ipa_rx_refill_buf_ring(struct dp_soc *soc,
2819 					      struct dp_pdev *pdev)
2820 {
2821 }
2822 #endif
2823 
2824 #if !defined(QCA_WIFI_QCA6390) && !defined(DISABLE_MON_CONFIG)
2825 static
2826 QDF_STATUS dp_mon_rings_setup(struct dp_soc *soc, struct dp_pdev *pdev)
2827 {
2828 	int mac_id = 0;
2829 	int pdev_id = pdev->pdev_id;
2830 	int entries;
2831 	struct wlan_cfg_dp_pdev_ctxt *pdev_cfg_ctx;
2832 
2833 	pdev_cfg_ctx = pdev->wlan_cfg_ctx;
2834 
2835 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
2836 		int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, pdev_id);
2837 
2838 		entries = wlan_cfg_get_dma_mon_buf_ring_size(pdev_cfg_ctx);
2839 		if (dp_srng_setup(soc, &pdev->rxdma_mon_buf_ring[mac_id],
2840 				  RXDMA_MONITOR_BUF, 0, mac_for_pdev,
2841 				  entries)) {
2842 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2843 				  FL(RNG_ERR "rxdma_mon_buf_ring "));
2844 			return QDF_STATUS_E_NOMEM;
2845 		}
2846 
2847 		entries = wlan_cfg_get_dma_mon_dest_ring_size(pdev_cfg_ctx);
2848 		if (dp_srng_setup(soc, &pdev->rxdma_mon_dst_ring[mac_id],
2849 				  RXDMA_MONITOR_DST, 0, mac_for_pdev,
2850 				  entries)) {
2851 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2852 				  FL(RNG_ERR "rxdma_mon_dst_ring"));
2853 			return QDF_STATUS_E_NOMEM;
2854 		}
2855 
2856 		entries = wlan_cfg_get_dma_mon_stat_ring_size(pdev_cfg_ctx);
2857 		if (dp_srng_setup(soc, &pdev->rxdma_mon_status_ring[mac_id],
2858 				  RXDMA_MONITOR_STATUS, 0, mac_for_pdev,
2859 				  entries)) {
2860 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2861 				  FL(RNG_ERR "rxdma_mon_status_ring"));
2862 			return QDF_STATUS_E_NOMEM;
2863 		}
2864 
2865 		entries = wlan_cfg_get_dma_mon_desc_ring_size(pdev_cfg_ctx);
2866 		if (dp_srng_setup(soc, &pdev->rxdma_mon_desc_ring[mac_id],
2867 				  RXDMA_MONITOR_DESC, 0, mac_for_pdev,
2868 				  entries)) {
2869 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2870 				  FL(RNG_ERR "rxdma_mon_desc_ring"));
2871 			return QDF_STATUS_E_NOMEM;
2872 		}
2873 	}
2874 	return QDF_STATUS_SUCCESS;
2875 }
2876 #else
2877 static QDF_STATUS dp_mon_rings_setup(struct dp_soc *soc, struct dp_pdev *pdev)
2878 {
2879 	return QDF_STATUS_SUCCESS;
2880 }
2881 #endif
2882 
2883 /*dp_iterate_update_peer_list - update peer stats on cal client timer
2884  * @pdev_hdl: pdev handle
2885  */
2886 #ifdef ATH_SUPPORT_EXT_STAT
2887 void  dp_iterate_update_peer_list(void *pdev_hdl)
2888 {
2889 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_hdl;
2890 	struct dp_vdev *vdev = NULL;
2891 	struct dp_peer *peer = NULL;
2892 
2893 	DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
2894 		DP_VDEV_ITERATE_PEER_LIST(vdev, peer) {
2895 			dp_cal_client_update_peer_stats(&peer->stats);
2896 		}
2897 	}
2898 }
2899 #else
2900 void  dp_iterate_update_peer_list(void *pdev_hdl)
2901 {
2902 }
2903 #endif
2904 
2905 /*
2906 * dp_pdev_attach_wifi3() - attach txrx pdev
2907 * @ctrl_pdev: Opaque PDEV object
2908 * @txrx_soc: Datapath SOC handle
2909 * @htc_handle: HTC handle for host-target interface
2910 * @qdf_osdev: QDF OS device
2911 * @pdev_id: PDEV ID
2912 *
2913 * Return: DP PDEV handle on success, NULL on failure
2914 */
2915 static struct cdp_pdev *dp_pdev_attach_wifi3(struct cdp_soc_t *txrx_soc,
2916 	struct cdp_ctrl_objmgr_pdev *ctrl_pdev,
2917 	HTC_HANDLE htc_handle, qdf_device_t qdf_osdev, uint8_t pdev_id)
2918 {
2919 	int tx_ring_size;
2920 	int tx_comp_ring_size;
2921 	int reo_dst_ring_size;
2922 	int entries;
2923 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
2924 	int nss_cfg;
2925 
2926 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
2927 	struct dp_pdev *pdev = qdf_mem_malloc(sizeof(*pdev));
2928 
2929 	if (!pdev) {
2930 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2931 			FL("DP PDEV memory allocation failed"));
2932 		goto fail0;
2933 	}
2934 
2935 	soc_cfg_ctx = soc->wlan_cfg_ctx;
2936 	pdev->wlan_cfg_ctx = wlan_cfg_pdev_attach(soc->ctrl_psoc);
2937 
2938 	if (!pdev->wlan_cfg_ctx) {
2939 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2940 			FL("pdev cfg_attach failed"));
2941 
2942 		qdf_mem_free(pdev);
2943 		goto fail0;
2944 	}
2945 
2946 	/*
2947 	 * set nss pdev config based on soc config
2948 	 */
2949 	nss_cfg = wlan_cfg_get_dp_soc_nss_cfg(soc_cfg_ctx);
2950 	wlan_cfg_set_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx,
2951 			(nss_cfg & (1 << pdev_id)));
2952 
2953 	pdev->soc = soc;
2954 	pdev->ctrl_pdev = ctrl_pdev;
2955 	pdev->pdev_id = pdev_id;
2956 	pdev->lmac_id = wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, pdev_id);
2957 	soc->pdev_list[pdev_id] = pdev;
2958 	soc->pdev_count++;
2959 
2960 	TAILQ_INIT(&pdev->vdev_list);
2961 	qdf_spinlock_create(&pdev->vdev_list_lock);
2962 	pdev->vdev_count = 0;
2963 
2964 	qdf_spinlock_create(&pdev->tx_mutex);
2965 	qdf_spinlock_create(&pdev->neighbour_peer_mutex);
2966 	TAILQ_INIT(&pdev->neighbour_peers_list);
2967 	pdev->neighbour_peers_added = false;
2968 
2969 	if (dp_soc_cmn_setup(soc)) {
2970 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2971 			FL("dp_soc_cmn_setup failed"));
2972 		goto fail1;
2973 	}
2974 
2975 	/* Setup per PDEV TCL rings if configured */
2976 	if (wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
2977 		tx_ring_size =
2978 			wlan_cfg_tx_ring_size(soc_cfg_ctx);
2979 		tx_comp_ring_size =
2980 			wlan_cfg_tx_comp_ring_size(soc_cfg_ctx);
2981 
2982 		if (dp_srng_setup(soc, &soc->tcl_data_ring[pdev_id], TCL_DATA,
2983 			pdev_id, pdev_id, tx_ring_size)) {
2984 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2985 				FL("dp_srng_setup failed for tcl_data_ring"));
2986 			goto fail1;
2987 		}
2988 		if (dp_srng_setup(soc, &soc->tx_comp_ring[pdev_id],
2989 			WBM2SW_RELEASE, pdev_id, pdev_id, tx_comp_ring_size)) {
2990 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2991 				FL("dp_srng_setup failed for tx_comp_ring"));
2992 			goto fail1;
2993 		}
2994 		soc->num_tcl_data_rings++;
2995 	}
2996 
2997 	/* Tx specific init */
2998 	if (dp_tx_pdev_attach(pdev)) {
2999 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3000 			FL("dp_tx_pdev_attach failed"));
3001 		goto fail1;
3002 	}
3003 
3004 	reo_dst_ring_size = wlan_cfg_get_reo_dst_ring_size(soc->wlan_cfg_ctx);
3005 	/* Setup per PDEV REO rings if configured */
3006 	if (wlan_cfg_per_pdev_rx_ring(soc_cfg_ctx)) {
3007 		if (dp_srng_setup(soc, &soc->reo_dest_ring[pdev_id], REO_DST,
3008 			pdev_id, pdev_id, reo_dst_ring_size)) {
3009 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3010 				FL("dp_srng_setup failed for reo_dest_ringn"));
3011 			goto fail1;
3012 		}
3013 		soc->num_reo_dest_rings++;
3014 
3015 	}
3016 	if (dp_srng_setup(soc, &pdev->rx_refill_buf_ring, RXDMA_BUF, 0, pdev_id,
3017 		wlan_cfg_get_dp_soc_rxdma_refill_ring_size(soc_cfg_ctx))) {
3018 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3019 			 FL("dp_srng_setup failed rx refill ring"));
3020 		goto fail1;
3021 	}
3022 
3023 	if (dp_rxdma_ring_setup(soc, pdev)) {
3024 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3025 			 FL("RXDMA ring config failed"));
3026 		goto fail1;
3027 	}
3028 
3029 	if (dp_mon_rings_setup(soc, pdev)) {
3030 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3031 			  FL("MONITOR rings setup failed"));
3032 		goto fail1;
3033 	}
3034 
3035 	entries = wlan_cfg_get_dp_soc_rxdma_err_dst_ring_size(soc_cfg_ctx);
3036 	if (wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx)) {
3037 		if (dp_srng_setup(soc, &pdev->rxdma_err_dst_ring[0], RXDMA_DST,
3038 				  0, pdev_id,
3039 				  entries)) {
3040 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3041 				  FL(RNG_ERR "rxdma_err_dst_ring"));
3042 			goto fail1;
3043 		}
3044 	}
3045 
3046 	if (dp_setup_ipa_rx_refill_buf_ring(soc, pdev))
3047 		goto fail1;
3048 
3049 	if (dp_ipa_ring_resource_setup(soc, pdev))
3050 		goto fail1;
3051 
3052 	if (dp_ipa_uc_attach(soc, pdev) != QDF_STATUS_SUCCESS) {
3053 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3054 			FL("dp_ipa_uc_attach failed"));
3055 		goto fail1;
3056 	}
3057 
3058 	/* Rx specific init */
3059 	if (dp_rx_pdev_attach(pdev)) {
3060 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3061 			FL("dp_rx_pdev_attach failed"));
3062 		goto fail0;
3063 	}
3064 	DP_STATS_INIT(pdev);
3065 
3066 	/* Monitor filter init */
3067 	pdev->mon_filter_mode = MON_FILTER_ALL;
3068 	pdev->fp_mgmt_filter = FILTER_MGMT_ALL;
3069 	pdev->fp_ctrl_filter = FILTER_CTRL_ALL;
3070 	pdev->fp_data_filter = FILTER_DATA_ALL;
3071 	pdev->mo_mgmt_filter = FILTER_MGMT_ALL;
3072 	pdev->mo_ctrl_filter = FILTER_CTRL_ALL;
3073 	pdev->mo_data_filter = FILTER_DATA_ALL;
3074 
3075 	dp_local_peer_id_pool_init(pdev);
3076 
3077 	dp_dscp_tid_map_setup(pdev);
3078 
3079 	/* Rx monitor mode specific init */
3080 	if (dp_rx_pdev_mon_attach(pdev)) {
3081 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
3082 				"dp_rx_pdev_attach failed");
3083 		goto fail1;
3084 	}
3085 
3086 	if (dp_wdi_event_attach(pdev)) {
3087 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
3088 				"dp_wdi_evet_attach failed");
3089 		goto fail1;
3090 	}
3091 
3092 	/* set the reo destination during initialization */
3093 	pdev->reo_dest = pdev->pdev_id + 1;
3094 
3095 	/*
3096 	 * initialize ppdu tlv list
3097 	 */
3098 	TAILQ_INIT(&pdev->ppdu_info_list);
3099 	pdev->tlv_count = 0;
3100 	pdev->list_depth = 0;
3101 
3102 	qdf_mem_zero(&pdev->sojourn_stats, sizeof(struct cdp_tx_sojourn_stats));
3103 
3104 	pdev->sojourn_buf = qdf_nbuf_alloc(pdev->soc->osdev,
3105 			      sizeof(struct cdp_tx_sojourn_stats), 0, 4,
3106 			      TRUE);
3107 
3108 	/* initlialize cal client timer */
3109 	dp_cal_client_attach(&pdev->cal_client_ctx, pdev, pdev->soc->osdev,
3110 			     &dp_iterate_update_peer_list);
3111 
3112 	return (struct cdp_pdev *)pdev;
3113 
3114 fail1:
3115 	dp_pdev_detach_wifi3((struct cdp_pdev *)pdev, 0);
3116 
3117 fail0:
3118 	return NULL;
3119 }
3120 
3121 /*
3122 * dp_rxdma_ring_cleanup() - configure the RX DMA rings
3123 * @soc: data path SoC handle
3124 * @pdev: Physical device handle
3125 *
3126 * Return: void
3127 */
3128 #ifdef QCA_HOST2FW_RXBUF_RING
3129 static void dp_rxdma_ring_cleanup(struct dp_soc *soc,
3130 	 struct dp_pdev *pdev)
3131 {
3132 	int max_mac_rings =
3133 		 wlan_cfg_get_num_mac_rings(pdev->wlan_cfg_ctx);
3134 	int i;
3135 
3136 	max_mac_rings = max_mac_rings < MAX_RX_MAC_RINGS ?
3137 				max_mac_rings : MAX_RX_MAC_RINGS;
3138 	for (i = 0; i < MAX_RX_MAC_RINGS; i++)
3139 		dp_srng_cleanup(soc, &pdev->rx_mac_buf_ring[i],
3140 			 RXDMA_BUF, 1);
3141 
3142 	qdf_timer_free(&soc->mon_reap_timer);
3143 }
3144 #else
3145 static void dp_rxdma_ring_cleanup(struct dp_soc *soc,
3146 	 struct dp_pdev *pdev)
3147 {
3148 }
3149 #endif
3150 
3151 /*
3152  * dp_neighbour_peers_detach() - Detach neighbour peers(nac clients)
3153  * @pdev: device object
3154  *
3155  * Return: void
3156  */
3157 static void dp_neighbour_peers_detach(struct dp_pdev *pdev)
3158 {
3159 	struct dp_neighbour_peer *peer = NULL;
3160 	struct dp_neighbour_peer *temp_peer = NULL;
3161 
3162 	TAILQ_FOREACH_SAFE(peer, &pdev->neighbour_peers_list,
3163 			neighbour_peer_list_elem, temp_peer) {
3164 		/* delete this peer from the list */
3165 		TAILQ_REMOVE(&pdev->neighbour_peers_list,
3166 				peer, neighbour_peer_list_elem);
3167 		qdf_mem_free(peer);
3168 	}
3169 
3170 	qdf_spinlock_destroy(&pdev->neighbour_peer_mutex);
3171 }
3172 
3173 /**
3174 * dp_htt_ppdu_stats_detach() - detach stats resources
3175 * @pdev: Datapath PDEV handle
3176 *
3177 * Return: void
3178 */
3179 static void dp_htt_ppdu_stats_detach(struct dp_pdev *pdev)
3180 {
3181 	struct ppdu_info *ppdu_info, *ppdu_info_next;
3182 
3183 	TAILQ_FOREACH_SAFE(ppdu_info, &pdev->ppdu_info_list,
3184 			ppdu_info_list_elem, ppdu_info_next) {
3185 		if (!ppdu_info)
3186 			break;
3187 		qdf_assert_always(ppdu_info->nbuf);
3188 		qdf_nbuf_free(ppdu_info->nbuf);
3189 		qdf_mem_free(ppdu_info);
3190 	}
3191 }
3192 
3193 #if !defined(QCA_WIFI_QCA6390) && !defined(DISABLE_MON_CONFIG)
3194 static
3195 void dp_mon_ring_deinit(struct dp_soc *soc, struct dp_pdev *pdev,
3196 			int mac_id)
3197 {
3198 		dp_srng_cleanup(soc, &pdev->rxdma_mon_buf_ring[mac_id],
3199 				RXDMA_MONITOR_BUF, 0);
3200 		dp_srng_cleanup(soc, &pdev->rxdma_mon_dst_ring[mac_id],
3201 				RXDMA_MONITOR_DST, 0);
3202 
3203 		dp_srng_cleanup(soc, &pdev->rxdma_mon_status_ring[mac_id],
3204 				RXDMA_MONITOR_STATUS, 0);
3205 
3206 		dp_srng_cleanup(soc, &pdev->rxdma_mon_desc_ring[mac_id],
3207 				RXDMA_MONITOR_DESC, 0);
3208 		dp_srng_cleanup(soc, &pdev->rxdma_err_dst_ring[mac_id],
3209 				RXDMA_DST, 0);
3210 }
3211 #else
3212 static void dp_mon_ring_deinit(struct dp_soc *soc, struct dp_pdev *pdev,
3213 			       int mac_id)
3214 {
3215 }
3216 #endif
3217 
3218 /*
3219 * dp_pdev_detach_wifi3() - detach txrx pdev
3220 * @txrx_pdev: Datapath PDEV handle
3221 * @force: Force detach
3222 *
3223 */
3224 static void dp_pdev_detach_wifi3(struct cdp_pdev *txrx_pdev, int force)
3225 {
3226 	struct dp_pdev *pdev = (struct dp_pdev *)txrx_pdev;
3227 	struct dp_soc *soc = pdev->soc;
3228 	qdf_nbuf_t curr_nbuf, next_nbuf;
3229 	int mac_id;
3230 
3231 	dp_wdi_event_detach(pdev);
3232 
3233 	dp_tx_pdev_detach(pdev);
3234 
3235 	if (wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
3236 		dp_srng_cleanup(soc, &soc->tcl_data_ring[pdev->pdev_id],
3237 			TCL_DATA, pdev->pdev_id);
3238 		dp_srng_cleanup(soc, &soc->tx_comp_ring[pdev->pdev_id],
3239 			WBM2SW_RELEASE, pdev->pdev_id);
3240 	}
3241 
3242 	dp_pktlogmod_exit(pdev);
3243 
3244 	dp_rx_pdev_detach(pdev);
3245 	dp_rx_pdev_mon_detach(pdev);
3246 	dp_neighbour_peers_detach(pdev);
3247 	qdf_spinlock_destroy(&pdev->tx_mutex);
3248 	qdf_spinlock_destroy(&pdev->vdev_list_lock);
3249 
3250 	dp_ipa_uc_detach(soc, pdev);
3251 
3252 	dp_cleanup_ipa_rx_refill_buf_ring(soc, pdev);
3253 
3254 	/* Cleanup per PDEV REO rings if configured */
3255 	if (wlan_cfg_per_pdev_rx_ring(soc->wlan_cfg_ctx)) {
3256 		dp_srng_cleanup(soc, &soc->reo_dest_ring[pdev->pdev_id],
3257 			REO_DST, pdev->pdev_id);
3258 	}
3259 
3260 	dp_srng_cleanup(soc, &pdev->rx_refill_buf_ring, RXDMA_BUF, 0);
3261 
3262 	dp_rxdma_ring_cleanup(soc, pdev);
3263 
3264 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
3265 		dp_mon_ring_deinit(soc, pdev, mac_id);
3266 		dp_srng_cleanup(soc, &pdev->rxdma_err_dst_ring[mac_id],
3267 			RXDMA_DST, 0);
3268 	}
3269 
3270 	curr_nbuf = pdev->invalid_peer_head_msdu;
3271 	while (curr_nbuf) {
3272 		next_nbuf = qdf_nbuf_next(curr_nbuf);
3273 		qdf_nbuf_free(curr_nbuf);
3274 		curr_nbuf = next_nbuf;
3275 	}
3276 
3277 	dp_htt_ppdu_stats_detach(pdev);
3278 
3279 	qdf_nbuf_free(pdev->sojourn_buf);
3280 
3281 	dp_cal_client_detach(&pdev->cal_client_ctx);
3282 	soc->pdev_list[pdev->pdev_id] = NULL;
3283 	soc->pdev_count--;
3284 	wlan_cfg_pdev_detach(pdev->wlan_cfg_ctx);
3285 	qdf_mem_free(pdev->dp_txrx_handle);
3286 	qdf_mem_free(pdev);
3287 }
3288 
3289 /*
3290  * dp_reo_desc_freelist_destroy() - Flush REO descriptors from deferred freelist
3291  * @soc: DP SOC handle
3292  */
3293 static inline void dp_reo_desc_freelist_destroy(struct dp_soc *soc)
3294 {
3295 	struct reo_desc_list_node *desc;
3296 	struct dp_rx_tid *rx_tid;
3297 
3298 	qdf_spin_lock_bh(&soc->reo_desc_freelist_lock);
3299 	while (qdf_list_remove_front(&soc->reo_desc_freelist,
3300 		(qdf_list_node_t **)&desc) == QDF_STATUS_SUCCESS) {
3301 		rx_tid = &desc->rx_tid;
3302 		qdf_mem_unmap_nbytes_single(soc->osdev,
3303 			rx_tid->hw_qdesc_paddr,
3304 			QDF_DMA_BIDIRECTIONAL,
3305 			rx_tid->hw_qdesc_alloc_size);
3306 		qdf_mem_free(rx_tid->hw_qdesc_vaddr_unaligned);
3307 		qdf_mem_free(desc);
3308 	}
3309 	qdf_spin_unlock_bh(&soc->reo_desc_freelist_lock);
3310 	qdf_list_destroy(&soc->reo_desc_freelist);
3311 	qdf_spinlock_destroy(&soc->reo_desc_freelist_lock);
3312 }
3313 
3314 /*
3315  * dp_soc_detach_wifi3() - Detach txrx SOC
3316  * @txrx_soc: DP SOC handle, struct cdp_soc_t is first element of struct dp_soc.
3317  */
3318 static void dp_soc_detach_wifi3(void *txrx_soc)
3319 {
3320 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
3321 	int i;
3322 
3323 	qdf_atomic_set(&soc->cmn_init_done, 0);
3324 
3325 	qdf_flush_work(&soc->htt_stats.work);
3326 	qdf_disable_work(&soc->htt_stats.work);
3327 
3328 	/* Free pending htt stats messages */
3329 	qdf_nbuf_queue_free(&soc->htt_stats.msg);
3330 
3331 	dp_free_inact_timer(soc);
3332 
3333 	for (i = 0; i < MAX_PDEV_CNT; i++) {
3334 		if (soc->pdev_list[i])
3335 			dp_pdev_detach_wifi3(
3336 				(struct cdp_pdev *)soc->pdev_list[i], 1);
3337 	}
3338 
3339 	dp_peer_find_detach(soc);
3340 
3341 	/* TBD: Call Tx and Rx cleanup functions to free buffers and
3342 	 * SW descriptors
3343 	 */
3344 
3345 	/* Free the ring memories */
3346 	/* Common rings */
3347 	dp_srng_cleanup(soc, &soc->wbm_desc_rel_ring, SW2WBM_RELEASE, 0);
3348 
3349 	dp_tx_soc_detach(soc);
3350 	/* Tx data rings */
3351 	if (!wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
3352 		for (i = 0; i < soc->num_tcl_data_rings; i++) {
3353 			dp_srng_cleanup(soc, &soc->tcl_data_ring[i],
3354 				TCL_DATA, i);
3355 			dp_srng_cleanup(soc, &soc->tx_comp_ring[i],
3356 				WBM2SW_RELEASE, i);
3357 		}
3358 	}
3359 
3360 	/* TCL command and status rings */
3361 	dp_srng_cleanup(soc, &soc->tcl_cmd_ring, TCL_CMD, 0);
3362 	dp_srng_cleanup(soc, &soc->tcl_status_ring, TCL_STATUS, 0);
3363 
3364 	/* Rx data rings */
3365 	if (!wlan_cfg_per_pdev_rx_ring(soc->wlan_cfg_ctx)) {
3366 		soc->num_reo_dest_rings =
3367 			wlan_cfg_num_reo_dest_rings(soc->wlan_cfg_ctx);
3368 		for (i = 0; i < soc->num_reo_dest_rings; i++) {
3369 			/* TODO: Get number of rings and ring sizes
3370 			 * from wlan_cfg
3371 			 */
3372 			dp_srng_cleanup(soc, &soc->reo_dest_ring[i],
3373 				REO_DST, i);
3374 		}
3375 	}
3376 	/* REO reinjection ring */
3377 	dp_srng_cleanup(soc, &soc->reo_reinject_ring, REO_REINJECT, 0);
3378 
3379 	/* Rx release ring */
3380 	dp_srng_cleanup(soc, &soc->rx_rel_ring, WBM2SW_RELEASE, 0);
3381 
3382 	/* Rx exception ring */
3383 	/* TODO: Better to store ring_type and ring_num in
3384 	 * dp_srng during setup
3385 	 */
3386 	dp_srng_cleanup(soc, &soc->reo_exception_ring, REO_EXCEPTION, 0);
3387 
3388 	/* REO command and status rings */
3389 	dp_srng_cleanup(soc, &soc->reo_cmd_ring, REO_CMD, 0);
3390 	dp_srng_cleanup(soc, &soc->reo_status_ring, REO_STATUS, 0);
3391 	dp_hw_link_desc_pool_cleanup(soc);
3392 
3393 	qdf_spinlock_destroy(&soc->peer_ref_mutex);
3394 	qdf_spinlock_destroy(&soc->htt_stats.lock);
3395 
3396 	htt_soc_detach(soc->htt_handle);
3397 
3398 	qdf_spinlock_destroy(&soc->rx.defrag.defrag_lock);
3399 
3400 	dp_reo_cmdlist_destroy(soc);
3401 	qdf_spinlock_destroy(&soc->rx.reo_cmd_lock);
3402 	dp_reo_desc_freelist_destroy(soc);
3403 
3404 	wlan_cfg_soc_detach(soc->wlan_cfg_ctx);
3405 
3406 	dp_soc_wds_detach(soc);
3407 	qdf_spinlock_destroy(&soc->ast_lock);
3408 
3409 	qdf_mem_free(soc);
3410 }
3411 
3412 #if !defined(QCA_WIFI_QCA6390) && !defined(DISABLE_MON_CONFIG)
3413 static void dp_mon_htt_srng_setup(struct dp_soc *soc,
3414 				  struct dp_pdev *pdev,
3415 				  int mac_id,
3416 				  int mac_for_pdev)
3417 {
3418 	htt_srng_setup(soc->htt_handle, mac_for_pdev,
3419 		       pdev->rxdma_mon_buf_ring[mac_id].hal_srng,
3420 		       RXDMA_MONITOR_BUF);
3421 
3422 	htt_srng_setup(soc->htt_handle, mac_for_pdev,
3423 		       pdev->rxdma_mon_dst_ring[mac_id].hal_srng,
3424 		       RXDMA_MONITOR_DST);
3425 
3426 	htt_srng_setup(soc->htt_handle, mac_for_pdev,
3427 		       pdev->rxdma_mon_status_ring[mac_id].hal_srng,
3428 		       RXDMA_MONITOR_STATUS);
3429 
3430 	htt_srng_setup(soc->htt_handle, mac_for_pdev,
3431 		       pdev->rxdma_mon_desc_ring[mac_id].hal_srng,
3432 		       RXDMA_MONITOR_DESC);
3433 }
3434 #else
3435 static void dp_mon_htt_srng_setup(struct dp_soc *soc,
3436 				  struct dp_pdev *pdev,
3437 				  int mac_id,
3438 				  int mac_for_pdev)
3439 {
3440 }
3441 #endif
3442 /*
3443  * dp_rxdma_ring_config() - configure the RX DMA rings
3444  *
3445  * This function is used to configure the MAC rings.
3446  * On MCL host provides buffers in Host2FW ring
3447  * FW refills (copies) buffers to the ring and updates
3448  * ring_idx in register
3449  *
3450  * @soc: data path SoC handle
3451  *
3452  * Return: void
3453  */
3454 #ifdef QCA_HOST2FW_RXBUF_RING
3455 static void dp_rxdma_ring_config(struct dp_soc *soc)
3456 {
3457 	int i;
3458 
3459 	for (i = 0; i < MAX_PDEV_CNT; i++) {
3460 		struct dp_pdev *pdev = soc->pdev_list[i];
3461 
3462 		if (pdev) {
3463 			int mac_id;
3464 			bool dbs_enable = 0;
3465 			int max_mac_rings =
3466 				 wlan_cfg_get_num_mac_rings
3467 				(pdev->wlan_cfg_ctx);
3468 
3469 			htt_srng_setup(soc->htt_handle, 0,
3470 				 pdev->rx_refill_buf_ring.hal_srng,
3471 				 RXDMA_BUF);
3472 
3473 			if (pdev->rx_refill_buf_ring2.hal_srng)
3474 				htt_srng_setup(soc->htt_handle, 0,
3475 					pdev->rx_refill_buf_ring2.hal_srng,
3476 					RXDMA_BUF);
3477 
3478 			if (soc->cdp_soc.ol_ops->
3479 				is_hw_dbs_2x2_capable) {
3480 				dbs_enable = soc->cdp_soc.ol_ops->
3481 					is_hw_dbs_2x2_capable(soc->ctrl_psoc);
3482 			}
3483 
3484 			if (dbs_enable) {
3485 				QDF_TRACE(QDF_MODULE_ID_TXRX,
3486 				QDF_TRACE_LEVEL_ERROR,
3487 				FL("DBS enabled max_mac_rings %d"),
3488 					 max_mac_rings);
3489 			} else {
3490 				max_mac_rings = 1;
3491 				QDF_TRACE(QDF_MODULE_ID_TXRX,
3492 					 QDF_TRACE_LEVEL_ERROR,
3493 					 FL("DBS disabled, max_mac_rings %d"),
3494 					 max_mac_rings);
3495 			}
3496 
3497 			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
3498 					 FL("pdev_id %d max_mac_rings %d"),
3499 					 pdev->pdev_id, max_mac_rings);
3500 
3501 			for (mac_id = 0; mac_id < max_mac_rings; mac_id++) {
3502 				int mac_for_pdev = dp_get_mac_id_for_pdev(
3503 							mac_id, pdev->pdev_id);
3504 
3505 				QDF_TRACE(QDF_MODULE_ID_TXRX,
3506 					 QDF_TRACE_LEVEL_ERROR,
3507 					 FL("mac_id %d"), mac_for_pdev);
3508 				htt_srng_setup(soc->htt_handle, mac_for_pdev,
3509 					 pdev->rx_mac_buf_ring[mac_id]
3510 						.hal_srng,
3511 					 RXDMA_BUF);
3512 				htt_srng_setup(soc->htt_handle, mac_for_pdev,
3513 					pdev->rxdma_err_dst_ring[mac_id]
3514 						.hal_srng,
3515 					RXDMA_DST);
3516 
3517 				/* Configure monitor mode rings */
3518 				dp_mon_htt_srng_setup(soc, pdev, mac_id,
3519 						      mac_for_pdev);
3520 
3521 			}
3522 		}
3523 	}
3524 
3525 	/*
3526 	 * Timer to reap rxdma status rings.
3527 	 * Needed until we enable ppdu end interrupts
3528 	 */
3529 	qdf_timer_init(soc->osdev, &soc->mon_reap_timer,
3530 			dp_service_mon_rings, (void *)soc,
3531 			QDF_TIMER_TYPE_WAKE_APPS);
3532 	soc->reap_timer_init = 1;
3533 }
3534 #else
3535 /* This is only for WIN */
3536 static void dp_rxdma_ring_config(struct dp_soc *soc)
3537 {
3538 	int i;
3539 	int mac_id;
3540 
3541 	for (i = 0; i < MAX_PDEV_CNT; i++) {
3542 		struct dp_pdev *pdev = soc->pdev_list[i];
3543 
3544 		if (pdev == NULL)
3545 			continue;
3546 
3547 		for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
3548 			int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, i);
3549 
3550 			htt_srng_setup(soc->htt_handle, mac_for_pdev,
3551 				pdev->rx_refill_buf_ring.hal_srng, RXDMA_BUF);
3552 #ifndef DISABLE_MON_CONFIG
3553 			htt_srng_setup(soc->htt_handle, mac_for_pdev,
3554 				pdev->rxdma_mon_buf_ring[mac_id].hal_srng,
3555 				RXDMA_MONITOR_BUF);
3556 			htt_srng_setup(soc->htt_handle, mac_for_pdev,
3557 				pdev->rxdma_mon_dst_ring[mac_id].hal_srng,
3558 				RXDMA_MONITOR_DST);
3559 			htt_srng_setup(soc->htt_handle, mac_for_pdev,
3560 				pdev->rxdma_mon_status_ring[mac_id].hal_srng,
3561 				RXDMA_MONITOR_STATUS);
3562 			htt_srng_setup(soc->htt_handle, mac_for_pdev,
3563 				pdev->rxdma_mon_desc_ring[mac_id].hal_srng,
3564 				RXDMA_MONITOR_DESC);
3565 #endif
3566 			htt_srng_setup(soc->htt_handle, mac_for_pdev,
3567 				pdev->rxdma_err_dst_ring[mac_id].hal_srng,
3568 				RXDMA_DST);
3569 		}
3570 	}
3571 }
3572 #endif
3573 
3574 /*
3575  * dp_soc_attach_target_wifi3() - SOC initialization in the target
3576  * @txrx_soc: Datapath SOC handle
3577  */
3578 static int dp_soc_attach_target_wifi3(struct cdp_soc_t *cdp_soc)
3579 {
3580 	struct dp_soc *soc = (struct dp_soc *)cdp_soc;
3581 
3582 	htt_soc_attach_target(soc->htt_handle);
3583 
3584 	dp_rxdma_ring_config(soc);
3585 
3586 	DP_STATS_INIT(soc);
3587 
3588 	/* initialize work queue for stats processing */
3589 	qdf_create_work(0, &soc->htt_stats.work, htt_t2h_stats_handler, soc);
3590 
3591 	return 0;
3592 }
3593 
3594 /*
3595  * dp_soc_get_nss_cfg_wifi3() - SOC get nss config
3596  * @txrx_soc: Datapath SOC handle
3597  */
3598 static int dp_soc_get_nss_cfg_wifi3(struct cdp_soc_t *cdp_soc)
3599 {
3600 	struct dp_soc *dsoc = (struct dp_soc *)cdp_soc;
3601 	return wlan_cfg_get_dp_soc_nss_cfg(dsoc->wlan_cfg_ctx);
3602 }
3603 /*
3604  * dp_soc_set_nss_cfg_wifi3() - SOC set nss config
3605  * @txrx_soc: Datapath SOC handle
3606  * @nss_cfg: nss config
3607  */
3608 static void dp_soc_set_nss_cfg_wifi3(struct cdp_soc_t *cdp_soc, int config)
3609 {
3610 	struct dp_soc *dsoc = (struct dp_soc *)cdp_soc;
3611 	struct wlan_cfg_dp_soc_ctxt *wlan_cfg_ctx = dsoc->wlan_cfg_ctx;
3612 
3613 	wlan_cfg_set_dp_soc_nss_cfg(wlan_cfg_ctx, config);
3614 
3615 	/*
3616 	 * TODO: masked out based on the per offloaded radio
3617 	 */
3618 	if (config == dp_nss_cfg_dbdc) {
3619 		wlan_cfg_set_num_tx_desc_pool(wlan_cfg_ctx, 0);
3620 		wlan_cfg_set_num_tx_ext_desc_pool(wlan_cfg_ctx, 0);
3621 		wlan_cfg_set_num_tx_desc(wlan_cfg_ctx, 0);
3622 		wlan_cfg_set_num_tx_ext_desc(wlan_cfg_ctx, 0);
3623 	}
3624 
3625 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
3626 		  FL("nss-wifi<0> nss config is enabled"));
3627 }
3628 /*
3629 * dp_vdev_attach_wifi3() - attach txrx vdev
3630 * @txrx_pdev: Datapath PDEV handle
3631 * @vdev_mac_addr: MAC address of the virtual interface
3632 * @vdev_id: VDEV Id
3633 * @wlan_op_mode: VDEV operating mode
3634 *
3635 * Return: DP VDEV handle on success, NULL on failure
3636 */
3637 static struct cdp_vdev *dp_vdev_attach_wifi3(struct cdp_pdev *txrx_pdev,
3638 	uint8_t *vdev_mac_addr, uint8_t vdev_id, enum wlan_op_mode op_mode)
3639 {
3640 	struct dp_pdev *pdev = (struct dp_pdev *)txrx_pdev;
3641 	struct dp_soc *soc = pdev->soc;
3642 	struct dp_vdev *vdev = qdf_mem_malloc(sizeof(*vdev));
3643 
3644 	if (!vdev) {
3645 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3646 			FL("DP VDEV memory allocation failed"));
3647 		goto fail0;
3648 	}
3649 
3650 	vdev->pdev = pdev;
3651 	vdev->vdev_id = vdev_id;
3652 	vdev->opmode = op_mode;
3653 	vdev->osdev = soc->osdev;
3654 
3655 	vdev->osif_rx = NULL;
3656 	vdev->osif_rsim_rx_decap = NULL;
3657 	vdev->osif_get_key = NULL;
3658 	vdev->osif_rx_mon = NULL;
3659 	vdev->osif_tx_free_ext = NULL;
3660 	vdev->osif_vdev = NULL;
3661 
3662 	vdev->delete.pending = 0;
3663 	vdev->safemode = 0;
3664 	vdev->drop_unenc = 1;
3665 	vdev->sec_type = cdp_sec_type_none;
3666 #ifdef notyet
3667 	vdev->filters_num = 0;
3668 #endif
3669 
3670 	qdf_mem_copy(
3671 		&vdev->mac_addr.raw[0], vdev_mac_addr, OL_TXRX_MAC_ADDR_LEN);
3672 
3673 	vdev->tx_encap_type = wlan_cfg_pkt_type(soc->wlan_cfg_ctx);
3674 	vdev->rx_decap_type = wlan_cfg_pkt_type(soc->wlan_cfg_ctx);
3675 	vdev->dscp_tid_map_id = 0;
3676 	vdev->mcast_enhancement_en = 0;
3677 	vdev->raw_mode_war = wlan_cfg_get_raw_mode_war(soc->wlan_cfg_ctx);
3678 
3679 	/* TODO: Initialize default HTT meta data that will be used in
3680 	 * TCL descriptors for packets transmitted from this VDEV
3681 	 */
3682 
3683 	TAILQ_INIT(&vdev->peer_list);
3684 
3685 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
3686 	/* add this vdev into the pdev's list */
3687 	TAILQ_INSERT_TAIL(&pdev->vdev_list, vdev, vdev_list_elem);
3688 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
3689 	pdev->vdev_count++;
3690 
3691 	dp_tx_vdev_attach(vdev);
3692 
3693 
3694 	if ((soc->intr_mode == DP_INTR_POLL) &&
3695 			wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx) != 0) {
3696 		if (pdev->vdev_count == 1)
3697 			qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
3698 	}
3699 
3700 	dp_lro_hash_setup(soc);
3701 
3702 	/* LRO */
3703 	if (wlan_cfg_is_lro_enabled(soc->wlan_cfg_ctx) &&
3704 		wlan_op_mode_sta == vdev->opmode)
3705 		vdev->lro_enable = true;
3706 
3707 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
3708 		 "LRO: vdev_id %d lro_enable %d", vdev_id, vdev->lro_enable);
3709 
3710 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3711 		"Created vdev %pK (%pM)", vdev, vdev->mac_addr.raw);
3712 	DP_STATS_INIT(vdev);
3713 
3714 	if (wlan_op_mode_sta == vdev->opmode)
3715 		dp_peer_create_wifi3((struct cdp_vdev *)vdev,
3716 							vdev->mac_addr.raw,
3717 							NULL);
3718 
3719 	return (struct cdp_vdev *)vdev;
3720 
3721 fail0:
3722 	return NULL;
3723 }
3724 
3725 /**
3726  * dp_vdev_register_wifi3() - Register VDEV operations from osif layer
3727  * @vdev: Datapath VDEV handle
3728  * @osif_vdev: OSIF vdev handle
3729  * @ctrl_vdev: UMAC vdev handle
3730  * @txrx_ops: Tx and Rx operations
3731  *
3732  * Return: DP VDEV handle on success, NULL on failure
3733  */
3734 static void dp_vdev_register_wifi3(struct cdp_vdev *vdev_handle,
3735 	void *osif_vdev, struct cdp_ctrl_objmgr_vdev *ctrl_vdev,
3736 	struct ol_txrx_ops *txrx_ops)
3737 {
3738 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
3739 	vdev->osif_vdev = osif_vdev;
3740 	vdev->ctrl_vdev = ctrl_vdev;
3741 	vdev->osif_rx = txrx_ops->rx.rx;
3742 	vdev->osif_rx_stack = txrx_ops->rx.rx_stack;
3743 	vdev->osif_rsim_rx_decap = txrx_ops->rx.rsim_rx_decap;
3744 	vdev->osif_get_key = txrx_ops->get_key;
3745 	vdev->osif_rx_mon = txrx_ops->rx.mon;
3746 	vdev->osif_tx_free_ext = txrx_ops->tx.tx_free_ext;
3747 #ifdef notyet
3748 #if ATH_SUPPORT_WAPI
3749 	vdev->osif_check_wai = txrx_ops->rx.wai_check;
3750 #endif
3751 #endif
3752 #ifdef UMAC_SUPPORT_PROXY_ARP
3753 	vdev->osif_proxy_arp = txrx_ops->proxy_arp;
3754 #endif
3755 	vdev->me_convert = txrx_ops->me_convert;
3756 
3757 	/* TODO: Enable the following once Tx code is integrated */
3758 	if (vdev->mesh_vdev)
3759 		txrx_ops->tx.tx = dp_tx_send_mesh;
3760 	else
3761 		txrx_ops->tx.tx = dp_tx_send;
3762 
3763 	txrx_ops->tx.tx_exception = dp_tx_send_exception;
3764 
3765 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW,
3766 		"DP Vdev Register success");
3767 }
3768 
3769 /**
3770  * dp_vdev_flush_peers() - Forcibily Flush peers of vdev
3771  * @vdev: Datapath VDEV handle
3772  *
3773  * Return: void
3774  */
3775 static void dp_vdev_flush_peers(struct dp_vdev *vdev)
3776 {
3777 	struct dp_pdev *pdev = vdev->pdev;
3778 	struct dp_soc *soc = pdev->soc;
3779 	struct dp_peer *peer;
3780 	uint16_t *peer_ids;
3781 	uint8_t i = 0, j = 0;
3782 
3783 	peer_ids = qdf_mem_malloc(soc->max_peers * sizeof(peer_ids[0]));
3784 	if (!peer_ids) {
3785 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3786 			"DP alloc failure - unable to flush peers");
3787 		return;
3788 	}
3789 
3790 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
3791 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
3792 		for (i = 0; i < MAX_NUM_PEER_ID_PER_PEER; i++)
3793 			if (peer->peer_ids[i] != HTT_INVALID_PEER)
3794 				if (j < soc->max_peers)
3795 					peer_ids[j++] = peer->peer_ids[i];
3796 	}
3797 	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
3798 
3799 	for (i = 0; i < j ; i++)
3800 		dp_rx_peer_unmap_handler(soc, peer_ids[i], vdev->vdev_id,
3801 					 NULL, 0);
3802 
3803 	qdf_mem_free(peer_ids);
3804 
3805 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
3806 		FL("Flushed peers for vdev object %pK "), vdev);
3807 }
3808 
3809 /*
3810  * dp_vdev_detach_wifi3() - Detach txrx vdev
3811  * @txrx_vdev:		Datapath VDEV handle
3812  * @callback:		Callback OL_IF on completion of detach
3813  * @cb_context:	Callback context
3814  *
3815  */
3816 static void dp_vdev_detach_wifi3(struct cdp_vdev *vdev_handle,
3817 	ol_txrx_vdev_delete_cb callback, void *cb_context)
3818 {
3819 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
3820 	struct dp_pdev *pdev = vdev->pdev;
3821 	struct dp_soc *soc = pdev->soc;
3822 	struct dp_neighbour_peer *peer = NULL;
3823 
3824 	/* preconditions */
3825 	qdf_assert(vdev);
3826 
3827 
3828 	if (wlan_op_mode_sta == vdev->opmode)
3829 		dp_peer_delete_wifi3(vdev->vap_bss_peer, 0);
3830 
3831 	/*
3832 	 * If Target is hung, flush all peers before detaching vdev
3833 	 * this will free all references held due to missing
3834 	 * unmap commands from Target
3835 	 */
3836 	if (hif_get_target_status(soc->hif_handle) == TARGET_STATUS_RESET)
3837 		dp_vdev_flush_peers(vdev);
3838 
3839 	/*
3840 	 * Use peer_ref_mutex while accessing peer_list, in case
3841 	 * a peer is in the process of being removed from the list.
3842 	 */
3843 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
3844 	/* check that the vdev has no peers allocated */
3845 	if (!TAILQ_EMPTY(&vdev->peer_list)) {
3846 		/* debug print - will be removed later */
3847 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_WARN,
3848 			FL("not deleting vdev object %pK (%pM)"
3849 			"until deletion finishes for all its peers"),
3850 			vdev, vdev->mac_addr.raw);
3851 		/* indicate that the vdev needs to be deleted */
3852 		vdev->delete.pending = 1;
3853 		vdev->delete.callback = callback;
3854 		vdev->delete.context = cb_context;
3855 		qdf_spin_unlock_bh(&soc->peer_ref_mutex);
3856 		return;
3857 	}
3858 	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
3859 
3860 	qdf_spin_lock_bh(&pdev->neighbour_peer_mutex);
3861 	TAILQ_FOREACH(peer, &pdev->neighbour_peers_list,
3862 		      neighbour_peer_list_elem) {
3863 		QDF_ASSERT(peer->vdev != vdev);
3864 	}
3865 	qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex);
3866 
3867 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
3868 	dp_tx_vdev_detach(vdev);
3869 	/* remove the vdev from its parent pdev's list */
3870 	TAILQ_REMOVE(&pdev->vdev_list, vdev, vdev_list_elem);
3871 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
3872 		FL("deleting vdev object %pK (%pM)"), vdev, vdev->mac_addr.raw);
3873 	qdf_mem_free(vdev);
3874 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
3875 
3876 	if (callback)
3877 		callback(cb_context);
3878 }
3879 
3880 /*
3881  * dp_peer_delete_ast_entries(): Delete all AST entries for a peer
3882  * @soc - datapath soc handle
3883  * @peer - datapath peer handle
3884  *
3885  * Delete the AST entries belonging to a peer
3886  */
3887 #ifdef FEATURE_AST
3888 static inline void dp_peer_delete_ast_entries(struct dp_soc *soc,
3889 					      struct dp_peer *peer)
3890 {
3891 	struct dp_ast_entry *ast_entry, *temp_ast_entry;
3892 
3893 	qdf_spin_lock_bh(&soc->ast_lock);
3894 	DP_PEER_ITERATE_ASE_LIST(peer, ast_entry, temp_ast_entry)
3895 		dp_peer_del_ast(soc, ast_entry);
3896 
3897 	peer->self_ast_entry = NULL;
3898 	TAILQ_INIT(&peer->ast_entry_list);
3899 	qdf_spin_unlock_bh(&soc->ast_lock);
3900 }
3901 #else
3902 static inline void dp_peer_delete_ast_entries(struct dp_soc *soc,
3903 					      struct dp_peer *peer)
3904 {
3905 }
3906 #endif
3907 
3908 #if ATH_SUPPORT_WRAP
3909 static inline struct dp_peer *dp_peer_can_reuse(struct dp_vdev *vdev,
3910 						uint8_t *peer_mac_addr)
3911 {
3912 	struct dp_peer *peer;
3913 
3914 	peer = dp_peer_find_hash_find(vdev->pdev->soc, peer_mac_addr,
3915 				      0, vdev->vdev_id);
3916 	if (!peer)
3917 		return NULL;
3918 
3919 	if (peer->bss_peer)
3920 		return peer;
3921 
3922 	qdf_atomic_dec(&peer->ref_cnt);
3923 	return NULL;
3924 }
3925 #else
3926 static inline struct dp_peer *dp_peer_can_reuse(struct dp_vdev *vdev,
3927 						uint8_t *peer_mac_addr)
3928 {
3929 	struct dp_peer *peer;
3930 
3931 	peer = dp_peer_find_hash_find(vdev->pdev->soc, peer_mac_addr,
3932 				      0, vdev->vdev_id);
3933 	if (!peer)
3934 		return NULL;
3935 
3936 	if (peer->bss_peer && (peer->vdev->vdev_id == vdev->vdev_id))
3937 		return peer;
3938 
3939 	qdf_atomic_dec(&peer->ref_cnt);
3940 	return NULL;
3941 }
3942 #endif
3943 
3944 /*
3945  * dp_peer_create_wifi3() - attach txrx peer
3946  * @txrx_vdev: Datapath VDEV handle
3947  * @peer_mac_addr: Peer MAC address
3948  *
3949  * Return: DP peeer handle on success, NULL on failure
3950  */
3951 static void *dp_peer_create_wifi3(struct cdp_vdev *vdev_handle,
3952 		uint8_t *peer_mac_addr, struct cdp_ctrl_objmgr_peer *ctrl_peer)
3953 {
3954 	struct dp_peer *peer;
3955 	int i;
3956 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
3957 	struct dp_pdev *pdev;
3958 	struct dp_soc *soc;
3959 	struct dp_ast_entry *ast_entry;
3960 	enum cdp_txrx_ast_entry_type ast_type = CDP_TXRX_AST_TYPE_STATIC;
3961 
3962 	/* preconditions */
3963 	qdf_assert(vdev);
3964 	qdf_assert(peer_mac_addr);
3965 
3966 	pdev = vdev->pdev;
3967 	soc = pdev->soc;
3968 
3969 	/*
3970 	 * If a peer entry with given MAC address already exists,
3971 	 * reuse the peer and reset the state of peer.
3972 	 */
3973 	peer = dp_peer_can_reuse(vdev, peer_mac_addr);
3974 
3975 	if (peer) {
3976 		peer->delete_in_progress = false;
3977 
3978 		dp_peer_delete_ast_entries(soc, peer);
3979 
3980 		if ((vdev->opmode == wlan_op_mode_sta) &&
3981 		    !qdf_mem_cmp(peer_mac_addr, &vdev->mac_addr.raw[0],
3982 		     DP_MAC_ADDR_LEN)) {
3983 			ast_type = CDP_TXRX_AST_TYPE_SELF;
3984 		}
3985 
3986 		dp_peer_add_ast(soc, peer, peer_mac_addr, ast_type, 0);
3987 
3988 		/*
3989 		* Control path maintains a node count which is incremented
3990 		* for every new peer create command. Since new peer is not being
3991 		* created and earlier reference is reused here,
3992 		* peer_unref_delete event is sent to control path to
3993 		* increment the count back.
3994 		*/
3995 		if (soc->cdp_soc.ol_ops->peer_unref_delete) {
3996 			soc->cdp_soc.ol_ops->peer_unref_delete(pdev->ctrl_pdev,
3997 				vdev->vdev_id, peer->mac_addr.raw);
3998 		}
3999 		peer->ctrl_peer = ctrl_peer;
4000 
4001 		dp_local_peer_id_alloc(pdev, peer);
4002 		DP_STATS_INIT(peer);
4003 
4004 		return (void *)peer;
4005 	} else {
4006 		/*
4007 		 * When a STA roams from RPTR AP to ROOT AP and vice versa, we
4008 		 * need to remove the AST entry which was earlier added as a WDS
4009 		 * entry.
4010 		 * If an AST entry exists, but no peer entry exists with a given
4011 		 * MAC addresses, we could deduce it as a WDS entry
4012 		 */
4013 		qdf_spin_lock_bh(&soc->ast_lock);
4014 		ast_entry = dp_peer_ast_hash_find(soc, peer_mac_addr);
4015 		if (ast_entry)
4016 			dp_peer_del_ast(soc, ast_entry);
4017 		qdf_spin_unlock_bh(&soc->ast_lock);
4018 	}
4019 
4020 #ifdef notyet
4021 	peer = (struct dp_peer *)qdf_mempool_alloc(soc->osdev,
4022 		soc->mempool_ol_ath_peer);
4023 #else
4024 	peer = (struct dp_peer *)qdf_mem_malloc(sizeof(*peer));
4025 #endif
4026 
4027 	if (!peer)
4028 		return NULL; /* failure */
4029 
4030 	qdf_mem_zero(peer, sizeof(struct dp_peer));
4031 
4032 	TAILQ_INIT(&peer->ast_entry_list);
4033 
4034 	/* store provided params */
4035 	peer->vdev = vdev;
4036 	peer->ctrl_peer = ctrl_peer;
4037 
4038 	if ((vdev->opmode == wlan_op_mode_sta) &&
4039 	    !qdf_mem_cmp(peer_mac_addr, &vdev->mac_addr.raw[0],
4040 			 DP_MAC_ADDR_LEN)) {
4041 		ast_type = CDP_TXRX_AST_TYPE_SELF;
4042 	}
4043 
4044 	dp_peer_add_ast(soc, peer, peer_mac_addr, ast_type, 0);
4045 
4046 	qdf_spinlock_create(&peer->peer_info_lock);
4047 
4048 	qdf_mem_copy(
4049 		&peer->mac_addr.raw[0], peer_mac_addr, OL_TXRX_MAC_ADDR_LEN);
4050 
4051 	/* TODO: See of rx_opt_proc is really required */
4052 	peer->rx_opt_proc = soc->rx_opt_proc;
4053 
4054 	/* initialize the peer_id */
4055 	for (i = 0; i < MAX_NUM_PEER_ID_PER_PEER; i++)
4056 		peer->peer_ids[i] = HTT_INVALID_PEER;
4057 
4058 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
4059 
4060 	qdf_atomic_init(&peer->ref_cnt);
4061 
4062 	/* keep one reference for attach */
4063 	qdf_atomic_inc(&peer->ref_cnt);
4064 
4065 	/* add this peer into the vdev's list */
4066 	if (wlan_op_mode_sta == vdev->opmode)
4067 		TAILQ_INSERT_HEAD(&vdev->peer_list, peer, peer_list_elem);
4068 	else
4069 		TAILQ_INSERT_TAIL(&vdev->peer_list, peer, peer_list_elem);
4070 
4071 	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
4072 
4073 	/* TODO: See if hash based search is required */
4074 	dp_peer_find_hash_add(soc, peer);
4075 
4076 	/* Initialize the peer state */
4077 	peer->state = OL_TXRX_PEER_STATE_DISC;
4078 
4079 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
4080 		"vdev %pK created peer %pK (%pM) ref_cnt: %d",
4081 		vdev, peer, peer->mac_addr.raw,
4082 		qdf_atomic_read(&peer->ref_cnt));
4083 	/*
4084 	 * For every peer MAp message search and set if bss_peer
4085 	 */
4086 	if (memcmp(peer->mac_addr.raw, vdev->mac_addr.raw, 6) == 0) {
4087 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
4088 			"vdev bss_peer!!!!");
4089 		peer->bss_peer = 1;
4090 		vdev->vap_bss_peer = peer;
4091 	}
4092 	for (i = 0; i < DP_MAX_TIDS; i++)
4093 		qdf_spinlock_create(&peer->rx_tid[i].tid_lock);
4094 
4095 	dp_local_peer_id_alloc(pdev, peer);
4096 	DP_STATS_INIT(peer);
4097 	return (void *)peer;
4098 }
4099 
4100 /*
4101  * dp_peer_setup_wifi3() - initialize the peer
4102  * @vdev_hdl: virtual device object
4103  * @peer: Peer object
4104  *
4105  * Return: void
4106  */
4107 static void dp_peer_setup_wifi3(struct cdp_vdev *vdev_hdl, void *peer_hdl)
4108 {
4109 	struct dp_peer *peer = (struct dp_peer *)peer_hdl;
4110 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_hdl;
4111 	struct dp_pdev *pdev;
4112 	struct dp_soc *soc;
4113 	bool hash_based = 0;
4114 	enum cdp_host_reo_dest_ring reo_dest;
4115 
4116 	/* preconditions */
4117 	qdf_assert(vdev);
4118 	qdf_assert(peer);
4119 
4120 	pdev = vdev->pdev;
4121 	soc = pdev->soc;
4122 
4123 	peer->last_assoc_rcvd = 0;
4124 	peer->last_disassoc_rcvd = 0;
4125 	peer->last_deauth_rcvd = 0;
4126 
4127 	/*
4128 	 * hash based steering is disabled for Radios which are offloaded
4129 	 * to NSS
4130 	 */
4131 	if (!wlan_cfg_get_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx))
4132 		hash_based = wlan_cfg_is_rx_hash_enabled(soc->wlan_cfg_ctx);
4133 
4134 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
4135 		FL("hash based steering for pdev: %d is %d"),
4136 		pdev->pdev_id, hash_based);
4137 
4138 	/*
4139 	 * Below line of code will ensure the proper reo_dest ring is chosen
4140 	 * for cases where toeplitz hash cannot be generated (ex: non TCP/UDP)
4141 	 */
4142 	reo_dest = pdev->reo_dest;
4143 
4144 	if (soc->cdp_soc.ol_ops->peer_set_default_routing) {
4145 		/* TODO: Check the destination ring number to be passed to FW */
4146 		soc->cdp_soc.ol_ops->peer_set_default_routing(
4147 				pdev->ctrl_pdev, peer->mac_addr.raw,
4148 				peer->vdev->vdev_id, hash_based, reo_dest);
4149 	}
4150 
4151 	dp_peer_rx_init(pdev, peer);
4152 	return;
4153 }
4154 
4155 /*
4156  * dp_set_vdev_tx_encap_type() - set the encap type of the vdev
4157  * @vdev_handle: virtual device object
4158  * @htt_pkt_type: type of pkt
4159  *
4160  * Return: void
4161  */
4162 static void dp_set_vdev_tx_encap_type(struct cdp_vdev *vdev_handle,
4163 	 enum htt_cmn_pkt_type val)
4164 {
4165 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
4166 	vdev->tx_encap_type = val;
4167 }
4168 
4169 /*
4170  * dp_set_vdev_rx_decap_type() - set the decap type of the vdev
4171  * @vdev_handle: virtual device object
4172  * @htt_pkt_type: type of pkt
4173  *
4174  * Return: void
4175  */
4176 static void dp_set_vdev_rx_decap_type(struct cdp_vdev *vdev_handle,
4177 	 enum htt_cmn_pkt_type val)
4178 {
4179 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
4180 	vdev->rx_decap_type = val;
4181 }
4182 
4183 /*
4184  * dp_set_ba_aging_timeout() - set ba aging timeout per AC
4185  * @txrx_soc: cdp soc handle
4186  * @ac: Access category
4187  * @value: timeout value in millisec
4188  *
4189  * Return: void
4190  */
4191 static void dp_set_ba_aging_timeout(struct cdp_soc_t *txrx_soc,
4192 				    uint8_t ac, uint32_t value)
4193 {
4194 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
4195 
4196 	hal_set_ba_aging_timeout(soc->hal_soc, ac, value);
4197 }
4198 
4199 /*
4200  * dp_get_ba_aging_timeout() - get ba aging timeout per AC
4201  * @txrx_soc: cdp soc handle
4202  * @ac: access category
4203  * @value: timeout value in millisec
4204  *
4205  * Return: void
4206  */
4207 static void dp_get_ba_aging_timeout(struct cdp_soc_t *txrx_soc,
4208 				    uint8_t ac, uint32_t *value)
4209 {
4210 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
4211 
4212 	hal_get_ba_aging_timeout(soc->hal_soc, ac, value);
4213 }
4214 
4215 /*
4216  * dp_set_pdev_reo_dest() - set the reo destination ring for this pdev
4217  * @pdev_handle: physical device object
4218  * @val: reo destination ring index (1 - 4)
4219  *
4220  * Return: void
4221  */
4222 static void dp_set_pdev_reo_dest(struct cdp_pdev *pdev_handle,
4223 	 enum cdp_host_reo_dest_ring val)
4224 {
4225 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
4226 
4227 	if (pdev)
4228 		pdev->reo_dest = val;
4229 }
4230 
4231 /*
4232  * dp_get_pdev_reo_dest() - get the reo destination for this pdev
4233  * @pdev_handle: physical device object
4234  *
4235  * Return: reo destination ring index
4236  */
4237 static enum cdp_host_reo_dest_ring
4238 dp_get_pdev_reo_dest(struct cdp_pdev *pdev_handle)
4239 {
4240 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
4241 
4242 	if (pdev)
4243 		return pdev->reo_dest;
4244 	else
4245 		return cdp_host_reo_dest_ring_unknown;
4246 }
4247 
4248 #ifdef QCA_SUPPORT_SON
4249 static void dp_son_peer_authorize(struct dp_peer *peer)
4250 {
4251 	struct dp_soc *soc;
4252 	soc = peer->vdev->pdev->soc;
4253 	peer->peer_bs_inact_flag = 0;
4254 	peer->peer_bs_inact = soc->pdev_bs_inact_reload;
4255 	return;
4256 }
4257 #else
4258 static void dp_son_peer_authorize(struct dp_peer *peer)
4259 {
4260 	return;
4261 }
4262 #endif
4263 /*
4264  * dp_set_filter_neighbour_peers() - set filter neighbour peers for smart mesh
4265  * @pdev_handle: device object
4266  * @val: value to be set
4267  *
4268  * Return: void
4269  */
4270 static int dp_set_filter_neighbour_peers(struct cdp_pdev *pdev_handle,
4271 	 uint32_t val)
4272 {
4273 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
4274 
4275 	/* Enable/Disable smart mesh filtering. This flag will be checked
4276 	 * during rx processing to check if packets are from NAC clients.
4277 	 */
4278 	pdev->filter_neighbour_peers = val;
4279 	return 0;
4280 }
4281 
4282 /*
4283  * dp_update_filter_neighbour_peers() - set neighbour peers(nac clients)
4284  * address for smart mesh filtering
4285  * @vdev_handle: virtual device object
4286  * @cmd: Add/Del command
4287  * @macaddr: nac client mac address
4288  *
4289  * Return: void
4290  */
4291 static int dp_update_filter_neighbour_peers(struct cdp_vdev *vdev_handle,
4292 					    uint32_t cmd, uint8_t *macaddr)
4293 {
4294 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
4295 	struct dp_pdev *pdev = vdev->pdev;
4296 	struct dp_neighbour_peer *peer = NULL;
4297 
4298 	if (!macaddr)
4299 		goto fail0;
4300 
4301 	/* Store address of NAC (neighbour peer) which will be checked
4302 	 * against TA of received packets.
4303 	 */
4304 	if (cmd == DP_NAC_PARAM_ADD) {
4305 		peer = (struct dp_neighbour_peer *) qdf_mem_malloc(
4306 				sizeof(*peer));
4307 
4308 		if (!peer) {
4309 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
4310 				FL("DP neighbour peer node memory allocation failed"));
4311 			goto fail0;
4312 		}
4313 
4314 		qdf_mem_copy(&peer->neighbour_peers_macaddr.raw[0],
4315 			macaddr, DP_MAC_ADDR_LEN);
4316 		peer->vdev = vdev;
4317 
4318 		qdf_spin_lock_bh(&pdev->neighbour_peer_mutex);
4319 
4320 		/* add this neighbour peer into the list */
4321 		TAILQ_INSERT_TAIL(&pdev->neighbour_peers_list, peer,
4322 				neighbour_peer_list_elem);
4323 		qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex);
4324 
4325 		/* first neighbour */
4326 		if (!pdev->neighbour_peers_added) {
4327 			pdev->neighbour_peers_added = true;
4328 			dp_ppdu_ring_cfg(pdev);
4329 		}
4330 		return 1;
4331 
4332 	} else if (cmd == DP_NAC_PARAM_DEL) {
4333 		qdf_spin_lock_bh(&pdev->neighbour_peer_mutex);
4334 		TAILQ_FOREACH(peer, &pdev->neighbour_peers_list,
4335 				neighbour_peer_list_elem) {
4336 			if (!qdf_mem_cmp(&peer->neighbour_peers_macaddr.raw[0],
4337 				macaddr, DP_MAC_ADDR_LEN)) {
4338 				/* delete this peer from the list */
4339 				TAILQ_REMOVE(&pdev->neighbour_peers_list,
4340 					peer, neighbour_peer_list_elem);
4341 				qdf_mem_free(peer);
4342 				break;
4343 			}
4344 		}
4345 		/* last neighbour deleted */
4346 		if (TAILQ_EMPTY(&pdev->neighbour_peers_list))
4347 			pdev->neighbour_peers_added = false;
4348 
4349 		qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex);
4350 
4351 		if (!pdev->mcopy_mode && !pdev->neighbour_peers_added &&
4352 		    !pdev->enhanced_stats_en)
4353 			dp_ppdu_ring_reset(pdev);
4354 		return 1;
4355 
4356 	}
4357 
4358 fail0:
4359 	return 0;
4360 }
4361 
4362 /*
4363  * dp_get_sec_type() - Get the security type
4364  * @peer:		Datapath peer handle
4365  * @sec_idx:    Security id (mcast, ucast)
4366  *
4367  * return sec_type: Security type
4368  */
4369 static int dp_get_sec_type(struct cdp_peer *peer, uint8_t sec_idx)
4370 {
4371 	struct dp_peer *dpeer = (struct dp_peer *)peer;
4372 
4373 	return dpeer->security[sec_idx].sec_type;
4374 }
4375 
4376 /*
4377  * dp_peer_authorize() - authorize txrx peer
4378  * @peer_handle:		Datapath peer handle
4379  * @authorize
4380  *
4381  */
4382 static void dp_peer_authorize(struct cdp_peer *peer_handle, uint32_t authorize)
4383 {
4384 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
4385 	struct dp_soc *soc;
4386 
4387 	if (peer != NULL) {
4388 		soc = peer->vdev->pdev->soc;
4389 		qdf_spin_lock_bh(&soc->peer_ref_mutex);
4390 		dp_son_peer_authorize(peer);
4391 		peer->authorize = authorize ? 1 : 0;
4392 		qdf_spin_unlock_bh(&soc->peer_ref_mutex);
4393 	}
4394 }
4395 
4396 #ifdef QCA_SUPPORT_SON
4397 /*
4398  * dp_txrx_update_inact_threshold() - Update inact timer threshold
4399  * @pdev_handle: Device handle
4400  * @new_threshold : updated threshold value
4401  *
4402  */
4403 static void
4404 dp_txrx_update_inact_threshold(struct cdp_pdev *pdev_handle,
4405 			       u_int16_t new_threshold)
4406 {
4407 	struct dp_vdev *vdev;
4408 	struct dp_peer *peer;
4409 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
4410 	struct dp_soc *soc = pdev->soc;
4411 	u_int16_t old_threshold = soc->pdev_bs_inact_reload;
4412 
4413 	if (old_threshold == new_threshold)
4414 		return;
4415 
4416 	soc->pdev_bs_inact_reload = new_threshold;
4417 
4418 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
4419 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
4420 	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
4421 		if (vdev->opmode != wlan_op_mode_ap)
4422 			continue;
4423 
4424 		TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
4425 			if (!peer->authorize)
4426 				continue;
4427 
4428 			if (old_threshold - peer->peer_bs_inact >=
4429 					new_threshold) {
4430 				dp_mark_peer_inact((void *)peer, true);
4431 				peer->peer_bs_inact = 0;
4432 			} else {
4433 				peer->peer_bs_inact = new_threshold -
4434 					(old_threshold - peer->peer_bs_inact);
4435 			}
4436 		}
4437 	}
4438 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
4439 	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
4440 }
4441 
4442 /**
4443  * dp_txrx_reset_inact_count(): Reset inact count
4444  * @pdev_handle - device handle
4445  *
4446  * Return: void
4447  */
4448 static void
4449 dp_txrx_reset_inact_count(struct cdp_pdev *pdev_handle)
4450 {
4451 	struct dp_vdev *vdev = NULL;
4452 	struct dp_peer *peer = NULL;
4453 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
4454 	struct dp_soc *soc = pdev->soc;
4455 
4456 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
4457 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
4458 	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
4459 		if (vdev->opmode != wlan_op_mode_ap)
4460 			continue;
4461 
4462 		TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
4463 			if (!peer->authorize)
4464 				continue;
4465 
4466 			peer->peer_bs_inact = soc->pdev_bs_inact_reload;
4467 		}
4468 	}
4469 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
4470 	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
4471 }
4472 
4473 /**
4474  * dp_set_inact_params(): set inactivity params
4475  * @pdev_handle - device handle
4476  * @inact_check_interval - inactivity interval
4477  * @inact_normal - Inactivity normal
4478  * @inact_overload - Inactivity overload
4479  *
4480  * Return: bool
4481  */
4482 bool dp_set_inact_params(struct cdp_pdev *pdev_handle,
4483 			 u_int16_t inact_check_interval,
4484 			 u_int16_t inact_normal, u_int16_t inact_overload)
4485 {
4486 	struct dp_soc *soc;
4487 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
4488 
4489 	if (!pdev)
4490 		return false;
4491 
4492 	soc = pdev->soc;
4493 	if (!soc)
4494 		return false;
4495 
4496 	soc->pdev_bs_inact_interval = inact_check_interval;
4497 	soc->pdev_bs_inact_normal = inact_normal;
4498 	soc->pdev_bs_inact_overload = inact_overload;
4499 
4500 	dp_txrx_update_inact_threshold((struct cdp_pdev *)pdev,
4501 					soc->pdev_bs_inact_normal);
4502 
4503 	return true;
4504 }
4505 
4506 /**
4507  * dp_start_inact_timer(): Inactivity timer start
4508  * @pdev_handle - device handle
4509  * @enable - Inactivity timer start/stop
4510  *
4511  * Return: bool
4512  */
4513 bool dp_start_inact_timer(struct cdp_pdev *pdev_handle, bool enable)
4514 {
4515 	struct dp_soc *soc;
4516 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
4517 
4518 	if (!pdev)
4519 		return false;
4520 
4521 	soc = pdev->soc;
4522 	if (!soc)
4523 		return false;
4524 
4525 	if (enable) {
4526 		dp_txrx_reset_inact_count((struct cdp_pdev *)pdev);
4527 		qdf_timer_mod(&soc->pdev_bs_inact_timer,
4528 			      soc->pdev_bs_inact_interval * 1000);
4529 	} else {
4530 		qdf_timer_stop(&soc->pdev_bs_inact_timer);
4531 	}
4532 
4533 	return true;
4534 }
4535 
4536 /**
4537  * dp_set_overload(): Set inactivity overload
4538  * @pdev_handle - device handle
4539  * @overload - overload status
4540  *
4541  * Return: void
4542  */
4543 void dp_set_overload(struct cdp_pdev *pdev_handle, bool overload)
4544 {
4545 	struct dp_soc *soc;
4546 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
4547 
4548 	if (!pdev)
4549 		return;
4550 
4551 	soc = pdev->soc;
4552 	if (!soc)
4553 		return;
4554 
4555 	dp_txrx_update_inact_threshold((struct cdp_pdev *)pdev,
4556 			overload ? soc->pdev_bs_inact_overload :
4557 			soc->pdev_bs_inact_normal);
4558 }
4559 
4560 /**
4561  * dp_peer_is_inact(): check whether peer is inactive
4562  * @peer_handle - datapath peer handle
4563  *
4564  * Return: bool
4565  */
4566 bool dp_peer_is_inact(void *peer_handle)
4567 {
4568 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
4569 
4570 	if (!peer)
4571 		return false;
4572 
4573 	return peer->peer_bs_inact_flag == 1;
4574 }
4575 
4576 /**
4577  * dp_init_inact_timer: initialize the inact timer
4578  * @soc - SOC handle
4579  *
4580  * Return: void
4581  */
4582 void dp_init_inact_timer(struct dp_soc *soc)
4583 {
4584 	qdf_timer_init(soc->osdev, &soc->pdev_bs_inact_timer,
4585 		dp_txrx_peer_find_inact_timeout_handler,
4586 		(void *)soc, QDF_TIMER_TYPE_WAKE_APPS);
4587 }
4588 
4589 #else
4590 
4591 bool dp_set_inact_params(struct cdp_pdev *pdev, u_int16_t inact_check_interval,
4592 			 u_int16_t inact_normal, u_int16_t inact_overload)
4593 {
4594 	return false;
4595 }
4596 
4597 bool dp_start_inact_timer(struct cdp_pdev *pdev, bool enable)
4598 {
4599 	return false;
4600 }
4601 
4602 void dp_set_overload(struct cdp_pdev *pdev, bool overload)
4603 {
4604 	return;
4605 }
4606 
4607 void dp_init_inact_timer(struct dp_soc *soc)
4608 {
4609 	return;
4610 }
4611 
4612 bool dp_peer_is_inact(void *peer)
4613 {
4614 	return false;
4615 }
4616 #endif
4617 
4618 static void dp_reset_and_release_peer_mem(struct dp_soc *soc,
4619 					  struct dp_pdev *pdev,
4620 					  struct dp_peer *peer,
4621 					  uint32_t vdev_id)
4622 {
4623 	struct dp_vdev *vdev = NULL;
4624 	struct dp_peer *bss_peer = NULL;
4625 	uint8_t *m_addr = NULL;
4626 
4627 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
4628 	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
4629 		if (vdev->vdev_id == vdev_id)
4630 			break;
4631 	}
4632 	if (!vdev) {
4633 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
4634 			  "vdev is NULL");
4635 	} else {
4636 		if (vdev->vap_bss_peer == peer)
4637 		    vdev->vap_bss_peer = NULL;
4638 		m_addr = peer->mac_addr.raw;
4639 		if (soc->cdp_soc.ol_ops->peer_unref_delete)
4640 		    soc->cdp_soc.ol_ops->peer_unref_delete(pdev->ctrl_pdev,
4641 							   vdev_id, m_addr);
4642 		if (vdev && vdev->vap_bss_peer) {
4643 		    bss_peer = vdev->vap_bss_peer;
4644 		    DP_UPDATE_STATS(vdev, peer);
4645 		}
4646 	}
4647 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
4648 	qdf_mem_free(peer);
4649 }
4650 
4651 /**
4652  * dp_delete_pending_vdev() - check and process vdev delete
4653  * @pdev: DP specific pdev pointer
4654  * @vdev: DP specific vdev pointer
4655  * @vdev_id: vdev id corresponding to vdev
4656  *
4657  * This API does following:
4658  * 1) It releases tx flow pools buffers as vdev is
4659  *    going down and no peers are associated.
4660  * 2) It also detaches vdev before cleaning vdev (struct dp_vdev) memory
4661  */
4662 static void dp_delete_pending_vdev(struct dp_pdev *pdev, struct dp_vdev *vdev,
4663 				   uint8_t vdev_id)
4664 {
4665 	ol_txrx_vdev_delete_cb vdev_delete_cb = NULL;
4666 	void *vdev_delete_context = NULL;
4667 
4668 	vdev_delete_cb = vdev->delete.callback;
4669 	vdev_delete_context = vdev->delete.context;
4670 
4671 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
4672 		  FL("deleting vdev object %pK (%pM)- its last peer is done"),
4673 		  vdev, vdev->mac_addr.raw);
4674 	/* all peers are gone, go ahead and delete it */
4675 	dp_tx_flow_pool_unmap_handler(pdev, vdev_id,
4676 			FLOW_TYPE_VDEV, vdev_id);
4677 	dp_tx_vdev_detach(vdev);
4678 
4679 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
4680 	TAILQ_REMOVE(&pdev->vdev_list, vdev, vdev_list_elem);
4681 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
4682 
4683 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
4684 		  FL("deleting vdev object %pK (%pM)"),
4685 		  vdev, vdev->mac_addr.raw);
4686 	qdf_mem_free(vdev);
4687 	vdev = NULL;
4688 
4689 	if (vdev_delete_cb)
4690 		vdev_delete_cb(vdev_delete_context);
4691 }
4692 
4693 /*
4694  * dp_peer_unref_delete() - unref and delete peer
4695  * @peer_handle:		Datapath peer handle
4696  *
4697  */
4698 void dp_peer_unref_delete(void *peer_handle)
4699 {
4700 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
4701 	struct dp_vdev *vdev = peer->vdev;
4702 	struct dp_pdev *pdev = vdev->pdev;
4703 	struct dp_soc *soc = pdev->soc;
4704 	struct dp_peer *tmppeer;
4705 	int found = 0;
4706 	uint16_t peer_id;
4707 	uint16_t vdev_id;
4708 	bool delete_vdev;
4709 
4710 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
4711 		  "%s: peer %pK ref_cnt(before decrement): %d\n", __func__,
4712 		  peer, qdf_atomic_read(&peer->ref_cnt));
4713 	/*
4714 	 * Hold the lock all the way from checking if the peer ref count
4715 	 * is zero until the peer references are removed from the hash
4716 	 * table and vdev list (if the peer ref count is zero).
4717 	 * This protects against a new HL tx operation starting to use the
4718 	 * peer object just after this function concludes it's done being used.
4719 	 * Furthermore, the lock needs to be held while checking whether the
4720 	 * vdev's list of peers is empty, to make sure that list is not modified
4721 	 * concurrently with the empty check.
4722 	 */
4723 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
4724 	if (qdf_atomic_dec_and_test(&peer->ref_cnt)) {
4725 		peer_id = peer->peer_ids[0];
4726 		vdev_id = vdev->vdev_id;
4727 
4728 		/*
4729 		 * Make sure that the reference to the peer in
4730 		 * peer object map is removed
4731 		 */
4732 		if (peer_id != HTT_INVALID_PEER)
4733 			soc->peer_id_to_obj_map[peer_id] = NULL;
4734 
4735 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
4736 			"Deleting peer %pK (%pM)", peer, peer->mac_addr.raw);
4737 
4738 		/* remove the reference to the peer from the hash table */
4739 		dp_peer_find_hash_remove(soc, peer);
4740 
4741 		qdf_spin_lock_bh(&soc->ast_lock);
4742 		if (peer->self_ast_entry) {
4743 			dp_peer_del_ast(soc, peer->self_ast_entry);
4744 			peer->self_ast_entry = NULL;
4745 		}
4746 		qdf_spin_unlock_bh(&soc->ast_lock);
4747 
4748 		TAILQ_FOREACH(tmppeer, &peer->vdev->peer_list, peer_list_elem) {
4749 			if (tmppeer == peer) {
4750 				found = 1;
4751 				break;
4752 			}
4753 		}
4754 
4755 		if (found) {
4756 			TAILQ_REMOVE(&peer->vdev->peer_list, peer,
4757 				peer_list_elem);
4758 		} else {
4759 			/*Ignoring the remove operation as peer not found*/
4760 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
4761 				  "peer:%pK not found in vdev:%pK peerlist:%pK",
4762 				  peer, vdev, &peer->vdev->peer_list);
4763 		}
4764 
4765 		/* cleanup the peer data */
4766 		dp_peer_cleanup(vdev, peer);
4767 
4768 		/* check whether the parent vdev has no peers left */
4769 		if (TAILQ_EMPTY(&vdev->peer_list)) {
4770 			/*
4771 			 * capture vdev delete pending flag's status
4772 			 * while holding peer_ref_mutex lock
4773 			 */
4774 			delete_vdev = vdev->delete.pending;
4775 			/*
4776 			 * Now that there are no references to the peer, we can
4777 			 * release the peer reference lock.
4778 			 */
4779 			qdf_spin_unlock_bh(&soc->peer_ref_mutex);
4780 			/*
4781 			 * Check if the parent vdev was waiting for its peers
4782 			 * to be deleted, in order for it to be deleted too.
4783 			 */
4784 			if (delete_vdev)
4785 				dp_delete_pending_vdev(pdev, vdev, vdev_id);
4786 		} else {
4787 			qdf_spin_unlock_bh(&soc->peer_ref_mutex);
4788 		}
4789 		dp_reset_and_release_peer_mem(soc, pdev, peer, vdev_id);
4790 
4791 	} else {
4792 		qdf_spin_unlock_bh(&soc->peer_ref_mutex);
4793 	}
4794 }
4795 
4796 /*
4797  * dp_peer_detach_wifi3() – Detach txrx peer
4798  * @peer_handle: Datapath peer handle
4799  * @bitmap: bitmap indicating special handling of request.
4800  *
4801  */
4802 static void dp_peer_delete_wifi3(void *peer_handle, uint32_t bitmap)
4803 {
4804 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
4805 
4806 	/* redirect the peer's rx delivery function to point to a
4807 	 * discard func
4808 	 */
4809 
4810 	peer->rx_opt_proc = dp_rx_discard;
4811 	peer->ctrl_peer = NULL;
4812 
4813 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
4814 		FL("peer %pK (%pM)"),  peer, peer->mac_addr.raw);
4815 
4816 	dp_local_peer_id_free(peer->vdev->pdev, peer);
4817 	qdf_spinlock_destroy(&peer->peer_info_lock);
4818 
4819 	/*
4820 	 * Remove the reference added during peer_attach.
4821 	 * The peer will still be left allocated until the
4822 	 * PEER_UNMAP message arrives to remove the other
4823 	 * reference, added by the PEER_MAP message.
4824 	 */
4825 	dp_peer_unref_delete(peer_handle);
4826 }
4827 
4828 /*
4829  * dp_get_vdev_mac_addr_wifi3() – Detach txrx peer
4830  * @peer_handle:		Datapath peer handle
4831  *
4832  */
4833 static uint8 *dp_get_vdev_mac_addr_wifi3(struct cdp_vdev *pvdev)
4834 {
4835 	struct dp_vdev *vdev = (struct dp_vdev *)pvdev;
4836 	return vdev->mac_addr.raw;
4837 }
4838 
4839 /*
4840  * dp_vdev_set_wds() - Enable per packet stats
4841  * @vdev_handle: DP VDEV handle
4842  * @val: value
4843  *
4844  * Return: none
4845  */
4846 static int dp_vdev_set_wds(void *vdev_handle, uint32_t val)
4847 {
4848 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
4849 
4850 	vdev->wds_enabled = val;
4851 	return 0;
4852 }
4853 
4854 /*
4855  * dp_get_vdev_from_vdev_id_wifi3() – Detach txrx peer
4856  * @peer_handle:		Datapath peer handle
4857  *
4858  */
4859 static struct cdp_vdev *dp_get_vdev_from_vdev_id_wifi3(struct cdp_pdev *dev,
4860 						uint8_t vdev_id)
4861 {
4862 	struct dp_pdev *pdev = (struct dp_pdev *)dev;
4863 	struct dp_vdev *vdev = NULL;
4864 
4865 	if (qdf_unlikely(!pdev))
4866 		return NULL;
4867 
4868 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
4869 	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
4870 		if (vdev->vdev_id == vdev_id)
4871 			break;
4872 	}
4873 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
4874 
4875 	return (struct cdp_vdev *)vdev;
4876 }
4877 
4878 static int dp_get_opmode(struct cdp_vdev *vdev_handle)
4879 {
4880 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
4881 
4882 	return vdev->opmode;
4883 }
4884 
4885 static
4886 void dp_get_os_rx_handles_from_vdev_wifi3(struct cdp_vdev *pvdev,
4887 					  ol_txrx_rx_fp *stack_fn_p,
4888 					  ol_osif_vdev_handle *osif_vdev_p)
4889 {
4890 	struct dp_vdev *vdev = dp_get_dp_vdev_from_cdp_vdev(pvdev);
4891 
4892 	qdf_assert(vdev);
4893 	*stack_fn_p = vdev->osif_rx_stack;
4894 	*osif_vdev_p = vdev->osif_vdev;
4895 }
4896 
4897 static struct cdp_cfg *dp_get_ctrl_pdev_from_vdev_wifi3(struct cdp_vdev *pvdev)
4898 {
4899 	struct dp_vdev *vdev = (struct dp_vdev *)pvdev;
4900 	struct dp_pdev *pdev = vdev->pdev;
4901 
4902 	return (struct cdp_cfg *)pdev->wlan_cfg_ctx;
4903 }
4904 
4905 /**
4906  * dp_reset_monitor_mode() - Disable monitor mode
4907  * @pdev_handle: Datapath PDEV handle
4908  *
4909  * Return: 0 on success, not 0 on failure
4910  */
4911 static int dp_reset_monitor_mode(struct cdp_pdev *pdev_handle)
4912 {
4913 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
4914 	struct htt_rx_ring_tlv_filter htt_tlv_filter;
4915 	struct dp_soc *soc = pdev->soc;
4916 	uint8_t pdev_id;
4917 	int mac_id;
4918 
4919 	pdev_id = pdev->pdev_id;
4920 	soc = pdev->soc;
4921 
4922 	qdf_spin_lock_bh(&pdev->mon_lock);
4923 
4924 	qdf_mem_set(&(htt_tlv_filter), sizeof(htt_tlv_filter), 0x0);
4925 
4926 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
4927 		int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, pdev_id);
4928 
4929 		htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
4930 			pdev->rxdma_mon_buf_ring[mac_id].hal_srng,
4931 			RXDMA_MONITOR_BUF, RX_BUFFER_SIZE, &htt_tlv_filter);
4932 
4933 		htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
4934 			pdev->rxdma_mon_status_ring[mac_id].hal_srng,
4935 			RXDMA_MONITOR_STATUS, RX_BUFFER_SIZE, &htt_tlv_filter);
4936 	}
4937 
4938 	pdev->monitor_vdev = NULL;
4939 
4940 	qdf_spin_unlock_bh(&pdev->mon_lock);
4941 
4942 	return 0;
4943 }
4944 
4945 /**
4946  * dp_set_nac() - set peer_nac
4947  * @peer_handle: Datapath PEER handle
4948  *
4949  * Return: void
4950  */
4951 static void dp_set_nac(struct cdp_peer *peer_handle)
4952 {
4953 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
4954 
4955 	peer->nac = 1;
4956 }
4957 
4958 /**
4959  * dp_get_tx_pending() - read pending tx
4960  * @pdev_handle: Datapath PDEV handle
4961  *
4962  * Return: outstanding tx
4963  */
4964 static int dp_get_tx_pending(struct cdp_pdev *pdev_handle)
4965 {
4966 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
4967 
4968 	return qdf_atomic_read(&pdev->num_tx_outstanding);
4969 }
4970 
4971 /**
4972  * dp_get_peer_mac_from_peer_id() - get peer mac
4973  * @pdev_handle: Datapath PDEV handle
4974  * @peer_id: Peer ID
4975  * @peer_mac: MAC addr of PEER
4976  *
4977  * Return: void
4978  */
4979 static void dp_get_peer_mac_from_peer_id(struct cdp_pdev *pdev_handle,
4980 	uint32_t peer_id, uint8_t *peer_mac)
4981 {
4982 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
4983 	struct dp_peer *peer;
4984 
4985 	if (pdev && peer_mac) {
4986 		peer = dp_peer_find_by_id(pdev->soc, (uint16_t)peer_id);
4987 		if (peer) {
4988 			if (peer->mac_addr.raw)
4989 				qdf_mem_copy(peer_mac, peer->mac_addr.raw,
4990 					     DP_MAC_ADDR_LEN);
4991 			dp_peer_unref_del_find_by_id(peer);
4992 		}
4993 	}
4994 }
4995 
4996 /**
4997  * dp_vdev_set_monitor_mode() - Set DP VDEV to monitor mode
4998  * @vdev_handle: Datapath VDEV handle
4999  * @smart_monitor: Flag to denote if its smart monitor mode
5000  *
5001  * Return: 0 on success, not 0 on failure
5002  */
5003 static int dp_vdev_set_monitor_mode(struct cdp_vdev *vdev_handle,
5004 		uint8_t smart_monitor)
5005 {
5006 	/* Many monitor VAPs can exists in a system but only one can be up at
5007 	 * anytime
5008 	 */
5009 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
5010 	struct dp_pdev *pdev;
5011 	struct htt_rx_ring_tlv_filter htt_tlv_filter;
5012 	struct dp_soc *soc;
5013 	uint8_t pdev_id;
5014 	int mac_id;
5015 
5016 	qdf_assert(vdev);
5017 
5018 	pdev = vdev->pdev;
5019 	pdev_id = pdev->pdev_id;
5020 	soc = pdev->soc;
5021 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_WARN,
5022 		"pdev=%pK, pdev_id=%d, soc=%pK vdev=%pK",
5023 		pdev, pdev_id, soc, vdev);
5024 
5025 	/*Check if current pdev's monitor_vdev exists */
5026 	if (pdev->monitor_vdev) {
5027 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
5028 			"vdev=%pK", vdev);
5029 		qdf_assert(vdev);
5030 	}
5031 
5032 	pdev->monitor_vdev = vdev;
5033 
5034 	/* If smart monitor mode, do not configure monitor ring */
5035 	if (smart_monitor)
5036 		return QDF_STATUS_SUCCESS;
5037 
5038 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
5039 		"MODE[%x] FP[%02x|%02x|%02x] MO[%02x|%02x|%02x]",
5040 		pdev->mon_filter_mode, pdev->fp_mgmt_filter,
5041 		pdev->fp_ctrl_filter, pdev->fp_data_filter,
5042 		pdev->mo_mgmt_filter, pdev->mo_ctrl_filter,
5043 		pdev->mo_data_filter);
5044 
5045 	qdf_mem_set(&(htt_tlv_filter), sizeof(htt_tlv_filter), 0x0);
5046 
5047 	htt_tlv_filter.mpdu_start = 1;
5048 	htt_tlv_filter.msdu_start = 1;
5049 	htt_tlv_filter.packet = 1;
5050 	htt_tlv_filter.msdu_end = 1;
5051 	htt_tlv_filter.mpdu_end = 1;
5052 	htt_tlv_filter.packet_header = 1;
5053 	htt_tlv_filter.attention = 1;
5054 	htt_tlv_filter.ppdu_start = 0;
5055 	htt_tlv_filter.ppdu_end = 0;
5056 	htt_tlv_filter.ppdu_end_user_stats = 0;
5057 	htt_tlv_filter.ppdu_end_user_stats_ext = 0;
5058 	htt_tlv_filter.ppdu_end_status_done = 0;
5059 	htt_tlv_filter.header_per_msdu = 1;
5060 	htt_tlv_filter.enable_fp =
5061 		(pdev->mon_filter_mode & MON_FILTER_PASS) ? 1 : 0;
5062 	htt_tlv_filter.enable_md = 0;
5063 	htt_tlv_filter.enable_mo =
5064 		(pdev->mon_filter_mode & MON_FILTER_OTHER) ? 1 : 0;
5065 	htt_tlv_filter.fp_mgmt_filter = pdev->fp_mgmt_filter;
5066 	htt_tlv_filter.fp_ctrl_filter = pdev->fp_ctrl_filter;
5067 	htt_tlv_filter.fp_data_filter = pdev->fp_data_filter;
5068 	htt_tlv_filter.mo_mgmt_filter = pdev->mo_mgmt_filter;
5069 	htt_tlv_filter.mo_ctrl_filter = pdev->mo_ctrl_filter;
5070 	htt_tlv_filter.mo_data_filter = pdev->mo_data_filter;
5071 
5072 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
5073 		int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, pdev_id);
5074 
5075 		htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
5076 			pdev->rxdma_mon_buf_ring[mac_id].hal_srng,
5077 			RXDMA_MONITOR_BUF, RX_BUFFER_SIZE, &htt_tlv_filter);
5078 	}
5079 
5080 	qdf_mem_set(&(htt_tlv_filter), sizeof(htt_tlv_filter), 0x0);
5081 
5082 	htt_tlv_filter.mpdu_start = 1;
5083 	htt_tlv_filter.msdu_start = 0;
5084 	htt_tlv_filter.packet = 0;
5085 	htt_tlv_filter.msdu_end = 0;
5086 	htt_tlv_filter.mpdu_end = 0;
5087 	htt_tlv_filter.attention = 0;
5088 	htt_tlv_filter.ppdu_start = 1;
5089 	htt_tlv_filter.ppdu_end = 1;
5090 	htt_tlv_filter.ppdu_end_user_stats = 1;
5091 	htt_tlv_filter.ppdu_end_user_stats_ext = 1;
5092 	htt_tlv_filter.ppdu_end_status_done = 1;
5093 	htt_tlv_filter.enable_fp = 1;
5094 	htt_tlv_filter.enable_md = 0;
5095 	htt_tlv_filter.enable_mo = 1;
5096 	if (pdev->mcopy_mode) {
5097 		htt_tlv_filter.packet_header = 1;
5098 	}
5099 	htt_tlv_filter.fp_mgmt_filter = FILTER_MGMT_ALL;
5100 	htt_tlv_filter.fp_ctrl_filter = FILTER_CTRL_ALL;
5101 	htt_tlv_filter.fp_data_filter = FILTER_DATA_ALL;
5102 	htt_tlv_filter.mo_mgmt_filter = FILTER_MGMT_ALL;
5103 	htt_tlv_filter.mo_ctrl_filter = FILTER_CTRL_ALL;
5104 	htt_tlv_filter.mo_data_filter = FILTER_DATA_ALL;
5105 
5106 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
5107 		int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id,
5108 						pdev->pdev_id);
5109 
5110 		htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
5111 			pdev->rxdma_mon_status_ring[mac_id].hal_srng,
5112 			RXDMA_MONITOR_STATUS, RX_BUFFER_SIZE, &htt_tlv_filter);
5113 	}
5114 
5115 	return QDF_STATUS_SUCCESS;
5116 }
5117 
5118 /**
5119  * dp_pdev_set_advance_monitor_filter() - Set DP PDEV monitor filter
5120  * @pdev_handle: Datapath PDEV handle
5121  * @filter_val: Flag to select Filter for monitor mode
5122  * Return: 0 on success, not 0 on failure
5123  */
5124 static int dp_pdev_set_advance_monitor_filter(struct cdp_pdev *pdev_handle,
5125 	struct cdp_monitor_filter *filter_val)
5126 {
5127 	/* Many monitor VAPs can exists in a system but only one can be up at
5128 	 * anytime
5129 	 */
5130 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
5131 	struct dp_vdev *vdev = pdev->monitor_vdev;
5132 	struct htt_rx_ring_tlv_filter htt_tlv_filter;
5133 	struct dp_soc *soc;
5134 	uint8_t pdev_id;
5135 	int mac_id;
5136 
5137 	pdev_id = pdev->pdev_id;
5138 	soc = pdev->soc;
5139 
5140 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_WARN,
5141 		"pdev=%pK, pdev_id=%d, soc=%pK vdev=%pK",
5142 		pdev, pdev_id, soc, vdev);
5143 
5144 	/*Check if current pdev's monitor_vdev exists */
5145 	if (!pdev->monitor_vdev) {
5146 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
5147 			"vdev=%pK", vdev);
5148 		qdf_assert(vdev);
5149 	}
5150 
5151 	/* update filter mode, type in pdev structure */
5152 	pdev->mon_filter_mode = filter_val->mode;
5153 	pdev->fp_mgmt_filter = filter_val->fp_mgmt;
5154 	pdev->fp_ctrl_filter = filter_val->fp_ctrl;
5155 	pdev->fp_data_filter = filter_val->fp_data;
5156 	pdev->mo_mgmt_filter = filter_val->mo_mgmt;
5157 	pdev->mo_ctrl_filter = filter_val->mo_ctrl;
5158 	pdev->mo_data_filter = filter_val->mo_data;
5159 
5160 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
5161 		"MODE[%x] FP[%02x|%02x|%02x] MO[%02x|%02x|%02x]",
5162 		pdev->mon_filter_mode, pdev->fp_mgmt_filter,
5163 		pdev->fp_ctrl_filter, pdev->fp_data_filter,
5164 		pdev->mo_mgmt_filter, pdev->mo_ctrl_filter,
5165 		pdev->mo_data_filter);
5166 
5167 	qdf_mem_set(&(htt_tlv_filter), sizeof(htt_tlv_filter), 0x0);
5168 
5169 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
5170 		int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, pdev_id);
5171 
5172 		htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
5173 			pdev->rxdma_mon_buf_ring[mac_id].hal_srng,
5174 			RXDMA_MONITOR_BUF, RX_BUFFER_SIZE, &htt_tlv_filter);
5175 
5176 		htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
5177 			pdev->rxdma_mon_status_ring[mac_id].hal_srng,
5178 			RXDMA_MONITOR_STATUS, RX_BUFFER_SIZE, &htt_tlv_filter);
5179 	}
5180 
5181 	htt_tlv_filter.mpdu_start = 1;
5182 	htt_tlv_filter.msdu_start = 1;
5183 	htt_tlv_filter.packet = 1;
5184 	htt_tlv_filter.msdu_end = 1;
5185 	htt_tlv_filter.mpdu_end = 1;
5186 	htt_tlv_filter.packet_header = 1;
5187 	htt_tlv_filter.attention = 1;
5188 	htt_tlv_filter.ppdu_start = 0;
5189 	htt_tlv_filter.ppdu_end = 0;
5190 	htt_tlv_filter.ppdu_end_user_stats = 0;
5191 	htt_tlv_filter.ppdu_end_user_stats_ext = 0;
5192 	htt_tlv_filter.ppdu_end_status_done = 0;
5193 	htt_tlv_filter.header_per_msdu = 1;
5194 	htt_tlv_filter.enable_fp =
5195 		(pdev->mon_filter_mode & MON_FILTER_PASS) ? 1 : 0;
5196 	htt_tlv_filter.enable_md = 0;
5197 	htt_tlv_filter.enable_mo =
5198 		(pdev->mon_filter_mode & MON_FILTER_OTHER) ? 1 : 0;
5199 	htt_tlv_filter.fp_mgmt_filter = pdev->fp_mgmt_filter;
5200 	htt_tlv_filter.fp_ctrl_filter = pdev->fp_ctrl_filter;
5201 	htt_tlv_filter.fp_data_filter = pdev->fp_data_filter;
5202 	htt_tlv_filter.mo_mgmt_filter = pdev->mo_mgmt_filter;
5203 	htt_tlv_filter.mo_ctrl_filter = pdev->mo_ctrl_filter;
5204 	htt_tlv_filter.mo_data_filter = pdev->mo_data_filter;
5205 
5206 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
5207 		int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, pdev_id);
5208 
5209 		htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
5210 			pdev->rxdma_mon_buf_ring[mac_id].hal_srng,
5211 			RXDMA_MONITOR_BUF, RX_BUFFER_SIZE, &htt_tlv_filter);
5212 	}
5213 
5214 	qdf_mem_set(&(htt_tlv_filter), sizeof(htt_tlv_filter), 0x0);
5215 
5216 	htt_tlv_filter.mpdu_start = 1;
5217 	htt_tlv_filter.msdu_start = 0;
5218 	htt_tlv_filter.packet = 0;
5219 	htt_tlv_filter.msdu_end = 0;
5220 	htt_tlv_filter.mpdu_end = 0;
5221 	htt_tlv_filter.attention = 0;
5222 	htt_tlv_filter.ppdu_start = 1;
5223 	htt_tlv_filter.ppdu_end = 1;
5224 	htt_tlv_filter.ppdu_end_user_stats = 1;
5225 	htt_tlv_filter.ppdu_end_user_stats_ext = 1;
5226 	htt_tlv_filter.ppdu_end_status_done = 1;
5227 	htt_tlv_filter.enable_fp = 1;
5228 	htt_tlv_filter.enable_md = 0;
5229 	htt_tlv_filter.enable_mo = 1;
5230 	if (pdev->mcopy_mode) {
5231 		htt_tlv_filter.packet_header = 1;
5232 	}
5233 	htt_tlv_filter.fp_mgmt_filter = FILTER_MGMT_ALL;
5234 	htt_tlv_filter.fp_ctrl_filter = FILTER_CTRL_ALL;
5235 	htt_tlv_filter.fp_data_filter = FILTER_DATA_ALL;
5236 	htt_tlv_filter.mo_mgmt_filter = FILTER_MGMT_ALL;
5237 	htt_tlv_filter.mo_ctrl_filter = FILTER_CTRL_ALL;
5238 	htt_tlv_filter.mo_data_filter = FILTER_DATA_ALL;
5239 
5240 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
5241 		int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id,
5242 						pdev->pdev_id);
5243 
5244 		htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
5245 			pdev->rxdma_mon_status_ring[mac_id].hal_srng,
5246 			RXDMA_MONITOR_STATUS, RX_BUFFER_SIZE, &htt_tlv_filter);
5247 	}
5248 
5249 	return QDF_STATUS_SUCCESS;
5250 }
5251 
5252 /**
5253  * dp_get_pdev_id_frm_pdev() - get pdev_id
5254  * @pdev_handle: Datapath PDEV handle
5255  *
5256  * Return: pdev_id
5257  */
5258 static
5259 uint8_t dp_get_pdev_id_frm_pdev(struct cdp_pdev *pdev_handle)
5260 {
5261 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
5262 
5263 	return pdev->pdev_id;
5264 }
5265 
5266 /**
5267  * dp_pdev_set_chan_noise_floor() - set channel noise floor
5268  * @pdev_handle: Datapath PDEV handle
5269  * @chan_noise_floor: Channel Noise Floor
5270  *
5271  * Return: void
5272  */
5273 static
5274 void dp_pdev_set_chan_noise_floor(struct cdp_pdev *pdev_handle,
5275 				  int16_t chan_noise_floor)
5276 {
5277 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
5278 
5279 	pdev->chan_noise_floor = chan_noise_floor;
5280 }
5281 
5282 /**
5283  * dp_vdev_get_filter_ucast_data() - get DP VDEV monitor ucast filter
5284  * @vdev_handle: Datapath VDEV handle
5285  * Return: true on ucast filter flag set
5286  */
5287 static bool dp_vdev_get_filter_ucast_data(struct cdp_vdev *vdev_handle)
5288 {
5289 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
5290 	struct dp_pdev *pdev;
5291 
5292 	pdev = vdev->pdev;
5293 
5294 	if ((pdev->fp_data_filter & FILTER_DATA_UCAST) ||
5295 	    (pdev->mo_data_filter & FILTER_DATA_UCAST))
5296 		return true;
5297 
5298 	return false;
5299 }
5300 
5301 /**
5302  * dp_vdev_get_filter_mcast_data() - get DP VDEV monitor mcast filter
5303  * @vdev_handle: Datapath VDEV handle
5304  * Return: true on mcast filter flag set
5305  */
5306 static bool dp_vdev_get_filter_mcast_data(struct cdp_vdev *vdev_handle)
5307 {
5308 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
5309 	struct dp_pdev *pdev;
5310 
5311 	pdev = vdev->pdev;
5312 
5313 	if ((pdev->fp_data_filter & FILTER_DATA_MCAST) ||
5314 	    (pdev->mo_data_filter & FILTER_DATA_MCAST))
5315 		return true;
5316 
5317 	return false;
5318 }
5319 
5320 /**
5321  * dp_vdev_get_filter_non_data() - get DP VDEV monitor non_data filter
5322  * @vdev_handle: Datapath VDEV handle
5323  * Return: true on non data filter flag set
5324  */
5325 static bool dp_vdev_get_filter_non_data(struct cdp_vdev *vdev_handle)
5326 {
5327 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
5328 	struct dp_pdev *pdev;
5329 
5330 	pdev = vdev->pdev;
5331 
5332 	if ((pdev->fp_mgmt_filter & FILTER_MGMT_ALL) ||
5333 	    (pdev->mo_mgmt_filter & FILTER_MGMT_ALL)) {
5334 		if ((pdev->fp_ctrl_filter & FILTER_CTRL_ALL) ||
5335 		    (pdev->mo_ctrl_filter & FILTER_CTRL_ALL)) {
5336 			return true;
5337 		}
5338 	}
5339 
5340 	return false;
5341 }
5342 
5343 #ifdef MESH_MODE_SUPPORT
5344 void dp_peer_set_mesh_mode(struct cdp_vdev *vdev_hdl, uint32_t val)
5345 {
5346 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_hdl;
5347 
5348 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
5349 		FL("val %d"), val);
5350 	vdev->mesh_vdev = val;
5351 }
5352 
5353 /*
5354  * dp_peer_set_mesh_rx_filter() - to set the mesh rx filter
5355  * @vdev_hdl: virtual device object
5356  * @val: value to be set
5357  *
5358  * Return: void
5359  */
5360 void dp_peer_set_mesh_rx_filter(struct cdp_vdev *vdev_hdl, uint32_t val)
5361 {
5362 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_hdl;
5363 
5364 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
5365 		FL("val %d"), val);
5366 	vdev->mesh_rx_filter = val;
5367 }
5368 #endif
5369 
5370 /*
5371  * dp_aggregate_pdev_ctrl_frames_stats()- function to agreegate peer stats
5372  * Current scope is bar received count
5373  *
5374  * @pdev_handle: DP_PDEV handle
5375  *
5376  * Return: void
5377  */
5378 #define STATS_PROC_TIMEOUT        (HZ/1000)
5379 
5380 static void
5381 dp_aggregate_pdev_ctrl_frames_stats(struct dp_pdev *pdev)
5382 {
5383 	struct dp_vdev *vdev;
5384 	struct dp_peer *peer;
5385 	uint32_t waitcnt;
5386 
5387 	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
5388 		TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
5389 			if (!peer) {
5390 				QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
5391 					FL("DP Invalid Peer refernce"));
5392 				return;
5393 			}
5394 
5395 			if (peer->delete_in_progress) {
5396 				QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
5397 					FL("DP Peer deletion in progress"));
5398 				continue;
5399 			}
5400 
5401 			qdf_atomic_inc(&peer->ref_cnt);
5402 			waitcnt = 0;
5403 			dp_peer_rxtid_stats(peer, dp_rx_bar_stats_cb, pdev);
5404 			while (!(qdf_atomic_read(&(pdev->stats_cmd_complete)))
5405 				&& waitcnt < 10) {
5406 				schedule_timeout_interruptible(
5407 						STATS_PROC_TIMEOUT);
5408 				waitcnt++;
5409 			}
5410 			qdf_atomic_set(&(pdev->stats_cmd_complete), 0);
5411 			dp_peer_unref_delete(peer);
5412 		}
5413 	}
5414 }
5415 
5416 /**
5417  * dp_rx_bar_stats_cb(): BAR received stats callback
5418  * @soc: SOC handle
5419  * @cb_ctxt: Call back context
5420  * @reo_status: Reo status
5421  *
5422  * return: void
5423  */
5424 void dp_rx_bar_stats_cb(struct dp_soc *soc, void *cb_ctxt,
5425 	union hal_reo_status *reo_status)
5426 {
5427 	struct dp_pdev *pdev = (struct dp_pdev *)cb_ctxt;
5428 	struct hal_reo_queue_status *queue_status = &(reo_status->queue_status);
5429 
5430 	if (queue_status->header.status != HAL_REO_CMD_SUCCESS) {
5431 		DP_TRACE_STATS(FATAL, "REO stats failure %d \n",
5432 			queue_status->header.status);
5433 		qdf_atomic_set(&(pdev->stats_cmd_complete), 1);
5434 		return;
5435 	}
5436 
5437 	pdev->stats.rx.bar_recv_cnt += queue_status->bar_rcvd_cnt;
5438 	qdf_atomic_set(&(pdev->stats_cmd_complete), 1);
5439 
5440 }
5441 
5442 /**
5443  * dp_aggregate_vdev_stats(): Consolidate stats at VDEV level
5444  * @vdev: DP VDEV handle
5445  *
5446  * return: void
5447  */
5448 void dp_aggregate_vdev_stats(struct dp_vdev *vdev,
5449 			     struct cdp_vdev_stats *vdev_stats)
5450 {
5451 	struct dp_peer *peer = NULL;
5452 	struct dp_soc *soc = vdev->pdev->soc;
5453 
5454 	qdf_mem_copy(vdev_stats, &vdev->stats, sizeof(vdev->stats));
5455 
5456 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem)
5457 		dp_update_vdev_stats(vdev_stats, peer);
5458 
5459 	if (soc->cdp_soc.ol_ops->update_dp_stats)
5460 		soc->cdp_soc.ol_ops->update_dp_stats(vdev->pdev->ctrl_pdev,
5461 			&vdev->stats, (uint16_t) vdev->vdev_id,
5462 			UPDATE_VDEV_STATS);
5463 
5464 }
5465 
5466 /**
5467  * dp_aggregate_pdev_stats(): Consolidate stats at PDEV level
5468  * @pdev: DP PDEV handle
5469  *
5470  * return: void
5471  */
5472 static inline void dp_aggregate_pdev_stats(struct dp_pdev *pdev)
5473 {
5474 	struct dp_vdev *vdev = NULL;
5475 	struct dp_soc *soc = pdev->soc;
5476 	struct cdp_vdev_stats *vdev_stats =
5477 			qdf_mem_malloc(sizeof(struct cdp_vdev_stats));
5478 
5479 	if (!vdev_stats) {
5480 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
5481 			  "DP alloc failure - unable to get alloc vdev stats");
5482 		return;
5483 	}
5484 
5485 	qdf_mem_set(&(pdev->stats.tx), sizeof(pdev->stats.tx), 0x0);
5486 	qdf_mem_set(&(pdev->stats.rx), sizeof(pdev->stats.rx), 0x0);
5487 	qdf_mem_set(&(pdev->stats.tx_i), sizeof(pdev->stats.tx_i), 0x0);
5488 
5489 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
5490 	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
5491 
5492 		dp_aggregate_vdev_stats(vdev, vdev_stats);
5493 		dp_update_pdev_stats(pdev, vdev_stats);
5494 
5495 		DP_STATS_AGGR_PKT(pdev, vdev, tx_i.nawds_mcast);
5496 
5497 		DP_STATS_AGGR_PKT(pdev, vdev, tx_i.rcvd);
5498 		DP_STATS_AGGR_PKT(pdev, vdev, tx_i.processed);
5499 		DP_STATS_AGGR_PKT(pdev, vdev, tx_i.reinject_pkts);
5500 		DP_STATS_AGGR_PKT(pdev, vdev, tx_i.inspect_pkts);
5501 		DP_STATS_AGGR_PKT(pdev, vdev, tx_i.raw.raw_pkt);
5502 		DP_STATS_AGGR(pdev, vdev, tx_i.raw.dma_map_error);
5503 		DP_STATS_AGGR_PKT(pdev, vdev, tx_i.tso.tso_pkt);
5504 		DP_STATS_AGGR(pdev, vdev, tx_i.tso.dropped_host.num);
5505 		DP_STATS_AGGR(pdev, vdev, tx_i.tso.dropped_target);
5506 		DP_STATS_AGGR(pdev, vdev, tx_i.sg.dropped_host.num);
5507 		DP_STATS_AGGR(pdev, vdev, tx_i.sg.dropped_target);
5508 		DP_STATS_AGGR_PKT(pdev, vdev, tx_i.sg.sg_pkt);
5509 		DP_STATS_AGGR_PKT(pdev, vdev, tx_i.mcast_en.mcast_pkt);
5510 		DP_STATS_AGGR(pdev, vdev,
5511 				tx_i.mcast_en.dropped_map_error);
5512 		DP_STATS_AGGR(pdev, vdev,
5513 				tx_i.mcast_en.dropped_self_mac);
5514 		DP_STATS_AGGR(pdev, vdev,
5515 				tx_i.mcast_en.dropped_send_fail);
5516 		DP_STATS_AGGR(pdev, vdev, tx_i.mcast_en.ucast);
5517 		DP_STATS_AGGR(pdev, vdev, tx_i.dropped.dma_error);
5518 		DP_STATS_AGGR(pdev, vdev, tx_i.dropped.ring_full);
5519 		DP_STATS_AGGR(pdev, vdev, tx_i.dropped.enqueue_fail);
5520 		DP_STATS_AGGR(pdev, vdev, tx_i.dropped.desc_na.num);
5521 		DP_STATS_AGGR(pdev, vdev, tx_i.dropped.res_full);
5522 		DP_STATS_AGGR(pdev, vdev, tx_i.dropped.headroom_insufficient);
5523 		DP_STATS_AGGR(pdev, vdev, tx_i.cce_classified);
5524 		DP_STATS_AGGR(pdev, vdev, tx_i.cce_classified_raw);
5525 		DP_STATS_AGGR(pdev, vdev, tx_i.mesh.exception_fw);
5526 		DP_STATS_AGGR(pdev, vdev, tx_i.mesh.completion_fw);
5527 
5528 		pdev->stats.tx_i.dropped.dropped_pkt.num =
5529 			pdev->stats.tx_i.dropped.dma_error +
5530 			pdev->stats.tx_i.dropped.ring_full +
5531 			pdev->stats.tx_i.dropped.enqueue_fail +
5532 			pdev->stats.tx_i.dropped.desc_na.num +
5533 			pdev->stats.tx_i.dropped.res_full;
5534 
5535 		pdev->stats.tx.last_ack_rssi =
5536 			vdev->stats.tx.last_ack_rssi;
5537 		pdev->stats.tx_i.tso.num_seg =
5538 			vdev->stats.tx_i.tso.num_seg;
5539 	}
5540 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
5541 	qdf_mem_free(vdev_stats);
5542 
5543 	if (soc->cdp_soc.ol_ops->update_dp_stats)
5544 		soc->cdp_soc.ol_ops->update_dp_stats(pdev->ctrl_pdev,
5545 				&pdev->stats, pdev->pdev_id, UPDATE_PDEV_STATS);
5546 
5547 }
5548 
5549 /**
5550  * dp_vdev_getstats() - get vdev packet level stats
5551  * @vdev_handle: Datapath VDEV handle
5552  * @stats: cdp network device stats structure
5553  *
5554  * Return: void
5555  */
5556 static void dp_vdev_getstats(void *vdev_handle,
5557 		struct cdp_dev_stats *stats)
5558 {
5559 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
5560 	struct cdp_vdev_stats *vdev_stats =
5561 			qdf_mem_malloc(sizeof(struct cdp_vdev_stats));
5562 
5563 	if (!vdev_stats) {
5564 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
5565 			  "DP alloc failure - unable to get alloc vdev stats");
5566 		return;
5567 	}
5568 
5569 	dp_aggregate_vdev_stats(vdev, vdev_stats);
5570 
5571 	stats->tx_packets = vdev_stats->tx_i.rcvd.num;
5572 	stats->tx_bytes = vdev_stats->tx_i.rcvd.bytes;
5573 
5574 	stats->tx_errors = vdev_stats->tx.tx_failed +
5575 		vdev_stats->tx_i.dropped.dropped_pkt.num;
5576 	stats->tx_dropped = stats->tx_errors;
5577 
5578 	stats->rx_packets = vdev_stats->rx.unicast.num +
5579 		vdev_stats->rx.multicast.num +
5580 		vdev_stats->rx.bcast.num;
5581 	stats->rx_bytes = vdev_stats->rx.unicast.bytes +
5582 		vdev_stats->rx.multicast.bytes +
5583 		vdev_stats->rx.bcast.bytes;
5584 
5585 }
5586 
5587 
5588 /**
5589  * dp_pdev_getstats() - get pdev packet level stats
5590  * @pdev_handle: Datapath PDEV handle
5591  * @stats: cdp network device stats structure
5592  *
5593  * Return: void
5594  */
5595 static void dp_pdev_getstats(void *pdev_handle,
5596 		struct cdp_dev_stats *stats)
5597 {
5598 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
5599 
5600 	dp_aggregate_pdev_stats(pdev);
5601 
5602 	stats->tx_packets = pdev->stats.tx_i.rcvd.num;
5603 	stats->tx_bytes = pdev->stats.tx_i.rcvd.bytes;
5604 
5605 	stats->tx_errors = pdev->stats.tx.tx_failed +
5606 		pdev->stats.tx_i.dropped.dropped_pkt.num;
5607 	stats->tx_dropped = stats->tx_errors;
5608 
5609 	stats->rx_packets = pdev->stats.rx.unicast.num +
5610 		pdev->stats.rx.multicast.num +
5611 		pdev->stats.rx.bcast.num;
5612 	stats->rx_bytes = pdev->stats.rx.unicast.bytes +
5613 		pdev->stats.rx.multicast.bytes +
5614 		pdev->stats.rx.bcast.bytes;
5615 }
5616 
5617 /**
5618  * dp_get_device_stats() - get interface level packet stats
5619  * @handle: device handle
5620  * @stats: cdp network device stats structure
5621  * @type: device type pdev/vdev
5622  *
5623  * Return: void
5624  */
5625 static void dp_get_device_stats(void *handle,
5626 		struct cdp_dev_stats *stats, uint8_t type)
5627 {
5628 	switch (type) {
5629 	case UPDATE_VDEV_STATS:
5630 		dp_vdev_getstats(handle, stats);
5631 		break;
5632 	case UPDATE_PDEV_STATS:
5633 		dp_pdev_getstats(handle, stats);
5634 		break;
5635 	default:
5636 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
5637 			"apstats cannot be updated for this input "
5638 			"type %d", type);
5639 		break;
5640 	}
5641 
5642 }
5643 
5644 
5645 /**
5646  * dp_print_pdev_tx_stats(): Print Pdev level TX stats
5647  * @pdev: DP_PDEV Handle
5648  *
5649  * Return:void
5650  */
5651 static inline void
5652 dp_print_pdev_tx_stats(struct dp_pdev *pdev)
5653 {
5654 	uint8_t index = 0;
5655 	DP_PRINT_STATS("PDEV Tx Stats:\n");
5656 	DP_PRINT_STATS("Received From Stack:");
5657 	DP_PRINT_STATS("	Packets = %d",
5658 			pdev->stats.tx_i.rcvd.num);
5659 	DP_PRINT_STATS("	Bytes = %llu",
5660 			pdev->stats.tx_i.rcvd.bytes);
5661 	DP_PRINT_STATS("Processed:");
5662 	DP_PRINT_STATS("	Packets = %d",
5663 			pdev->stats.tx_i.processed.num);
5664 	DP_PRINT_STATS("	Bytes = %llu",
5665 			pdev->stats.tx_i.processed.bytes);
5666 	DP_PRINT_STATS("Total Completions:");
5667 	DP_PRINT_STATS("	Packets = %u",
5668 			pdev->stats.tx.comp_pkt.num);
5669 	DP_PRINT_STATS("	Bytes = %llu",
5670 			pdev->stats.tx.comp_pkt.bytes);
5671 	DP_PRINT_STATS("Successful Completions:");
5672 	DP_PRINT_STATS("	Packets = %u",
5673 			pdev->stats.tx.tx_success.num);
5674 	DP_PRINT_STATS("	Bytes = %llu",
5675 			pdev->stats.tx.tx_success.bytes);
5676 	DP_PRINT_STATS("Dropped:");
5677 	DP_PRINT_STATS("	Total = %d",
5678 			pdev->stats.tx_i.dropped.dropped_pkt.num);
5679 	DP_PRINT_STATS("	Dma_map_error = %d",
5680 			pdev->stats.tx_i.dropped.dma_error);
5681 	DP_PRINT_STATS("	Ring Full = %d",
5682 			pdev->stats.tx_i.dropped.ring_full);
5683 	DP_PRINT_STATS("	Descriptor Not available = %d",
5684 			pdev->stats.tx_i.dropped.desc_na.num);
5685 	DP_PRINT_STATS("	HW enqueue failed= %d",
5686 			pdev->stats.tx_i.dropped.enqueue_fail);
5687 	DP_PRINT_STATS("	Resources Full = %d",
5688 			pdev->stats.tx_i.dropped.res_full);
5689 	DP_PRINT_STATS("	FW removed = %d",
5690 			pdev->stats.tx.dropped.fw_rem);
5691 	DP_PRINT_STATS("	FW removed transmitted = %d",
5692 			pdev->stats.tx.dropped.fw_rem_tx);
5693 	DP_PRINT_STATS("	FW removed untransmitted = %d",
5694 			pdev->stats.tx.dropped.fw_rem_notx);
5695 	DP_PRINT_STATS("	FW removed untransmitted fw_reason1 = %d",
5696 			pdev->stats.tx.dropped.fw_reason1);
5697 	DP_PRINT_STATS("	FW removed untransmitted fw_reason2 = %d",
5698 			pdev->stats.tx.dropped.fw_reason2);
5699 	DP_PRINT_STATS("	FW removed untransmitted fw_reason3 = %d",
5700 			pdev->stats.tx.dropped.fw_reason3);
5701 	DP_PRINT_STATS("	Aged Out from msdu/mpdu queues = %d",
5702 			pdev->stats.tx.dropped.age_out);
5703 	DP_PRINT_STATS("	headroom insufficient = %d",
5704 			pdev->stats.tx_i.dropped.headroom_insufficient);
5705 	DP_PRINT_STATS("	Multicast:");
5706 	DP_PRINT_STATS("	Packets: %u",
5707 		       pdev->stats.tx.mcast.num);
5708 	DP_PRINT_STATS("	Bytes: %llu",
5709 		       pdev->stats.tx.mcast.bytes);
5710 	DP_PRINT_STATS("Scatter Gather:");
5711 	DP_PRINT_STATS("	Packets = %d",
5712 			pdev->stats.tx_i.sg.sg_pkt.num);
5713 	DP_PRINT_STATS("	Bytes = %llu",
5714 			pdev->stats.tx_i.sg.sg_pkt.bytes);
5715 	DP_PRINT_STATS("	Dropped By Host = %d",
5716 			pdev->stats.tx_i.sg.dropped_host.num);
5717 	DP_PRINT_STATS("	Dropped By Target = %d",
5718 			pdev->stats.tx_i.sg.dropped_target);
5719 	DP_PRINT_STATS("TSO:");
5720 	DP_PRINT_STATS("	Number of Segments = %d",
5721 			pdev->stats.tx_i.tso.num_seg);
5722 	DP_PRINT_STATS("	Packets = %d",
5723 			pdev->stats.tx_i.tso.tso_pkt.num);
5724 	DP_PRINT_STATS("	Bytes = %llu",
5725 			pdev->stats.tx_i.tso.tso_pkt.bytes);
5726 	DP_PRINT_STATS("	Dropped By Host = %d",
5727 			pdev->stats.tx_i.tso.dropped_host.num);
5728 	DP_PRINT_STATS("Mcast Enhancement:");
5729 	DP_PRINT_STATS("	Packets = %d",
5730 			pdev->stats.tx_i.mcast_en.mcast_pkt.num);
5731 	DP_PRINT_STATS("	Bytes = %llu",
5732 			pdev->stats.tx_i.mcast_en.mcast_pkt.bytes);
5733 	DP_PRINT_STATS("	Dropped: Map Errors = %d",
5734 			pdev->stats.tx_i.mcast_en.dropped_map_error);
5735 	DP_PRINT_STATS("	Dropped: Self Mac = %d",
5736 			pdev->stats.tx_i.mcast_en.dropped_self_mac);
5737 	DP_PRINT_STATS("	Dropped: Send Fail = %d",
5738 			pdev->stats.tx_i.mcast_en.dropped_send_fail);
5739 	DP_PRINT_STATS("	Unicast sent = %d",
5740 			pdev->stats.tx_i.mcast_en.ucast);
5741 	DP_PRINT_STATS("Raw:");
5742 	DP_PRINT_STATS("	Packets = %d",
5743 			pdev->stats.tx_i.raw.raw_pkt.num);
5744 	DP_PRINT_STATS("	Bytes = %llu",
5745 			pdev->stats.tx_i.raw.raw_pkt.bytes);
5746 	DP_PRINT_STATS("	DMA map error = %d",
5747 			pdev->stats.tx_i.raw.dma_map_error);
5748 	DP_PRINT_STATS("Reinjected:");
5749 	DP_PRINT_STATS("	Packets = %d",
5750 			pdev->stats.tx_i.reinject_pkts.num);
5751 	DP_PRINT_STATS("	Bytes = %llu\n",
5752 			pdev->stats.tx_i.reinject_pkts.bytes);
5753 	DP_PRINT_STATS("Inspected:");
5754 	DP_PRINT_STATS("	Packets = %d",
5755 			pdev->stats.tx_i.inspect_pkts.num);
5756 	DP_PRINT_STATS("	Bytes = %llu",
5757 			pdev->stats.tx_i.inspect_pkts.bytes);
5758 	DP_PRINT_STATS("Nawds Multicast:");
5759 	DP_PRINT_STATS("	Packets = %d",
5760 			pdev->stats.tx_i.nawds_mcast.num);
5761 	DP_PRINT_STATS("	Bytes = %llu",
5762 			pdev->stats.tx_i.nawds_mcast.bytes);
5763 	DP_PRINT_STATS("CCE Classified:");
5764 	DP_PRINT_STATS("	CCE Classified Packets: %u",
5765 			pdev->stats.tx_i.cce_classified);
5766 	DP_PRINT_STATS("	RAW CCE Classified Packets: %u",
5767 			pdev->stats.tx_i.cce_classified_raw);
5768 	DP_PRINT_STATS("Mesh stats:");
5769 	DP_PRINT_STATS("	frames to firmware: %u",
5770 			pdev->stats.tx_i.mesh.exception_fw);
5771 	DP_PRINT_STATS("	completions from fw: %u",
5772 			pdev->stats.tx_i.mesh.completion_fw);
5773 	DP_PRINT_STATS("PPDU stats counter");
5774 	for (index = 0; index < CDP_PPDU_STATS_MAX_TAG; index++) {
5775 		DP_PRINT_STATS("	Tag[%d] = %llu", index,
5776 				pdev->stats.ppdu_stats_counter[index]);
5777 	}
5778 }
5779 
5780 /**
5781  * dp_print_pdev_rx_stats(): Print Pdev level RX stats
5782  * @pdev: DP_PDEV Handle
5783  *
5784  * Return: void
5785  */
5786 static inline void
5787 dp_print_pdev_rx_stats(struct dp_pdev *pdev)
5788 {
5789 	DP_PRINT_STATS("PDEV Rx Stats:\n");
5790 	DP_PRINT_STATS("Received From HW (Per Rx Ring):");
5791 	DP_PRINT_STATS("	Packets = %d %d %d %d",
5792 			pdev->stats.rx.rcvd_reo[0].num,
5793 			pdev->stats.rx.rcvd_reo[1].num,
5794 			pdev->stats.rx.rcvd_reo[2].num,
5795 			pdev->stats.rx.rcvd_reo[3].num);
5796 	DP_PRINT_STATS("	Bytes = %llu %llu %llu %llu",
5797 			pdev->stats.rx.rcvd_reo[0].bytes,
5798 			pdev->stats.rx.rcvd_reo[1].bytes,
5799 			pdev->stats.rx.rcvd_reo[2].bytes,
5800 			pdev->stats.rx.rcvd_reo[3].bytes);
5801 	DP_PRINT_STATS("Replenished:");
5802 	DP_PRINT_STATS("	Packets = %d",
5803 			pdev->stats.replenish.pkts.num);
5804 	DP_PRINT_STATS("	Bytes = %llu",
5805 			pdev->stats.replenish.pkts.bytes);
5806 	DP_PRINT_STATS("	Buffers Added To Freelist = %d",
5807 			pdev->stats.buf_freelist);
5808 	DP_PRINT_STATS("	Low threshold intr = %d",
5809 			pdev->stats.replenish.low_thresh_intrs);
5810 	DP_PRINT_STATS("Dropped:");
5811 	DP_PRINT_STATS("	msdu_not_done = %d",
5812 			pdev->stats.dropped.msdu_not_done);
5813 	DP_PRINT_STATS("        mon_rx_drop = %d",
5814 			pdev->stats.dropped.mon_rx_drop);
5815 	DP_PRINT_STATS("Sent To Stack:");
5816 	DP_PRINT_STATS("	Packets = %d",
5817 			pdev->stats.rx.to_stack.num);
5818 	DP_PRINT_STATS("	Bytes = %llu",
5819 			pdev->stats.rx.to_stack.bytes);
5820 	DP_PRINT_STATS("Multicast/Broadcast:");
5821 	DP_PRINT_STATS("	Packets = %d",
5822 			(pdev->stats.rx.multicast.num +
5823 			pdev->stats.rx.bcast.num));
5824 	DP_PRINT_STATS("	Bytes = %llu",
5825 			(pdev->stats.rx.multicast.bytes +
5826 			pdev->stats.rx.bcast.bytes));
5827 	DP_PRINT_STATS("Errors:");
5828 	DP_PRINT_STATS("	Rxdma Ring Un-inititalized = %d",
5829 			pdev->stats.replenish.rxdma_err);
5830 	DP_PRINT_STATS("	Desc Alloc Failed: = %d",
5831 			pdev->stats.err.desc_alloc_fail);
5832 	DP_PRINT_STATS("	IP checksum error = %d",
5833 		       pdev->stats.err.ip_csum_err);
5834 	DP_PRINT_STATS("	TCP/UDP checksum error = %d",
5835 		       pdev->stats.err.tcp_udp_csum_err);
5836 
5837 	/* Get bar_recv_cnt */
5838 	dp_aggregate_pdev_ctrl_frames_stats(pdev);
5839 	DP_PRINT_STATS("BAR Received Count: = %d",
5840 			pdev->stats.rx.bar_recv_cnt);
5841 
5842 }
5843 
5844 /**
5845  * dp_print_pdev_rx_mon_stats(): Print Pdev level RX monitor stats
5846  * @pdev: DP_PDEV Handle
5847  *
5848  * Return: void
5849  */
5850 static inline void
5851 dp_print_pdev_rx_mon_stats(struct dp_pdev *pdev)
5852 {
5853 	struct cdp_pdev_mon_stats *rx_mon_stats;
5854 
5855 	rx_mon_stats = &pdev->rx_mon_stats;
5856 
5857 	DP_PRINT_STATS("PDEV Rx Monitor Stats:\n");
5858 
5859 	dp_rx_mon_print_dbg_ppdu_stats(rx_mon_stats);
5860 
5861 	DP_PRINT_STATS("status_ppdu_done_cnt = %d",
5862 		       rx_mon_stats->status_ppdu_done);
5863 	DP_PRINT_STATS("dest_ppdu_done_cnt = %d",
5864 		       rx_mon_stats->dest_ppdu_done);
5865 	DP_PRINT_STATS("dest_mpdu_done_cnt = %d",
5866 		       rx_mon_stats->dest_mpdu_done);
5867 	DP_PRINT_STATS("dest_mpdu_drop_cnt = %d",
5868 		       rx_mon_stats->dest_mpdu_drop);
5869 }
5870 
5871 /**
5872  * dp_print_soc_tx_stats(): Print SOC level  stats
5873  * @soc DP_SOC Handle
5874  *
5875  * Return: void
5876  */
5877 static inline void
5878 dp_print_soc_tx_stats(struct dp_soc *soc)
5879 {
5880 	uint8_t desc_pool_id;
5881 	soc->stats.tx.desc_in_use = 0;
5882 
5883 	DP_PRINT_STATS("SOC Tx Stats:\n");
5884 
5885 	for (desc_pool_id = 0;
5886 	     desc_pool_id < wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
5887 	     desc_pool_id++)
5888 		soc->stats.tx.desc_in_use +=
5889 			soc->tx_desc[desc_pool_id].num_allocated;
5890 
5891 	DP_PRINT_STATS("Tx Descriptors In Use = %d",
5892 			soc->stats.tx.desc_in_use);
5893 	DP_PRINT_STATS("Invalid peer:");
5894 	DP_PRINT_STATS("	Packets = %d",
5895 			soc->stats.tx.tx_invalid_peer.num);
5896 	DP_PRINT_STATS("	Bytes = %llu",
5897 			soc->stats.tx.tx_invalid_peer.bytes);
5898 	DP_PRINT_STATS("Packets dropped due to TCL ring full = %d %d %d",
5899 			soc->stats.tx.tcl_ring_full[0],
5900 			soc->stats.tx.tcl_ring_full[1],
5901 			soc->stats.tx.tcl_ring_full[2]);
5902 
5903 }
5904 /**
5905  * dp_print_soc_rx_stats: Print SOC level Rx stats
5906  * @soc: DP_SOC Handle
5907  *
5908  * Return:void
5909  */
5910 static inline void
5911 dp_print_soc_rx_stats(struct dp_soc *soc)
5912 {
5913 	uint32_t i;
5914 	char reo_error[DP_REO_ERR_LENGTH];
5915 	char rxdma_error[DP_RXDMA_ERR_LENGTH];
5916 	uint8_t index = 0;
5917 
5918 	DP_PRINT_STATS("SOC Rx Stats:\n");
5919 	DP_PRINT_STATS("Fragmented packets: %u",
5920 		       soc->stats.rx.rx_frags);
5921 	DP_PRINT_STATS("Reo reinjected packets: %u",
5922 		       soc->stats.rx.reo_reinject);
5923 	DP_PRINT_STATS("Errors:\n");
5924 	DP_PRINT_STATS("Rx Decrypt Errors = %d",
5925 			(soc->stats.rx.err.rxdma_error[HAL_RXDMA_ERR_DECRYPT] +
5926 			soc->stats.rx.err.rxdma_error[HAL_RXDMA_ERR_TKIP_MIC]));
5927 	DP_PRINT_STATS("Invalid RBM = %d",
5928 			soc->stats.rx.err.invalid_rbm);
5929 	DP_PRINT_STATS("Invalid Vdev = %d",
5930 			soc->stats.rx.err.invalid_vdev);
5931 	DP_PRINT_STATS("Invalid Pdev = %d",
5932 			soc->stats.rx.err.invalid_pdev);
5933 	DP_PRINT_STATS("Invalid Peer = %d",
5934 			soc->stats.rx.err.rx_invalid_peer.num);
5935 	DP_PRINT_STATS("HAL Ring Access Fail = %d",
5936 			soc->stats.rx.err.hal_ring_access_fail);
5937 
5938 	for (i = 0; i < HAL_RXDMA_ERR_MAX; i++) {
5939 		index += qdf_snprint(&rxdma_error[index],
5940 				DP_RXDMA_ERR_LENGTH - index,
5941 				" %d", soc->stats.rx.err.rxdma_error[i]);
5942 	}
5943 	DP_PRINT_STATS("RXDMA Error (0-31):%s",
5944 			rxdma_error);
5945 
5946 	index = 0;
5947 	for (i = 0; i < HAL_REO_ERR_MAX; i++) {
5948 		index += qdf_snprint(&reo_error[index],
5949 				DP_REO_ERR_LENGTH - index,
5950 				" %d", soc->stats.rx.err.reo_error[i]);
5951 	}
5952 	DP_PRINT_STATS("REO Error(0-14):%s",
5953 			reo_error);
5954 }
5955 
5956 
5957 /**
5958  * dp_print_ring_stat_from_hal(): Print hal level ring stats
5959  * @soc: DP_SOC handle
5960  * @srng: DP_SRNG handle
5961  * @ring_name: SRNG name
5962  *
5963  * Return: void
5964  */
5965 static inline void
5966 dp_print_ring_stat_from_hal(struct dp_soc *soc,  struct dp_srng *srng,
5967 	char *ring_name)
5968 {
5969 	uint32_t tailp;
5970 	uint32_t headp;
5971 
5972 	if (srng->hal_srng != NULL) {
5973 		hal_api_get_tphp(soc->hal_soc, srng->hal_srng, &tailp, &headp);
5974 		DP_PRINT_STATS("%s : Head pointer = %d  Tail Pointer = %d\n",
5975 				ring_name, headp, tailp);
5976 	}
5977 }
5978 
5979 /**
5980  * dp_print_ring_stats(): Print tail and head pointer
5981  * @pdev: DP_PDEV handle
5982  *
5983  * Return:void
5984  */
5985 static inline void
5986 dp_print_ring_stats(struct dp_pdev *pdev)
5987 {
5988 	uint32_t i;
5989 	char ring_name[STR_MAXLEN + 1];
5990 	int mac_id;
5991 
5992 	dp_print_ring_stat_from_hal(pdev->soc,
5993 			&pdev->soc->reo_exception_ring,
5994 			"Reo Exception Ring");
5995 	dp_print_ring_stat_from_hal(pdev->soc,
5996 			&pdev->soc->reo_reinject_ring,
5997 			"Reo Inject Ring");
5998 	dp_print_ring_stat_from_hal(pdev->soc,
5999 			&pdev->soc->reo_cmd_ring,
6000 			"Reo Command Ring");
6001 	dp_print_ring_stat_from_hal(pdev->soc,
6002 			&pdev->soc->reo_status_ring,
6003 			"Reo Status Ring");
6004 	dp_print_ring_stat_from_hal(pdev->soc,
6005 			&pdev->soc->rx_rel_ring,
6006 			"Rx Release ring");
6007 	dp_print_ring_stat_from_hal(pdev->soc,
6008 			&pdev->soc->tcl_cmd_ring,
6009 			"Tcl command Ring");
6010 	dp_print_ring_stat_from_hal(pdev->soc,
6011 			&pdev->soc->tcl_status_ring,
6012 			"Tcl Status Ring");
6013 	dp_print_ring_stat_from_hal(pdev->soc,
6014 			&pdev->soc->wbm_desc_rel_ring,
6015 			"Wbm Desc Rel Ring");
6016 	for (i = 0; i < MAX_REO_DEST_RINGS; i++) {
6017 		snprintf(ring_name, STR_MAXLEN, "Reo Dest Ring %d", i);
6018 		dp_print_ring_stat_from_hal(pdev->soc,
6019 				&pdev->soc->reo_dest_ring[i],
6020 				ring_name);
6021 	}
6022 	for (i = 0; i < pdev->soc->num_tcl_data_rings; i++) {
6023 		snprintf(ring_name, STR_MAXLEN, "Tcl Data Ring %d", i);
6024 		dp_print_ring_stat_from_hal(pdev->soc,
6025 				&pdev->soc->tcl_data_ring[i],
6026 				ring_name);
6027 	}
6028 	for (i = 0; i < MAX_TCL_DATA_RINGS; i++) {
6029 		snprintf(ring_name, STR_MAXLEN, "Tx Comp Ring %d", i);
6030 		dp_print_ring_stat_from_hal(pdev->soc,
6031 				&pdev->soc->tx_comp_ring[i],
6032 				ring_name);
6033 	}
6034 	dp_print_ring_stat_from_hal(pdev->soc,
6035 			&pdev->rx_refill_buf_ring,
6036 			"Rx Refill Buf Ring");
6037 
6038 	dp_print_ring_stat_from_hal(pdev->soc,
6039 			&pdev->rx_refill_buf_ring2,
6040 			"Second Rx Refill Buf Ring");
6041 
6042 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
6043 		dp_print_ring_stat_from_hal(pdev->soc,
6044 				&pdev->rxdma_mon_buf_ring[mac_id],
6045 				"Rxdma Mon Buf Ring");
6046 		dp_print_ring_stat_from_hal(pdev->soc,
6047 				&pdev->rxdma_mon_dst_ring[mac_id],
6048 				"Rxdma Mon Dst Ring");
6049 		dp_print_ring_stat_from_hal(pdev->soc,
6050 				&pdev->rxdma_mon_status_ring[mac_id],
6051 				"Rxdma Mon Status Ring");
6052 		dp_print_ring_stat_from_hal(pdev->soc,
6053 				&pdev->rxdma_mon_desc_ring[mac_id],
6054 				"Rxdma mon desc Ring");
6055 	}
6056 
6057 	for (i = 0; i < NUM_RXDMA_RINGS_PER_PDEV; i++) {
6058 		snprintf(ring_name, STR_MAXLEN, "Rxdma err dst ring %d", i);
6059 		dp_print_ring_stat_from_hal(pdev->soc,
6060 			&pdev->rxdma_err_dst_ring[i],
6061 			ring_name);
6062 	}
6063 
6064 	for (i = 0; i < MAX_RX_MAC_RINGS; i++) {
6065 		snprintf(ring_name, STR_MAXLEN, "Rx mac buf ring %d", i);
6066 		dp_print_ring_stat_from_hal(pdev->soc,
6067 				&pdev->rx_mac_buf_ring[i],
6068 				ring_name);
6069 	}
6070 }
6071 
6072 /**
6073  * dp_txrx_host_stats_clr(): Reinitialize the txrx stats
6074  * @vdev: DP_VDEV handle
6075  *
6076  * Return:void
6077  */
6078 static inline void
6079 dp_txrx_host_stats_clr(struct dp_vdev *vdev)
6080 {
6081 	struct dp_peer *peer = NULL;
6082 	struct dp_soc *soc = (struct dp_soc *)vdev->pdev->soc;
6083 
6084 	DP_STATS_CLR(vdev->pdev);
6085 	DP_STATS_CLR(vdev->pdev->soc);
6086 	DP_STATS_CLR(vdev);
6087 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
6088 		if (!peer)
6089 			return;
6090 		DP_STATS_CLR(peer);
6091 
6092 		if (soc->cdp_soc.ol_ops->update_dp_stats) {
6093 			soc->cdp_soc.ol_ops->update_dp_stats(
6094 					vdev->pdev->ctrl_pdev,
6095 					&peer->stats,
6096 					peer->peer_ids[0],
6097 					UPDATE_PEER_STATS);
6098 		}
6099 
6100 	}
6101 
6102 	if (soc->cdp_soc.ol_ops->update_dp_stats)
6103 		soc->cdp_soc.ol_ops->update_dp_stats(vdev->pdev->ctrl_pdev,
6104 				&vdev->stats, (uint16_t)vdev->vdev_id,
6105 				UPDATE_VDEV_STATS);
6106 }
6107 
6108 /**
6109  * dp_print_common_rates_info(): Print common rate for tx or rx
6110  * @pkt_type_array: rate type array contains rate info
6111  *
6112  * Return:void
6113  */
6114 static inline void
6115 dp_print_common_rates_info(struct cdp_pkt_type *pkt_type_array)
6116 {
6117 	uint8_t mcs, pkt_type;
6118 
6119 	for (pkt_type = 0; pkt_type < DOT11_MAX; pkt_type++) {
6120 		for (mcs = 0; mcs < MAX_MCS; mcs++) {
6121 			if (!dp_rate_string[pkt_type][mcs].valid)
6122 				continue;
6123 
6124 			DP_PRINT_STATS("	%s = %d",
6125 				       dp_rate_string[pkt_type][mcs].mcs_type,
6126 				       pkt_type_array[pkt_type].mcs_count[mcs]);
6127 		}
6128 
6129 		DP_PRINT_STATS("\n");
6130 	}
6131 }
6132 
6133 /**
6134  * dp_print_rx_rates(): Print Rx rate stats
6135  * @vdev: DP_VDEV handle
6136  *
6137  * Return:void
6138  */
6139 static inline void
6140 dp_print_rx_rates(struct dp_vdev *vdev)
6141 {
6142 	struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;
6143 	uint8_t i;
6144 	uint8_t index = 0;
6145 	char nss[DP_NSS_LENGTH];
6146 
6147 	DP_PRINT_STATS("Rx Rate Info:\n");
6148 	dp_print_common_rates_info(pdev->stats.rx.pkt_type);
6149 
6150 
6151 	index = 0;
6152 	for (i = 0; i < SS_COUNT; i++) {
6153 		index += qdf_snprint(&nss[index], DP_NSS_LENGTH - index,
6154 				" %d", pdev->stats.rx.nss[i]);
6155 	}
6156 	DP_PRINT_STATS("NSS(1-8) = %s",
6157 			nss);
6158 
6159 	DP_PRINT_STATS("SGI ="
6160 			" 0.8us %d,"
6161 			" 0.4us %d,"
6162 			" 1.6us %d,"
6163 			" 3.2us %d,",
6164 			pdev->stats.rx.sgi_count[0],
6165 			pdev->stats.rx.sgi_count[1],
6166 			pdev->stats.rx.sgi_count[2],
6167 			pdev->stats.rx.sgi_count[3]);
6168 	DP_PRINT_STATS("BW Counts = 20MHZ %d, 40MHZ %d, 80MHZ %d, 160MHZ %d",
6169 			pdev->stats.rx.bw[0], pdev->stats.rx.bw[1],
6170 			pdev->stats.rx.bw[2], pdev->stats.rx.bw[3]);
6171 	DP_PRINT_STATS("Reception Type ="
6172 			" SU: %d,"
6173 			" MU_MIMO:%d,"
6174 			" MU_OFDMA:%d,"
6175 			" MU_OFDMA_MIMO:%d\n",
6176 			pdev->stats.rx.reception_type[0],
6177 			pdev->stats.rx.reception_type[1],
6178 			pdev->stats.rx.reception_type[2],
6179 			pdev->stats.rx.reception_type[3]);
6180 	DP_PRINT_STATS("Aggregation:\n");
6181 	DP_PRINT_STATS("Number of Msdu's Part of Ampdus = %d",
6182 			pdev->stats.rx.ampdu_cnt);
6183 	DP_PRINT_STATS("Number of Msdu's With No Mpdu Level Aggregation : %d",
6184 			pdev->stats.rx.non_ampdu_cnt);
6185 	DP_PRINT_STATS("Number of Msdu's Part of Amsdu: %d",
6186 			pdev->stats.rx.amsdu_cnt);
6187 	DP_PRINT_STATS("Number of Msdu's With No Msdu Level Aggregation: %d",
6188 			pdev->stats.rx.non_amsdu_cnt);
6189 }
6190 
6191 /**
6192  * dp_print_tx_rates(): Print tx rates
6193  * @vdev: DP_VDEV handle
6194  *
6195  * Return:void
6196  */
6197 static inline void
6198 dp_print_tx_rates(struct dp_vdev *vdev)
6199 {
6200 	struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;
6201 	uint8_t index;
6202 	char nss[DP_NSS_LENGTH];
6203 	int nss_index;
6204 
6205 	DP_PRINT_STATS("Tx Rate Info:\n");
6206 	dp_print_common_rates_info(pdev->stats.tx.pkt_type);
6207 
6208 	DP_PRINT_STATS("SGI ="
6209 			" 0.8us %d"
6210 			" 0.4us %d"
6211 			" 1.6us %d"
6212 			" 3.2us %d",
6213 			pdev->stats.tx.sgi_count[0],
6214 			pdev->stats.tx.sgi_count[1],
6215 			pdev->stats.tx.sgi_count[2],
6216 			pdev->stats.tx.sgi_count[3]);
6217 
6218 	DP_PRINT_STATS("BW Counts = 20MHZ %d, 40MHZ %d, 80MHZ %d, 160MHZ %d",
6219 			pdev->stats.tx.bw[0], pdev->stats.tx.bw[1],
6220 			pdev->stats.tx.bw[2], pdev->stats.tx.bw[3]);
6221 
6222 	index = 0;
6223 	for (nss_index = 0; nss_index < SS_COUNT; nss_index++) {
6224 		index += qdf_snprint(&nss[index], DP_NSS_LENGTH - index,
6225 				" %d", pdev->stats.tx.nss[nss_index]);
6226 	}
6227 
6228 	DP_PRINT_STATS("NSS(1-8) = %s", nss);
6229 	DP_PRINT_STATS("OFDMA = %d", pdev->stats.tx.ofdma);
6230 	DP_PRINT_STATS("STBC = %d", pdev->stats.tx.stbc);
6231 	DP_PRINT_STATS("LDPC = %d", pdev->stats.tx.ldpc);
6232 	DP_PRINT_STATS("Retries = %d", pdev->stats.tx.retries);
6233 	DP_PRINT_STATS("Last ack rssi = %d\n", pdev->stats.tx.last_ack_rssi);
6234 
6235 	DP_PRINT_STATS("Aggregation:\n");
6236 	DP_PRINT_STATS("Number of Msdu's Part of Amsdu = %d",
6237 			pdev->stats.tx.amsdu_cnt);
6238 	DP_PRINT_STATS("Number of Msdu's With No Msdu Level Aggregation = %d",
6239 			pdev->stats.tx.non_amsdu_cnt);
6240 }
6241 
6242 /**
6243  * dp_print_peer_stats():print peer stats
6244  * @peer: DP_PEER handle
6245  *
6246  * return void
6247  */
6248 static inline void dp_print_peer_stats(struct dp_peer *peer)
6249 {
6250 	uint8_t i;
6251 	uint32_t index;
6252 	char nss[DP_NSS_LENGTH];
6253 	DP_PRINT_STATS("Node Tx Stats:\n");
6254 	DP_PRINT_STATS("Total Packet Completions = %d",
6255 			peer->stats.tx.comp_pkt.num);
6256 	DP_PRINT_STATS("Total Bytes Completions = %llu",
6257 			peer->stats.tx.comp_pkt.bytes);
6258 	DP_PRINT_STATS("Success Packets = %d",
6259 			peer->stats.tx.tx_success.num);
6260 	DP_PRINT_STATS("Success Bytes = %llu",
6261 			peer->stats.tx.tx_success.bytes);
6262 	DP_PRINT_STATS("Unicast Success Packets = %d",
6263 			peer->stats.tx.ucast.num);
6264 	DP_PRINT_STATS("Unicast Success Bytes = %llu",
6265 			peer->stats.tx.ucast.bytes);
6266 	DP_PRINT_STATS("Multicast Success Packets = %d",
6267 			peer->stats.tx.mcast.num);
6268 	DP_PRINT_STATS("Multicast Success Bytes = %llu",
6269 			peer->stats.tx.mcast.bytes);
6270 	DP_PRINT_STATS("Broadcast Success Packets = %d",
6271 			peer->stats.tx.bcast.num);
6272 	DP_PRINT_STATS("Broadcast Success Bytes = %llu",
6273 			peer->stats.tx.bcast.bytes);
6274 	DP_PRINT_STATS("Packets Failed = %d",
6275 			peer->stats.tx.tx_failed);
6276 	DP_PRINT_STATS("Packets In OFDMA = %d",
6277 			peer->stats.tx.ofdma);
6278 	DP_PRINT_STATS("Packets In STBC = %d",
6279 			peer->stats.tx.stbc);
6280 	DP_PRINT_STATS("Packets In LDPC = %d",
6281 			peer->stats.tx.ldpc);
6282 	DP_PRINT_STATS("Packet Retries = %d",
6283 			peer->stats.tx.retries);
6284 	DP_PRINT_STATS("MSDU's Part of AMSDU = %d",
6285 			peer->stats.tx.amsdu_cnt);
6286 	DP_PRINT_STATS("Last Packet RSSI = %d",
6287 			peer->stats.tx.last_ack_rssi);
6288 	DP_PRINT_STATS("Dropped At FW: Removed = %d",
6289 			peer->stats.tx.dropped.fw_rem);
6290 	DP_PRINT_STATS("Dropped At FW: Removed transmitted = %d",
6291 			peer->stats.tx.dropped.fw_rem_tx);
6292 	DP_PRINT_STATS("Dropped At FW: Removed Untransmitted = %d",
6293 			peer->stats.tx.dropped.fw_rem_notx);
6294 	DP_PRINT_STATS("Dropped : Age Out = %d",
6295 			peer->stats.tx.dropped.age_out);
6296 	DP_PRINT_STATS("NAWDS : ");
6297 	DP_PRINT_STATS("	Nawds multicast Drop Tx Packet = %d",
6298 			peer->stats.tx.nawds_mcast_drop);
6299 	DP_PRINT_STATS("	Nawds multicast  Tx Packet Count = %d",
6300 			peer->stats.tx.nawds_mcast.num);
6301 	DP_PRINT_STATS("	Nawds multicast  Tx Packet Bytes = %llu",
6302 			peer->stats.tx.nawds_mcast.bytes);
6303 
6304 	DP_PRINT_STATS("Rate Info:");
6305 	dp_print_common_rates_info(peer->stats.tx.pkt_type);
6306 
6307 
6308 	DP_PRINT_STATS("SGI = "
6309 			" 0.8us %d"
6310 			" 0.4us %d"
6311 			" 1.6us %d"
6312 			" 3.2us %d",
6313 			peer->stats.tx.sgi_count[0],
6314 			peer->stats.tx.sgi_count[1],
6315 			peer->stats.tx.sgi_count[2],
6316 			peer->stats.tx.sgi_count[3]);
6317 	DP_PRINT_STATS("Excess Retries per AC ");
6318 	DP_PRINT_STATS("	 Best effort = %d",
6319 			peer->stats.tx.excess_retries_per_ac[0]);
6320 	DP_PRINT_STATS("	 Background= %d",
6321 			peer->stats.tx.excess_retries_per_ac[1]);
6322 	DP_PRINT_STATS("	 Video = %d",
6323 			peer->stats.tx.excess_retries_per_ac[2]);
6324 	DP_PRINT_STATS("	 Voice = %d",
6325 			peer->stats.tx.excess_retries_per_ac[3]);
6326 	DP_PRINT_STATS("BW Counts = 20MHZ %d 40MHZ %d 80MHZ %d 160MHZ %d\n",
6327 			peer->stats.tx.bw[2], peer->stats.tx.bw[3],
6328 			peer->stats.tx.bw[4], peer->stats.tx.bw[5]);
6329 
6330 	index = 0;
6331 	for (i = 0; i < SS_COUNT; i++) {
6332 		index += qdf_snprint(&nss[index], DP_NSS_LENGTH - index,
6333 				" %d", peer->stats.tx.nss[i]);
6334 	}
6335 	DP_PRINT_STATS("NSS(1-8) = %s",
6336 			nss);
6337 
6338 	DP_PRINT_STATS("Aggregation:");
6339 	DP_PRINT_STATS("	Number of Msdu's Part of Amsdu = %d",
6340 			peer->stats.tx.amsdu_cnt);
6341 	DP_PRINT_STATS("	Number of Msdu's With No Msdu Level Aggregation = %d\n",
6342 			peer->stats.tx.non_amsdu_cnt);
6343 
6344 	DP_PRINT_STATS("Bytes and Packets transmitted  in last one sec:");
6345 	DP_PRINT_STATS("	Bytes transmitted in last sec: %d",
6346 		       peer->stats.tx.tx_byte_rate);
6347 	DP_PRINT_STATS("	Data transmitted in last sec: %d",
6348 		       peer->stats.tx.tx_data_rate);
6349 
6350 	DP_PRINT_STATS("Node Rx Stats:");
6351 	DP_PRINT_STATS("Packets Sent To Stack = %d",
6352 			peer->stats.rx.to_stack.num);
6353 	DP_PRINT_STATS("Bytes Sent To Stack = %llu",
6354 			peer->stats.rx.to_stack.bytes);
6355 	for (i = 0; i <  CDP_MAX_RX_RINGS; i++) {
6356 		DP_PRINT_STATS("Ring Id = %d", i);
6357 		DP_PRINT_STATS("	Packets Received = %d",
6358 				peer->stats.rx.rcvd_reo[i].num);
6359 		DP_PRINT_STATS("	Bytes Received = %llu",
6360 				peer->stats.rx.rcvd_reo[i].bytes);
6361 	}
6362 	DP_PRINT_STATS("Multicast Packets Received = %d",
6363 			peer->stats.rx.multicast.num);
6364 	DP_PRINT_STATS("Multicast Bytes Received = %llu",
6365 			peer->stats.rx.multicast.bytes);
6366 	DP_PRINT_STATS("Broadcast Packets Received = %d",
6367 			peer->stats.rx.bcast.num);
6368 	DP_PRINT_STATS("Broadcast Bytes Received = %llu",
6369 			peer->stats.rx.bcast.bytes);
6370 	DP_PRINT_STATS("Intra BSS Packets Received = %d",
6371 			peer->stats.rx.intra_bss.pkts.num);
6372 	DP_PRINT_STATS("Intra BSS Bytes Received = %llu",
6373 			peer->stats.rx.intra_bss.pkts.bytes);
6374 	DP_PRINT_STATS("Raw Packets Received = %d",
6375 			peer->stats.rx.raw.num);
6376 	DP_PRINT_STATS("Raw Bytes Received = %llu",
6377 			peer->stats.rx.raw.bytes);
6378 	DP_PRINT_STATS("Errors: MIC Errors = %d",
6379 			peer->stats.rx.err.mic_err);
6380 	DP_PRINT_STATS("Erros: Decryption Errors = %d",
6381 			peer->stats.rx.err.decrypt_err);
6382 	DP_PRINT_STATS("Msdu's Received As Part of Ampdu = %d",
6383 			peer->stats.rx.non_ampdu_cnt);
6384 	DP_PRINT_STATS("Msdu's Recived As Ampdu = %d",
6385 			peer->stats.rx.ampdu_cnt);
6386 	DP_PRINT_STATS("Msdu's Received Not Part of Amsdu's = %d",
6387 			peer->stats.rx.non_amsdu_cnt);
6388 	DP_PRINT_STATS("MSDUs Received As Part of Amsdu = %d",
6389 			peer->stats.rx.amsdu_cnt);
6390 	DP_PRINT_STATS("NAWDS : ");
6391 	DP_PRINT_STATS("	Nawds multicast Drop Rx Packet = %d",
6392 			peer->stats.rx.nawds_mcast_drop);
6393 	DP_PRINT_STATS("SGI ="
6394 			" 0.8us %d"
6395 			" 0.4us %d"
6396 			" 1.6us %d"
6397 			" 3.2us %d",
6398 			peer->stats.rx.sgi_count[0],
6399 			peer->stats.rx.sgi_count[1],
6400 			peer->stats.rx.sgi_count[2],
6401 			peer->stats.rx.sgi_count[3]);
6402 	DP_PRINT_STATS("BW Counts = 20MHZ %d 40MHZ %d 80MHZ %d 160MHZ %d",
6403 			peer->stats.rx.bw[0], peer->stats.rx.bw[1],
6404 			peer->stats.rx.bw[2], peer->stats.rx.bw[3]);
6405 	DP_PRINT_STATS("Reception Type ="
6406 			" SU %d,"
6407 			" MU_MIMO %d,"
6408 			" MU_OFDMA %d,"
6409 			" MU_OFDMA_MIMO %d",
6410 			peer->stats.rx.reception_type[0],
6411 			peer->stats.rx.reception_type[1],
6412 			peer->stats.rx.reception_type[2],
6413 			peer->stats.rx.reception_type[3]);
6414 
6415 	dp_print_common_rates_info(peer->stats.rx.pkt_type);
6416 
6417 	index = 0;
6418 	for (i = 0; i < SS_COUNT; i++) {
6419 		index += qdf_snprint(&nss[index], DP_NSS_LENGTH - index,
6420 				" %d", peer->stats.rx.nss[i]);
6421 	}
6422 	DP_PRINT_STATS("NSS(1-8) = %s",
6423 			nss);
6424 
6425 	DP_PRINT_STATS("Aggregation:");
6426 	DP_PRINT_STATS("	Msdu's Part of Ampdu = %d",
6427 			peer->stats.rx.ampdu_cnt);
6428 	DP_PRINT_STATS("	Msdu's With No Mpdu Level Aggregation = %d",
6429 			peer->stats.rx.non_ampdu_cnt);
6430 	DP_PRINT_STATS("	Msdu's Part of Amsdu = %d",
6431 			peer->stats.rx.amsdu_cnt);
6432 	DP_PRINT_STATS("	Msdu's With No Msdu Level Aggregation = %d",
6433 			peer->stats.rx.non_amsdu_cnt);
6434 
6435 	DP_PRINT_STATS("Bytes and Packets received in last one sec:");
6436 	DP_PRINT_STATS("	Bytes received in last sec: %d",
6437 		       peer->stats.rx.rx_byte_rate);
6438 	DP_PRINT_STATS("	Data received in last sec: %d",
6439 		       peer->stats.rx.rx_data_rate);
6440 }
6441 
6442 /*
6443  * dp_get_host_peer_stats()- function to print peer stats
6444  * @pdev_handle: DP_PDEV handle
6445  * @mac_addr: mac address of the peer
6446  *
6447  * Return: void
6448  */
6449 static void
6450 dp_get_host_peer_stats(struct cdp_pdev *pdev_handle, char *mac_addr)
6451 {
6452 	struct dp_peer *peer;
6453 	uint8_t local_id;
6454 
6455 	peer = (struct dp_peer *)dp_find_peer_by_addr(pdev_handle, mac_addr,
6456 			&local_id);
6457 
6458 	if (!peer) {
6459 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
6460 			  "%s: Invalid peer\n", __func__);
6461 		return;
6462 	}
6463 
6464 	dp_print_peer_stats(peer);
6465 	dp_peer_rxtid_stats(peer, dp_rx_tid_stats_cb, NULL);
6466 }
6467 
6468 /**
6469  * dp_print_host_stats()- Function to print the stats aggregated at host
6470  * @vdev_handle: DP_VDEV handle
6471  * @type: host stats type
6472  *
6473  * Available Stat types
6474  * TXRX_CLEAR_STATS  : Clear the stats
6475  * TXRX_RX_RATE_STATS: Print Rx Rate Info
6476  * TXRX_TX_RATE_STATS: Print Tx Rate Info
6477  * TXRX_TX_HOST_STATS: Print Tx Stats
6478  * TXRX_RX_HOST_STATS: Print Rx Stats
6479  * TXRX_AST_STATS: Print AST Stats
6480  * TXRX_SRNG_PTR_STATS: Print SRNG ring pointer stats
6481  *
6482  * Return: 0 on success, print error message in case of failure
6483  */
6484 static int
6485 dp_print_host_stats(struct cdp_vdev *vdev_handle,
6486 		    struct cdp_txrx_stats_req *req)
6487 {
6488 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
6489 	struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;
6490 	enum cdp_host_txrx_stats type =
6491 			dp_stats_mapping_table[req->stats][STATS_HOST];
6492 
6493 	dp_aggregate_pdev_stats(pdev);
6494 
6495 	switch (type) {
6496 	case TXRX_CLEAR_STATS:
6497 		dp_txrx_host_stats_clr(vdev);
6498 		break;
6499 	case TXRX_RX_RATE_STATS:
6500 		dp_print_rx_rates(vdev);
6501 		break;
6502 	case TXRX_TX_RATE_STATS:
6503 		dp_print_tx_rates(vdev);
6504 		break;
6505 	case TXRX_TX_HOST_STATS:
6506 		dp_print_pdev_tx_stats(pdev);
6507 		dp_print_soc_tx_stats(pdev->soc);
6508 		break;
6509 	case TXRX_RX_HOST_STATS:
6510 		dp_print_pdev_rx_stats(pdev);
6511 		dp_print_soc_rx_stats(pdev->soc);
6512 		break;
6513 	case TXRX_AST_STATS:
6514 		dp_print_ast_stats(pdev->soc);
6515 		dp_print_peer_table(vdev);
6516 		break;
6517 	case TXRX_SRNG_PTR_STATS:
6518 		dp_print_ring_stats(pdev);
6519 		break;
6520 	case TXRX_RX_MON_STATS:
6521 		dp_print_pdev_rx_mon_stats(pdev);
6522 		break;
6523 	case TXRX_REO_QUEUE_STATS:
6524 		dp_get_host_peer_stats((struct cdp_pdev *)pdev, req->peer_addr);
6525 		break;
6526 	default:
6527 		DP_TRACE(FATAL, "Wrong Input For TxRx Host Stats");
6528 		break;
6529 	}
6530 	return 0;
6531 }
6532 
6533 /*
6534  * dp_ppdu_ring_reset()- Reset PPDU Stats ring
6535  * @pdev: DP_PDEV handle
6536  *
6537  * Return: void
6538  */
6539 static void
6540 dp_ppdu_ring_reset(struct dp_pdev *pdev)
6541 {
6542 	struct htt_rx_ring_tlv_filter htt_tlv_filter;
6543 	int mac_id;
6544 
6545 	qdf_mem_set(&(htt_tlv_filter), sizeof(htt_tlv_filter), 0x0);
6546 
6547 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
6548 		int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id,
6549 							pdev->pdev_id);
6550 
6551 		htt_h2t_rx_ring_cfg(pdev->soc->htt_handle, mac_for_pdev,
6552 			pdev->rxdma_mon_status_ring[mac_id].hal_srng,
6553 			RXDMA_MONITOR_STATUS, RX_BUFFER_SIZE, &htt_tlv_filter);
6554 	}
6555 }
6556 
6557 /*
6558  * dp_ppdu_ring_cfg()- Configure PPDU Stats ring
6559  * @pdev: DP_PDEV handle
6560  *
6561  * Return: void
6562  */
6563 static void
6564 dp_ppdu_ring_cfg(struct dp_pdev *pdev)
6565 {
6566 	struct htt_rx_ring_tlv_filter htt_tlv_filter = {0};
6567 	int mac_id;
6568 
6569 	htt_tlv_filter.mpdu_start = 1;
6570 	htt_tlv_filter.msdu_start = 0;
6571 	htt_tlv_filter.packet = 0;
6572 	htt_tlv_filter.msdu_end = 0;
6573 	htt_tlv_filter.mpdu_end = 0;
6574 	htt_tlv_filter.attention = 0;
6575 	htt_tlv_filter.ppdu_start = 1;
6576 	htt_tlv_filter.ppdu_end = 1;
6577 	htt_tlv_filter.ppdu_end_user_stats = 1;
6578 	htt_tlv_filter.ppdu_end_user_stats_ext = 1;
6579 	htt_tlv_filter.ppdu_end_status_done = 1;
6580 	htt_tlv_filter.enable_fp = 1;
6581 	htt_tlv_filter.enable_md = 0;
6582 	if (pdev->neighbour_peers_added &&
6583 	    pdev->soc->hw_nac_monitor_support) {
6584 		htt_tlv_filter.enable_md = 1;
6585 		htt_tlv_filter.packet_header = 1;
6586 	}
6587 	if (pdev->mcopy_mode) {
6588 		htt_tlv_filter.packet_header = 1;
6589 		htt_tlv_filter.enable_mo = 1;
6590 	}
6591 	htt_tlv_filter.fp_mgmt_filter = FILTER_MGMT_ALL;
6592 	htt_tlv_filter.fp_ctrl_filter = FILTER_CTRL_ALL;
6593 	htt_tlv_filter.fp_data_filter = FILTER_DATA_ALL;
6594 	htt_tlv_filter.mo_mgmt_filter = FILTER_MGMT_ALL;
6595 	htt_tlv_filter.mo_ctrl_filter = FILTER_CTRL_ALL;
6596 	htt_tlv_filter.mo_data_filter = FILTER_DATA_ALL;
6597 	if (pdev->neighbour_peers_added &&
6598 	    pdev->soc->hw_nac_monitor_support)
6599 		htt_tlv_filter.md_data_filter = FILTER_DATA_ALL;
6600 
6601 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
6602 		int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id,
6603 						pdev->pdev_id);
6604 
6605 		htt_h2t_rx_ring_cfg(pdev->soc->htt_handle, mac_for_pdev,
6606 			pdev->rxdma_mon_status_ring[mac_id].hal_srng,
6607 			RXDMA_MONITOR_STATUS, RX_BUFFER_SIZE, &htt_tlv_filter);
6608 	}
6609 }
6610 
6611 /*
6612  * is_ppdu_txrx_capture_enabled() - API to check both pktlog and debug_sniffer
6613  *                              modes are enabled or not.
6614  * @dp_pdev: dp pdev handle.
6615  *
6616  * Return: bool
6617  */
6618 static inline bool is_ppdu_txrx_capture_enabled(struct dp_pdev *pdev)
6619 {
6620 	if (!pdev->pktlog_ppdu_stats && !pdev->tx_sniffer_enable &&
6621 	    !pdev->mcopy_mode)
6622 		return true;
6623 	else
6624 		return false;
6625 }
6626 
6627 /*
6628  *dp_set_bpr_enable() - API to enable/disable bpr feature
6629  *@pdev_handle: DP_PDEV handle.
6630  *@val: Provided value.
6631  *
6632  *Return: void
6633  */
6634 static void
6635 dp_set_bpr_enable(struct cdp_pdev *pdev_handle, int val)
6636 {
6637 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
6638 
6639 	switch (val) {
6640 	case CDP_BPR_DISABLE:
6641 		pdev->bpr_enable = CDP_BPR_DISABLE;
6642 		if (!pdev->pktlog_ppdu_stats && !pdev->enhanced_stats_en &&
6643 		    !pdev->tx_sniffer_enable && !pdev->mcopy_mode) {
6644 			dp_h2t_cfg_stats_msg_send(pdev, 0, pdev->pdev_id);
6645 		} else if (pdev->enhanced_stats_en &&
6646 			   !pdev->tx_sniffer_enable && !pdev->mcopy_mode &&
6647 			   !pdev->pktlog_ppdu_stats) {
6648 			dp_h2t_cfg_stats_msg_send(pdev,
6649 						  DP_PPDU_STATS_CFG_ENH_STATS,
6650 						  pdev->pdev_id);
6651 		}
6652 		break;
6653 	case CDP_BPR_ENABLE:
6654 		pdev->bpr_enable = CDP_BPR_ENABLE;
6655 		if (!pdev->enhanced_stats_en && !pdev->tx_sniffer_enable &&
6656 		    !pdev->mcopy_mode && !pdev->pktlog_ppdu_stats) {
6657 			dp_h2t_cfg_stats_msg_send(pdev,
6658 						  DP_PPDU_STATS_CFG_BPR,
6659 						  pdev->pdev_id);
6660 		} else if (pdev->enhanced_stats_en &&
6661 			   !pdev->tx_sniffer_enable && !pdev->mcopy_mode &&
6662 			   !pdev->pktlog_ppdu_stats) {
6663 			dp_h2t_cfg_stats_msg_send(pdev,
6664 						  DP_PPDU_STATS_CFG_BPR_ENH,
6665 						  pdev->pdev_id);
6666 		} else if (pdev->pktlog_ppdu_stats) {
6667 			dp_h2t_cfg_stats_msg_send(pdev,
6668 						  DP_PPDU_STATS_CFG_BPR_PKTLOG,
6669 						  pdev->pdev_id);
6670 		}
6671 		break;
6672 	default:
6673 		break;
6674 	}
6675 }
6676 
6677 /*
6678  * dp_config_debug_sniffer()- API to enable/disable debug sniffer
6679  * @pdev_handle: DP_PDEV handle
6680  * @val: user provided value
6681  *
6682  * Return: void
6683  */
6684 static void
6685 dp_config_debug_sniffer(struct cdp_pdev *pdev_handle, int val)
6686 {
6687 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
6688 
6689 	switch (val) {
6690 	case 0:
6691 		pdev->tx_sniffer_enable = 0;
6692 		pdev->mcopy_mode = 0;
6693 
6694 		if (!pdev->pktlog_ppdu_stats && !pdev->enhanced_stats_en &&
6695 		    !pdev->bpr_enable) {
6696 			dp_h2t_cfg_stats_msg_send(pdev, 0, pdev->pdev_id);
6697 			dp_ppdu_ring_reset(pdev);
6698 		} else if (pdev->enhanced_stats_en && !pdev->bpr_enable) {
6699 			dp_h2t_cfg_stats_msg_send(pdev,
6700 				DP_PPDU_STATS_CFG_ENH_STATS, pdev->pdev_id);
6701 		} else if (!pdev->enhanced_stats_en && pdev->bpr_enable) {
6702 			dp_h2t_cfg_stats_msg_send(pdev,
6703 						  DP_PPDU_STATS_CFG_BPR_ENH,
6704 						  pdev->pdev_id);
6705 		} else {
6706 			dp_h2t_cfg_stats_msg_send(pdev,
6707 						  DP_PPDU_STATS_CFG_BPR,
6708 						  pdev->pdev_id);
6709 		}
6710 		break;
6711 
6712 	case 1:
6713 		pdev->tx_sniffer_enable = 1;
6714 		pdev->mcopy_mode = 0;
6715 
6716 		if (!pdev->pktlog_ppdu_stats)
6717 			dp_h2t_cfg_stats_msg_send(pdev,
6718 				DP_PPDU_STATS_CFG_SNIFFER, pdev->pdev_id);
6719 		break;
6720 	case 2:
6721 		pdev->mcopy_mode = 1;
6722 		pdev->tx_sniffer_enable = 0;
6723 		dp_ppdu_ring_cfg(pdev);
6724 
6725 		if (!pdev->pktlog_ppdu_stats)
6726 			dp_h2t_cfg_stats_msg_send(pdev,
6727 				DP_PPDU_STATS_CFG_SNIFFER, pdev->pdev_id);
6728 		break;
6729 	default:
6730 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
6731 			"Invalid value");
6732 		break;
6733 	}
6734 }
6735 
6736 /*
6737  * dp_enable_enhanced_stats()- API to enable enhanced statistcs
6738  * @pdev_handle: DP_PDEV handle
6739  *
6740  * Return: void
6741  */
6742 static void
6743 dp_enable_enhanced_stats(struct cdp_pdev *pdev_handle)
6744 {
6745 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
6746 
6747 	if (pdev->enhanced_stats_en == 0)
6748 		dp_cal_client_timer_start(pdev->cal_client_ctx);
6749 
6750 	pdev->enhanced_stats_en = 1;
6751 
6752 	if (!pdev->mcopy_mode && !pdev->neighbour_peers_added &&
6753 	    !pdev->monitor_vdev)
6754 		dp_ppdu_ring_cfg(pdev);
6755 
6756 	if (is_ppdu_txrx_capture_enabled(pdev) && !pdev->bpr_enable) {
6757 		dp_h2t_cfg_stats_msg_send(pdev, DP_PPDU_STATS_CFG_ENH_STATS, pdev->pdev_id);
6758 	} else if (is_ppdu_txrx_capture_enabled(pdev) && pdev->bpr_enable) {
6759 		dp_h2t_cfg_stats_msg_send(pdev,
6760 					  DP_PPDU_STATS_CFG_BPR_ENH,
6761 					  pdev->pdev_id);
6762 	}
6763 }
6764 
6765 /*
6766  * dp_disable_enhanced_stats()- API to disable enhanced statistcs
6767  * @pdev_handle: DP_PDEV handle
6768  *
6769  * Return: void
6770  */
6771 static void
6772 dp_disable_enhanced_stats(struct cdp_pdev *pdev_handle)
6773 {
6774 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
6775 
6776 	if (pdev->enhanced_stats_en == 1)
6777 		dp_cal_client_timer_stop(pdev->cal_client_ctx);
6778 
6779 	pdev->enhanced_stats_en = 0;
6780 
6781 	if (is_ppdu_txrx_capture_enabled(pdev) && !pdev->bpr_enable) {
6782 		dp_h2t_cfg_stats_msg_send(pdev, 0, pdev->pdev_id);
6783 	} else if (is_ppdu_txrx_capture_enabled(pdev) && pdev->bpr_enable) {
6784 		dp_h2t_cfg_stats_msg_send(pdev,
6785 					  DP_PPDU_STATS_CFG_BPR,
6786 					  pdev->pdev_id);
6787 	}
6788 
6789 	if (!pdev->mcopy_mode && !pdev->neighbour_peers_added &&
6790 	    !pdev->monitor_vdev)
6791 		dp_ppdu_ring_reset(pdev);
6792 }
6793 
6794 /*
6795  * dp_get_fw_peer_stats()- function to print peer stats
6796  * @pdev_handle: DP_PDEV handle
6797  * @mac_addr: mac address of the peer
6798  * @cap: Type of htt stats requested
6799  *
6800  * Currently Supporting only MAC ID based requests Only
6801  *	1: HTT_PEER_STATS_REQ_MODE_NO_QUERY
6802  *	2: HTT_PEER_STATS_REQ_MODE_QUERY_TQM
6803  *	3: HTT_PEER_STATS_REQ_MODE_FLUSH_TQM
6804  *
6805  * Return: void
6806  */
6807 static void
6808 dp_get_fw_peer_stats(struct cdp_pdev *pdev_handle, uint8_t *mac_addr,
6809 		uint32_t cap)
6810 {
6811 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
6812 	int i;
6813 	uint32_t config_param0 = 0;
6814 	uint32_t config_param1 = 0;
6815 	uint32_t config_param2 = 0;
6816 	uint32_t config_param3 = 0;
6817 
6818 	HTT_DBG_EXT_STATS_PEER_INFO_IS_MAC_ADDR_SET(config_param0, 1);
6819 	config_param0 |= (1 << (cap + 1));
6820 
6821 	for (i = 0; i < HTT_PEER_STATS_MAX_TLV; i++) {
6822 		config_param1 |= (1 << i);
6823 	}
6824 
6825 	config_param2 |= (mac_addr[0] & 0x000000ff);
6826 	config_param2 |= ((mac_addr[1] << 8) & 0x0000ff00);
6827 	config_param2 |= ((mac_addr[2] << 16) & 0x00ff0000);
6828 	config_param2 |= ((mac_addr[3] << 24) & 0xff000000);
6829 
6830 	config_param3 |= (mac_addr[4] & 0x000000ff);
6831 	config_param3 |= ((mac_addr[5] << 8) & 0x0000ff00);
6832 
6833 	dp_h2t_ext_stats_msg_send(pdev, HTT_DBG_EXT_STATS_PEER_INFO,
6834 			config_param0, config_param1, config_param2,
6835 			config_param3, 0, 0, 0);
6836 
6837 }
6838 
6839 /* This struct definition will be removed from here
6840  * once it get added in FW headers*/
6841 struct httstats_cmd_req {
6842     uint32_t    config_param0;
6843     uint32_t    config_param1;
6844     uint32_t    config_param2;
6845     uint32_t    config_param3;
6846     int cookie;
6847     u_int8_t    stats_id;
6848 };
6849 
6850 /*
6851  * dp_get_htt_stats: function to process the httstas request
6852  * @pdev_handle: DP pdev handle
6853  * @data: pointer to request data
6854  * @data_len: length for request data
6855  *
6856  * return: void
6857  */
6858 static void
6859 dp_get_htt_stats(struct cdp_pdev *pdev_handle, void *data, uint32_t data_len)
6860 {
6861 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
6862 	struct httstats_cmd_req *req = (struct httstats_cmd_req *)data;
6863 
6864 	QDF_ASSERT(data_len == sizeof(struct httstats_cmd_req));
6865 	dp_h2t_ext_stats_msg_send(pdev, req->stats_id,
6866 				req->config_param0, req->config_param1,
6867 				req->config_param2, req->config_param3,
6868 				req->cookie, 0, 0);
6869 }
6870 
6871 /*
6872  * dp_set_pdev_param: function to set parameters in pdev
6873  * @pdev_handle: DP pdev handle
6874  * @param: parameter type to be set
6875  * @val: value of parameter to be set
6876  *
6877  * return: void
6878  */
6879 static void dp_set_pdev_param(struct cdp_pdev *pdev_handle,
6880 		enum cdp_pdev_param_type param, uint8_t val)
6881 {
6882 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
6883 	switch (param) {
6884 	case CDP_CONFIG_DEBUG_SNIFFER:
6885 		dp_config_debug_sniffer(pdev_handle, val);
6886 		break;
6887 	case CDP_CONFIG_BPR_ENABLE:
6888 		dp_set_bpr_enable(pdev_handle, val);
6889 		break;
6890 	case CDP_CONFIG_PRIMARY_RADIO:
6891 		pdev->is_primary = val;
6892 		break;
6893 	default:
6894 		break;
6895 	}
6896 }
6897 
6898 /*
6899  * dp_set_vdev_param: function to set parameters in vdev
6900  * @param: parameter type to be set
6901  * @val: value of parameter to be set
6902  *
6903  * return: void
6904  */
6905 static void dp_set_vdev_param(struct cdp_vdev *vdev_handle,
6906 		enum cdp_vdev_param_type param, uint32_t val)
6907 {
6908 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
6909 	switch (param) {
6910 	case CDP_ENABLE_WDS:
6911 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
6912 			  "wds_enable %d for vdev(%p) id(%d)\n",
6913 			  val, vdev, vdev->vdev_id);
6914 		vdev->wds_enabled = val;
6915 		break;
6916 	case CDP_ENABLE_NAWDS:
6917 		vdev->nawds_enabled = val;
6918 		break;
6919 	case CDP_ENABLE_MCAST_EN:
6920 		vdev->mcast_enhancement_en = val;
6921 		break;
6922 	case CDP_ENABLE_PROXYSTA:
6923 		vdev->proxysta_vdev = val;
6924 		break;
6925 	case CDP_UPDATE_TDLS_FLAGS:
6926 		vdev->tdls_link_connected = val;
6927 		break;
6928 	case CDP_CFG_WDS_AGING_TIMER:
6929 		if (val == 0)
6930 			qdf_timer_stop(&vdev->pdev->soc->wds_aging_timer);
6931 		else if (val != vdev->wds_aging_timer_val)
6932 			qdf_timer_mod(&vdev->pdev->soc->wds_aging_timer, val);
6933 
6934 		vdev->wds_aging_timer_val = val;
6935 		break;
6936 	case CDP_ENABLE_AP_BRIDGE:
6937 		if (wlan_op_mode_sta != vdev->opmode)
6938 			vdev->ap_bridge_enabled = val;
6939 		else
6940 			vdev->ap_bridge_enabled = false;
6941 		break;
6942 	case CDP_ENABLE_CIPHER:
6943 		vdev->sec_type = val;
6944 		break;
6945 	case CDP_ENABLE_QWRAP_ISOLATION:
6946 		vdev->isolation_vdev = val;
6947 		break;
6948 	default:
6949 		break;
6950 	}
6951 
6952 	dp_tx_vdev_update_search_flags(vdev);
6953 }
6954 
6955 /**
6956  * dp_peer_set_nawds: set nawds bit in peer
6957  * @peer_handle: pointer to peer
6958  * @value: enable/disable nawds
6959  *
6960  * return: void
6961  */
6962 static void dp_peer_set_nawds(struct cdp_peer *peer_handle, uint8_t value)
6963 {
6964 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
6965 	peer->nawds_enabled = value;
6966 }
6967 
6968 /*
6969  * dp_set_vdev_dscp_tid_map_wifi3(): Update Map ID selected for particular vdev
6970  * @vdev_handle: DP_VDEV handle
6971  * @map_id:ID of map that needs to be updated
6972  *
6973  * Return: void
6974  */
6975 static void dp_set_vdev_dscp_tid_map_wifi3(struct cdp_vdev *vdev_handle,
6976 		uint8_t map_id)
6977 {
6978 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
6979 	vdev->dscp_tid_map_id = map_id;
6980 	return;
6981 }
6982 
6983 /* dp_txrx_get_peer_stats - will return cdp_peer_stats
6984  * @peer_handle: DP_PEER handle
6985  *
6986  * return : cdp_peer_stats pointer
6987  */
6988 static struct cdp_peer_stats*
6989 		dp_txrx_get_peer_stats(struct cdp_peer *peer_handle)
6990 {
6991 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
6992 
6993 	qdf_assert(peer);
6994 
6995 	return &peer->stats;
6996 }
6997 
6998 /* dp_txrx_reset_peer_stats - reset cdp_peer_stats for particular peer
6999  * @peer_handle: DP_PEER handle
7000  *
7001  * return : void
7002  */
7003 static void dp_txrx_reset_peer_stats(struct cdp_peer *peer_handle)
7004 {
7005 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
7006 
7007 	qdf_assert(peer);
7008 
7009 	qdf_mem_set(&peer->stats, sizeof(peer->stats), 0);
7010 }
7011 
7012 /* dp_txrx_get_vdev_stats - Update buffer with cdp_vdev_stats
7013  * @vdev_handle: DP_VDEV handle
7014  * @buf: buffer for vdev stats
7015  *
7016  * return : int
7017  */
7018 static int  dp_txrx_get_vdev_stats(struct cdp_vdev *vdev_handle, void *buf,
7019 				   bool is_aggregate)
7020 {
7021 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
7022 	struct cdp_vdev_stats *vdev_stats = (struct cdp_vdev_stats *)buf;
7023 
7024 	if (is_aggregate)
7025 		dp_aggregate_vdev_stats(vdev, buf);
7026 	else
7027 		qdf_mem_copy(vdev_stats, &vdev->stats, sizeof(vdev->stats));
7028 
7029 	return 0;
7030 }
7031 
7032 /*
7033  * dp_txrx_stats_publish(): publish pdev stats into a buffer
7034  * @pdev_handle: DP_PDEV handle
7035  * @buf: to hold pdev_stats
7036  *
7037  * Return: int
7038  */
7039 static int
7040 dp_txrx_stats_publish(struct cdp_pdev *pdev_handle, void *buf)
7041 {
7042 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
7043 	struct cdp_pdev_stats *buffer = (struct cdp_pdev_stats *) buf;
7044 	struct cdp_txrx_stats_req req = {0,};
7045 
7046 	dp_aggregate_pdev_stats(pdev);
7047 	req.stats = HTT_DBG_EXT_STATS_PDEV_TX;
7048 	req.cookie_val = 1;
7049 	dp_h2t_ext_stats_msg_send(pdev, req.stats, req.param0,
7050 				req.param1, req.param2, req.param3, 0,
7051 				req.cookie_val, 0);
7052 
7053 	msleep(DP_MAX_SLEEP_TIME);
7054 
7055 	req.stats = HTT_DBG_EXT_STATS_PDEV_RX;
7056 	req.cookie_val = 1;
7057 	dp_h2t_ext_stats_msg_send(pdev, req.stats, req.param0,
7058 				req.param1, req.param2, req.param3, 0,
7059 				req.cookie_val, 0);
7060 
7061 	msleep(DP_MAX_SLEEP_TIME);
7062 	qdf_mem_copy(buffer, &pdev->stats, sizeof(pdev->stats));
7063 
7064 	return TXRX_STATS_LEVEL;
7065 }
7066 
7067 /**
7068  * dp_set_pdev_dscp_tid_map_wifi3(): update dscp tid map in pdev
7069  * @pdev: DP_PDEV handle
7070  * @map_id: ID of map that needs to be updated
7071  * @tos: index value in map
7072  * @tid: tid value passed by the user
7073  *
7074  * Return: void
7075  */
7076 static void dp_set_pdev_dscp_tid_map_wifi3(struct cdp_pdev *pdev_handle,
7077 		uint8_t map_id, uint8_t tos, uint8_t tid)
7078 {
7079 	uint8_t dscp;
7080 	struct dp_pdev *pdev = (struct dp_pdev *) pdev_handle;
7081 	struct dp_soc *soc = pdev->soc;
7082 
7083 	if (!soc)
7084 		return;
7085 
7086 	dscp = (tos >> DP_IP_DSCP_SHIFT) & DP_IP_DSCP_MASK;
7087 	pdev->dscp_tid_map[map_id][dscp] = tid;
7088 
7089 	if (map_id < soc->num_hw_dscp_tid_map)
7090 		hal_tx_update_dscp_tid(soc->hal_soc, tid,
7091 				       map_id, dscp);
7092 	return;
7093 }
7094 
7095 /**
7096  * dp_fw_stats_process(): Process TxRX FW stats request
7097  * @vdev_handle: DP VDEV handle
7098  * @req: stats request
7099  *
7100  * return: int
7101  */
7102 static int dp_fw_stats_process(struct cdp_vdev *vdev_handle,
7103 		struct cdp_txrx_stats_req *req)
7104 {
7105 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
7106 	struct dp_pdev *pdev = NULL;
7107 	uint32_t stats = req->stats;
7108 	uint8_t mac_id = req->mac_id;
7109 
7110 	if (!vdev) {
7111 		DP_TRACE(NONE, "VDEV not found");
7112 		return 1;
7113 	}
7114 	pdev = vdev->pdev;
7115 
7116 	/*
7117 	 * For HTT_DBG_EXT_STATS_RESET command, FW need to config
7118 	 * from param0 to param3 according to below rule:
7119 	 *
7120 	 * PARAM:
7121 	 *   - config_param0 : start_offset (stats type)
7122 	 *   - config_param1 : stats bmask from start offset
7123 	 *   - config_param2 : stats bmask from start offset + 32
7124 	 *   - config_param3 : stats bmask from start offset + 64
7125 	 */
7126 	if (req->stats == CDP_TXRX_STATS_0) {
7127 		req->param0 = HTT_DBG_EXT_STATS_PDEV_TX;
7128 		req->param1 = 0xFFFFFFFF;
7129 		req->param2 = 0xFFFFFFFF;
7130 		req->param3 = 0xFFFFFFFF;
7131 	}
7132 
7133 	return dp_h2t_ext_stats_msg_send(pdev, stats, req->param0,
7134 				req->param1, req->param2, req->param3,
7135 				0, 0, mac_id);
7136 }
7137 
7138 /**
7139  * dp_txrx_stats_request - function to map to firmware and host stats
7140  * @vdev: virtual handle
7141  * @req: stats request
7142  *
7143  * Return: QDF_STATUS
7144  */
7145 static
7146 QDF_STATUS dp_txrx_stats_request(struct cdp_vdev *vdev,
7147 				 struct cdp_txrx_stats_req *req)
7148 {
7149 	int host_stats;
7150 	int fw_stats;
7151 	enum cdp_stats stats;
7152 	int num_stats;
7153 
7154 	if (!vdev || !req) {
7155 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
7156 				"Invalid vdev/req instance");
7157 		return QDF_STATUS_E_INVAL;
7158 	}
7159 
7160 	stats = req->stats;
7161 	if (stats >= CDP_TXRX_MAX_STATS)
7162 		return QDF_STATUS_E_INVAL;
7163 
7164 	/*
7165 	 * DP_CURR_FW_STATS_AVAIL: no of FW stats currently available
7166 	 *			has to be updated if new FW HTT stats added
7167 	 */
7168 	if (stats > CDP_TXRX_STATS_HTT_MAX)
7169 		stats = stats + DP_CURR_FW_STATS_AVAIL - DP_HTT_DBG_EXT_STATS_MAX;
7170 
7171 	num_stats  = QDF_ARRAY_SIZE(dp_stats_mapping_table);
7172 
7173 	if (stats >= num_stats) {
7174 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
7175 			  "%s: Invalid stats option: %d", __func__, stats);
7176 		return QDF_STATUS_E_INVAL;
7177 	}
7178 
7179 	req->stats = stats;
7180 	fw_stats = dp_stats_mapping_table[stats][STATS_FW];
7181 	host_stats = dp_stats_mapping_table[stats][STATS_HOST];
7182 
7183 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
7184 		 "stats: %u fw_stats_type: %d host_stats: %d",
7185 		  stats, fw_stats, host_stats);
7186 
7187 	if (fw_stats != TXRX_FW_STATS_INVALID) {
7188 		/* update request with FW stats type */
7189 		req->stats = fw_stats;
7190 		return dp_fw_stats_process(vdev, req);
7191 	}
7192 
7193 	if ((host_stats != TXRX_HOST_STATS_INVALID) &&
7194 			(host_stats <= TXRX_HOST_STATS_MAX))
7195 		return dp_print_host_stats(vdev, req);
7196 	else
7197 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
7198 				"Wrong Input for TxRx Stats");
7199 
7200 	return QDF_STATUS_SUCCESS;
7201 }
7202 
7203 /*
7204  * dp_print_napi_stats(): NAPI stats
7205  * @soc - soc handle
7206  */
7207 static void dp_print_napi_stats(struct dp_soc *soc)
7208 {
7209 	hif_print_napi_stats(soc->hif_handle);
7210 }
7211 
7212 /*
7213  * dp_print_per_ring_stats(): Packet count per ring
7214  * @soc - soc handle
7215  */
7216 static void dp_print_per_ring_stats(struct dp_soc *soc)
7217 {
7218 	uint8_t ring;
7219 	uint16_t core;
7220 	uint64_t total_packets;
7221 
7222 	DP_TRACE_STATS(INFO_HIGH, "Reo packets per ring:");
7223 	for (ring = 0; ring < MAX_REO_DEST_RINGS; ring++) {
7224 		total_packets = 0;
7225 		DP_TRACE_STATS(INFO_HIGH,
7226 			       "Packets on ring %u:", ring);
7227 		for (core = 0; core < NR_CPUS; core++) {
7228 			DP_TRACE_STATS(INFO_HIGH,
7229 				       "Packets arriving on core %u: %llu",
7230 				       core,
7231 				       soc->stats.rx.ring_packets[core][ring]);
7232 			total_packets += soc->stats.rx.ring_packets[core][ring];
7233 		}
7234 		DP_TRACE_STATS(INFO_HIGH,
7235 			       "Total packets on ring %u: %llu",
7236 			       ring, total_packets);
7237 	}
7238 }
7239 
7240 /*
7241  * dp_txrx_path_stats() - Function to display dump stats
7242  * @soc - soc handle
7243  *
7244  * return: none
7245  */
7246 static void dp_txrx_path_stats(struct dp_soc *soc)
7247 {
7248 	uint8_t error_code;
7249 	uint8_t loop_pdev;
7250 	struct dp_pdev *pdev;
7251 	uint8_t i;
7252 
7253 	if (!soc) {
7254 		DP_TRACE(ERROR, "%s: Invalid access",
7255 			 __func__);
7256 		return;
7257 	}
7258 
7259 	for (loop_pdev = 0; loop_pdev < soc->pdev_count; loop_pdev++) {
7260 
7261 		pdev = soc->pdev_list[loop_pdev];
7262 		dp_aggregate_pdev_stats(pdev);
7263 		DP_TRACE_STATS(INFO_HIGH, "Tx path Statistics:");
7264 		DP_TRACE_STATS(INFO_HIGH, "from stack: %u msdus (%llu bytes)",
7265 			       pdev->stats.tx_i.rcvd.num,
7266 			       pdev->stats.tx_i.rcvd.bytes);
7267 		DP_TRACE_STATS(INFO_HIGH,
7268 			       "processed from host: %u msdus (%llu bytes)",
7269 			       pdev->stats.tx_i.processed.num,
7270 			       pdev->stats.tx_i.processed.bytes);
7271 		DP_TRACE_STATS(INFO_HIGH,
7272 			       "successfully transmitted: %u msdus (%llu bytes)",
7273 			       pdev->stats.tx.tx_success.num,
7274 			       pdev->stats.tx.tx_success.bytes);
7275 
7276 		DP_TRACE_STATS(INFO_HIGH, "Dropped in host:");
7277 		DP_TRACE_STATS(INFO_HIGH, "Total packets dropped: %u,",
7278 			       pdev->stats.tx_i.dropped.dropped_pkt.num);
7279 		DP_TRACE_STATS(INFO_HIGH, "Descriptor not available: %u",
7280 			       pdev->stats.tx_i.dropped.desc_na.num);
7281 		DP_TRACE_STATS(INFO_HIGH, "Ring full: %u",
7282 			       pdev->stats.tx_i.dropped.ring_full);
7283 		DP_TRACE_STATS(INFO_HIGH, "Enqueue fail: %u",
7284 			       pdev->stats.tx_i.dropped.enqueue_fail);
7285 		DP_TRACE_STATS(INFO_HIGH, "DMA Error: %u",
7286 			       pdev->stats.tx_i.dropped.dma_error);
7287 
7288 		DP_TRACE_STATS(INFO_HIGH, "Dropped in hardware:");
7289 		DP_TRACE_STATS(INFO_HIGH, "total packets dropped: %u",
7290 			       pdev->stats.tx.tx_failed);
7291 		DP_TRACE_STATS(INFO_HIGH, "mpdu age out: %u",
7292 			       pdev->stats.tx.dropped.age_out);
7293 		DP_TRACE_STATS(INFO_HIGH, "firmware removed: %u",
7294 			       pdev->stats.tx.dropped.fw_rem);
7295 		DP_TRACE_STATS(INFO_HIGH, "firmware removed tx: %u",
7296 			       pdev->stats.tx.dropped.fw_rem_tx);
7297 		DP_TRACE_STATS(INFO_HIGH, "firmware removed notx %u",
7298 			       pdev->stats.tx.dropped.fw_rem_notx);
7299 		DP_TRACE_STATS(INFO_HIGH, "peer_invalid: %u",
7300 			       pdev->soc->stats.tx.tx_invalid_peer.num);
7301 
7302 		DP_TRACE_STATS(INFO_HIGH, "Tx packets sent per interrupt:");
7303 		DP_TRACE_STATS(INFO_HIGH, "Single Packet: %u",
7304 			       pdev->stats.tx_comp_histogram.pkts_1);
7305 		DP_TRACE_STATS(INFO_HIGH, "2-20 Packets:  %u",
7306 			       pdev->stats.tx_comp_histogram.pkts_2_20);
7307 		DP_TRACE_STATS(INFO_HIGH, "21-40 Packets: %u",
7308 			       pdev->stats.tx_comp_histogram.pkts_21_40);
7309 		DP_TRACE_STATS(INFO_HIGH, "41-60 Packets: %u",
7310 			       pdev->stats.tx_comp_histogram.pkts_41_60);
7311 		DP_TRACE_STATS(INFO_HIGH, "61-80 Packets: %u",
7312 			       pdev->stats.tx_comp_histogram.pkts_61_80);
7313 		DP_TRACE_STATS(INFO_HIGH, "81-100 Packets: %u",
7314 			       pdev->stats.tx_comp_histogram.pkts_81_100);
7315 		DP_TRACE_STATS(INFO_HIGH, "101-200 Packets: %u",
7316 			       pdev->stats.tx_comp_histogram.pkts_101_200);
7317 		DP_TRACE_STATS(INFO_HIGH, "   201+ Packets: %u",
7318 			       pdev->stats.tx_comp_histogram.pkts_201_plus);
7319 
7320 		DP_TRACE_STATS(INFO_HIGH, "Rx path statistics");
7321 
7322 		DP_TRACE_STATS(INFO_HIGH,
7323 			       "delivered %u msdus ( %llu bytes),",
7324 			       pdev->stats.rx.to_stack.num,
7325 			       pdev->stats.rx.to_stack.bytes);
7326 		for (i = 0; i <  CDP_MAX_RX_RINGS; i++)
7327 			DP_TRACE_STATS(INFO_HIGH,
7328 				       "received on reo[%d] %u msdus( %llu bytes),",
7329 				       i, pdev->stats.rx.rcvd_reo[i].num,
7330 				       pdev->stats.rx.rcvd_reo[i].bytes);
7331 		DP_TRACE_STATS(INFO_HIGH,
7332 			       "intra-bss packets %u msdus ( %llu bytes),",
7333 			       pdev->stats.rx.intra_bss.pkts.num,
7334 			       pdev->stats.rx.intra_bss.pkts.bytes);
7335 		DP_TRACE_STATS(INFO_HIGH,
7336 			       "intra-bss fails %u msdus ( %llu bytes),",
7337 			       pdev->stats.rx.intra_bss.fail.num,
7338 			       pdev->stats.rx.intra_bss.fail.bytes);
7339 		DP_TRACE_STATS(INFO_HIGH,
7340 			       "raw packets %u msdus ( %llu bytes),",
7341 			       pdev->stats.rx.raw.num,
7342 			       pdev->stats.rx.raw.bytes);
7343 		DP_TRACE_STATS(INFO_HIGH, "dropped: error %u msdus",
7344 			       pdev->stats.rx.err.mic_err);
7345 		DP_TRACE_STATS(INFO_HIGH, "peer invalid %u",
7346 			       pdev->soc->stats.rx.err.rx_invalid_peer.num);
7347 
7348 		DP_TRACE_STATS(INFO_HIGH, "Reo Statistics");
7349 		DP_TRACE_STATS(INFO_HIGH, "rbm error: %u msdus",
7350 			       pdev->soc->stats.rx.err.invalid_rbm);
7351 		DP_TRACE_STATS(INFO_HIGH, "hal ring access fail: %u msdus",
7352 			       pdev->soc->stats.rx.err.hal_ring_access_fail);
7353 
7354 		for (error_code = 0; error_code < HAL_REO_ERR_MAX;
7355 				error_code++) {
7356 			if (!pdev->soc->stats.rx.err.reo_error[error_code])
7357 				continue;
7358 			DP_TRACE_STATS(INFO_HIGH,
7359 				       "Reo error number (%u): %u msdus",
7360 				       error_code,
7361 				       pdev->soc->stats.rx.err
7362 				       .reo_error[error_code]);
7363 		}
7364 
7365 		for (error_code = 0; error_code < HAL_RXDMA_ERR_MAX;
7366 				error_code++) {
7367 			if (!pdev->soc->stats.rx.err.rxdma_error[error_code])
7368 				continue;
7369 			DP_TRACE_STATS(INFO_HIGH,
7370 				       "Rxdma error number (%u): %u msdus",
7371 				       error_code,
7372 				       pdev->soc->stats.rx.err
7373 				       .rxdma_error[error_code]);
7374 		}
7375 
7376 		DP_TRACE_STATS(INFO_HIGH, "Rx packets reaped per interrupt:");
7377 		DP_TRACE_STATS(INFO_HIGH, "Single Packet: %u",
7378 			       pdev->stats.rx_ind_histogram.pkts_1);
7379 		DP_TRACE_STATS(INFO_HIGH, "2-20 Packets:  %u",
7380 			       pdev->stats.rx_ind_histogram.pkts_2_20);
7381 		DP_TRACE_STATS(INFO_HIGH, "21-40 Packets: %u",
7382 			       pdev->stats.rx_ind_histogram.pkts_21_40);
7383 		DP_TRACE_STATS(INFO_HIGH, "41-60 Packets: %u",
7384 			       pdev->stats.rx_ind_histogram.pkts_41_60);
7385 		DP_TRACE_STATS(INFO_HIGH, "61-80 Packets: %u",
7386 			       pdev->stats.rx_ind_histogram.pkts_61_80);
7387 		DP_TRACE_STATS(INFO_HIGH, "81-100 Packets: %u",
7388 			       pdev->stats.rx_ind_histogram.pkts_81_100);
7389 		DP_TRACE_STATS(INFO_HIGH, "101-200 Packets: %u",
7390 			       pdev->stats.rx_ind_histogram.pkts_101_200);
7391 		DP_TRACE_STATS(INFO_HIGH, "   201+ Packets: %u",
7392 			       pdev->stats.rx_ind_histogram.pkts_201_plus);
7393 
7394 		DP_TRACE_STATS(INFO_HIGH, "%s: tso_enable: %u lro_enable: %u rx_hash: %u napi_enable: %u",
7395 			       __func__,
7396 			       pdev->soc->wlan_cfg_ctx
7397 			       ->tso_enabled,
7398 			       pdev->soc->wlan_cfg_ctx
7399 			       ->lro_enabled,
7400 			       pdev->soc->wlan_cfg_ctx
7401 			       ->rx_hash,
7402 			       pdev->soc->wlan_cfg_ctx
7403 			       ->napi_enabled);
7404 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
7405 		DP_TRACE_STATS(INFO_HIGH, "%s: Tx flow stop queue: %u tx flow start queue offset: %u",
7406 			       __func__,
7407 			       pdev->soc->wlan_cfg_ctx
7408 			       ->tx_flow_stop_queue_threshold,
7409 			       pdev->soc->wlan_cfg_ctx
7410 			       ->tx_flow_start_queue_offset);
7411 #endif
7412 	}
7413 }
7414 
7415 /*
7416  * dp_txrx_dump_stats() -  Dump statistics
7417  * @value - Statistics option
7418  */
7419 static QDF_STATUS dp_txrx_dump_stats(void *psoc, uint16_t value,
7420 				     enum qdf_stats_verbosity_level level)
7421 {
7422 	struct dp_soc *soc =
7423 		(struct dp_soc *)psoc;
7424 	QDF_STATUS status = QDF_STATUS_SUCCESS;
7425 
7426 	if (!soc) {
7427 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
7428 			"%s: soc is NULL", __func__);
7429 		return QDF_STATUS_E_INVAL;
7430 	}
7431 
7432 	switch (value) {
7433 	case CDP_TXRX_PATH_STATS:
7434 		dp_txrx_path_stats(soc);
7435 		break;
7436 
7437 	case CDP_RX_RING_STATS:
7438 		dp_print_per_ring_stats(soc);
7439 		break;
7440 
7441 	case CDP_TXRX_TSO_STATS:
7442 		/* TODO: NOT IMPLEMENTED */
7443 		break;
7444 
7445 	case CDP_DUMP_TX_FLOW_POOL_INFO:
7446 		cdp_dump_flow_pool_info((struct cdp_soc_t *)soc);
7447 		break;
7448 
7449 	case CDP_DP_NAPI_STATS:
7450 		dp_print_napi_stats(soc);
7451 		break;
7452 
7453 	case CDP_TXRX_DESC_STATS:
7454 		/* TODO: NOT IMPLEMENTED */
7455 		break;
7456 
7457 	default:
7458 		status = QDF_STATUS_E_INVAL;
7459 		break;
7460 	}
7461 
7462 	return status;
7463 
7464 }
7465 
7466 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
7467 /**
7468  * dp_update_flow_control_parameters() - API to store datapath
7469  *                            config parameters
7470  * @soc: soc handle
7471  * @cfg: ini parameter handle
7472  *
7473  * Return: void
7474  */
7475 static inline
7476 void dp_update_flow_control_parameters(struct dp_soc *soc,
7477 				struct cdp_config_params *params)
7478 {
7479 	soc->wlan_cfg_ctx->tx_flow_stop_queue_threshold =
7480 					params->tx_flow_stop_queue_threshold;
7481 	soc->wlan_cfg_ctx->tx_flow_start_queue_offset =
7482 					params->tx_flow_start_queue_offset;
7483 }
7484 #else
7485 static inline
7486 void dp_update_flow_control_parameters(struct dp_soc *soc,
7487 				struct cdp_config_params *params)
7488 {
7489 }
7490 #endif
7491 
7492 /**
7493  * dp_update_config_parameters() - API to store datapath
7494  *                            config parameters
7495  * @soc: soc handle
7496  * @cfg: ini parameter handle
7497  *
7498  * Return: status
7499  */
7500 static
7501 QDF_STATUS dp_update_config_parameters(struct cdp_soc *psoc,
7502 				struct cdp_config_params *params)
7503 {
7504 	struct dp_soc *soc = (struct dp_soc *)psoc;
7505 
7506 	if (!(soc)) {
7507 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
7508 				"%s: Invalid handle", __func__);
7509 		return QDF_STATUS_E_INVAL;
7510 	}
7511 
7512 	soc->wlan_cfg_ctx->tso_enabled = params->tso_enable;
7513 	soc->wlan_cfg_ctx->lro_enabled = params->lro_enable;
7514 	soc->wlan_cfg_ctx->rx_hash = params->flow_steering_enable;
7515 	soc->wlan_cfg_ctx->tcp_udp_checksumoffload =
7516 				params->tcp_udp_checksumoffload;
7517 	soc->wlan_cfg_ctx->napi_enabled = params->napi_enable;
7518 	dp_update_flow_control_parameters(soc, params);
7519 
7520 	return QDF_STATUS_SUCCESS;
7521 }
7522 
7523 /**
7524  * dp_txrx_set_wds_rx_policy() - API to store datapath
7525  *                            config parameters
7526  * @vdev_handle - datapath vdev handle
7527  * @cfg: ini parameter handle
7528  *
7529  * Return: status
7530  */
7531 #ifdef WDS_VENDOR_EXTENSION
7532 void
7533 dp_txrx_set_wds_rx_policy(
7534 		struct cdp_vdev *vdev_handle,
7535 		u_int32_t val)
7536 {
7537 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
7538 	struct dp_peer *peer;
7539 	if (vdev->opmode == wlan_op_mode_ap) {
7540 		/* for ap, set it on bss_peer */
7541 		TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
7542 			if (peer->bss_peer) {
7543 				peer->wds_ecm.wds_rx_filter = 1;
7544 				peer->wds_ecm.wds_rx_ucast_4addr = (val & WDS_POLICY_RX_UCAST_4ADDR) ? 1:0;
7545 				peer->wds_ecm.wds_rx_mcast_4addr = (val & WDS_POLICY_RX_MCAST_4ADDR) ? 1:0;
7546 				break;
7547 			}
7548 		}
7549 	} else if (vdev->opmode == wlan_op_mode_sta) {
7550 		peer = TAILQ_FIRST(&vdev->peer_list);
7551 		peer->wds_ecm.wds_rx_filter = 1;
7552 		peer->wds_ecm.wds_rx_ucast_4addr = (val & WDS_POLICY_RX_UCAST_4ADDR) ? 1:0;
7553 		peer->wds_ecm.wds_rx_mcast_4addr = (val & WDS_POLICY_RX_MCAST_4ADDR) ? 1:0;
7554 	}
7555 }
7556 
7557 /**
7558  * dp_txrx_peer_wds_tx_policy_update() - API to set tx wds policy
7559  *
7560  * @peer_handle - datapath peer handle
7561  * @wds_tx_ucast: policy for unicast transmission
7562  * @wds_tx_mcast: policy for multicast transmission
7563  *
7564  * Return: void
7565  */
7566 void
7567 dp_txrx_peer_wds_tx_policy_update(struct cdp_peer *peer_handle,
7568 		int wds_tx_ucast, int wds_tx_mcast)
7569 {
7570 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
7571 	if (wds_tx_ucast || wds_tx_mcast) {
7572 		peer->wds_enabled = 1;
7573 		peer->wds_ecm.wds_tx_ucast_4addr = wds_tx_ucast;
7574 		peer->wds_ecm.wds_tx_mcast_4addr = wds_tx_mcast;
7575 	} else {
7576 		peer->wds_enabled = 0;
7577 		peer->wds_ecm.wds_tx_ucast_4addr = 0;
7578 		peer->wds_ecm.wds_tx_mcast_4addr = 0;
7579 	}
7580 
7581 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
7582 			FL("Policy Update set to :\
7583 				peer->wds_enabled %d\
7584 				peer->wds_ecm.wds_tx_ucast_4addr %d\
7585 				peer->wds_ecm.wds_tx_mcast_4addr %d"),
7586 				peer->wds_enabled, peer->wds_ecm.wds_tx_ucast_4addr,
7587 				peer->wds_ecm.wds_tx_mcast_4addr);
7588 	return;
7589 }
7590 #endif
7591 
7592 static struct cdp_wds_ops dp_ops_wds = {
7593 	.vdev_set_wds = dp_vdev_set_wds,
7594 #ifdef WDS_VENDOR_EXTENSION
7595 	.txrx_set_wds_rx_policy = dp_txrx_set_wds_rx_policy,
7596 	.txrx_wds_peer_tx_policy_update = dp_txrx_peer_wds_tx_policy_update,
7597 #endif
7598 };
7599 
7600 /*
7601  * dp_txrx_data_tx_cb_set(): set the callback for non standard tx
7602  * @vdev_handle - datapath vdev handle
7603  * @callback - callback function
7604  * @ctxt: callback context
7605  *
7606  */
7607 static void
7608 dp_txrx_data_tx_cb_set(struct cdp_vdev *vdev_handle,
7609 		       ol_txrx_data_tx_cb callback, void *ctxt)
7610 {
7611 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
7612 
7613 	vdev->tx_non_std_data_callback.func = callback;
7614 	vdev->tx_non_std_data_callback.ctxt = ctxt;
7615 }
7616 
7617 /**
7618  * dp_pdev_get_dp_txrx_handle() - get dp handle from pdev
7619  * @pdev_hdl: datapath pdev handle
7620  *
7621  * Return: opaque pointer to dp txrx handle
7622  */
7623 static void *dp_pdev_get_dp_txrx_handle(struct cdp_pdev *pdev_hdl)
7624 {
7625 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_hdl;
7626 
7627 	return pdev->dp_txrx_handle;
7628 }
7629 
7630 /**
7631  * dp_pdev_set_dp_txrx_handle() - set dp handle in pdev
7632  * @pdev_hdl: datapath pdev handle
7633  * @dp_txrx_hdl: opaque pointer for dp_txrx_handle
7634  *
7635  * Return: void
7636  */
7637 static void
7638 dp_pdev_set_dp_txrx_handle(struct cdp_pdev *pdev_hdl, void *dp_txrx_hdl)
7639 {
7640 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_hdl;
7641 
7642 	pdev->dp_txrx_handle = dp_txrx_hdl;
7643 }
7644 
7645 /**
7646  * dp_soc_get_dp_txrx_handle() - get context for external-dp from dp soc
7647  * @soc_handle: datapath soc handle
7648  *
7649  * Return: opaque pointer to external dp (non-core DP)
7650  */
7651 static void *dp_soc_get_dp_txrx_handle(struct cdp_soc *soc_handle)
7652 {
7653 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
7654 
7655 	return soc->external_txrx_handle;
7656 }
7657 
7658 /**
7659  * dp_soc_set_dp_txrx_handle() - set external dp handle in soc
7660  * @soc_handle: datapath soc handle
7661  * @txrx_handle: opaque pointer to external dp (non-core DP)
7662  *
7663  * Return: void
7664  */
7665 static void
7666 dp_soc_set_dp_txrx_handle(struct cdp_soc *soc_handle, void *txrx_handle)
7667 {
7668 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
7669 
7670 	soc->external_txrx_handle = txrx_handle;
7671 }
7672 
7673 /**
7674  * dp_get_cfg_capabilities() - get dp capabilities
7675  * @soc_handle: datapath soc handle
7676  * @dp_caps: enum for dp capabilities
7677  *
7678  * Return: bool to determine if dp caps is enabled
7679  */
7680 static bool
7681 dp_get_cfg_capabilities(struct cdp_soc_t *soc_handle,
7682 			enum cdp_capabilities dp_caps)
7683 {
7684 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
7685 
7686 	return wlan_cfg_get_dp_caps(soc->wlan_cfg_ctx, dp_caps);
7687 }
7688 
7689 #ifdef FEATURE_AST
7690 static void dp_peer_teardown_wifi3(struct cdp_vdev *vdev_hdl, void *peer_hdl)
7691 {
7692 	struct dp_vdev *vdev = (struct dp_vdev *) vdev_hdl;
7693 	struct dp_peer *peer = (struct dp_peer *) peer_hdl;
7694 	struct dp_soc *soc = (struct dp_soc *) vdev->pdev->soc;
7695 
7696 	/*
7697 	 * For BSS peer, new peer is not created on alloc_node if the
7698 	 * peer with same address already exists , instead refcnt is
7699 	 * increased for existing peer. Correspondingly in delete path,
7700 	 * only refcnt is decreased; and peer is only deleted , when all
7701 	 * references are deleted. So delete_in_progress should not be set
7702 	 * for bss_peer, unless only 2 reference remains (peer map reference
7703 	 * and peer hash table reference).
7704 	 */
7705 	if (peer->bss_peer && (qdf_atomic_read(&peer->ref_cnt) > 2)) {
7706 		return;
7707 	}
7708 
7709 	peer->delete_in_progress = true;
7710 	dp_peer_delete_ast_entries(soc, peer);
7711 }
7712 #endif
7713 
7714 #ifdef ATH_SUPPORT_NAC_RSSI
7715 /**
7716  * dp_vdev_get_neighbour_rssi(): Store RSSI for configured NAC
7717  * @vdev_hdl: DP vdev handle
7718  * @rssi: rssi value
7719  *
7720  * Return: 0 for success. nonzero for failure.
7721  */
7722 QDF_STATUS  dp_vdev_get_neighbour_rssi(struct cdp_vdev *vdev_hdl,
7723 				       char *mac_addr,
7724 				       uint8_t *rssi)
7725 {
7726 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_hdl;
7727 	struct dp_pdev *pdev = vdev->pdev;
7728 	struct dp_neighbour_peer *peer = NULL;
7729 	QDF_STATUS status = QDF_STATUS_E_FAILURE;
7730 
7731 	*rssi = 0;
7732 	qdf_spin_lock_bh(&pdev->neighbour_peer_mutex);
7733 	TAILQ_FOREACH(peer, &pdev->neighbour_peers_list,
7734 		      neighbour_peer_list_elem) {
7735 		if (qdf_mem_cmp(&peer->neighbour_peers_macaddr.raw[0],
7736 				mac_addr, DP_MAC_ADDR_LEN) == 0) {
7737 			*rssi = peer->rssi;
7738 			status = QDF_STATUS_SUCCESS;
7739 			break;
7740 		}
7741 	}
7742 	qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex);
7743 	return status;
7744 }
7745 
7746 static QDF_STATUS dp_config_for_nac_rssi(struct cdp_vdev *vdev_handle,
7747 		enum cdp_nac_param_cmd cmd, char *bssid, char *client_macaddr,
7748 		uint8_t chan_num)
7749 {
7750 
7751 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
7752 	struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;
7753 	struct dp_soc *soc = (struct dp_soc *) vdev->pdev->soc;
7754 
7755 	pdev->nac_rssi_filtering = 1;
7756 	/* Store address of NAC (neighbour peer) which will be checked
7757 	 * against TA of received packets.
7758 	 */
7759 
7760 	if (cmd == CDP_NAC_PARAM_ADD) {
7761 		dp_update_filter_neighbour_peers(vdev_handle, DP_NAC_PARAM_ADD,
7762 						 client_macaddr);
7763 	} else if (cmd == CDP_NAC_PARAM_DEL) {
7764 		dp_update_filter_neighbour_peers(vdev_handle,
7765 						 DP_NAC_PARAM_DEL,
7766 						 client_macaddr);
7767 	}
7768 
7769 	if (soc->cdp_soc.ol_ops->config_bssid_in_fw_for_nac_rssi)
7770 		soc->cdp_soc.ol_ops->config_bssid_in_fw_for_nac_rssi
7771 			((void *)vdev->pdev->ctrl_pdev,
7772 			 vdev->vdev_id, cmd, bssid);
7773 
7774 	return QDF_STATUS_SUCCESS;
7775 }
7776 #endif
7777 
7778 static QDF_STATUS dp_peer_map_attach_wifi3(struct cdp_soc_t  *soc_hdl,
7779 					   uint32_t max_peers,
7780 					   bool peer_map_unmap_v2)
7781 {
7782 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
7783 
7784 	soc->max_peers = max_peers;
7785 
7786 	qdf_print ("%s max_peers %u\n", __func__, max_peers);
7787 
7788 	if (dp_peer_find_attach(soc))
7789 		return QDF_STATUS_E_FAILURE;
7790 
7791 	soc->is_peer_map_unmap_v2 = peer_map_unmap_v2;
7792 
7793 	return QDF_STATUS_SUCCESS;
7794 }
7795 
7796 /**
7797  * dp_pdev_set_ctrl_pdev() - set ctrl pdev handle in dp pdev
7798  * @dp_pdev: dp pdev handle
7799  * @ctrl_pdev: UMAC ctrl pdev handle
7800  *
7801  * Return: void
7802  */
7803 static void dp_pdev_set_ctrl_pdev(struct cdp_pdev *dp_pdev,
7804 				  struct cdp_ctrl_objmgr_pdev *ctrl_pdev)
7805 {
7806 	struct dp_pdev *pdev = (struct dp_pdev *)dp_pdev;
7807 
7808 	pdev->ctrl_pdev = ctrl_pdev;
7809 }
7810 
7811 static struct cdp_cmn_ops dp_ops_cmn = {
7812 	.txrx_soc_attach_target = dp_soc_attach_target_wifi3,
7813 	.txrx_vdev_attach = dp_vdev_attach_wifi3,
7814 	.txrx_vdev_detach = dp_vdev_detach_wifi3,
7815 	.txrx_pdev_attach = dp_pdev_attach_wifi3,
7816 	.txrx_pdev_detach = dp_pdev_detach_wifi3,
7817 	.txrx_peer_create = dp_peer_create_wifi3,
7818 	.txrx_peer_setup = dp_peer_setup_wifi3,
7819 #ifdef FEATURE_AST
7820 	.txrx_peer_teardown = dp_peer_teardown_wifi3,
7821 #else
7822 	.txrx_peer_teardown = NULL,
7823 #endif
7824 	.txrx_peer_add_ast = dp_peer_add_ast_wifi3,
7825 	.txrx_peer_del_ast = dp_peer_del_ast_wifi3,
7826 	.txrx_peer_update_ast = dp_peer_update_ast_wifi3,
7827 	.txrx_peer_ast_hash_find = dp_peer_ast_hash_find_wifi3,
7828 	.txrx_peer_ast_get_pdev_id = dp_peer_ast_get_pdev_id_wifi3,
7829 	.txrx_peer_ast_get_next_hop = dp_peer_ast_get_next_hop_wifi3,
7830 	.txrx_peer_ast_set_type = dp_peer_ast_set_type_wifi3,
7831 	.txrx_peer_ast_get_type = dp_peer_ast_get_type_wifi3,
7832 #if defined(FEATURE_AST) && defined(AST_HKV1_WORKAROUND)
7833 	.txrx_peer_ast_set_cp_ctx = dp_peer_ast_set_cp_ctx_wifi3,
7834 	.txrx_peer_ast_get_cp_ctx = dp_peer_ast_get_cp_ctx_wifi3,
7835 	.txrx_peer_ast_get_wmi_sent = dp_peer_ast_get_wmi_sent_wifi3,
7836 	.txrx_peer_ast_free_entry = dp_peer_ast_free_entry_wifi3,
7837 #endif
7838 	.txrx_peer_delete = dp_peer_delete_wifi3,
7839 	.txrx_vdev_register = dp_vdev_register_wifi3,
7840 	.txrx_soc_detach = dp_soc_detach_wifi3,
7841 	.txrx_get_vdev_mac_addr = dp_get_vdev_mac_addr_wifi3,
7842 	.txrx_get_vdev_from_vdev_id = dp_get_vdev_from_vdev_id_wifi3,
7843 	.txrx_get_ctrl_pdev_from_vdev = dp_get_ctrl_pdev_from_vdev_wifi3,
7844 	.txrx_ath_getstats = dp_get_device_stats,
7845 	.addba_requestprocess = dp_addba_requestprocess_wifi3,
7846 	.addba_responsesetup = dp_addba_responsesetup_wifi3,
7847 	.addba_resp_tx_completion = dp_addba_resp_tx_completion_wifi3,
7848 	.delba_process = dp_delba_process_wifi3,
7849 	.set_addba_response = dp_set_addba_response,
7850 	.get_peer_mac_addr_frm_id = dp_get_peer_mac_addr_frm_id,
7851 	.flush_cache_rx_queue = NULL,
7852 	/* TODO: get API's for dscp-tid need to be added*/
7853 	.set_vdev_dscp_tid_map = dp_set_vdev_dscp_tid_map_wifi3,
7854 	.set_pdev_dscp_tid_map = dp_set_pdev_dscp_tid_map_wifi3,
7855 	.txrx_stats_request = dp_txrx_stats_request,
7856 	.txrx_set_monitor_mode = dp_vdev_set_monitor_mode,
7857 	.txrx_get_pdev_id_frm_pdev = dp_get_pdev_id_frm_pdev,
7858 	.txrx_pdev_set_chan_noise_floor = dp_pdev_set_chan_noise_floor,
7859 	.txrx_set_nac = dp_set_nac,
7860 	.txrx_get_tx_pending = dp_get_tx_pending,
7861 	.txrx_set_pdev_tx_capture = dp_config_debug_sniffer,
7862 	.txrx_get_peer_mac_from_peer_id = dp_get_peer_mac_from_peer_id,
7863 	.display_stats = dp_txrx_dump_stats,
7864 	.txrx_soc_set_nss_cfg = dp_soc_set_nss_cfg_wifi3,
7865 	.txrx_soc_get_nss_cfg = dp_soc_get_nss_cfg_wifi3,
7866 	.txrx_intr_attach = dp_soc_interrupt_attach_wrapper,
7867 	.txrx_intr_detach = dp_soc_interrupt_detach,
7868 	.set_pn_check = dp_set_pn_check_wifi3,
7869 	.update_config_parameters = dp_update_config_parameters,
7870 	/* TODO: Add other functions */
7871 	.txrx_data_tx_cb_set = dp_txrx_data_tx_cb_set,
7872 	.get_dp_txrx_handle = dp_pdev_get_dp_txrx_handle,
7873 	.set_dp_txrx_handle = dp_pdev_set_dp_txrx_handle,
7874 	.get_soc_dp_txrx_handle = dp_soc_get_dp_txrx_handle,
7875 	.set_soc_dp_txrx_handle = dp_soc_set_dp_txrx_handle,
7876 	.txrx_set_ba_aging_timeout = dp_set_ba_aging_timeout,
7877 	.txrx_get_ba_aging_timeout = dp_get_ba_aging_timeout,
7878 	.tx_send = dp_tx_send,
7879 	.txrx_peer_reset_ast = dp_wds_reset_ast_wifi3,
7880 	.txrx_peer_reset_ast_table = dp_wds_reset_ast_table_wifi3,
7881 	.txrx_peer_flush_ast_table = dp_wds_flush_ast_table_wifi3,
7882 	.txrx_peer_map_attach = dp_peer_map_attach_wifi3,
7883 	.txrx_pdev_set_ctrl_pdev = dp_pdev_set_ctrl_pdev,
7884 	.txrx_get_os_rx_handles_from_vdev =
7885 					dp_get_os_rx_handles_from_vdev_wifi3,
7886 	.delba_tx_completion = dp_delba_tx_completion_wifi3,
7887 	.get_dp_capabilities = dp_get_cfg_capabilities,
7888 };
7889 
7890 static struct cdp_ctrl_ops dp_ops_ctrl = {
7891 	.txrx_peer_authorize = dp_peer_authorize,
7892 #ifdef QCA_SUPPORT_SON
7893 	.txrx_set_inact_params = dp_set_inact_params,
7894 	.txrx_start_inact_timer = dp_start_inact_timer,
7895 	.txrx_set_overload = dp_set_overload,
7896 	.txrx_peer_is_inact = dp_peer_is_inact,
7897 	.txrx_mark_peer_inact = dp_mark_peer_inact,
7898 #endif
7899 	.txrx_set_vdev_rx_decap_type = dp_set_vdev_rx_decap_type,
7900 	.txrx_set_tx_encap_type = dp_set_vdev_tx_encap_type,
7901 #ifdef MESH_MODE_SUPPORT
7902 	.txrx_set_mesh_mode  = dp_peer_set_mesh_mode,
7903 	.txrx_set_mesh_rx_filter = dp_peer_set_mesh_rx_filter,
7904 #endif
7905 	.txrx_set_vdev_param = dp_set_vdev_param,
7906 	.txrx_peer_set_nawds = dp_peer_set_nawds,
7907 	.txrx_set_pdev_reo_dest = dp_set_pdev_reo_dest,
7908 	.txrx_get_pdev_reo_dest = dp_get_pdev_reo_dest,
7909 	.txrx_set_filter_neighbour_peers = dp_set_filter_neighbour_peers,
7910 	.txrx_update_filter_neighbour_peers =
7911 		dp_update_filter_neighbour_peers,
7912 	.txrx_get_sec_type = dp_get_sec_type,
7913 	/* TODO: Add other functions */
7914 	.txrx_wdi_event_sub = dp_wdi_event_sub,
7915 	.txrx_wdi_event_unsub = dp_wdi_event_unsub,
7916 #ifdef WDI_EVENT_ENABLE
7917 	.txrx_get_pldev = dp_get_pldev,
7918 #endif
7919 	.txrx_set_pdev_param = dp_set_pdev_param,
7920 #ifdef ATH_SUPPORT_NAC_RSSI
7921 	.txrx_vdev_config_for_nac_rssi = dp_config_for_nac_rssi,
7922 	.txrx_vdev_get_neighbour_rssi = dp_vdev_get_neighbour_rssi,
7923 #endif
7924 	.set_key = dp_set_michael_key,
7925 };
7926 
7927 static struct cdp_me_ops dp_ops_me = {
7928 #ifdef ATH_SUPPORT_IQUE
7929 	.tx_me_alloc_descriptor = dp_tx_me_alloc_descriptor,
7930 	.tx_me_free_descriptor = dp_tx_me_free_descriptor,
7931 	.tx_me_convert_ucast = dp_tx_me_send_convert_ucast,
7932 #endif
7933 };
7934 
7935 static struct cdp_mon_ops dp_ops_mon = {
7936 	.txrx_monitor_set_filter_ucast_data = NULL,
7937 	.txrx_monitor_set_filter_mcast_data = NULL,
7938 	.txrx_monitor_set_filter_non_data = NULL,
7939 	.txrx_monitor_get_filter_ucast_data = dp_vdev_get_filter_ucast_data,
7940 	.txrx_monitor_get_filter_mcast_data = dp_vdev_get_filter_mcast_data,
7941 	.txrx_monitor_get_filter_non_data = dp_vdev_get_filter_non_data,
7942 	.txrx_reset_monitor_mode = dp_reset_monitor_mode,
7943 	/* Added support for HK advance filter */
7944 	.txrx_set_advance_monitor_filter = dp_pdev_set_advance_monitor_filter,
7945 };
7946 
7947 static struct cdp_host_stats_ops dp_ops_host_stats = {
7948 	.txrx_per_peer_stats = dp_get_host_peer_stats,
7949 	.get_fw_peer_stats = dp_get_fw_peer_stats,
7950 	.get_htt_stats = dp_get_htt_stats,
7951 	.txrx_enable_enhanced_stats = dp_enable_enhanced_stats,
7952 	.txrx_disable_enhanced_stats = dp_disable_enhanced_stats,
7953 	.txrx_stats_publish = dp_txrx_stats_publish,
7954 	.txrx_get_vdev_stats  = dp_txrx_get_vdev_stats,
7955 	.txrx_get_peer_stats = dp_txrx_get_peer_stats,
7956 	.txrx_reset_peer_stats = dp_txrx_reset_peer_stats,
7957 	/* TODO */
7958 };
7959 
7960 static struct cdp_raw_ops dp_ops_raw = {
7961 	/* TODO */
7962 };
7963 
7964 #ifdef CONFIG_WIN
7965 static struct cdp_pflow_ops dp_ops_pflow = {
7966 	/* TODO */
7967 };
7968 #endif /* CONFIG_WIN */
7969 
7970 #ifdef FEATURE_RUNTIME_PM
7971 /**
7972  * dp_runtime_suspend() - ensure DP is ready to runtime suspend
7973  * @opaque_pdev: DP pdev context
7974  *
7975  * DP is ready to runtime suspend if there are no pending TX packets.
7976  *
7977  * Return: QDF_STATUS
7978  */
7979 static QDF_STATUS dp_runtime_suspend(struct cdp_pdev *opaque_pdev)
7980 {
7981 	struct dp_pdev *pdev = (struct dp_pdev *)opaque_pdev;
7982 	struct dp_soc *soc = pdev->soc;
7983 
7984 	/* Abort if there are any pending TX packets */
7985 	if (dp_get_tx_pending(opaque_pdev) > 0) {
7986 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
7987 			  FL("Abort suspend due to pending TX packets"));
7988 		return QDF_STATUS_E_AGAIN;
7989 	}
7990 
7991 	if (soc->intr_mode == DP_INTR_POLL)
7992 		qdf_timer_stop(&soc->int_timer);
7993 
7994 	return QDF_STATUS_SUCCESS;
7995 }
7996 
7997 /**
7998  * dp_runtime_resume() - ensure DP is ready to runtime resume
7999  * @opaque_pdev: DP pdev context
8000  *
8001  * Resume DP for runtime PM.
8002  *
8003  * Return: QDF_STATUS
8004  */
8005 static QDF_STATUS dp_runtime_resume(struct cdp_pdev *opaque_pdev)
8006 {
8007 	struct dp_pdev *pdev = (struct dp_pdev *)opaque_pdev;
8008 	struct dp_soc *soc = pdev->soc;
8009 	void *hal_srng;
8010 	int i;
8011 
8012 	if (soc->intr_mode == DP_INTR_POLL)
8013 		qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
8014 
8015 	for (i = 0; i < MAX_TCL_DATA_RINGS; i++) {
8016 		hal_srng = soc->tcl_data_ring[i].hal_srng;
8017 		if (hal_srng) {
8018 			/* We actually only need to acquire the lock */
8019 			hal_srng_access_start(soc->hal_soc, hal_srng);
8020 			/* Update SRC ring head pointer for HW to send
8021 			   all pending packets */
8022 			hal_srng_access_end(soc->hal_soc, hal_srng);
8023 		}
8024 	}
8025 
8026 	return QDF_STATUS_SUCCESS;
8027 }
8028 #endif /* FEATURE_RUNTIME_PM */
8029 
8030 static QDF_STATUS dp_bus_suspend(struct cdp_pdev *opaque_pdev)
8031 {
8032 	struct dp_pdev *pdev = (struct dp_pdev *)opaque_pdev;
8033 	struct dp_soc *soc = pdev->soc;
8034 
8035 	if (soc->intr_mode == DP_INTR_POLL)
8036 		qdf_timer_stop(&soc->int_timer);
8037 
8038 	return QDF_STATUS_SUCCESS;
8039 }
8040 
8041 static QDF_STATUS dp_bus_resume(struct cdp_pdev *opaque_pdev)
8042 {
8043 	struct dp_pdev *pdev = (struct dp_pdev *)opaque_pdev;
8044 	struct dp_soc *soc = pdev->soc;
8045 
8046 	if (soc->intr_mode == DP_INTR_POLL)
8047 		qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
8048 
8049 	return QDF_STATUS_SUCCESS;
8050 }
8051 
8052 #ifndef CONFIG_WIN
8053 static struct cdp_misc_ops dp_ops_misc = {
8054 	.tx_non_std = dp_tx_non_std,
8055 	.get_opmode = dp_get_opmode,
8056 #ifdef FEATURE_RUNTIME_PM
8057 	.runtime_suspend = dp_runtime_suspend,
8058 	.runtime_resume = dp_runtime_resume,
8059 #endif /* FEATURE_RUNTIME_PM */
8060 	.pkt_log_init = dp_pkt_log_init,
8061 	.pkt_log_con_service = dp_pkt_log_con_service,
8062 };
8063 
8064 static struct cdp_flowctl_ops dp_ops_flowctl = {
8065 	/* WIFI 3.0 DP implement as required. */
8066 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
8067 	.flow_pool_map_handler = dp_tx_flow_pool_map,
8068 	.flow_pool_unmap_handler = dp_tx_flow_pool_unmap,
8069 	.register_pause_cb = dp_txrx_register_pause_cb,
8070 	.dump_flow_pool_info = dp_tx_dump_flow_pool_info,
8071 #endif /* QCA_LL_TX_FLOW_CONTROL_V2 */
8072 };
8073 
8074 static struct cdp_lflowctl_ops dp_ops_l_flowctl = {
8075 	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
8076 };
8077 
8078 #ifdef IPA_OFFLOAD
8079 static struct cdp_ipa_ops dp_ops_ipa = {
8080 	.ipa_get_resource = dp_ipa_get_resource,
8081 	.ipa_set_doorbell_paddr = dp_ipa_set_doorbell_paddr,
8082 	.ipa_op_response = dp_ipa_op_response,
8083 	.ipa_register_op_cb = dp_ipa_register_op_cb,
8084 	.ipa_get_stat = dp_ipa_get_stat,
8085 	.ipa_tx_data_frame = dp_tx_send_ipa_data_frame,
8086 	.ipa_enable_autonomy = dp_ipa_enable_autonomy,
8087 	.ipa_disable_autonomy = dp_ipa_disable_autonomy,
8088 	.ipa_setup = dp_ipa_setup,
8089 	.ipa_cleanup = dp_ipa_cleanup,
8090 	.ipa_setup_iface = dp_ipa_setup_iface,
8091 	.ipa_cleanup_iface = dp_ipa_cleanup_iface,
8092 	.ipa_enable_pipes = dp_ipa_enable_pipes,
8093 	.ipa_disable_pipes = dp_ipa_disable_pipes,
8094 	.ipa_set_perf_level = dp_ipa_set_perf_level
8095 };
8096 #endif
8097 
8098 static struct cdp_bus_ops dp_ops_bus = {
8099 	.bus_suspend = dp_bus_suspend,
8100 	.bus_resume = dp_bus_resume
8101 };
8102 
8103 static struct cdp_ocb_ops dp_ops_ocb = {
8104 	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
8105 };
8106 
8107 
8108 static struct cdp_throttle_ops dp_ops_throttle = {
8109 	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
8110 };
8111 
8112 static struct cdp_mob_stats_ops dp_ops_mob_stats = {
8113 	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
8114 };
8115 
8116 static struct cdp_cfg_ops dp_ops_cfg = {
8117 	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
8118 };
8119 
8120 /*
8121  * dp_peer_get_ref_find_by_addr - get peer with addr by ref count inc
8122  * @dev: physical device instance
8123  * @peer_mac_addr: peer mac address
8124  * @local_id: local id for the peer
8125  * @debug_id: to track enum peer access
8126  *
8127  * Return: peer instance pointer
8128  */
8129 static inline void *
8130 dp_peer_get_ref_find_by_addr(struct cdp_pdev *dev, u8 *peer_mac_addr,
8131 			     u8 *local_id, enum peer_debug_id_type debug_id)
8132 {
8133 	struct dp_pdev *pdev = (struct dp_pdev *)dev;
8134 	struct dp_peer *peer;
8135 
8136 	peer = dp_peer_find_hash_find(pdev->soc, peer_mac_addr, 0, DP_VDEV_ALL);
8137 
8138 	if (!peer)
8139 		return NULL;
8140 
8141 	*local_id = peer->local_id;
8142 	DP_TRACE(INFO, "%s: peer %pK id %d", __func__, peer, *local_id);
8143 
8144 	return peer;
8145 }
8146 
8147 /*
8148  * dp_peer_release_ref - release peer ref count
8149  * @peer: peer handle
8150  * @debug_id: to track enum peer access
8151  *
8152  * Return: None
8153  */
8154 static inline
8155 void dp_peer_release_ref(void *peer, enum peer_debug_id_type debug_id)
8156 {
8157 	dp_peer_unref_delete(peer);
8158 }
8159 
8160 static struct cdp_peer_ops dp_ops_peer = {
8161 	.register_peer = dp_register_peer,
8162 	.clear_peer = dp_clear_peer,
8163 	.find_peer_by_addr = dp_find_peer_by_addr,
8164 	.find_peer_by_addr_and_vdev = dp_find_peer_by_addr_and_vdev,
8165 	.peer_get_ref_by_addr = dp_peer_get_ref_find_by_addr,
8166 	.peer_release_ref = dp_peer_release_ref,
8167 	.local_peer_id = dp_local_peer_id,
8168 	.peer_find_by_local_id = dp_peer_find_by_local_id,
8169 	.peer_state_update = dp_peer_state_update,
8170 	.get_vdevid = dp_get_vdevid,
8171 	.get_vdev_by_sta_id = dp_get_vdev_by_sta_id,
8172 	.peer_get_peer_mac_addr = dp_peer_get_peer_mac_addr,
8173 	.get_vdev_for_peer = dp_get_vdev_for_peer,
8174 	.get_peer_state = dp_get_peer_state,
8175 };
8176 #endif
8177 
8178 static struct cdp_ops dp_txrx_ops = {
8179 	.cmn_drv_ops = &dp_ops_cmn,
8180 	.ctrl_ops = &dp_ops_ctrl,
8181 	.me_ops = &dp_ops_me,
8182 	.mon_ops = &dp_ops_mon,
8183 	.host_stats_ops = &dp_ops_host_stats,
8184 	.wds_ops = &dp_ops_wds,
8185 	.raw_ops = &dp_ops_raw,
8186 #ifdef CONFIG_WIN
8187 	.pflow_ops = &dp_ops_pflow,
8188 #endif /* CONFIG_WIN */
8189 #ifndef CONFIG_WIN
8190 	.misc_ops = &dp_ops_misc,
8191 	.cfg_ops = &dp_ops_cfg,
8192 	.flowctl_ops = &dp_ops_flowctl,
8193 	.l_flowctl_ops = &dp_ops_l_flowctl,
8194 #ifdef IPA_OFFLOAD
8195 	.ipa_ops = &dp_ops_ipa,
8196 #endif
8197 	.bus_ops = &dp_ops_bus,
8198 	.ocb_ops = &dp_ops_ocb,
8199 	.peer_ops = &dp_ops_peer,
8200 	.throttle_ops = &dp_ops_throttle,
8201 	.mob_stats_ops = &dp_ops_mob_stats,
8202 #endif
8203 };
8204 
8205 /*
8206  * dp_soc_set_txrx_ring_map()
8207  * @dp_soc: DP handler for soc
8208  *
8209  * Return: Void
8210  */
8211 static void dp_soc_set_txrx_ring_map(struct dp_soc *soc)
8212 {
8213 	uint32_t i;
8214 	for (i = 0; i < WLAN_CFG_INT_NUM_CONTEXTS; i++) {
8215 		soc->tx_ring_map[i] = dp_cpu_ring_map[DP_DEFAULT_MAP][i];
8216 	}
8217 }
8218 
8219 #ifdef QCA_WIFI_QCA8074
8220 /**
8221  * dp_soc_attach_wifi3() - Attach txrx SOC
8222  * @ctrl_psoc:	Opaque SOC handle from control plane
8223  * @htc_handle:	Opaque HTC handle
8224  * @hif_handle:	Opaque HIF handle
8225  * @qdf_osdev:	QDF device
8226  * @ol_ops:	Offload Operations
8227  * @device_id:	Device ID
8228  *
8229  * Return: DP SOC handle on success, NULL on failure
8230  */
8231 void *dp_soc_attach_wifi3(void *ctrl_psoc, void *hif_handle,
8232 			  HTC_HANDLE htc_handle, qdf_device_t qdf_osdev,
8233 			  struct ol_if_ops *ol_ops, uint16_t device_id)
8234 {
8235 	struct dp_soc *soc = qdf_mem_malloc(sizeof(*soc));
8236 	int target_type;
8237 
8238 	if (!soc) {
8239 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
8240 			FL("DP SOC memory allocation failed"));
8241 		goto fail0;
8242 	}
8243 
8244 	soc->device_id = device_id;
8245 	soc->cdp_soc.ops = &dp_txrx_ops;
8246 	soc->cdp_soc.ol_ops = ol_ops;
8247 	soc->ctrl_psoc = ctrl_psoc;
8248 	soc->osdev = qdf_osdev;
8249 	soc->hif_handle = hif_handle;
8250 
8251 	soc->hal_soc = hif_get_hal_handle(hif_handle);
8252 	soc->htt_handle = htt_soc_attach(soc, ctrl_psoc, htc_handle,
8253 		soc->hal_soc, qdf_osdev);
8254 	soc->num_hw_dscp_tid_map = HAL_MAX_HW_DSCP_TID_MAPS;
8255 
8256 	if (!soc->htt_handle) {
8257 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
8258 			FL("HTT attach failed"));
8259 		goto fail1;
8260 	}
8261 
8262 	soc->wlan_cfg_ctx = wlan_cfg_soc_attach(soc->ctrl_psoc);
8263 	if (!soc->wlan_cfg_ctx) {
8264 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
8265 			FL("wlan_cfg_soc_attach failed"));
8266 		goto fail2;
8267 	}
8268 	target_type = hal_get_target_type(soc->hal_soc);
8269 	switch (target_type) {
8270 	case TARGET_TYPE_QCA6290:
8271 		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
8272 					       REO_DST_RING_SIZE_QCA6290);
8273 		soc->ast_override_support = 1;
8274 		break;
8275 #ifdef QCA_WIFI_QCA6390
8276 	case TARGET_TYPE_QCA6390:
8277 		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
8278 					       REO_DST_RING_SIZE_QCA6290);
8279 		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, true);
8280 		soc->ast_override_support = 1;
8281 		break;
8282 #endif
8283 	case TARGET_TYPE_QCA8074:
8284 		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
8285 					       REO_DST_RING_SIZE_QCA8074);
8286 		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, true);
8287 		soc->hw_nac_monitor_support = 1;
8288 		break;
8289 	case TARGET_TYPE_QCA8074V2:
8290 		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
8291 					       REO_DST_RING_SIZE_QCA8074);
8292 		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, false);
8293 		soc->hw_nac_monitor_support = 1;
8294 		soc->ast_override_support = 1;
8295 		soc->num_hw_dscp_tid_map = HAL_MAX_HW_DSCP_TID_V2_MAPS;
8296 		break;
8297 	default:
8298 		qdf_print("%s: Unknown tgt type %d\n", __func__, target_type);
8299 		qdf_assert_always(0);
8300 		break;
8301 	}
8302 
8303 	wlan_cfg_set_rx_hash(soc->wlan_cfg_ctx,
8304 			     cfg_get(ctrl_psoc, CFG_DP_RX_HASH));
8305 	soc->cce_disable = false;
8306 
8307 	if (soc->cdp_soc.ol_ops->get_dp_cfg_param) {
8308 		int ret = soc->cdp_soc.ol_ops->get_dp_cfg_param(soc->ctrl_psoc,
8309 				CDP_CFG_MAX_PEER_ID);
8310 
8311 		if (ret != -EINVAL) {
8312 			wlan_cfg_set_max_peer_id(soc->wlan_cfg_ctx, ret);
8313 		}
8314 
8315 		ret = soc->cdp_soc.ol_ops->get_dp_cfg_param(soc->ctrl_psoc,
8316 				CDP_CFG_CCE_DISABLE);
8317 		if (ret == 1)
8318 			soc->cce_disable = true;
8319 	}
8320 
8321 	qdf_spinlock_create(&soc->peer_ref_mutex);
8322 
8323 	qdf_spinlock_create(&soc->reo_desc_freelist_lock);
8324 	qdf_list_create(&soc->reo_desc_freelist, REO_DESC_FREELIST_SIZE);
8325 
8326 	/* fill the tx/rx cpu ring map*/
8327 	dp_soc_set_txrx_ring_map(soc);
8328 
8329 	qdf_spinlock_create(&soc->htt_stats.lock);
8330 	/* initialize work queue for stats processing */
8331 	qdf_create_work(0, &soc->htt_stats.work, htt_t2h_stats_handler, soc);
8332 
8333 	/*Initialize inactivity timer for wifison */
8334 	dp_init_inact_timer(soc);
8335 
8336 	return (void *)soc;
8337 
8338 fail2:
8339 	htt_soc_detach(soc->htt_handle);
8340 fail1:
8341 	qdf_mem_free(soc);
8342 fail0:
8343 	return NULL;
8344 }
8345 #endif
8346 
8347 /*
8348  * dp_get_pdev_for_mac_id() -  Return pdev for mac_id
8349  *
8350  * @soc: handle to DP soc
8351  * @mac_id: MAC id
8352  *
8353  * Return: Return pdev corresponding to MAC
8354  */
8355 void *dp_get_pdev_for_mac_id(struct dp_soc *soc, uint32_t mac_id)
8356 {
8357 	if (wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx))
8358 		return soc->pdev_list[mac_id];
8359 
8360 	/* Typically for MCL as there only 1 PDEV*/
8361 	return soc->pdev_list[0];
8362 }
8363 
8364 /*
8365  * dp_is_hw_dbs_enable() - Procedure to check if DBS is supported
8366  * @soc:		DP SoC context
8367  * @max_mac_rings:	No of MAC rings
8368  *
8369  * Return: None
8370  */
8371 static
8372 void dp_is_hw_dbs_enable(struct dp_soc *soc,
8373 				int *max_mac_rings)
8374 {
8375 	bool dbs_enable = false;
8376 	if (soc->cdp_soc.ol_ops->is_hw_dbs_2x2_capable)
8377 		dbs_enable = soc->cdp_soc.ol_ops->
8378 		is_hw_dbs_2x2_capable(soc->ctrl_psoc);
8379 
8380 	*max_mac_rings = (dbs_enable)?(*max_mac_rings):1;
8381 }
8382 
8383 /*
8384 * dp_set_pktlog_wifi3() - attach txrx vdev
8385 * @pdev: Datapath PDEV handle
8386 * @event: which event's notifications are being subscribed to
8387 * @enable: WDI event subscribe or not. (True or False)
8388 *
8389 * Return: Success, NULL on failure
8390 */
8391 #ifdef WDI_EVENT_ENABLE
8392 int dp_set_pktlog_wifi3(struct dp_pdev *pdev, uint32_t event,
8393 	bool enable)
8394 {
8395 	struct dp_soc *soc = pdev->soc;
8396 	struct htt_rx_ring_tlv_filter htt_tlv_filter = {0};
8397 	int max_mac_rings = wlan_cfg_get_num_mac_rings
8398 					(pdev->wlan_cfg_ctx);
8399 	uint8_t mac_id = 0;
8400 
8401 	dp_is_hw_dbs_enable(soc, &max_mac_rings);
8402 
8403 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
8404 			FL("Max_mac_rings %d "),
8405 			max_mac_rings);
8406 
8407 	if (enable) {
8408 		switch (event) {
8409 		case WDI_EVENT_RX_DESC:
8410 			if (pdev->monitor_vdev) {
8411 				/* Nothing needs to be done if monitor mode is
8412 				 * enabled
8413 				 */
8414 				return 0;
8415 			}
8416 			if (pdev->rx_pktlog_mode != DP_RX_PKTLOG_FULL) {
8417 				pdev->rx_pktlog_mode = DP_RX_PKTLOG_FULL;
8418 				htt_tlv_filter.mpdu_start = 1;
8419 				htt_tlv_filter.msdu_start = 1;
8420 				htt_tlv_filter.msdu_end = 1;
8421 				htt_tlv_filter.mpdu_end = 1;
8422 				htt_tlv_filter.packet_header = 1;
8423 				htt_tlv_filter.attention = 1;
8424 				htt_tlv_filter.ppdu_start = 1;
8425 				htt_tlv_filter.ppdu_end = 1;
8426 				htt_tlv_filter.ppdu_end_user_stats = 1;
8427 				htt_tlv_filter.ppdu_end_user_stats_ext = 1;
8428 				htt_tlv_filter.ppdu_end_status_done = 1;
8429 				htt_tlv_filter.enable_fp = 1;
8430 				htt_tlv_filter.fp_mgmt_filter = FILTER_MGMT_ALL;
8431 				htt_tlv_filter.fp_ctrl_filter = FILTER_CTRL_ALL;
8432 				htt_tlv_filter.fp_data_filter = FILTER_DATA_ALL;
8433 				htt_tlv_filter.mo_mgmt_filter = FILTER_MGMT_ALL;
8434 				htt_tlv_filter.mo_ctrl_filter = FILTER_CTRL_ALL;
8435 				htt_tlv_filter.mo_data_filter = FILTER_DATA_ALL;
8436 
8437 				for (mac_id = 0; mac_id < max_mac_rings;
8438 								mac_id++) {
8439 					int mac_for_pdev =
8440 						dp_get_mac_id_for_pdev(mac_id,
8441 								pdev->pdev_id);
8442 
8443 					htt_h2t_rx_ring_cfg(soc->htt_handle,
8444 					 mac_for_pdev,
8445 					 pdev->rxdma_mon_status_ring[mac_id]
8446 					 .hal_srng,
8447 					 RXDMA_MONITOR_STATUS,
8448 					 RX_BUFFER_SIZE,
8449 					 &htt_tlv_filter);
8450 
8451 				}
8452 
8453 				if (soc->reap_timer_init)
8454 					qdf_timer_mod(&soc->mon_reap_timer,
8455 					DP_INTR_POLL_TIMER_MS);
8456 			}
8457 			break;
8458 
8459 		case WDI_EVENT_LITE_RX:
8460 			if (pdev->monitor_vdev) {
8461 				/* Nothing needs to be done if monitor mode is
8462 				 * enabled
8463 				 */
8464 				return 0;
8465 			}
8466 
8467 			if (pdev->rx_pktlog_mode != DP_RX_PKTLOG_LITE) {
8468 				pdev->rx_pktlog_mode = DP_RX_PKTLOG_LITE;
8469 
8470 				htt_tlv_filter.ppdu_start = 1;
8471 				htt_tlv_filter.ppdu_end = 1;
8472 				htt_tlv_filter.ppdu_end_user_stats = 1;
8473 				htt_tlv_filter.ppdu_end_user_stats_ext = 1;
8474 				htt_tlv_filter.ppdu_end_status_done = 1;
8475 				htt_tlv_filter.mpdu_start = 1;
8476 				htt_tlv_filter.enable_fp = 1;
8477 				htt_tlv_filter.fp_mgmt_filter = FILTER_MGMT_ALL;
8478 				htt_tlv_filter.fp_ctrl_filter = FILTER_CTRL_ALL;
8479 				htt_tlv_filter.fp_data_filter = FILTER_DATA_ALL;
8480 				htt_tlv_filter.mo_mgmt_filter = FILTER_MGMT_ALL;
8481 				htt_tlv_filter.mo_ctrl_filter = FILTER_CTRL_ALL;
8482 				htt_tlv_filter.mo_data_filter = FILTER_DATA_ALL;
8483 
8484 				for (mac_id = 0; mac_id < max_mac_rings;
8485 								mac_id++) {
8486 					int mac_for_pdev =
8487 						dp_get_mac_id_for_pdev(mac_id,
8488 								pdev->pdev_id);
8489 
8490 					htt_h2t_rx_ring_cfg(soc->htt_handle,
8491 					mac_for_pdev,
8492 					pdev->rxdma_mon_status_ring[mac_id]
8493 					.hal_srng,
8494 					RXDMA_MONITOR_STATUS,
8495 					RX_BUFFER_SIZE_PKTLOG_LITE,
8496 					&htt_tlv_filter);
8497 				}
8498 
8499 				if (soc->reap_timer_init)
8500 					qdf_timer_mod(&soc->mon_reap_timer,
8501 					DP_INTR_POLL_TIMER_MS);
8502 			}
8503 			break;
8504 
8505 		case WDI_EVENT_LITE_T2H:
8506 			if (pdev->monitor_vdev) {
8507 				/* Nothing needs to be done if monitor mode is
8508 				 * enabled
8509 				 */
8510 				return 0;
8511 			}
8512 
8513 			for (mac_id = 0; mac_id < max_mac_rings; mac_id++) {
8514 				int mac_for_pdev = dp_get_mac_id_for_pdev(
8515 							mac_id,	pdev->pdev_id);
8516 
8517 				pdev->pktlog_ppdu_stats = true;
8518 				dp_h2t_cfg_stats_msg_send(pdev,
8519 					DP_PPDU_TXLITE_STATS_BITMASK_CFG,
8520 					mac_for_pdev);
8521 			}
8522 			break;
8523 
8524 		default:
8525 			/* Nothing needs to be done for other pktlog types */
8526 			break;
8527 		}
8528 	} else {
8529 		switch (event) {
8530 		case WDI_EVENT_RX_DESC:
8531 		case WDI_EVENT_LITE_RX:
8532 			if (pdev->monitor_vdev) {
8533 				/* Nothing needs to be done if monitor mode is
8534 				 * enabled
8535 				 */
8536 				return 0;
8537 			}
8538 			if (pdev->rx_pktlog_mode != DP_RX_PKTLOG_DISABLED) {
8539 				pdev->rx_pktlog_mode = DP_RX_PKTLOG_DISABLED;
8540 
8541 				for (mac_id = 0; mac_id < max_mac_rings;
8542 								mac_id++) {
8543 					int mac_for_pdev =
8544 						dp_get_mac_id_for_pdev(mac_id,
8545 								pdev->pdev_id);
8546 
8547 					htt_h2t_rx_ring_cfg(soc->htt_handle,
8548 					  mac_for_pdev,
8549 					  pdev->rxdma_mon_status_ring[mac_id]
8550 					  .hal_srng,
8551 					  RXDMA_MONITOR_STATUS,
8552 					  RX_BUFFER_SIZE,
8553 					  &htt_tlv_filter);
8554 				}
8555 
8556 				if (soc->reap_timer_init)
8557 					qdf_timer_stop(&soc->mon_reap_timer);
8558 			}
8559 			break;
8560 		case WDI_EVENT_LITE_T2H:
8561 			if (pdev->monitor_vdev) {
8562 				/* Nothing needs to be done if monitor mode is
8563 				 * enabled
8564 				 */
8565 				return 0;
8566 			}
8567 			/* To disable HTT_H2T_MSG_TYPE_PPDU_STATS_CFG in FW
8568 			 * passing value 0. Once these macros will define in htt
8569 			 * header file will use proper macros
8570 			*/
8571 			for (mac_id = 0; mac_id < max_mac_rings; mac_id++) {
8572 				int mac_for_pdev =
8573 						dp_get_mac_id_for_pdev(mac_id,
8574 								pdev->pdev_id);
8575 
8576 				pdev->pktlog_ppdu_stats = false;
8577 				if (!pdev->enhanced_stats_en && !pdev->tx_sniffer_enable && !pdev->mcopy_mode) {
8578 					dp_h2t_cfg_stats_msg_send(pdev, 0,
8579 								mac_for_pdev);
8580 				} else if (pdev->tx_sniffer_enable || pdev->mcopy_mode) {
8581 					dp_h2t_cfg_stats_msg_send(pdev, DP_PPDU_STATS_CFG_SNIFFER,
8582 								mac_for_pdev);
8583 				} else if (pdev->enhanced_stats_en) {
8584 					dp_h2t_cfg_stats_msg_send(pdev, DP_PPDU_STATS_CFG_ENH_STATS,
8585 								mac_for_pdev);
8586 				}
8587 			}
8588 
8589 			break;
8590 		default:
8591 			/* Nothing needs to be done for other pktlog types */
8592 			break;
8593 		}
8594 	}
8595 	return 0;
8596 }
8597 #endif
8598