xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/dp_main.c (revision 4865edfd190c086bbe2c69aae12a8226f877b91e)
1 /*
2  * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include <qdf_types.h>
20 #include <qdf_lock.h>
21 #include <qdf_net_types.h>
22 #include <qdf_lro.h>
23 #include <qdf_module.h>
24 #include <hal_api.h>
25 #include <hif.h>
26 #include <htt.h>
27 #include <wdi_event.h>
28 #include <queue.h>
29 #include "dp_htt.h"
30 #include "dp_types.h"
31 #include "dp_internal.h"
32 #include "dp_tx.h"
33 #include "dp_tx_desc.h"
34 #include "dp_rx.h"
35 #include <cdp_txrx_handle.h>
36 #include <wlan_cfg.h>
37 #include "cdp_txrx_cmn_struct.h"
38 #include "cdp_txrx_stats_struct.h"
39 #include <qdf_util.h>
40 #include "dp_peer.h"
41 #include "dp_rx_mon.h"
42 #include "htt_stats.h"
43 #include "qdf_mem.h"   /* qdf_mem_malloc,free */
44 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
45 #include "cdp_txrx_flow_ctrl_v2.h"
46 #else
47 static inline void
48 cdp_dump_flow_pool_info(struct cdp_soc_t *soc)
49 {
50 	return;
51 }
52 #endif
53 #include "dp_ipa.h"
54 
55 #ifdef CONFIG_MCL
56 static void dp_service_mon_rings(void *arg);
57 #ifndef REMOVE_PKT_LOG
58 #include <pktlog_ac_api.h>
59 #include <pktlog_ac.h>
60 static void dp_pkt_log_con_service(struct cdp_pdev *ppdev, void *scn);
61 #endif
62 #endif
63 static void dp_pktlogmod_exit(struct dp_pdev *handle);
64 static void *dp_peer_create_wifi3(struct cdp_vdev *vdev_handle,
65 					uint8_t *peer_mac_addr);
66 static void dp_peer_delete_wifi3(void *peer_handle, uint32_t bitmap);
67 
68 #define DP_INTR_POLL_TIMER_MS	10
69 #define DP_WDS_AGING_TIMER_DEFAULT_MS	120000
70 #define DP_MCS_LENGTH (6*MAX_MCS)
71 #define DP_NSS_LENGTH (6*SS_COUNT)
72 #define DP_RXDMA_ERR_LENGTH (6*HAL_RXDMA_ERR_MAX)
73 #define DP_REO_ERR_LENGTH (6*HAL_REO_ERR_MAX)
74 #define DP_MAX_MCS_STRING_LEN 30
75 #define DP_CURR_FW_STATS_AVAIL 19
76 #define DP_HTT_DBG_EXT_STATS_MAX 256
77 #define DP_MAX_SLEEP_TIME 100
78 
79 #ifdef IPA_OFFLOAD
80 /* Exclude IPA rings from the interrupt context */
81 #define TX_RING_MASK_VAL	0xb
82 #define RX_RING_MASK_VAL	0x7
83 #else
84 #define TX_RING_MASK_VAL	0xF
85 #define RX_RING_MASK_VAL	0xF
86 #endif
87 
88 bool rx_hash = 1;
89 qdf_declare_param(rx_hash, bool);
90 
91 #define STR_MAXLEN	64
92 
93 #define DP_PPDU_STATS_CFG_ALL 0xFFFF
94 
95 /* PPDU stats mask sent to FW to enable enhanced stats */
96 #define DP_PPDU_STATS_CFG_ENH_STATS 0xE67
97 /* PPDU stats mask sent to FW to support debug sniffer feature */
98 #define DP_PPDU_STATS_CFG_SNIFFER 0x2FFF
99 /**
100  * default_dscp_tid_map - Default DSCP-TID mapping
101  *
102  * DSCP        TID
103  * 000000      0
104  * 001000      1
105  * 010000      2
106  * 011000      3
107  * 100000      4
108  * 101000      5
109  * 110000      6
110  * 111000      7
111  */
112 static uint8_t default_dscp_tid_map[DSCP_TID_MAP_MAX] = {
113 	0, 0, 0, 0, 0, 0, 0, 0,
114 	1, 1, 1, 1, 1, 1, 1, 1,
115 	2, 2, 2, 2, 2, 2, 2, 2,
116 	3, 3, 3, 3, 3, 3, 3, 3,
117 	4, 4, 4, 4, 4, 4, 4, 4,
118 	5, 5, 5, 5, 5, 5, 5, 5,
119 	6, 6, 6, 6, 6, 6, 6, 6,
120 	7, 7, 7, 7, 7, 7, 7, 7,
121 };
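/*
 * Illustration (added note, not from the original source): the table is indexed
 * by the 6-bit DSCP value, so the resulting TID is effectively dscp >> 3. For
 * example, DSCP 46 (0b101110, commonly EF voice traffic) falls in the 101xxx
 * block and maps to TID 5, while best-effort DSCP 0 maps to TID 0.
 */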
122 
123 /*
124  * struct dp_rate_debug
125  *
126  * @mcs_type: print string for a given mcs
127  * @valid: valid mcs rate?
128  */
129 struct dp_rate_debug {
130 	char mcs_type[DP_MAX_MCS_STRING_LEN];
131 	uint8_t valid;
132 };
133 
134 #define MCS_VALID 1
135 #define MCS_INVALID 0
136 
137 static const struct dp_rate_debug dp_rate_string[DOT11_MAX][MAX_MCS] = {
138 
139 	{
140 		{"OFDM 48 Mbps", MCS_VALID},
141 		{"OFDM 24 Mbps", MCS_VALID},
142 		{"OFDM 12 Mbps", MCS_VALID},
143 		{"OFDM 6 Mbps ", MCS_VALID},
144 		{"OFDM 54 Mbps", MCS_VALID},
145 		{"OFDM 36 Mbps", MCS_VALID},
146 		{"OFDM 18 Mbps", MCS_VALID},
147 		{"OFDM 9 Mbps ", MCS_VALID},
148 		{"INVALID ", MCS_INVALID},
149 		{"INVALID ", MCS_INVALID},
150 		{"INVALID ", MCS_INVALID},
151 		{"INVALID ", MCS_INVALID},
152 		{"INVALID ", MCS_VALID},
153 	},
154 	{
155 		{"CCK 11 Mbps Long  ", MCS_VALID},
156 		{"CCK 5.5 Mbps Long ", MCS_VALID},
157 		{"CCK 2 Mbps Long   ", MCS_VALID},
158 		{"CCK 1 Mbps Long   ", MCS_VALID},
159 		{"CCK 11 Mbps Short ", MCS_VALID},
160 		{"CCK 5.5 Mbps Short", MCS_VALID},
161 		{"CCK 2 Mbps Short  ", MCS_VALID},
162 		{"INVALID ", MCS_INVALID},
163 		{"INVALID ", MCS_INVALID},
164 		{"INVALID ", MCS_INVALID},
165 		{"INVALID ", MCS_INVALID},
166 		{"INVALID ", MCS_INVALID},
167 		{"INVALID ", MCS_VALID},
168 	},
169 	{
170 		{"HT MCS 0 (BPSK 1/2)  ", MCS_VALID},
171 		{"HT MCS 1 (QPSK 1/2)  ", MCS_VALID},
172 		{"HT MCS 2 (QPSK 3/4)  ", MCS_VALID},
173 		{"HT MCS 3 (16-QAM 1/2)", MCS_VALID},
174 		{"HT MCS 4 (16-QAM 3/4)", MCS_VALID},
175 		{"HT MCS 5 (64-QAM 2/3)", MCS_VALID},
176 		{"HT MCS 6 (64-QAM 3/4)", MCS_VALID},
177 		{"HT MCS 7 (64-QAM 5/6)", MCS_VALID},
178 		{"INVALID ", MCS_INVALID},
179 		{"INVALID ", MCS_INVALID},
180 		{"INVALID ", MCS_INVALID},
181 		{"INVALID ", MCS_INVALID},
182 		{"INVALID ", MCS_VALID},
183 	},
184 	{
185 		{"VHT MCS 0 (BPSK 1/2)     ", MCS_VALID},
186 		{"VHT MCS 1 (QPSK 1/2)     ", MCS_VALID},
187 		{"VHT MCS 2 (QPSK 3/4)     ", MCS_VALID},
188 		{"VHT MCS 3 (16-QAM 1/2)   ", MCS_VALID},
189 		{"VHT MCS 4 (16-QAM 3/4)   ", MCS_VALID},
190 		{"VHT MCS 5 (64-QAM 2/3)   ", MCS_VALID},
191 		{"VHT MCS 6 (64-QAM 3/4)   ", MCS_VALID},
192 		{"VHT MCS 7 (64-QAM 5/6)   ", MCS_VALID},
193 		{"VHT MCS 8 (256-QAM 3/4)  ", MCS_VALID},
194 		{"VHT MCS 9 (256-QAM 5/6)  ", MCS_VALID},
195 		{"VHT MCS 10 (1024-QAM 3/4)", MCS_VALID},
196 		{"VHT MCS 11 (1024-QAM 5/6)", MCS_VALID},
197 		{"INVALID ", MCS_VALID},
198 	},
199 	{
200 		{"HE MCS 0 (BPSK 1/2)     ", MCS_VALID},
201 		{"HE MCS 1 (QPSK 1/2)     ", MCS_VALID},
202 		{"HE MCS 2 (QPSK 3/4)     ", MCS_VALID},
203 		{"HE MCS 3 (16-QAM 1/2)   ", MCS_VALID},
204 		{"HE MCS 4 (16-QAM 3/4)   ", MCS_VALID},
205 		{"HE MCS 5 (64-QAM 2/3)   ", MCS_VALID},
206 		{"HE MCS 6 (64-QAM 3/4)   ", MCS_VALID},
207 		{"HE MCS 7 (64-QAM 5/6)   ", MCS_VALID},
208 		{"HE MCS 8 (256-QAM 3/4)  ", MCS_VALID},
209 		{"HE MCS 9 (256-QAM 5/6)  ", MCS_VALID},
210 		{"HE MCS 10 (1024-QAM 3/4)", MCS_VALID},
211 		{"HE MCS 11 (1024-QAM 5/6)", MCS_VALID},
212 		{"INVALID ", MCS_VALID},
213 	}
214 };
215 
216 /**
217  * @brief Cpu ring map types
218  */
219 enum dp_cpu_ring_map_types {
220 	DP_DEFAULT_MAP,
221 	DP_NSS_FIRST_RADIO_OFFLOADED_MAP,
222 	DP_NSS_SECOND_RADIO_OFFLOADED_MAP,
223 	DP_NSS_ALL_RADIO_OFFLOADED_MAP,
224 	DP_CPU_RING_MAP_MAX
225 };
226 
227 /**
228  * @brief Cpu to tx ring map
229  */
230 static uint8_t dp_cpu_ring_map[DP_CPU_RING_MAP_MAX][WLAN_CFG_INT_NUM_CONTEXTS] = {
231 	{0x0, 0x1, 0x2, 0x0},
232 	{0x1, 0x2, 0x1, 0x2},
233 	{0x0, 0x2, 0x0, 0x2},
234 	{0x2, 0x2, 0x2, 0x2}
235 };
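/*
 * Reading the map (interpretation; see dp_soc_reset_cpu_ring_map() later in
 * this file): dp_cpu_ring_map[map][ctx] gives the Tx ring used by interrupt
 * context ctx. For example, DP_NSS_FIRST_RADIO_OFFLOADED_MAP only uses rings 1
 * and 2, presumably leaving ring 0 to the NSS-offloaded radio, while
 * DP_NSS_ALL_RADIO_OFFLOADED_MAP keeps every host context on ring 2.
 */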
236 
237 /**
238  * @brief Select the type of statistics
239  */
240 enum dp_stats_type {
241 	STATS_FW = 0,
242 	STATS_HOST = 1,
243 	STATS_TYPE_MAX = 2,
244 };
245 
246 /**
247  * @brief General Firmware statistics options
248  *
249  */
250 enum dp_fw_stats {
251 	TXRX_FW_STATS_INVALID	= -1,
252 };
253 
254 /**
255  * dp_stats_mapping_table - Firmware and Host statistics
256  * currently supported
257  */
258 const int dp_stats_mapping_table[][STATS_TYPE_MAX] = {
259 	{HTT_DBG_EXT_STATS_RESET, TXRX_HOST_STATS_INVALID},
260 	{HTT_DBG_EXT_STATS_PDEV_TX, TXRX_HOST_STATS_INVALID},
261 	{HTT_DBG_EXT_STATS_PDEV_RX, TXRX_HOST_STATS_INVALID},
262 	{HTT_DBG_EXT_STATS_PDEV_TX_HWQ, TXRX_HOST_STATS_INVALID},
263 	{HTT_DBG_EXT_STATS_PDEV_TX_SCHED, TXRX_HOST_STATS_INVALID},
264 	{HTT_DBG_EXT_STATS_PDEV_ERROR, TXRX_HOST_STATS_INVALID},
265 	{HTT_DBG_EXT_STATS_PDEV_TQM, TXRX_HOST_STATS_INVALID},
266 	{HTT_DBG_EXT_STATS_TQM_CMDQ, TXRX_HOST_STATS_INVALID},
267 	{HTT_DBG_EXT_STATS_TX_DE_INFO, TXRX_HOST_STATS_INVALID},
268 	{HTT_DBG_EXT_STATS_PDEV_TX_RATE, TXRX_HOST_STATS_INVALID},
269 	{HTT_DBG_EXT_STATS_PDEV_RX_RATE, TXRX_HOST_STATS_INVALID},
270 	{TXRX_FW_STATS_INVALID, TXRX_HOST_STATS_INVALID},
271 	{HTT_DBG_EXT_STATS_TX_SELFGEN_INFO, TXRX_HOST_STATS_INVALID},
272 	{HTT_DBG_EXT_STATS_TX_MU_HWQ, TXRX_HOST_STATS_INVALID},
273 	{HTT_DBG_EXT_STATS_RING_IF_INFO, TXRX_HOST_STATS_INVALID},
274 	{HTT_DBG_EXT_STATS_SRNG_INFO, TXRX_HOST_STATS_INVALID},
275 	{HTT_DBG_EXT_STATS_SFM_INFO, TXRX_HOST_STATS_INVALID},
276 	{HTT_DBG_EXT_STATS_PDEV_TX_MU, TXRX_HOST_STATS_INVALID},
277 	{HTT_DBG_EXT_STATS_ACTIVE_PEERS_LIST, TXRX_HOST_STATS_INVALID},
278 	/* Last ENUM for HTT FW STATS */
279 	{DP_HTT_DBG_EXT_STATS_MAX, TXRX_HOST_STATS_INVALID},
280 	{TXRX_FW_STATS_INVALID, TXRX_CLEAR_STATS},
281 	{TXRX_FW_STATS_INVALID, TXRX_RX_RATE_STATS},
282 	{TXRX_FW_STATS_INVALID, TXRX_TX_RATE_STATS},
283 	{TXRX_FW_STATS_INVALID, TXRX_TX_HOST_STATS},
284 	{TXRX_FW_STATS_INVALID, TXRX_RX_HOST_STATS},
285 	{TXRX_FW_STATS_INVALID, TXRX_AST_STATS},
286 	{TXRX_FW_STATS_INVALID, TXRX_SRNG_PTR_STATS},
287 	{TXRX_FW_STATS_INVALID, TXRX_RX_MON_STATS},
288 };
289 
290 static int dp_peer_add_ast_wifi3(struct cdp_soc_t *soc_hdl,
291 					struct cdp_peer *peer_hdl,
292 					uint8_t *mac_addr,
293 					enum cdp_txrx_ast_entry_type type,
294 					uint32_t flags)
295 {
296 
297 	return dp_peer_add_ast((struct dp_soc *)soc_hdl,
298 				(struct dp_peer *)peer_hdl,
299 				mac_addr,
300 				type,
301 				flags);
302 }
303 
304 static void dp_peer_del_ast_wifi3(struct cdp_soc_t *soc_hdl,
305 					 void *ast_entry_hdl)
306 {
307 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
308 	qdf_spin_lock_bh(&soc->ast_lock);
309 	dp_peer_del_ast((struct dp_soc *)soc_hdl,
310 			(struct dp_ast_entry *)ast_entry_hdl);
311 	qdf_spin_unlock_bh(&soc->ast_lock);
312 }
313 
314 
315 static int dp_peer_update_ast_wifi3(struct cdp_soc_t *soc_hdl,
316 						struct cdp_peer *peer_hdl,
317 						uint8_t *wds_macaddr,
318 						uint32_t flags)
319 {
320 	int status;
321 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
322 	struct dp_ast_entry  *ast_entry = NULL;
323 
324 	qdf_spin_lock_bh(&soc->ast_lock);
325 	ast_entry = dp_peer_ast_hash_find(soc, wds_macaddr);
326 
327 	status = dp_peer_update_ast(soc,
328 					(struct dp_peer *)peer_hdl,
329 					ast_entry,
330 					flags);
331 	qdf_spin_unlock_bh(&soc->ast_lock);
332 
333 	return status;
334 }
335 
336 /*
337  * dp_wds_reset_ast_wifi3() - Reset the is_active param for ast entry
338  * @soc_hdl: Datapath SOC handle
339  * @wds_macaddr: MAC address of the WDS entry whose is_active flag is reset
340  * @vdev_hdl: vdev handle
341  * Return: None
342  */
343 static void dp_wds_reset_ast_wifi3(struct cdp_soc_t *soc_hdl,
344 				    uint8_t *wds_macaddr, void *vdev_hdl)
345 {
346 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
347 	struct dp_ast_entry *ast_entry = NULL;
348 
349 	qdf_spin_lock_bh(&soc->ast_lock);
350 	ast_entry = dp_peer_ast_hash_find(soc, wds_macaddr);
351 
352 	if (ast_entry && (ast_entry->type != CDP_TXRX_AST_TYPE_STATIC)) {
353 		ast_entry->is_active = TRUE;
354 	}
355 	qdf_spin_unlock_bh(&soc->ast_lock);
356 }
357 
358 /*
359  * dp_wds_reset_ast_table_wifi3() - Reset the is_active param for all ast entries
360  * @soc_hdl: Datapath SOC handle
361  * @vdev_hdl: vdev handle
362  *
363  * Return: None
364  */
365 static void dp_wds_reset_ast_table_wifi3(struct cdp_soc_t *soc_hdl,
366 					 void *vdev_hdl)
367 {
368 	struct dp_soc *soc = (struct dp_soc *) soc_hdl;
369 	struct dp_pdev *pdev;
370 	struct dp_vdev *vdev;
371 	struct dp_peer *peer;
372 	struct dp_ast_entry *ase, *temp_ase;
373 	int i;
374 
375 	qdf_spin_lock_bh(&soc->ast_lock);
376 
377 	for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) {
378 		pdev = soc->pdev_list[i];
379 		qdf_spin_lock_bh(&pdev->vdev_list_lock);
380 		DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
381 			DP_VDEV_ITERATE_PEER_LIST(vdev, peer) {
382 				DP_PEER_ITERATE_ASE_LIST(peer, ase, temp_ase) {
383 					if (ase->type ==
384 						CDP_TXRX_AST_TYPE_STATIC)
385 						continue;
386 					ase->is_active = TRUE;
387 				}
388 			}
389 		}
390 		qdf_spin_unlock_bh(&pdev->vdev_list_lock);
391 	}
392 
393 	qdf_spin_unlock_bh(&soc->ast_lock);
394 }
395 
396 /*
397  * dp_wds_flush_ast_table_wifi3() - Delete all wds and hmwds ast entries
398  * @soc_hdl:		Datapath SOC handle
399  *
400  * Return: None
401  */
402 static void dp_wds_flush_ast_table_wifi3(struct cdp_soc_t  *soc_hdl)
403 {
404 	struct dp_soc *soc = (struct dp_soc *) soc_hdl;
405 	struct dp_pdev *pdev;
406 	struct dp_vdev *vdev;
407 	struct dp_peer *peer;
408 	struct dp_ast_entry *ase, *temp_ase;
409 	int i;
410 
411 	qdf_spin_lock_bh(&soc->ast_lock);
412 
413 	for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) {
414 		pdev = soc->pdev_list[i];
415 		qdf_spin_lock_bh(&pdev->vdev_list_lock);
416 		DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
417 			DP_VDEV_ITERATE_PEER_LIST(vdev, peer) {
418 				DP_PEER_ITERATE_ASE_LIST(peer, ase, temp_ase) {
419 					if (ase->type ==
420 						CDP_TXRX_AST_TYPE_STATIC)
421 						continue;
422 					dp_peer_del_ast(soc, ase);
423 				}
424 			}
425 		}
426 		qdf_spin_unlock_bh(&pdev->vdev_list_lock);
427 	}
428 
429 	qdf_spin_unlock_bh(&soc->ast_lock);
430 }
431 
432 static void *dp_peer_ast_hash_find_wifi3(struct cdp_soc_t *soc_hdl,
433 						uint8_t *ast_mac_addr)
434 {
435 	struct dp_ast_entry *ast_entry;
436 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
437 	qdf_spin_lock_bh(&soc->ast_lock);
438 	ast_entry = dp_peer_ast_hash_find(soc, ast_mac_addr);
439 	qdf_spin_unlock_bh(&soc->ast_lock);
440 	return (void *)ast_entry;
441 }
442 
443 static uint8_t dp_peer_ast_get_pdev_id_wifi3(struct cdp_soc_t *soc_hdl,
444 							void *ast_entry_hdl)
445 {
446 	return dp_peer_ast_get_pdev_id((struct dp_soc *)soc_hdl,
447 					(struct dp_ast_entry *)ast_entry_hdl);
448 }
449 
450 static uint8_t dp_peer_ast_get_next_hop_wifi3(struct cdp_soc_t *soc_hdl,
451 							void *ast_entry_hdl)
452 {
453 	return dp_peer_ast_get_next_hop((struct dp_soc *)soc_hdl,
454 					(struct dp_ast_entry *)ast_entry_hdl);
455 }
456 
457 static void dp_peer_ast_set_type_wifi3(
458 					struct cdp_soc_t *soc_hdl,
459 					void *ast_entry_hdl,
460 					enum cdp_txrx_ast_entry_type type)
461 {
462 	dp_peer_ast_set_type((struct dp_soc *)soc_hdl,
463 				(struct dp_ast_entry *)ast_entry_hdl,
464 				type);
465 }
466 
467 
468 
469 /**
470  * dp_srng_find_ring_in_mask() - find which ext_group a ring belongs to
471  * @ring_num: ring num of the ring being queried
472  * @grp_mask: the grp_mask array for the ring type in question.
473  *
474  * The grp_mask array is indexed by group number and the bit fields correspond
475  * to ring numbers.  We are finding which interrupt group a ring belongs to.
476  *
477  * Return: the index in the grp_mask array with the ring number.
478  * -QDF_STATUS_E_NOENT if no entry is found
479  */
480 static int dp_srng_find_ring_in_mask(int ring_num, int *grp_mask)
481 {
482 	int ext_group_num;
483 	int mask = 1 << ring_num;
484 
485 	for (ext_group_num = 0; ext_group_num < WLAN_CFG_INT_NUM_CONTEXTS;
486 	     ext_group_num++) {
487 		if (mask & grp_mask[ext_group_num])
488 			return ext_group_num;
489 	}
490 
491 	return -QDF_STATUS_E_NOENT;
492 }
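/*
 * Worked example (illustrative mask values only): with grp_mask[] = {0x3, 0xC}
 * the first interrupt context owns rings 0 and 1 and the second owns rings 2
 * and 3. A lookup for ring_num 2 computes mask = 1 << 2 = 0x4, misses group 0,
 * matches group 1, and returns 1.
 */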
493 
494 static int dp_srng_calculate_msi_group(struct dp_soc *soc,
495 				       enum hal_ring_type ring_type,
496 				       int ring_num)
497 {
498 	int *grp_mask;
499 
500 	switch (ring_type) {
501 	case WBM2SW_RELEASE:
502 		/* dp_tx_comp_handler - soc->tx_comp_ring */
503 		if (ring_num < 3)
504 			grp_mask = &soc->wlan_cfg_ctx->int_tx_ring_mask[0];
505 
506 		/* dp_rx_wbm_err_process - soc->rx_rel_ring */
507 		else if (ring_num == 3) {
508 			/* sw treats this as a separate ring type */
509 			grp_mask = &soc->wlan_cfg_ctx->
510 				int_rx_wbm_rel_ring_mask[0];
511 			ring_num = 0;
512 		} else {
513 			qdf_assert(0);
514 			return -QDF_STATUS_E_NOENT;
515 		}
516 	break;
517 
518 	case REO_EXCEPTION:
519 		/* dp_rx_err_process - &soc->reo_exception_ring */
520 		grp_mask = &soc->wlan_cfg_ctx->int_rx_err_ring_mask[0];
521 	break;
522 
523 	case REO_DST:
524 		/* dp_rx_process - soc->reo_dest_ring */
525 		grp_mask = &soc->wlan_cfg_ctx->int_rx_ring_mask[0];
526 	break;
527 
528 	case REO_STATUS:
529 		/* dp_reo_status_ring_handler - soc->reo_status_ring */
530 		grp_mask = &soc->wlan_cfg_ctx->int_reo_status_ring_mask[0];
531 	break;
532 
533 	/* dp_rx_mon_status_srng_process - pdev->rxdma_mon_status_ring*/
534 	case RXDMA_MONITOR_STATUS:
535 	/* dp_rx_mon_dest_process - pdev->rxdma_mon_dst_ring */
536 	case RXDMA_MONITOR_DST:
537 		/* dp_mon_process */
538 		grp_mask = &soc->wlan_cfg_ctx->int_rx_mon_ring_mask[0];
539 	break;
540 	case RXDMA_DST:
541 		/* dp_rxdma_err_process */
542 		grp_mask = &soc->wlan_cfg_ctx->int_rxdma2host_ring_mask[0];
543 	break;
544 
545 	case RXDMA_BUF:
546 		grp_mask = &soc->wlan_cfg_ctx->int_host2rxdma_ring_mask[0];
547 	break;
548 
549 	case RXDMA_MONITOR_BUF:
550 		/* TODO: support low_thresh interrupt */
551 		return -QDF_STATUS_E_NOENT;
552 	break;
553 
554 	case TCL_DATA:
555 	case TCL_CMD:
556 	case REO_CMD:
557 	case SW2WBM_RELEASE:
558 	case WBM_IDLE_LINK:
559 		/* normally empty SW_TO_HW rings */
560 		return -QDF_STATUS_E_NOENT;
561 	break;
562 
563 	case TCL_STATUS:
564 	case REO_REINJECT:
565 		/* misc unused rings */
566 		return -QDF_STATUS_E_NOENT;
567 	break;
568 
569 	case CE_SRC:
570 	case CE_DST:
571 	case CE_DST_STATUS:
572 		/* CE_rings - currently handled by hif */
573 	default:
574 		return -QDF_STATUS_E_NOENT;
575 	break;
576 	}
577 
578 	return dp_srng_find_ring_in_mask(ring_num, grp_mask);
579 }
580 
581 static void dp_srng_msi_setup(struct dp_soc *soc, struct hal_srng_params
582 			      *ring_params, int ring_type, int ring_num)
583 {
584 	int msi_group_number;
585 	int msi_data_count;
586 	int ret;
587 	uint32_t msi_data_start, msi_irq_start, addr_low, addr_high;
588 
589 	ret = pld_get_user_msi_assignment(soc->osdev->dev, "DP",
590 					    &msi_data_count, &msi_data_start,
591 					    &msi_irq_start);
592 
593 	if (ret)
594 		return;
595 
596 	msi_group_number = dp_srng_calculate_msi_group(soc, ring_type,
597 						       ring_num);
598 	if (msi_group_number < 0) {
599 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW,
600 			FL("ring not part of an ext_group; ring_type: %d, ring_num %d"),
601 			ring_type, ring_num);
602 		ring_params->msi_addr = 0;
603 		ring_params->msi_data = 0;
604 		return;
605 	}
606 
607 	if (msi_group_number > msi_data_count) {
608 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_WARN,
609 			FL("2 msi_groups will share an msi; msi_group_num %d"),
610 			msi_group_number);
611 
612 		QDF_ASSERT(0);
613 	}
614 
615 	pld_get_msi_address(soc->osdev->dev, &addr_low, &addr_high);
616 
617 	ring_params->msi_addr = addr_low;
618 	ring_params->msi_addr |= (qdf_dma_addr_t)(((uint64_t)addr_high) << 32);
619 	ring_params->msi_data = (msi_group_number % msi_data_count)
620 		+ msi_data_start;
621 	ring_params->flags |= HAL_SRNG_MSI_INTR;
622 }
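/*
 * Worked example (hypothetical MSI assignment): if pld_get_user_msi_assignment()
 * reported msi_data_count = 3 and msi_data_start = 8, a ring whose ext_group
 * number is 4 would be programmed with msi_data = (4 % 3) + 8 = 9; groups beyond
 * the available vector count wrap around and share vectors, which is what the
 * warning above flags.
 */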
623 
624 /**
625  * dp_print_ast_stats() - Dump AST table contents
626  * @soc: Datapath soc handle
627  *
628  * Return: void
629  */
630 #ifdef FEATURE_AST
631 static void dp_print_ast_stats(struct dp_soc *soc)
632 {
633 	uint8_t i;
634 	uint8_t num_entries = 0;
635 	struct dp_vdev *vdev;
636 	struct dp_pdev *pdev;
637 	struct dp_peer *peer;
638 	struct dp_ast_entry *ase, *tmp_ase;
639 	char type[5][10] = {"NONE", "STATIC", "WDS", "MEC", "HMWDS"};
640 
641 	DP_PRINT_STATS("AST Stats:");
642 	DP_PRINT_STATS("	Entries Added   = %d", soc->stats.ast.added);
643 	DP_PRINT_STATS("	Entries Deleted = %d", soc->stats.ast.deleted);
644 	DP_PRINT_STATS("	Entries Agedout = %d", soc->stats.ast.aged_out);
645 	DP_PRINT_STATS("AST Table:");
646 	for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) {
647 		pdev = soc->pdev_list[i];
648 		qdf_spin_lock_bh(&pdev->vdev_list_lock);
649 		DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
650 			DP_VDEV_ITERATE_PEER_LIST(vdev, peer) {
651 				DP_PEER_ITERATE_ASE_LIST(peer, ase, tmp_ase) {
652 					DP_PRINT_STATS("%6d mac_addr = %pM"
653 							" peer_mac_addr = %pM"
654 							" type = %s"
655 							" next_hop = %d"
656 							" is_active = %d"
657 							" is_bss = %d"
658 							" ast_idx = %d"
659 							" pdev_id = %d"
660 							" vdev_id = %d",
661 							++num_entries,
662 							ase->mac_addr.raw,
663 							ase->peer->mac_addr.raw,
664 							type[ase->type],
665 							ase->next_hop,
666 							ase->is_active,
667 							ase->is_bss,
668 							ase->ast_idx,
669 							ase->pdev_id,
670 							ase->vdev_id);
671 				}
672 			}
673 		}
674 		qdf_spin_unlock_bh(&pdev->vdev_list_lock);
675 	}
676 }
677 #else
678 static void dp_print_ast_stats(struct dp_soc *soc)
679 {
680 	DP_PRINT_STATS("AST Stats not available. Enable FEATURE_AST");
681 	return;
682 }
683 #endif
684 
685 static void dp_print_peer_table(struct dp_vdev *vdev)
686 {
687 	struct dp_peer *peer = NULL;
688 
689 	DP_PRINT_STATS("Dumping Peer Table  Stats:");
690 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
691 		if (!peer) {
692 			DP_PRINT_STATS("Invalid Peer");
693 			return;
694 		}
695 		DP_PRINT_STATS("    peer_mac_addr = %pM"
696 			" nawds_enabled = %d"
697 			" bss_peer = %d"
698 			" wapi = %d"
699 			" wds_enabled = %d"
700 			" delete in progress = %d",
701 			peer->mac_addr.raw,
702 			peer->nawds_enabled,
703 			peer->bss_peer,
704 			peer->wapi,
705 			peer->wds_enabled,
706 			peer->delete_in_progress);
707 	}
708 }
709 
710 /*
711  * dp_srng_setup - Internal function to setup SRNG rings used by data path
712  */
713 static int dp_srng_setup(struct dp_soc *soc, struct dp_srng *srng,
714 	int ring_type, int ring_num, int mac_id, uint32_t num_entries)
715 {
716 	void *hal_soc = soc->hal_soc;
717 	uint32_t entry_size = hal_srng_get_entrysize(hal_soc, ring_type);
718 	/* TODO: See if we should get align size from hal */
719 	uint32_t ring_base_align = 8;
720 	struct hal_srng_params ring_params;
721 	uint32_t max_entries = hal_srng_max_entries(hal_soc, ring_type);
722 
723 	/* TODO: Currently hal layer takes care of endianness related settings.
724 	 * See if these settings need to passed from DP layer
725 	 */
726 	ring_params.flags = 0;
727 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW,
728 		  FL("Ring type: %d, num:%d"), ring_type, ring_num);
729 
730 	num_entries = (num_entries > max_entries) ? max_entries : num_entries;
731 	srng->hal_srng = NULL;
732 	srng->alloc_size = (num_entries * entry_size) + ring_base_align - 1;
733 	srng->num_entries = num_entries;
734 	srng->base_vaddr_unaligned = qdf_mem_alloc_consistent(
735 		soc->osdev, soc->osdev->dev, srng->alloc_size,
736 		&(srng->base_paddr_unaligned));
737 
738 	if (!srng->base_vaddr_unaligned) {
739 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
740 			FL("alloc failed - ring_type: %d, ring_num %d"),
741 			ring_type, ring_num);
742 		return QDF_STATUS_E_NOMEM;
743 	}
744 
745 	ring_params.ring_base_vaddr = srng->base_vaddr_unaligned +
746 		((unsigned long)srng->base_vaddr_unaligned % ring_base_align);
747 	ring_params.ring_base_paddr = srng->base_paddr_unaligned +
748 		((unsigned long)(ring_params.ring_base_vaddr) -
749 		(unsigned long)srng->base_vaddr_unaligned);
750 	ring_params.num_entries = num_entries;
751 
752 	if (soc->intr_mode == DP_INTR_MSI) {
753 		dp_srng_msi_setup(soc, &ring_params, ring_type, ring_num);
754 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
755 			FL("Using MSI for ring_type: %d, ring_num %d"),
756 			ring_type, ring_num);
757 
758 	} else {
759 		ring_params.msi_data = 0;
760 		ring_params.msi_addr = 0;
761 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
762 			FL("Skipping MSI for ring_type: %d, ring_num %d"),
763 			ring_type, ring_num);
764 	}
765 
766 	/*
767 	 * Setup interrupt timer and batch counter thresholds for
768 	 * interrupt mitigation based on ring type
769 	 */
770 	if (ring_type == REO_DST) {
771 		ring_params.intr_timer_thres_us =
772 			wlan_cfg_get_int_timer_threshold_rx(soc->wlan_cfg_ctx);
773 		ring_params.intr_batch_cntr_thres_entries =
774 			wlan_cfg_get_int_batch_threshold_rx(soc->wlan_cfg_ctx);
775 	} else if (ring_type == WBM2SW_RELEASE && (ring_num < 3)) {
776 		ring_params.intr_timer_thres_us =
777 			wlan_cfg_get_int_timer_threshold_tx(soc->wlan_cfg_ctx);
778 		ring_params.intr_batch_cntr_thres_entries =
779 			wlan_cfg_get_int_batch_threshold_tx(soc->wlan_cfg_ctx);
780 	} else {
781 		ring_params.intr_timer_thres_us =
782 			wlan_cfg_get_int_timer_threshold_other(soc->wlan_cfg_ctx);
783 		ring_params.intr_batch_cntr_thres_entries =
784 			wlan_cfg_get_int_batch_threshold_other(soc->wlan_cfg_ctx);
785 	}
786 
787 	/* Enable low threshold interrupts for rx buffer rings (regular and
788 	 * monitor buffer rings).
789 	 * TODO: See if this is required for any other ring
790 	 */
791 	if ((ring_type == RXDMA_BUF) || (ring_type == RXDMA_MONITOR_BUF) ||
792 		(ring_type == RXDMA_MONITOR_STATUS)) {
793 		/* TODO: Setting low threshold to 1/8th of ring size
794 		 * see if this needs to be configurable
795 		 */
796 		ring_params.low_threshold = num_entries >> 3;
797 		ring_params.flags |= HAL_SRNG_LOW_THRES_INTR_ENABLE;
798 		ring_params.intr_timer_thres_us =
799 			wlan_cfg_get_int_timer_threshold_rx(soc->wlan_cfg_ctx);
800 		ring_params.intr_batch_cntr_thres_entries = 0;
801 	}
802 
803 	srng->hal_srng = hal_srng_setup(hal_soc, ring_type, ring_num,
804 		mac_id, &ring_params);
805 
806 	if (!srng->hal_srng) {
807 		qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
808 				srng->alloc_size,
809 				srng->base_vaddr_unaligned,
810 				srng->base_paddr_unaligned, 0);
811 		return QDF_STATUS_E_FAILURE;
812 	}
813 	return 0;
814 }
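/*
 * Usage sketch (illustrative only, modeled on how other setup paths in this
 * file call the helper): a caller passes the dp_srng to initialize, the HAL
 * ring type/number and the desired depth, and treats a non-zero return as
 * failure, e.g.:
 *
 *	if (dp_srng_setup(soc, &soc->reo_status_ring, REO_STATUS, 0, 0,
 *			  REO_STATUS_RING_SIZE))
 *		goto fail;
 */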
815 
816 /**
817  * dp_srng_cleanup - Internal function to cleanup SRNG rings used by data path
818  * Any buffers allocated and attached to ring entries are expected to be freed
819  * before calling this function.
820  */
821 static void dp_srng_cleanup(struct dp_soc *soc, struct dp_srng *srng,
822 	int ring_type, int ring_num)
823 {
824 	if (!srng->hal_srng) {
825 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
826 			FL("Ring type: %d, num:%d not setup"),
827 			ring_type, ring_num);
828 		return;
829 	}
830 
831 	hal_srng_cleanup(soc->hal_soc, srng->hal_srng);
832 
833 	qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
834 				srng->alloc_size,
835 				srng->base_vaddr_unaligned,
836 				srng->base_paddr_unaligned, 0);
837 	srng->hal_srng = NULL;
838 }
839 
840 /* TODO: Need this interface from HIF */
841 void *hif_get_hal_handle(void *hif_handle);
842 
843 /*
844  * dp_service_srngs() - Top level interrupt handler for DP Ring interrupts
845  * @dp_ctx: DP interrupt context handle
846  * @budget: Number of frames/descriptors that can be processed in one shot
847  *
848  * Return: remaining budget/quota for the soc device
849  */
850 static uint32_t dp_service_srngs(void *dp_ctx, uint32_t dp_budget)
851 {
852 	struct dp_intr *int_ctx = (struct dp_intr *)dp_ctx;
853 	struct dp_soc *soc = int_ctx->soc;
854 	int ring = 0;
855 	uint32_t work_done  = 0;
856 	int budget = dp_budget;
857 	uint8_t tx_mask = int_ctx->tx_ring_mask;
858 	uint8_t rx_mask = int_ctx->rx_ring_mask;
859 	uint8_t rx_err_mask = int_ctx->rx_err_ring_mask;
860 	uint8_t rx_wbm_rel_mask = int_ctx->rx_wbm_rel_ring_mask;
861 	uint8_t reo_status_mask = int_ctx->reo_status_ring_mask;
862 	uint32_t remaining_quota = dp_budget;
863 	struct dp_pdev *pdev = NULL;
864 	int mac_id;
865 
866 	/* Process Tx completion interrupts first to return back buffers */
867 	while (tx_mask) {
868 		if (tx_mask & 0x1) {
869 			work_done = dp_tx_comp_handler(soc,
870 					soc->tx_comp_ring[ring].hal_srng,
871 					remaining_quota);
872 
873 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
874 				"tx mask 0x%x ring %d, budget %d, work_done %d",
875 				tx_mask, ring, budget, work_done);
876 
877 			budget -= work_done;
878 			if (budget <= 0)
879 				goto budget_done;
880 
881 			remaining_quota = budget;
882 		}
883 		tx_mask = tx_mask >> 1;
884 		ring++;
885 	}
886 
887 
888 	/* Process REO Exception ring interrupt */
889 	if (rx_err_mask) {
890 		work_done = dp_rx_err_process(soc,
891 				soc->reo_exception_ring.hal_srng,
892 				remaining_quota);
893 
894 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
895 			"REO Exception Ring: work_done %d budget %d",
896 			work_done, budget);
897 
898 		budget -=  work_done;
899 		if (budget <= 0) {
900 			goto budget_done;
901 		}
902 		remaining_quota = budget;
903 	}
904 
905 	/* Process Rx WBM release ring interrupt */
906 	if (rx_wbm_rel_mask) {
907 		work_done = dp_rx_wbm_err_process(soc,
908 				soc->rx_rel_ring.hal_srng, remaining_quota);
909 
910 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
911 			"WBM Release Ring: work_done %d budget %d",
912 			work_done, budget);
913 
914 		budget -=  work_done;
915 		if (budget <= 0) {
916 			goto budget_done;
917 		}
918 		remaining_quota = budget;
919 	}
920 
921 	/* Process Rx interrupts */
922 	if (rx_mask) {
923 		for (ring = 0; ring < soc->num_reo_dest_rings; ring++) {
924 			if (rx_mask & (1 << ring)) {
925 				work_done = dp_rx_process(int_ctx,
926 					    soc->reo_dest_ring[ring].hal_srng,
927 					    remaining_quota);
928 
929 				QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
930 					"rx mask 0x%x ring %d, work_done %d budget %d",
931 					rx_mask, ring, work_done, budget);
932 
933 				budget -=  work_done;
934 				if (budget <= 0)
935 					goto budget_done;
936 				remaining_quota = budget;
937 			}
938 		}
939 		for (ring = 0; ring < MAX_RX_MAC_RINGS; ring++) {
940 			work_done = dp_rxdma_err_process(soc, ring,
941 						remaining_quota);
942 			budget -= work_done;
943 		}
944 	}
945 
946 	if (reo_status_mask)
947 		dp_reo_status_ring_handler(soc);
948 
949 	/* Process LMAC interrupts */
950 	for  (ring = 0 ; ring < MAX_PDEV_CNT; ring++) {
951 		pdev = soc->pdev_list[ring];
952 		if (pdev == NULL)
953 			continue;
954 		for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
955 			int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id,
956 								pdev->pdev_id);
957 
958 			if (int_ctx->rx_mon_ring_mask & (1 << mac_for_pdev)) {
959 				work_done = dp_mon_process(soc, mac_for_pdev,
960 						remaining_quota);
961 				budget -= work_done;
962 				if (budget <= 0)
963 					goto budget_done;
964 				remaining_quota = budget;
965 			}
966 
967 			if (int_ctx->rxdma2host_ring_mask &
968 					(1 << mac_for_pdev)) {
969 				work_done = dp_rxdma_err_process(soc,
970 							mac_for_pdev,
971 							remaining_quota);
972 				budget -=  work_done;
973 				if (budget <= 0)
974 					goto budget_done;
975 				remaining_quota = budget;
976 			}
977 
978 			if (int_ctx->host2rxdma_ring_mask &
979 						(1 << mac_for_pdev)) {
980 				union dp_rx_desc_list_elem_t *desc_list = NULL;
981 				union dp_rx_desc_list_elem_t *tail = NULL;
982 				struct dp_srng *rx_refill_buf_ring =
983 					&pdev->rx_refill_buf_ring;
984 
985 				DP_STATS_INC(pdev, replenish.low_thresh_intrs,
986 						1);
987 				dp_rx_buffers_replenish(soc, mac_for_pdev,
988 					rx_refill_buf_ring,
989 					&soc->rx_desc_buf[mac_for_pdev], 0,
990 					&desc_list, &tail);
991 			}
992 		}
993 	}
994 
995 	qdf_lro_flush(int_ctx->lro_ctx);
996 
997 budget_done:
998 	return dp_budget - budget;
999 }
1000 
1001 #ifdef DP_INTR_POLL_BASED
1002 /* dp_interrupt_timer() - timer poll for interrupts
1003  *
1004  * @arg: SoC Handle
1005  *
1006  * Return: none
1007  *
1008  */
1009 static void dp_interrupt_timer(void *arg)
1010 {
1011 	struct dp_soc *soc = (struct dp_soc *) arg;
1012 	int i;
1013 
1014 	if (qdf_atomic_read(&soc->cmn_init_done)) {
1015 		for (i = 0;
1016 			i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++)
1017 			dp_service_srngs(&soc->intr_ctx[i], 0xffff);
1018 
1019 		qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
1020 	}
1021 }
1022 
1023 /*
1024  * dp_soc_interrupt_attach_poll() - Register handlers for DP interrupts
1025  * @txrx_soc: DP SOC handle
1026  *
1027  * Host driver will register for "DP_NUM_INTERRUPT_CONTEXTS" number of NAPI
1028  * contexts. Each NAPI context will have a tx_ring_mask, rx_ring_mask, and
1029  * rx_monitor_ring mask to indicate the rings that are processed by the handler.
1030  *
1031  * Return: 0 for success. nonzero for failure.
1032  */
1033 static QDF_STATUS dp_soc_interrupt_attach_poll(void *txrx_soc)
1034 {
1035 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
1036 	int i;
1037 
1038 	soc->intr_mode = DP_INTR_POLL;
1039 
1040 	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
1041 		soc->intr_ctx[i].dp_intr_id = i;
1042 		soc->intr_ctx[i].tx_ring_mask =
1043 			wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, i);
1044 		soc->intr_ctx[i].rx_ring_mask =
1045 			wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, i);
1046 		soc->intr_ctx[i].rx_mon_ring_mask =
1047 			wlan_cfg_get_rx_mon_ring_mask(soc->wlan_cfg_ctx, i);
1048 		soc->intr_ctx[i].rx_err_ring_mask =
1049 			wlan_cfg_get_rx_err_ring_mask(soc->wlan_cfg_ctx, i);
1050 		soc->intr_ctx[i].rx_wbm_rel_ring_mask =
1051 			wlan_cfg_get_rx_wbm_rel_ring_mask(soc->wlan_cfg_ctx, i);
1052 		soc->intr_ctx[i].reo_status_ring_mask =
1053 			wlan_cfg_get_reo_status_ring_mask(soc->wlan_cfg_ctx, i);
1054 		soc->intr_ctx[i].rxdma2host_ring_mask =
1055 			wlan_cfg_get_rxdma2host_ring_mask(soc->wlan_cfg_ctx, i);
1056 		soc->intr_ctx[i].soc = soc;
1057 		soc->intr_ctx[i].lro_ctx = qdf_lro_init();
1058 	}
1059 
1060 	qdf_timer_init(soc->osdev, &soc->int_timer,
1061 			dp_interrupt_timer, (void *)soc,
1062 			QDF_TIMER_TYPE_WAKE_APPS);
1063 
1064 	return QDF_STATUS_SUCCESS;
1065 }
1066 
1067 #if defined(CONFIG_MCL)
1068 extern int con_mode_monitor;
1069 static QDF_STATUS dp_soc_interrupt_attach(void *txrx_soc);
1070 /*
1071  * dp_soc_interrupt_attach_wrapper() - Register handlers for DP interrupts
1072  * @txrx_soc: DP SOC handle
1073  *
1074  * Call the appropriate attach function based on the mode of operation.
1075  * This is a WAR for enabling monitor mode.
1076  *
1077  * Return: 0 for success. nonzero for failure.
1078  */
1079 static QDF_STATUS dp_soc_interrupt_attach_wrapper(void *txrx_soc)
1080 {
1081 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
1082 
1083 	if (!(soc->wlan_cfg_ctx->napi_enabled) ||
1084 	     con_mode_monitor == QDF_GLOBAL_MONITOR_MODE) {
1085 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
1086 				  "%s: Poll mode", __func__);
1087 		return dp_soc_interrupt_attach_poll(txrx_soc);
1088 	} else {
1089 
1090 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
1091 				  "%s: Interrupt  mode", __func__);
1092 		return dp_soc_interrupt_attach(txrx_soc);
1093 	}
1094 }
1095 #else
1096 static QDF_STATUS dp_soc_interrupt_attach_wrapper(void *txrx_soc)
1097 {
1098 	return dp_soc_interrupt_attach_poll(txrx_soc);
1099 }
1100 #endif
1101 #endif
1102 
1103 static void dp_soc_interrupt_map_calculate_integrated(struct dp_soc *soc,
1104 		int intr_ctx_num, int *irq_id_map, int *num_irq_r)
1105 {
1106 	int j;
1107 	int num_irq = 0;
1108 
1109 	int tx_mask =
1110 		wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
1111 	int rx_mask =
1112 		wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
1113 	int rx_mon_mask =
1114 		wlan_cfg_get_rx_mon_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
1115 	int rx_err_ring_mask = wlan_cfg_get_rx_err_ring_mask(
1116 					soc->wlan_cfg_ctx, intr_ctx_num);
1117 	int rx_wbm_rel_ring_mask = wlan_cfg_get_rx_wbm_rel_ring_mask(
1118 					soc->wlan_cfg_ctx, intr_ctx_num);
1119 	int reo_status_ring_mask = wlan_cfg_get_reo_status_ring_mask(
1120 					soc->wlan_cfg_ctx, intr_ctx_num);
1121 	int rxdma2host_ring_mask = wlan_cfg_get_rxdma2host_ring_mask(
1122 					soc->wlan_cfg_ctx, intr_ctx_num);
1123 	int host2rxdma_ring_mask = wlan_cfg_get_host2rxdma_ring_mask(
1124 					soc->wlan_cfg_ctx, intr_ctx_num);
1125 
1126 	for (j = 0; j < HIF_MAX_GRP_IRQ; j++) {
1127 
1128 		if (tx_mask & (1 << j)) {
1129 			irq_id_map[num_irq++] =
1130 				(wbm2host_tx_completions_ring1 - j);
1131 		}
1132 
1133 		if (rx_mask & (1 << j)) {
1134 			irq_id_map[num_irq++] =
1135 				(reo2host_destination_ring1 - j);
1136 		}
1137 
1138 		if (rxdma2host_ring_mask & (1 << j)) {
1139 			irq_id_map[num_irq++] =
1140 				rxdma2host_destination_ring_mac1 -
1141 				wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, j);
1142 		}
1143 
1144 		if (host2rxdma_ring_mask & (1 << j)) {
1145 			irq_id_map[num_irq++] =
1146 				host2rxdma_host_buf_ring_mac1 -
1147 				wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, j);
1148 		}
1149 
1150 		if (rx_mon_mask & (1 << j)) {
1151 			irq_id_map[num_irq++] =
1152 				ppdu_end_interrupts_mac1 -
1153 				wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, j);
1154 			irq_id_map[num_irq++] =
1155 				rxdma2host_monitor_status_ring_mac1 -
1156 				wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, j);
1157 		}
1158 
1159 		if (rx_wbm_rel_ring_mask & (1 << j))
1160 			irq_id_map[num_irq++] = wbm2host_rx_release;
1161 
1162 		if (rx_err_ring_mask & (1 << j))
1163 			irq_id_map[num_irq++] = reo2host_exception;
1164 
1165 		if (reo_status_ring_mask & (1 << j))
1166 			irq_id_map[num_irq++] = reo2host_status;
1167 
1168 	}
1169 	*num_irq_r = num_irq;
1170 }
1171 
1172 static void dp_soc_interrupt_map_calculate_msi(struct dp_soc *soc,
1173 		int intr_ctx_num, int *irq_id_map, int *num_irq_r,
1174 		int msi_vector_count, int msi_vector_start)
1175 {
1176 	int tx_mask = wlan_cfg_get_tx_ring_mask(
1177 					soc->wlan_cfg_ctx, intr_ctx_num);
1178 	int rx_mask = wlan_cfg_get_rx_ring_mask(
1179 					soc->wlan_cfg_ctx, intr_ctx_num);
1180 	int rx_mon_mask = wlan_cfg_get_rx_mon_ring_mask(
1181 					soc->wlan_cfg_ctx, intr_ctx_num);
1182 	int rx_err_ring_mask = wlan_cfg_get_rx_err_ring_mask(
1183 					soc->wlan_cfg_ctx, intr_ctx_num);
1184 	int rx_wbm_rel_ring_mask = wlan_cfg_get_rx_wbm_rel_ring_mask(
1185 					soc->wlan_cfg_ctx, intr_ctx_num);
1186 	int reo_status_ring_mask = wlan_cfg_get_reo_status_ring_mask(
1187 					soc->wlan_cfg_ctx, intr_ctx_num);
1188 	int rxdma2host_ring_mask = wlan_cfg_get_rxdma2host_ring_mask(
1189 					soc->wlan_cfg_ctx, intr_ctx_num);
1190 
1191 	unsigned int vector =
1192 		(intr_ctx_num % msi_vector_count) + msi_vector_start;
1193 	int num_irq = 0;
1194 
1195 	soc->intr_mode = DP_INTR_MSI;
1196 
1197 	if (tx_mask | rx_mask | rx_mon_mask | rx_err_ring_mask |
1198 	    rx_wbm_rel_ring_mask | reo_status_ring_mask | rxdma2host_ring_mask)
1199 		irq_id_map[num_irq++] =
1200 			pld_get_msi_irq(soc->osdev->dev, vector);
1201 
1202 	*num_irq_r = num_irq;
1203 }
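/*
 * Example (hypothetical values): with msi_vector_count = 3 and
 * msi_vector_start = 1, interrupt contexts 0..6 map to MSI vectors
 * 1, 2, 3, 1, 2, 3, 1, so multiple ext_groups may share a single IRQ line.
 */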
1204 
1205 static void dp_soc_interrupt_map_calculate(struct dp_soc *soc, int intr_ctx_num,
1206 				    int *irq_id_map, int *num_irq)
1207 {
1208 	int msi_vector_count, ret;
1209 	uint32_t msi_base_data, msi_vector_start;
1210 
1211 	ret = pld_get_user_msi_assignment(soc->osdev->dev, "DP",
1212 					    &msi_vector_count,
1213 					    &msi_base_data,
1214 					    &msi_vector_start);
1215 	if (ret)
1216 		return dp_soc_interrupt_map_calculate_integrated(soc,
1217 				intr_ctx_num, irq_id_map, num_irq);
1218 
1219 	else
1220 		dp_soc_interrupt_map_calculate_msi(soc,
1221 				intr_ctx_num, irq_id_map, num_irq,
1222 				msi_vector_count, msi_vector_start);
1223 }
1224 
1225 /*
1226  * dp_soc_interrupt_attach() - Register handlers for DP interrupts
1227  * @txrx_soc: DP SOC handle
1228  *
1229  * Host driver will register for "DP_NUM_INTERRUPT_CONTEXTS" number of NAPI
1230  * contexts. Each NAPI context will have a tx_ring_mask, rx_ring_mask, and
1231  * rx_monitor_ring mask to indicate the rings that are processed by the handler.
1232  *
1233  * Return: 0 for success. nonzero for failure.
1234  */
1235 static QDF_STATUS dp_soc_interrupt_attach(void *txrx_soc)
1236 {
1237 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
1238 
1239 	int i = 0;
1240 	int num_irq = 0;
1241 
1242 	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
1243 		int ret = 0;
1244 
1245 		/* Map of IRQ ids registered with one interrupt context */
1246 		int irq_id_map[HIF_MAX_GRP_IRQ];
1247 
1248 		int tx_mask =
1249 			wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, i);
1250 		int rx_mask =
1251 			wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, i);
1252 		int rx_mon_mask =
1253 			wlan_cfg_get_rx_mon_ring_mask(soc->wlan_cfg_ctx, i);
1254 		int rx_err_ring_mask =
1255 			wlan_cfg_get_rx_err_ring_mask(soc->wlan_cfg_ctx, i);
1256 		int rx_wbm_rel_ring_mask =
1257 			wlan_cfg_get_rx_wbm_rel_ring_mask(soc->wlan_cfg_ctx, i);
1258 		int reo_status_ring_mask =
1259 			wlan_cfg_get_reo_status_ring_mask(soc->wlan_cfg_ctx, i);
1260 		int rxdma2host_ring_mask =
1261 			wlan_cfg_get_rxdma2host_ring_mask(soc->wlan_cfg_ctx, i);
1262 		int host2rxdma_ring_mask =
1263 			wlan_cfg_get_host2rxdma_ring_mask(soc->wlan_cfg_ctx, i);
1264 
1265 
1266 		soc->intr_ctx[i].dp_intr_id = i;
1267 		soc->intr_ctx[i].tx_ring_mask = tx_mask;
1268 		soc->intr_ctx[i].rx_ring_mask = rx_mask;
1269 		soc->intr_ctx[i].rx_mon_ring_mask = rx_mon_mask;
1270 		soc->intr_ctx[i].rx_err_ring_mask = rx_err_ring_mask;
1271 		soc->intr_ctx[i].rxdma2host_ring_mask = rxdma2host_ring_mask;
1272 		soc->intr_ctx[i].host2rxdma_ring_mask = host2rxdma_ring_mask;
1273 		soc->intr_ctx[i].rx_wbm_rel_ring_mask = rx_wbm_rel_ring_mask;
1274 		soc->intr_ctx[i].reo_status_ring_mask = reo_status_ring_mask;
1275 
1276 		soc->intr_ctx[i].soc = soc;
1277 
1278 		num_irq = 0;
1279 
1280 		dp_soc_interrupt_map_calculate(soc, i, &irq_id_map[0],
1281 					       &num_irq);
1282 
1283 		ret = hif_register_ext_group(soc->hif_handle,
1284 				num_irq, irq_id_map, dp_service_srngs,
1285 				&soc->intr_ctx[i], "dp_intr",
1286 				HIF_EXEC_NAPI_TYPE, QCA_NAPI_DEF_SCALE_BIN_SHIFT);
1287 
1288 		if (ret) {
1289 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1290 			FL("failed, ret = %d"), ret);
1291 
1292 			return QDF_STATUS_E_FAILURE;
1293 		}
1294 		soc->intr_ctx[i].lro_ctx = qdf_lro_init();
1295 	}
1296 
1297 	hif_configure_ext_group_interrupts(soc->hif_handle);
1298 
1299 	return QDF_STATUS_SUCCESS;
1300 }
1301 
1302 /*
1303  * dp_soc_interrupt_detach() - Deregister any allocations done for interrupts
1304  * @txrx_soc: DP SOC handle
1305  *
1306  * Return: void
1307  */
1308 static void dp_soc_interrupt_detach(void *txrx_soc)
1309 {
1310 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
1311 	int i;
1312 
1313 	if (soc->intr_mode == DP_INTR_POLL) {
1314 		qdf_timer_stop(&soc->int_timer);
1315 		qdf_timer_free(&soc->int_timer);
1316 	} else {
1317 		hif_deregister_exec_group(soc->hif_handle, "dp_intr");
1318 	}
1319 
1320 	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
1321 		soc->intr_ctx[i].tx_ring_mask = 0;
1322 		soc->intr_ctx[i].rx_ring_mask = 0;
1323 		soc->intr_ctx[i].rx_mon_ring_mask = 0;
1324 		soc->intr_ctx[i].rx_err_ring_mask = 0;
1325 		soc->intr_ctx[i].rx_wbm_rel_ring_mask = 0;
1326 		soc->intr_ctx[i].reo_status_ring_mask = 0;
1327 		soc->intr_ctx[i].rxdma2host_ring_mask = 0;
1328 		soc->intr_ctx[i].host2rxdma_ring_mask = 0;
1329 
1330 		qdf_lro_deinit(soc->intr_ctx[i].lro_ctx);
1331 	}
1332 }
1333 
1334 #define AVG_MAX_MPDUS_PER_TID 128
1335 #define AVG_TIDS_PER_CLIENT 2
1336 #define AVG_FLOWS_PER_TID 2
1337 #define AVG_MSDUS_PER_FLOW 128
1338 #define AVG_MSDUS_PER_MPDU 4
1339 
1340 /*
1341  * Allocate and setup link descriptor pool that will be used by HW for
1342  * various link and queue descriptors and managed by WBM
1343  */
1344 static int dp_hw_link_desc_pool_setup(struct dp_soc *soc)
1345 {
1346 	int link_desc_size = hal_get_link_desc_size(soc->hal_soc);
1347 	int link_desc_align = hal_get_link_desc_align(soc->hal_soc);
1348 	uint32_t max_clients = wlan_cfg_get_max_clients(soc->wlan_cfg_ctx);
1349 	uint32_t num_mpdus_per_link_desc =
1350 		hal_num_mpdus_per_link_desc(soc->hal_soc);
1351 	uint32_t num_msdus_per_link_desc =
1352 		hal_num_msdus_per_link_desc(soc->hal_soc);
1353 	uint32_t num_mpdu_links_per_queue_desc =
1354 		hal_num_mpdu_links_per_queue_desc(soc->hal_soc);
1355 	uint32_t max_alloc_size = wlan_cfg_max_alloc_size(soc->wlan_cfg_ctx);
1356 	uint32_t total_link_descs, total_mem_size;
1357 	uint32_t num_mpdu_link_descs, num_mpdu_queue_descs;
1358 	uint32_t num_tx_msdu_link_descs, num_rx_msdu_link_descs;
1359 	uint32_t num_link_desc_banks;
1360 	uint32_t last_bank_size = 0;
1361 	uint32_t entry_size, num_entries;
1362 	int i;
1363 	uint32_t desc_id = 0;
1364 
1365 	/* Only Tx queue descriptors are allocated from the common link descriptor
1366 	 * pool. Rx queue descriptors (REO queue extension descriptors) are not
1367 	 * included here because they are expected to be allocated contiguously
1368 	 * with the REO queue descriptors.
1369 	 */
1370 	num_mpdu_link_descs = (max_clients * AVG_TIDS_PER_CLIENT *
1371 		AVG_MAX_MPDUS_PER_TID) / num_mpdus_per_link_desc;
1372 
1373 	num_mpdu_queue_descs = num_mpdu_link_descs /
1374 		num_mpdu_links_per_queue_desc;
1375 
1376 	num_tx_msdu_link_descs = (max_clients * AVG_TIDS_PER_CLIENT *
1377 		AVG_FLOWS_PER_TID * AVG_MSDUS_PER_FLOW) /
1378 		num_msdus_per_link_desc;
1379 
1380 	num_rx_msdu_link_descs = (max_clients * AVG_TIDS_PER_CLIENT *
1381 		AVG_MAX_MPDUS_PER_TID * AVG_MSDUS_PER_MPDU) / 6;
1382 
1383 	num_entries = num_mpdu_link_descs + num_mpdu_queue_descs +
1384 		num_tx_msdu_link_descs + num_rx_msdu_link_descs;
1385 
1386 	/* Round up to power of 2 */
1387 	total_link_descs = 1;
1388 	while (total_link_descs < num_entries)
1389 		total_link_descs <<= 1;
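	/*
	 * Sizing illustration (hypothetical cfg values, not taken from wlan_cfg):
	 * with max_clients = 64 and num_mpdus_per_link_desc = 6,
	 * num_mpdu_link_descs works out to (64 * 2 * 128) / 6 = 2730. After the
	 * queue and MSDU link descriptor estimates are added, num_entries is
	 * rounded up here to the next power of two before the bank and idle-list
	 * sizing below.
	 */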
1390 
1391 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
1392 		FL("total_link_descs: %u, link_desc_size: %d"),
1393 		total_link_descs, link_desc_size);
1394 	total_mem_size =  total_link_descs * link_desc_size;
1395 
1396 	total_mem_size += link_desc_align;
1397 
1398 	if (total_mem_size <= max_alloc_size) {
1399 		num_link_desc_banks = 0;
1400 		last_bank_size = total_mem_size;
1401 	} else {
1402 		num_link_desc_banks = (total_mem_size) /
1403 			(max_alloc_size - link_desc_align);
1404 		last_bank_size = total_mem_size %
1405 			(max_alloc_size - link_desc_align);
1406 	}
1407 
1408 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
1409 		FL("total_mem_size: %d, num_link_desc_banks: %u"),
1410 		total_mem_size, num_link_desc_banks);
1411 
1412 	for (i = 0; i < num_link_desc_banks; i++) {
1413 		soc->link_desc_banks[i].base_vaddr_unaligned =
1414 			qdf_mem_alloc_consistent(soc->osdev, soc->osdev->dev,
1415 			max_alloc_size,
1416 			&(soc->link_desc_banks[i].base_paddr_unaligned));
1417 		soc->link_desc_banks[i].size = max_alloc_size;
1418 
1419 		soc->link_desc_banks[i].base_vaddr = (void *)((unsigned long)(
1420 			soc->link_desc_banks[i].base_vaddr_unaligned) +
1421 			((unsigned long)(
1422 			soc->link_desc_banks[i].base_vaddr_unaligned) %
1423 			link_desc_align));
1424 
1425 		soc->link_desc_banks[i].base_paddr = (unsigned long)(
1426 			soc->link_desc_banks[i].base_paddr_unaligned) +
1427 			((unsigned long)(soc->link_desc_banks[i].base_vaddr) -
1428 			(unsigned long)(
1429 			soc->link_desc_banks[i].base_vaddr_unaligned));
1430 
1431 		if (!soc->link_desc_banks[i].base_vaddr_unaligned) {
1432 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1433 				FL("Link descriptor memory alloc failed"));
1434 			goto fail;
1435 		}
1436 	}
1437 
1438 	if (last_bank_size) {
1439 		/* Allocate the last bank in case the total memory required is not an
1440 		 * exact multiple of max_alloc_size
1441 		 */
1442 		soc->link_desc_banks[i].base_vaddr_unaligned =
1443 			qdf_mem_alloc_consistent(soc->osdev, soc->osdev->dev,
1444 			last_bank_size,
1445 			&(soc->link_desc_banks[i].base_paddr_unaligned));
1446 		soc->link_desc_banks[i].size = last_bank_size;
1447 
1448 		soc->link_desc_banks[i].base_vaddr = (void *)((unsigned long)
1449 			(soc->link_desc_banks[i].base_vaddr_unaligned) +
1450 			((unsigned long)(
1451 			soc->link_desc_banks[i].base_vaddr_unaligned) %
1452 			link_desc_align));
1453 
1454 		soc->link_desc_banks[i].base_paddr =
1455 			(unsigned long)(
1456 			soc->link_desc_banks[i].base_paddr_unaligned) +
1457 			((unsigned long)(soc->link_desc_banks[i].base_vaddr) -
1458 			(unsigned long)(
1459 			soc->link_desc_banks[i].base_vaddr_unaligned));
1460 	}
1461 
1462 
1463 	/* Allocate and setup link descriptor idle list for HW internal use */
1464 	entry_size = hal_srng_get_entrysize(soc->hal_soc, WBM_IDLE_LINK);
1465 	total_mem_size = entry_size * total_link_descs;
1466 
1467 	if (total_mem_size <= max_alloc_size) {
1468 		void *desc;
1469 
1470 		if (dp_srng_setup(soc, &soc->wbm_idle_link_ring,
1471 			WBM_IDLE_LINK, 0, 0, total_link_descs)) {
1472 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1473 				FL("Link desc idle ring setup failed"));
1474 			goto fail;
1475 		}
1476 
1477 		hal_srng_access_start_unlocked(soc->hal_soc,
1478 			soc->wbm_idle_link_ring.hal_srng);
1479 
1480 		for (i = 0; i < MAX_LINK_DESC_BANKS &&
1481 			soc->link_desc_banks[i].base_paddr; i++) {
1482 			uint32_t num_entries = (soc->link_desc_banks[i].size -
1483 				((unsigned long)(
1484 				soc->link_desc_banks[i].base_vaddr) -
1485 				(unsigned long)(
1486 				soc->link_desc_banks[i].base_vaddr_unaligned)))
1487 				/ link_desc_size;
1488 			unsigned long paddr = (unsigned long)(
1489 				soc->link_desc_banks[i].base_paddr);
1490 
1491 			while (num_entries && (desc = hal_srng_src_get_next(
1492 				soc->hal_soc,
1493 				soc->wbm_idle_link_ring.hal_srng))) {
1494 				hal_set_link_desc_addr(desc,
1495 					LINK_DESC_COOKIE(desc_id, i), paddr);
1496 				num_entries--;
1497 				desc_id++;
1498 				paddr += link_desc_size;
1499 			}
1500 		}
1501 		hal_srng_access_end_unlocked(soc->hal_soc,
1502 			soc->wbm_idle_link_ring.hal_srng);
1503 	} else {
1504 		uint32_t num_scatter_bufs;
1505 		uint32_t num_entries_per_buf;
1506 		uint32_t rem_entries;
1507 		uint8_t *scatter_buf_ptr;
1508 		uint16_t scatter_buf_num;
1509 
1510 		soc->wbm_idle_scatter_buf_size =
1511 			hal_idle_list_scatter_buf_size(soc->hal_soc);
1512 		num_entries_per_buf = hal_idle_scatter_buf_num_entries(
1513 			soc->hal_soc, soc->wbm_idle_scatter_buf_size);
1514 		num_scatter_bufs = hal_idle_list_num_scatter_bufs(
1515 					soc->hal_soc, total_mem_size,
1516 					soc->wbm_idle_scatter_buf_size);
1517 
1518 		if (num_scatter_bufs > MAX_IDLE_SCATTER_BUFS) {
1519 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1520 					FL("scatter bufs size out of bounds"));
1521 			goto fail;
1522 		}
1523 
1524 		for (i = 0; i < num_scatter_bufs; i++) {
1525 			soc->wbm_idle_scatter_buf_base_vaddr[i] =
1526 				qdf_mem_alloc_consistent(soc->osdev,
1527 							soc->osdev->dev,
1528 				soc->wbm_idle_scatter_buf_size,
1529 				&(soc->wbm_idle_scatter_buf_base_paddr[i]));
1530 			if (soc->wbm_idle_scatter_buf_base_vaddr[i] == NULL) {
1531 				QDF_TRACE(QDF_MODULE_ID_DP,
1532 						QDF_TRACE_LEVEL_ERROR,
1533 					FL("Scatter list memory alloc failed"));
1534 				goto fail;
1535 			}
1536 		}
1537 
1538 		/* Populate idle list scatter buffers with link descriptor
1539 		 * pointers
1540 		 */
1541 		scatter_buf_num = 0;
1542 		scatter_buf_ptr = (uint8_t *)(
1543 			soc->wbm_idle_scatter_buf_base_vaddr[scatter_buf_num]);
1544 		rem_entries = num_entries_per_buf;
1545 
1546 		for (i = 0; i < MAX_LINK_DESC_BANKS &&
1547 			soc->link_desc_banks[i].base_paddr; i++) {
1548 			uint32_t num_link_descs =
1549 				(soc->link_desc_banks[i].size -
1550 				((unsigned long)(
1551 				soc->link_desc_banks[i].base_vaddr) -
1552 				(unsigned long)(
1553 				soc->link_desc_banks[i].base_vaddr_unaligned)))
1554 				/ link_desc_size;
1555 			unsigned long paddr = (unsigned long)(
1556 				soc->link_desc_banks[i].base_paddr);
1557 
1558 			while (num_link_descs) {
1559 				hal_set_link_desc_addr((void *)scatter_buf_ptr,
1560 					LINK_DESC_COOKIE(desc_id, i), paddr);
1561 				num_link_descs--;
1562 				desc_id++;
1563 				paddr += link_desc_size;
1564 				rem_entries--;
1565 				if (rem_entries) {
1566 					scatter_buf_ptr += entry_size;
1567 				} else {
1568 					rem_entries = num_entries_per_buf;
1569 					scatter_buf_num++;
1570 
1571 					if (scatter_buf_num >= num_scatter_bufs)
1572 						break;
1573 
1574 					scatter_buf_ptr = (uint8_t *)(
1575 						soc->wbm_idle_scatter_buf_base_vaddr[
1576 						scatter_buf_num]);
1577 				}
1578 			}
1579 		}
1580 		/* Setup link descriptor idle list in HW */
1581 		hal_setup_link_idle_list(soc->hal_soc,
1582 			soc->wbm_idle_scatter_buf_base_paddr,
1583 			soc->wbm_idle_scatter_buf_base_vaddr,
1584 			num_scatter_bufs, soc->wbm_idle_scatter_buf_size,
1585 			(uint32_t)(scatter_buf_ptr -
1586 			(uint8_t *)(soc->wbm_idle_scatter_buf_base_vaddr[
1587 			scatter_buf_num-1])), total_link_descs);
1588 	}
1589 	return 0;
1590 
1591 fail:
1592 	if (soc->wbm_idle_link_ring.hal_srng) {
1593 		dp_srng_cleanup(soc, &soc->wbm_idle_link_ring,
1594 			WBM_IDLE_LINK, 0);
1595 	}
1596 
1597 	for (i = 0; i < MAX_IDLE_SCATTER_BUFS; i++) {
1598 		if (soc->wbm_idle_scatter_buf_base_vaddr[i]) {
1599 			qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
1600 				soc->wbm_idle_scatter_buf_size,
1601 				soc->wbm_idle_scatter_buf_base_vaddr[i],
1602 				soc->wbm_idle_scatter_buf_base_paddr[i], 0);
1603 			soc->wbm_idle_scatter_buf_base_vaddr[i] = NULL;
1604 		}
1605 	}
1606 
1607 	for (i = 0; i < MAX_LINK_DESC_BANKS; i++) {
1608 		if (soc->link_desc_banks[i].base_vaddr_unaligned) {
1609 			qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
1610 				soc->link_desc_banks[i].size,
1611 				soc->link_desc_banks[i].base_vaddr_unaligned,
1612 				soc->link_desc_banks[i].base_paddr_unaligned,
1613 				0);
1614 			soc->link_desc_banks[i].base_vaddr_unaligned = NULL;
1615 		}
1616 	}
1617 	return QDF_STATUS_E_FAILURE;
1618 }
1619 
1620 /*
1621  * Free the link descriptor pool that was set up for HW use
1622  */
1623 static void dp_hw_link_desc_pool_cleanup(struct dp_soc *soc)
1624 {
1625 	int i;
1626 
1627 	if (soc->wbm_idle_link_ring.hal_srng) {
1628 		dp_srng_cleanup(soc, &soc->wbm_idle_link_ring,
1629 			WBM_IDLE_LINK, 0);
1630 	}
1631 
1632 	for (i = 0; i < MAX_IDLE_SCATTER_BUFS; i++) {
1633 		if (soc->wbm_idle_scatter_buf_base_vaddr[i]) {
1634 			qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
1635 				soc->wbm_idle_scatter_buf_size,
1636 				soc->wbm_idle_scatter_buf_base_vaddr[i],
1637 				soc->wbm_idle_scatter_buf_base_paddr[i], 0);
1638 			soc->wbm_idle_scatter_buf_base_vaddr[i] = NULL;
1639 		}
1640 	}
1641 
1642 	for (i = 0; i < MAX_LINK_DESC_BANKS; i++) {
1643 		if (soc->link_desc_banks[i].base_vaddr_unaligned) {
1644 			qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
1645 				soc->link_desc_banks[i].size,
1646 				soc->link_desc_banks[i].base_vaddr_unaligned,
1647 				soc->link_desc_banks[i].base_paddr_unaligned,
1648 				0);
1649 			soc->link_desc_banks[i].base_vaddr_unaligned = NULL;
1650 		}
1651 	}
1652 }
1653 
1654 /* TODO: Following should be configurable */
1655 #define WBM_RELEASE_RING_SIZE 64
1656 #define TCL_CMD_RING_SIZE 32
1657 #define TCL_STATUS_RING_SIZE 32
1658 #if defined(QCA_WIFI_QCA6290)
1659 #define REO_DST_RING_SIZE 1024
1660 #else
1661 #define REO_DST_RING_SIZE 2048
1662 #endif
1663 #define REO_REINJECT_RING_SIZE 32
1664 #define RX_RELEASE_RING_SIZE 1024
1665 #define REO_EXCEPTION_RING_SIZE 128
1666 #define REO_CMD_RING_SIZE 64
1667 #define REO_STATUS_RING_SIZE 128
1668 #define RXDMA_BUF_RING_SIZE 1024
1669 #define RXDMA_REFILL_RING_SIZE 4096
1670 #define RXDMA_MONITOR_BUF_RING_SIZE 4096
1671 #define RXDMA_MONITOR_DST_RING_SIZE 2048
1672 #define RXDMA_MONITOR_STATUS_RING_SIZE 1024
1673 #define RXDMA_MONITOR_DESC_RING_SIZE 4096
1674 #define RXDMA_ERR_DST_RING_SIZE 1024
1675 
1676 /*
1677  * dp_wds_aging_timer_fn() - Timer callback function for WDS aging
1678  * @soc: Datapath SOC handle
1679  *
1680  * This is a timer function used to age out stale AST nodes from
1681  * the AST table
1682  */
1683 #ifdef FEATURE_WDS
1684 static void dp_wds_aging_timer_fn(void *soc_hdl)
1685 {
1686 	struct dp_soc *soc = (struct dp_soc *) soc_hdl;
1687 	struct dp_pdev *pdev;
1688 	struct dp_vdev *vdev;
1689 	struct dp_peer *peer;
1690 	struct dp_ast_entry *ase, *temp_ase;
1691 	int i;
1692 
1693 	qdf_spin_lock_bh(&soc->ast_lock);
1694 
1695 	for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) {
1696 		pdev = soc->pdev_list[i];
1697 		qdf_spin_lock_bh(&pdev->vdev_list_lock);
1698 		DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
1699 			DP_VDEV_ITERATE_PEER_LIST(vdev, peer) {
1700 				DP_PEER_ITERATE_ASE_LIST(peer, ase, temp_ase) {
1701 					/*
1702 					 * Do not expire static ast entries
1703 					 * and HM WDS entries
1704 					 */
1705 					if (ase->type != CDP_TXRX_AST_TYPE_WDS)
1706 						continue;
1707 
1708 					if (ase->is_active) {
1709 						ase->is_active = FALSE;
1710 						continue;
1711 					}
1712 
1713 					DP_STATS_INC(soc, ast.aged_out, 1);
1714 					dp_peer_del_ast(soc, ase);
1715 				}
1716 			}
1717 		}
1718 		qdf_spin_unlock_bh(&pdev->vdev_list_lock);
1719 	}
1720 
1721 	qdf_spin_unlock_bh(&soc->ast_lock);
1722 
1723 	if (qdf_atomic_read(&soc->cmn_init_done))
1724 		qdf_timer_mod(&soc->wds_aging_timer, DP_WDS_AGING_TIMER_DEFAULT_MS);
1725 }
1726 
1727 
1728 /*
1729  * dp_soc_wds_attach() - Setup WDS timer and AST table
1730  * @soc:		Datapath SOC handle
1731  *
1732  * Return: None
1733  */
1734 static void dp_soc_wds_attach(struct dp_soc *soc)
1735 {
1736 	qdf_timer_init(soc->osdev, &soc->wds_aging_timer,
1737 			dp_wds_aging_timer_fn, (void *)soc,
1738 			QDF_TIMER_TYPE_WAKE_APPS);
1739 
1740 	qdf_timer_mod(&soc->wds_aging_timer, DP_WDS_AGING_TIMER_DEFAULT_MS);
1741 }
1742 
1743 /*
1744  * dp_soc_wds_detach() - Detach WDS data structures and timers
1745  * @txrx_soc: DP SOC handle
1746  *
1747  * Return: None
1748  */
1749 static void dp_soc_wds_detach(struct dp_soc *soc)
1750 {
1751 	qdf_timer_stop(&soc->wds_aging_timer);
1752 	qdf_timer_free(&soc->wds_aging_timer);
1753 }
1754 #else
1755 static void dp_soc_wds_attach(struct dp_soc *soc)
1756 {
1757 }
1758 
1759 static void dp_soc_wds_detach(struct dp_soc *soc)
1760 {
1761 }
1762 #endif
1763 
1764 /*
1765  * dp_soc_reset_cpu_ring_map() - Reset cpu ring map
1766  * @soc: Datapath soc handler
1767  *
1768  * This API resets the default cpu ring map
1769  */
1770 
1771 static void dp_soc_reset_cpu_ring_map(struct dp_soc *soc)
1772 {
1773 	uint8_t i;
1774 	int nss_config = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
1775 
1776 	for (i = 0; i < WLAN_CFG_INT_NUM_CONTEXTS; i++) {
1777 		if (nss_config == 1) {
1778 			/*
1779 			 * Setting Tx ring map for one nss offloaded radio
1780 			 */
1781 			soc->tx_ring_map[i] = dp_cpu_ring_map[DP_NSS_FIRST_RADIO_OFFLOADED_MAP][i];
1782 		} else if (nss_config == 2) {
1783 			/*
1784 			 * Setting Tx ring for two nss offloaded radios
1785 			 */
1786 			soc->tx_ring_map[i] = dp_cpu_ring_map[DP_NSS_SECOND_RADIO_OFFLOADED_MAP][i];
1787 		} else {
1788 			/*
1789 			 * Setting Tx ring map for all nss offloaded radios
1790 			 */
1791 			soc->tx_ring_map[i] = dp_cpu_ring_map[DP_NSS_ALL_RADIO_OFFLOADED_MAP][i];
1792 		}
1793 	}
1794 }
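
/*
 * Example (derived from the branches above): with nss_config == 1 only
 * the first radio is NSS-offloaded, so every interrupt context picks its
 * Tx ring from dp_cpu_ring_map[DP_NSS_FIRST_RADIO_OFFLOADED_MAP];
 * configurations other than 1 or 2 fall through to the
 * all-radios-offloaded map.
 */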
1795 
1796 /*
1797  * dp_soc_ring_if_nss_offloaded() - find if ring is offloaded to NSS
1798  * @soc - DP soc handle
1799  * @ring_type - ring type
1800  * @ring_num - ring number
1801  *
1802  * Return: non-zero if the ring is offloaded to NSS, 0 otherwise
1803  */
1804 static uint8_t dp_soc_ring_if_nss_offloaded(struct dp_soc *soc, enum hal_ring_type ring_type, int ring_num)
1805 {
1806 	uint8_t nss_config = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
1807 	uint8_t status = 0;
1808 
1809 	switch (ring_type) {
1810 	case WBM2SW_RELEASE:
1811 	case REO_DST:
1812 	case RXDMA_BUF:
1813 		status = ((nss_config) & (1 << ring_num));
1814 		break;
1815 	default:
1816 		break;
1817 	}
1818 
1819 	return status;
1820 }
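
/*
 * Example: nss_config is a per-radio bitmap, so with nss_config == 0x1
 * (only radio 0 offloaded) this returns non-zero for REO_DST ring 0 and
 * 0 for ring 1; ring types other than WBM2SW_RELEASE, REO_DST and
 * RXDMA_BUF are never reported as offloaded.
 */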
1821 
1822 /*
1823  * dp_soc_reset_intr_mask() - reset interrupt mask
1824  * @dp_soc - DP Soc handle
1825  *
1826  * Return: Return void
1827  */
1828 static void dp_soc_reset_intr_mask(struct dp_soc *soc)
1829 {
1830 	uint8_t j;
1831 	int *grp_mask = NULL;
1832 	int group_number, mask, num_ring;
1833 
1834 	/* number of tx ring */
1835 	num_ring = wlan_cfg_num_tcl_data_rings(soc->wlan_cfg_ctx);
1836 
1837 	/*
1838 	 * group mask for tx completion  ring.
1839 	 */
1840 	grp_mask =  &soc->wlan_cfg_ctx->int_tx_ring_mask[0];
1841 
1842 	/* loop and reset the mask for only offloaded ring */
1843 	for (j = 0; j < num_ring; j++) {
1844 		if (!dp_soc_ring_if_nss_offloaded(soc, WBM2SW_RELEASE, j)) {
1845 			continue;
1846 		}
1847 
1848 		/*
1849 		 * Group number corresponding to tx offloaded ring.
1850 		 */
1851 		group_number = dp_srng_find_ring_in_mask(j, grp_mask);
1852 		if (group_number < 0) {
1853 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
1854 					FL("ring not part of any group; ring_type: %d,ring_num %d"),
1855 					WBM2SW_RELEASE, j);
1856 			return;
1857 		}
1858 
1859 		/* reset the tx mask for offloaded ring */
1860 		mask = wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, group_number);
1861 		mask &= (~(1 << j));
1862 
1863 		/*
1864 		 * reset the interrupt mask for offloaded ring.
1865 		 */
1866 		wlan_cfg_set_tx_ring_mask(soc->wlan_cfg_ctx, group_number, mask);
1867 	}
1868 
1869 	/* number of rx rings */
1870 	num_ring = wlan_cfg_num_reo_dest_rings(soc->wlan_cfg_ctx);
1871 
1872 	/*
1873 	 * group mask for reo destination ring.
1874 	 */
1875 	grp_mask = &soc->wlan_cfg_ctx->int_rx_ring_mask[0];
1876 
1877 	/* loop and reset the mask for only offloaded ring */
1878 	for (j = 0; j < num_ring; j++) {
1879 		if (!dp_soc_ring_if_nss_offloaded(soc, REO_DST, j)) {
1880 			continue;
1881 		}
1882 
1883 		/*
1884 		 * Group number corresponding to rx offloaded ring.
1885 		 */
1886 		group_number = dp_srng_find_ring_in_mask(j, grp_mask);
1887 		if (group_number < 0) {
1888 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
1889 					FL("ring not part of any group; ring_type: %d,ring_num %d"),
1890 					REO_DST, j);
1891 			return;
1892 		}
1893 
1894 		/* reset the rx interrupt mask for offloaded ring */
1895 		mask =  wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, group_number);
1896 		mask &= (~(1 << j));
1897 
1898 		/*
1899 		 * set the interrupt mask to zero for rx offloaded radio.
1900 		 */
1901 		wlan_cfg_set_rx_ring_mask(soc->wlan_cfg_ctx, group_number, mask);
1902 	}
1903 
1904 	/*
1905 	 * group mask for Rx buffer refill ring
1906 	 */
1907 	grp_mask = &soc->wlan_cfg_ctx->int_host2rxdma_ring_mask[0];
1908 
1909 	/* loop and reset the mask for only offloaded ring */
1910 	for (j = 0; j < MAX_PDEV_CNT; j++) {
1911 		if (!dp_soc_ring_if_nss_offloaded(soc, RXDMA_BUF, j)) {
1912 			continue;
1913 		}
1914 
1915 		/*
1916 		 * Group number corresponding to rx offloaded ring.
1917 		 */
1918 		group_number = dp_srng_find_ring_in_mask(j, grp_mask);
1919 		if (group_number < 0) {
1920 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
1921 					FL("ring not part of any group; ring_type: %d,ring_num %d"),
1922 					RXDMA_BUF, j);
1923 			return;
1924 		}
1925 
1926 		/* reset the host2rxdma interrupt mask for offloaded ring */
1927 		mask =  wlan_cfg_get_host2rxdma_ring_mask(soc->wlan_cfg_ctx,
1928 				group_number);
1929 		mask &= (~(1 << j));
1930 
1931 		/*
1932 		 * set the interrupt mask to zero for rx offloaded radio.
1933 		 */
1934 		wlan_cfg_set_host2rxdma_ring_mask(soc->wlan_cfg_ctx,
1935 			group_number, mask);
1936 	}
1937 }
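
/*
 * Worked example for the mask arithmetic above: if an interrupt group
 * currently carries tx ring mask 0x7 and ring 1 is NSS-offloaded, then
 * mask &= ~(1 << 1) leaves 0x5, so the host stops servicing completions
 * for the offloaded ring while the other rings stay mapped to the group.
 */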
1938 
1939 #ifdef IPA_OFFLOAD
1940 /**
1941  * dp_reo_remap_config() - configure reo remap register value based
1942  *                         on nss configuration.
1943  *		based on offload_radio value below remap configuration
1944  *		get applied.
1945  *		0 - both Radios handled by host (remap rings 1, 2, 3 & 4)
1946  *		1 - 1st Radio handled by NSS (remap rings 2, 3 & 4)
1947  *		2 - 2nd Radio handled by NSS (remap rings 1, 2 & 4)
1948  *		3 - both Radios handled by NSS (remap not required)
1949  *		4 - IPA OFFLOAD enabled (remap rings 1,2 & 3)
1950  *
1951  * @remap1: output parameter indicates reo remap 1 register value
1952  * @remap2: output parameter indicates reo remap 2 register value
1953  * Return: bool type, true if remap is configured else false.
1954  */
1955 static bool dp_reo_remap_config(struct dp_soc *soc,
1956 				uint32_t *remap1,
1957 				uint32_t *remap2)
1958 {
1959 
1960 	*remap1 = ((0x1 << 0) | (0x2 << 3) | (0x3 << 6) | (0x1 << 9) |
1961 		(0x2 << 12) | (0x3 << 15) | (0x1 << 18) | (0x2 << 21)) << 8;
1962 
1963 	*remap2 = ((0x3 << 0) | (0x1 << 3) | (0x2 << 6) | (0x3 << 9) |
1964 		(0x1 << 12) | (0x2 << 15) | (0x3 << 18) | (0x1 << 21)) << 8;
1965 
1966 	return true;
1967 }
1968 #else
1969 static bool dp_reo_remap_config(struct dp_soc *soc,
1970 				uint32_t *remap1,
1971 				uint32_t *remap2)
1972 {
1973 	uint8_t offload_radio = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
1974 
1975 	switch (offload_radio) {
1976 	case 0:
1977 		*remap1 = ((0x1 << 0) | (0x2 << 3) | (0x3 << 6) |
1978 			(0x4 << 9) | (0x1 << 12) | (0x2 << 15) |
1979 			(0x3 << 18) | (0x4 << 21)) << 8;
1980 
1981 		*remap2 = ((0x1 << 0) | (0x2 << 3) | (0x3 << 6) |
1982 			(0x4 << 9) | (0x1 << 12) | (0x2 << 15) |
1983 			(0x3 << 18) | (0x4 << 21)) << 8;
1984 		break;
1985 
1986 	case 1:
1987 		*remap1 = ((0x2 << 0) | (0x3 << 3) | (0x4 << 6) |
1988 			(0x2 << 9) | (0x3 << 12) | (0x4 << 15) |
1989 			(0x2 << 18) | (0x3 << 21)) << 8;
1990 
1991 		*remap2 = ((0x4 << 0) | (0x2 << 3) | (0x3 << 6) |
1992 			(0x4 << 9) | (0x2 << 12) | (0x3 << 15) |
1993 			(0x4 << 18) | (0x2 << 21)) << 8;
1994 		break;
1995 
1996 	case 2:
1997 		*remap1 = ((0x1 << 0) | (0x3 << 3) | (0x4 << 6) |
1998 			(0x1 << 9) | (0x3 << 12) | (0x4 << 15) |
1999 			(0x1 << 18) | (0x3 << 21)) << 8;
2000 
2001 		*remap2 = ((0x4 << 0) | (0x1 << 3) | (0x3 << 6) |
2002 			(0x4 << 9) | (0x1 << 12) | (0x3 << 15) |
2003 			(0x4 << 18) | (0x1 << 21)) << 8;
2004 		break;
2005 
2006 	case 3:
2007 		/* return false if both radios are offloaded to NSS */
2008 		return false;
2009 	}
2010 	return true;
2011 }
2012 #endif
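
/*
 * The remap words built above pack eight 3-bit destination-ring fields
 * (bit offsets 0, 3, ..., 21); e.g. the no-offload case cycles 1,2,3,4 so
 * REO spreads RX across SW rings 1-4, while the single-radio NSS cases
 * skip the ring owned by the offloaded radio.  The trailing << 8 shift is
 * assumed to align the value with the field offset hal_reo_setup() expects.
 */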
2013 
2014 /*
2015  * dp_reo_frag_dst_set() - configure reo register to set the
2016  *                        fragment destination ring
2017  * @soc : Datapath soc
2018  * @frag_dst_ring : output parameter to set fragment destination ring
2019  *
2020  * Based on offload_radio below, the fragment destination ring is selected
2021  * 0 - TCL
2022  * 1 - SW1
2023  * 2 - SW2
2024  * 3 - SW3
2025  * 4 - SW4
2026  * 5 - Release
2027  * 6 - FW
2028  * 7 - alternate select
2029  *
2030  * return: void
2031  */
2032 static void dp_reo_frag_dst_set(struct dp_soc *soc, uint8_t *frag_dst_ring)
2033 {
2034 	uint8_t offload_radio = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
2035 
2036 	switch (offload_radio) {
2037 	case 0:
2038 		*frag_dst_ring = HAL_SRNG_REO_EXCEPTION;
2039 		break;
2040 	case 3:
2041 		*frag_dst_ring = HAL_SRNG_REO_ALTERNATE_SELECT;
2042 		break;
2043 	default:
2044 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2045 				FL("dp_reo_frag_dst_set invalid offload radio config"));
2046 		break;
2047 	}
2048 }
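
/*
 * With no NSS offload, fragments are steered to the REO exception ring so
 * the host defrag path (the rx.defrag waitlist initialised in
 * dp_soc_cmn_setup() below) can reassemble them; when both radios are
 * offloaded the alternate-select option is used instead.
 */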
2049 
2050 /*
2051  * dp_soc_cmn_setup() - Common SoC level initialization
2052  * @soc:		Datapath SOC handle
2053  *
2054  * This is an internal function used to setup common SOC data structures,
2055  * to be called from PDEV attach after receiving HW mode capabilities from FW
2056  */
2057 static int dp_soc_cmn_setup(struct dp_soc *soc)
2058 {
2059 	int i;
2060 	struct hal_reo_params reo_params;
2061 	int tx_ring_size;
2062 	int tx_comp_ring_size;
2063 
2064 	if (qdf_atomic_read(&soc->cmn_init_done))
2065 		return 0;
2066 
2067 	if (dp_hw_link_desc_pool_setup(soc))
2068 		goto fail1;
2069 
2070 	/* Setup SRNG rings */
2071 	/* Common rings */
2072 	if (dp_srng_setup(soc, &soc->wbm_desc_rel_ring, SW2WBM_RELEASE, 0, 0,
2073 		WBM_RELEASE_RING_SIZE)) {
2074 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2075 			FL("dp_srng_setup failed for wbm_desc_rel_ring"));
2076 		goto fail1;
2077 	}
2078 
2079 
2080 	soc->num_tcl_data_rings = 0;
2081 	/* Tx data rings */
2082 	if (!wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
2083 		soc->num_tcl_data_rings =
2084 			wlan_cfg_num_tcl_data_rings(soc->wlan_cfg_ctx);
2085 		tx_comp_ring_size =
2086 			wlan_cfg_tx_comp_ring_size(soc->wlan_cfg_ctx);
2087 		tx_ring_size =
2088 			wlan_cfg_tx_ring_size(soc->wlan_cfg_ctx);
2089 		for (i = 0; i < soc->num_tcl_data_rings; i++) {
2090 			if (dp_srng_setup(soc, &soc->tcl_data_ring[i],
2091 				TCL_DATA, i, 0, tx_ring_size)) {
2092 				QDF_TRACE(QDF_MODULE_ID_DP,
2093 					QDF_TRACE_LEVEL_ERROR,
2094 					FL("dp_srng_setup failed for tcl_data_ring[%d]"), i);
2095 				goto fail1;
2096 			}
2097 			/*
2098 			 * TBD: Set IPA WBM ring size with ini IPA UC tx buffer
2099 			 * count
2100 			 */
2101 			if (dp_srng_setup(soc, &soc->tx_comp_ring[i],
2102 				WBM2SW_RELEASE, i, 0, tx_comp_ring_size)) {
2103 				QDF_TRACE(QDF_MODULE_ID_DP,
2104 					QDF_TRACE_LEVEL_ERROR,
2105 					FL("dp_srng_setup failed for tx_comp_ring[%d]"), i);
2106 				goto fail1;
2107 			}
2108 		}
2109 	} else {
2110 		/* This will be incremented during per pdev ring setup */
2111 		soc->num_tcl_data_rings = 0;
2112 	}
2113 
2114 	if (dp_tx_soc_attach(soc)) {
2115 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2116 				FL("dp_tx_soc_attach failed"));
2117 		goto fail1;
2118 	}
2119 
2120 	/* TCL command and status rings */
2121 	if (dp_srng_setup(soc, &soc->tcl_cmd_ring, TCL_CMD, 0, 0,
2122 		TCL_CMD_RING_SIZE)) {
2123 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2124 			FL("dp_srng_setup failed for tcl_cmd_ring"));
2125 		goto fail1;
2126 	}
2127 
2128 	if (dp_srng_setup(soc, &soc->tcl_status_ring, TCL_STATUS, 0, 0,
2129 		TCL_STATUS_RING_SIZE)) {
2130 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2131 			FL("dp_srng_setup failed for tcl_status_ring"));
2132 		goto fail1;
2133 	}
2134 
2135 
2136 	/* TBD: call dp_tx_init to setup Tx SW descriptors and MSDU extension
2137 	 * descriptors
2138 	 */
2139 
2140 	/* Rx data rings */
2141 	if (!wlan_cfg_per_pdev_rx_ring(soc->wlan_cfg_ctx)) {
2142 		soc->num_reo_dest_rings =
2143 			wlan_cfg_num_reo_dest_rings(soc->wlan_cfg_ctx);
2144 		QDF_TRACE(QDF_MODULE_ID_DP,
2145 			QDF_TRACE_LEVEL_ERROR,
2146 			FL("num_reo_dest_rings %d\n"), soc->num_reo_dest_rings);
2147 		for (i = 0; i < soc->num_reo_dest_rings; i++) {
2148 			if (dp_srng_setup(soc, &soc->reo_dest_ring[i], REO_DST,
2149 				i, 0, REO_DST_RING_SIZE)) {
2150 				QDF_TRACE(QDF_MODULE_ID_DP,
2151 					QDF_TRACE_LEVEL_ERROR,
2152 					FL("dp_srng_setup failed for reo_dest_ring[%d]"), i);
2153 				goto fail1;
2154 			}
2155 		}
2156 	} else {
2157 		/* This will be incremented during per pdev ring setup */
2158 		soc->num_reo_dest_rings = 0;
2159 	}
2160 
2161 	/* LMAC RxDMA to SW Rings configuration */
2162 	if (!wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx)) {
2163 		/* Only valid for MCL */
2164 		struct dp_pdev *pdev = soc->pdev_list[0];
2165 
2166 		for (i = 0; i < MAX_RX_MAC_RINGS; i++) {
2167 			if (dp_srng_setup(soc, &pdev->rxdma_err_dst_ring[i],
2168 				RXDMA_DST, 0, i, RXDMA_ERR_DST_RING_SIZE)) {
2169 				QDF_TRACE(QDF_MODULE_ID_DP,
2170 					QDF_TRACE_LEVEL_ERROR,
2171 					FL("dp_srng_setup failed for rxdma_err_dst_ring"));
2172 				goto fail1;
2173 			}
2174 		}
2175 	}
2176 	/* TBD: call dp_rx_init to setup Rx SW descriptors */
2177 
2178 	/* REO reinjection ring */
2179 	if (dp_srng_setup(soc, &soc->reo_reinject_ring, REO_REINJECT, 0, 0,
2180 		REO_REINJECT_RING_SIZE)) {
2181 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2182 			FL("dp_srng_setup failed for reo_reinject_ring"));
2183 		goto fail1;
2184 	}
2185 
2186 
2187 	/* Rx release ring */
2188 	if (dp_srng_setup(soc, &soc->rx_rel_ring, WBM2SW_RELEASE, 3, 0,
2189 		RX_RELEASE_RING_SIZE)) {
2190 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2191 			FL("dp_srng_setup failed for rx_rel_ring"));
2192 		goto fail1;
2193 	}
2194 
2195 
2196 	/* Rx exception ring */
2197 	if (dp_srng_setup(soc, &soc->reo_exception_ring, REO_EXCEPTION, 0,
2198 		MAX_REO_DEST_RINGS, REO_EXCEPTION_RING_SIZE)) {
2199 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2200 			FL("dp_srng_setup failed for reo_exception_ring"));
2201 		goto fail1;
2202 	}
2203 
2204 
2205 	/* REO command and status rings */
2206 	if (dp_srng_setup(soc, &soc->reo_cmd_ring, REO_CMD, 0, 0,
2207 		REO_CMD_RING_SIZE)) {
2208 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2209 			FL("dp_srng_setup failed for reo_cmd_ring"));
2210 		goto fail1;
2211 	}
2212 
2213 	hal_reo_init_cmd_ring(soc->hal_soc, soc->reo_cmd_ring.hal_srng);
2214 	TAILQ_INIT(&soc->rx.reo_cmd_list);
2215 	qdf_spinlock_create(&soc->rx.reo_cmd_lock);
2216 
2217 	if (dp_srng_setup(soc, &soc->reo_status_ring, REO_STATUS, 0, 0,
2218 		REO_STATUS_RING_SIZE)) {
2219 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2220 			FL("dp_srng_setup failed for reo_status_ring"));
2221 		goto fail1;
2222 	}
2223 
2224 	qdf_spinlock_create(&soc->ast_lock);
2225 	dp_soc_wds_attach(soc);
2226 
2227 	/* Reset the cpu ring map if radio is NSS offloaded */
2228 	if (wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx)) {
2229 		dp_soc_reset_cpu_ring_map(soc);
2230 		dp_soc_reset_intr_mask(soc);
2231 	}
2232 
2233 	/* Setup HW REO */
2234 	qdf_mem_zero(&reo_params, sizeof(reo_params));
2235 
2236 	if (wlan_cfg_is_rx_hash_enabled(soc->wlan_cfg_ctx)) {
2237 
2238 		/*
2239 		 * Reo ring remap is not required if both radios
2240 		 * are offloaded to NSS
2241 		 */
2242 		if (!dp_reo_remap_config(soc,
2243 					&reo_params.remap1,
2244 					&reo_params.remap2))
2245 			goto out;
2246 
2247 		reo_params.rx_hash_enabled = true;
2248 	}
2249 
2250 	/* setup the global rx defrag waitlist */
2251 	TAILQ_INIT(&soc->rx.defrag.waitlist);
2252 	soc->rx.defrag.timeout_ms =
2253 		wlan_cfg_get_rx_defrag_min_timeout(soc->wlan_cfg_ctx);
2254 	soc->rx.flags.defrag_timeout_check =
2255 		wlan_cfg_get_defrag_timeout_check(soc->wlan_cfg_ctx);
2256 
2257 out:
2258 	/*
2259 	 * set the fragment destination ring
2260 	 */
2261 	dp_reo_frag_dst_set(soc, &reo_params.frag_dst_ring);
2262 
2263 	hal_reo_setup(soc->hal_soc, &reo_params);
2264 
2265 	qdf_atomic_set(&soc->cmn_init_done, 1);
2266 	qdf_nbuf_queue_init(&soc->htt_stats.msg);
2267 	return 0;
2268 fail1:
2269 	/*
2270 	 * Cleanup will be done as part of soc_detach, which will
2271 	 * be called on pdev attach failure
2272 	 */
2273 	return QDF_STATUS_E_FAILURE;
2274 }
2275 
2276 static void dp_pdev_detach_wifi3(struct cdp_pdev *txrx_pdev, int force);
2277 
2278 static void dp_lro_hash_setup(struct dp_soc *soc)
2279 {
2280 	struct cdp_lro_hash_config lro_hash;
2281 
2282 	if (!wlan_cfg_is_lro_enabled(soc->wlan_cfg_ctx) &&
2283 		!wlan_cfg_is_rx_hash_enabled(soc->wlan_cfg_ctx)) {
2284 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2285 			 FL("LRO and RX hash are both disabled"));
2286 		return;
2287 	}
2288 
2289 	qdf_mem_zero(&lro_hash, sizeof(lro_hash));
2290 
2291 	if (wlan_cfg_is_lro_enabled(soc->wlan_cfg_ctx)) {
2292 		lro_hash.lro_enable = 1;
2293 		lro_hash.tcp_flag = QDF_TCPHDR_ACK;
2294 		lro_hash.tcp_flag_mask = QDF_TCPHDR_FIN | QDF_TCPHDR_SYN |
2295 			 QDF_TCPHDR_RST | QDF_TCPHDR_ACK | QDF_TCPHDR_URG |
2296 			 QDF_TCPHDR_ECE | QDF_TCPHDR_CWR;
2297 	}
2298 
2299 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW, FL("enabled"));
2300 	qdf_get_random_bytes(lro_hash.toeplitz_hash_ipv4,
2301 		 (sizeof(lro_hash.toeplitz_hash_ipv4[0]) *
2302 		 LRO_IPV4_SEED_ARR_SZ));
2303 	qdf_get_random_bytes(lro_hash.toeplitz_hash_ipv6,
2304 		 (sizeof(lro_hash.toeplitz_hash_ipv6[0]) *
2305 		 LRO_IPV6_SEED_ARR_SZ));
2306 
2307 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW,
2308 		 "lro_hash: lro_enable: 0x%x tcp_flag 0x%x tcp_flag_mask 0x%x",
2309 		 lro_hash.lro_enable, lro_hash.tcp_flag,
2310 		 lro_hash.tcp_flag_mask);
2311 
2312 	qdf_trace_hex_dump(QDF_MODULE_ID_DP,
2313 		 QDF_TRACE_LEVEL_ERROR,
2314 		 (void *)lro_hash.toeplitz_hash_ipv4,
2315 		 (sizeof(lro_hash.toeplitz_hash_ipv4[0]) *
2316 		 LRO_IPV4_SEED_ARR_SZ));
2317 
2318 	qdf_trace_hex_dump(QDF_MODULE_ID_DP,
2319 		 QDF_TRACE_LEVEL_ERROR,
2320 		 (void *)lro_hash.toeplitz_hash_ipv6,
2321 		 (sizeof(lro_hash.toeplitz_hash_ipv6[0]) *
2322 		 LRO_IPV6_SEED_ARR_SZ));
2323 
2324 	qdf_assert(soc->cdp_soc.ol_ops->lro_hash_config);
2325 
2326 	if (soc->cdp_soc.ol_ops->lro_hash_config)
2327 		(void)soc->cdp_soc.ol_ops->lro_hash_config
2328 			(soc->ctrl_psoc, &lro_hash);
2329 }
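
/*
 * The random Toeplitz seeds generated above are pushed to the target
 * through the lro_hash_config ol_ops callback so the target can program
 * the hash keys used for LRO/RX-hash flow steering; the hex dumps are
 * only debug output of those seeds.
 */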
2330 
2331 /*
2332 * dp_rxdma_ring_setup() - configure the RX DMA rings
2333 * @soc: data path SoC handle
2334 * @pdev: Physical device handle
2335 *
2336 * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_FAILURE on failure
2337 */
2338 #ifdef QCA_HOST2FW_RXBUF_RING
2339 static int dp_rxdma_ring_setup(struct dp_soc *soc,
2340 	 struct dp_pdev *pdev)
2341 {
2342 	int max_mac_rings =
2343 		 wlan_cfg_get_num_mac_rings
2344 			(pdev->wlan_cfg_ctx);
2345 	int i;
2346 
2347 	for (i = 0; i < max_mac_rings; i++) {
2348 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
2349 			 "%s: pdev_id %d mac_id %d\n",
2350 			 __func__, pdev->pdev_id, i);
2351 		if (dp_srng_setup(soc, &pdev->rx_mac_buf_ring[i],
2352 			 RXDMA_BUF, 1, i, RXDMA_BUF_RING_SIZE)) {
2353 			QDF_TRACE(QDF_MODULE_ID_DP,
2354 				 QDF_TRACE_LEVEL_ERROR,
2355 				 FL("failed rx mac ring setup"));
2356 			return QDF_STATUS_E_FAILURE;
2357 		}
2358 	}
2359 	return QDF_STATUS_SUCCESS;
2360 }
2361 #else
2362 static int dp_rxdma_ring_setup(struct dp_soc *soc,
2363 	 struct dp_pdev *pdev)
2364 {
2365 	return QDF_STATUS_SUCCESS;
2366 }
2367 #endif
2368 
2369 /**
2370  * dp_dscp_tid_map_setup(): Initialize the dscp-tid maps
2371  * @pdev - DP_PDEV handle
2372  *
2373  * Return: void
2374  */
2375 static inline void
2376 dp_dscp_tid_map_setup(struct dp_pdev *pdev)
2377 {
2378 	uint8_t map_id;
2379 	for (map_id = 0; map_id < DP_MAX_TID_MAPS; map_id++) {
2380 		qdf_mem_copy(pdev->dscp_tid_map[map_id], default_dscp_tid_map,
2381 				sizeof(default_dscp_tid_map));
2382 	}
2383 	for (map_id = 0; map_id < HAL_MAX_HW_DSCP_TID_MAPS; map_id++) {
2384 		hal_tx_set_dscp_tid_map(pdev->soc->hal_soc,
2385 				pdev->dscp_tid_map[map_id],
2386 				map_id);
2387 	}
2388 }
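
/*
 * pdev->dscp_tid_map[] keeps a host copy of all DP_MAX_TID_MAPS maps,
 * while only the first HAL_MAX_HW_DSCP_TID_MAPS maps are actually
 * programmed into hardware via hal_tx_set_dscp_tid_map() above.
 */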
2389 
2390 #ifdef QCA_SUPPORT_SON
2391 /**
2392  * dp_mark_peer_inact(): Update peer inactivity status
2393  * @peer_handle - datapath peer handle
2394  *
2395  * Return: void
2396  */
2397 void dp_mark_peer_inact(void *peer_handle, bool inactive)
2398 {
2399 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
2400 	struct dp_pdev *pdev;
2401 	struct dp_soc *soc;
2402 	bool inactive_old;
2403 
2404 	if (!peer)
2405 		return;
2406 
2407 	pdev = peer->vdev->pdev;
2408 	soc = pdev->soc;
2409 
2410 	inactive_old = peer->peer_bs_inact_flag == 1;
2411 	if (!inactive)
2412 		peer->peer_bs_inact = soc->pdev_bs_inact_reload;
2413 	peer->peer_bs_inact_flag = inactive ? 1 : 0;
2414 
2415 	if (inactive_old != inactive) {
2416 		/**
2417 		 * Note: a node lookup can happen in RX datapath context
2418 		 * when a node changes from inactive to active (at most once
2419 		 * per inactivity timeout threshold)
2420 		 */
2421 		if (soc->cdp_soc.ol_ops->record_act_change) {
2422 			soc->cdp_soc.ol_ops->record_act_change(pdev->osif_pdev,
2423 					peer->mac_addr.raw, !inactive);
2424 		}
2425 	}
2426 }
2427 
2428 /**
2429  * dp_txrx_peer_find_inact_timeout_handler(): Inactivity timeout function
2430  *
2431  * Periodically checks the inactivity status
2432  */
2433 static os_timer_func(dp_txrx_peer_find_inact_timeout_handler)
2434 {
2435 	struct dp_pdev *pdev;
2436 	struct dp_vdev *vdev;
2437 	struct dp_peer *peer;
2438 	struct dp_soc *soc;
2439 	int i;
2440 
2441 	OS_GET_TIMER_ARG(soc, struct dp_soc *);
2442 
2443 	qdf_spin_lock(&soc->peer_ref_mutex);
2444 
2445 	for (i = 0; i < soc->pdev_count; i++) {
2446 		pdev = soc->pdev_list[i];
2447 		qdf_spin_lock_bh(&pdev->vdev_list_lock);
2448 		TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
2449 			if (vdev->opmode != wlan_op_mode_ap)
2450 				continue;
2451 
2452 			TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
2453 				if (!peer->authorize) {
2454 					/**
2455 					 * Inactivity check only interested in
2456 					 * connected node
2457 					 */
2458 					continue;
2459 				}
2460 				if (peer->peer_bs_inact > soc->pdev_bs_inact_reload) {
2461 					/**
2462 					 * This check ensures we do not wait extra long
2463 					 * due to the potential race condition
2464 					 */
2465 					peer->peer_bs_inact = soc->pdev_bs_inact_reload;
2466 				}
2467 				if (peer->peer_bs_inact > 0) {
2468 					/* Do not let it wrap around */
2469 					peer->peer_bs_inact--;
2470 				}
2471 				if (peer->peer_bs_inact == 0)
2472 					dp_mark_peer_inact(peer, true);
2473 			}
2474 		}
2475 		qdf_spin_unlock_bh(&pdev->vdev_list_lock);
2476 	}
2477 
2478 	qdf_spin_unlock(&soc->peer_ref_mutex);
2479 	qdf_timer_mod(&soc->pdev_bs_inact_timer,
2480 		      soc->pdev_bs_inact_interval * 1000);
2481 }
2482 
2483 
2484 /**
2485  * dp_free_inact_timer(): free inact timer
2486  * @soc - datapath soc handle
2487  *
2488  * Return: void
2489  */
2490 void dp_free_inact_timer(struct dp_soc *soc)
2491 {
2492 	qdf_timer_free(&soc->pdev_bs_inact_timer);
2493 }
2494 #else
2495 
2496 void dp_mark_peer_inact(void *peer, bool inactive)
2497 {
2498 	return;
2499 }
2500 
2501 void dp_free_inact_timer(struct dp_soc *soc)
2502 {
2503 	return;
2504 }
2505 
2506 #endif
2507 
2508 #ifdef IPA_OFFLOAD
2509 /**
2510  * dp_setup_ipa_rx_refill_buf_ring - Setup second Rx refill buffer ring
2511  * @soc: data path instance
2512  * @pdev: core txrx pdev context
2513  *
2514  * Return: QDF_STATUS_SUCCESS: success
2515  *         QDF_STATUS_E_RESOURCES: Error return
2516  */
2517 static int dp_setup_ipa_rx_refill_buf_ring(struct dp_soc *soc,
2518 					   struct dp_pdev *pdev)
2519 {
2520 	/* Setup second Rx refill buffer ring */
2521 	if (dp_srng_setup(soc, &pdev->rx_refill_buf_ring2, RXDMA_BUF,
2522 			  IPA_RX_REFILL_BUF_RING_IDX,
2523 			  pdev->pdev_id, RXDMA_REFILL_RING_SIZE)) {
2524 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2525 			FL("dp_srng_setup failed second rx refill ring"));
2526 		return QDF_STATUS_E_FAILURE;
2527 	}
2528 	return QDF_STATUS_SUCCESS;
2529 }
2530 
2531 /**
2532  * dp_cleanup_ipa_rx_refill_buf_ring - Cleanup second Rx refill buffer ring
2533  * @soc: data path instance
2534  * @pdev: core txrx pdev context
2535  *
2536  * Return: void
2537  */
2538 static void dp_cleanup_ipa_rx_refill_buf_ring(struct dp_soc *soc,
2539 					      struct dp_pdev *pdev)
2540 {
2541 	dp_srng_cleanup(soc, &pdev->rx_refill_buf_ring2, RXDMA_BUF,
2542 			IPA_RX_REFILL_BUF_RING_IDX);
2543 }
2544 
2545 #else
2546 
2547 static int dp_setup_ipa_rx_refill_buf_ring(struct dp_soc *soc,
2548 					   struct dp_pdev *pdev)
2549 {
2550 	return QDF_STATUS_SUCCESS;
2551 }
2552 
2553 static void dp_cleanup_ipa_rx_refill_buf_ring(struct dp_soc *soc,
2554 					      struct dp_pdev *pdev)
2555 {
2556 }
2557 
2558 #endif
2559 
2560 /*
2561 * dp_pdev_attach_wifi3() - attach txrx pdev
2562 * @txrx_soc: Datapath SOC handle
2563 * @ctrl_pdev: Opaque PDEV object
2564 * @htc_handle: HTC handle for host-target interface
2565 * @qdf_osdev: QDF OS device
2566 * @pdev_id: PDEV ID
2567 *
2568 * Return: DP PDEV handle on success, NULL on failure
2569 */
2570 static struct cdp_pdev *dp_pdev_attach_wifi3(struct cdp_soc_t *txrx_soc,
2571 	struct cdp_cfg *ctrl_pdev,
2572 	HTC_HANDLE htc_handle, qdf_device_t qdf_osdev, uint8_t pdev_id)
2573 {
2574 	int tx_ring_size;
2575 	int tx_comp_ring_size;
2576 
2577 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
2578 	struct dp_pdev *pdev = qdf_mem_malloc(sizeof(*pdev));
2579 	int mac_id;
2580 
2581 	if (!pdev) {
2582 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2583 			FL("DP PDEV memory allocation failed"));
2584 		goto fail0;
2585 	}
2586 
2587 	pdev->wlan_cfg_ctx = wlan_cfg_pdev_attach();
2588 
2589 	if (!pdev->wlan_cfg_ctx) {
2590 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2591 			FL("pdev cfg_attach failed"));
2592 
2593 		qdf_mem_free(pdev);
2594 		goto fail0;
2595 	}
2596 
2597 	/*
2598 	 * set nss pdev config based on soc config
2599 	 */
2600 	wlan_cfg_set_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx,
2601 			(wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx) & (1 << pdev_id)));
2602 
2603 	pdev->soc = soc;
2604 	pdev->osif_pdev = ctrl_pdev;
2605 	pdev->pdev_id = pdev_id;
2606 	soc->pdev_list[pdev_id] = pdev;
2607 	soc->pdev_count++;
2608 
2609 	TAILQ_INIT(&pdev->vdev_list);
2610 	qdf_spinlock_create(&pdev->vdev_list_lock);
2611 	pdev->vdev_count = 0;
2612 
2613 	qdf_spinlock_create(&pdev->tx_mutex);
2614 	qdf_spinlock_create(&pdev->neighbour_peer_mutex);
2615 	TAILQ_INIT(&pdev->neighbour_peers_list);
2616 
2617 	if (dp_soc_cmn_setup(soc)) {
2618 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2619 			FL("dp_soc_cmn_setup failed"));
2620 		goto fail1;
2621 	}
2622 
2623 	/* Setup per PDEV TCL rings if configured */
2624 	if (wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
2625 		tx_ring_size =
2626 			wlan_cfg_tx_ring_size(soc->wlan_cfg_ctx);
2627 		tx_comp_ring_size =
2628 			wlan_cfg_tx_comp_ring_size(soc->wlan_cfg_ctx);
2629 
2630 		if (dp_srng_setup(soc, &soc->tcl_data_ring[pdev_id], TCL_DATA,
2631 			pdev_id, pdev_id, tx_ring_size)) {
2632 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2633 				FL("dp_srng_setup failed for tcl_data_ring"));
2634 			goto fail1;
2635 		}
2636 		if (dp_srng_setup(soc, &soc->tx_comp_ring[pdev_id],
2637 			WBM2SW_RELEASE, pdev_id, pdev_id, tx_comp_ring_size)) {
2638 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2639 				FL("dp_srng_setup failed for tx_comp_ring"));
2640 			goto fail1;
2641 		}
2642 		soc->num_tcl_data_rings++;
2643 	}
2644 
2645 	/* Tx specific init */
2646 	if (dp_tx_pdev_attach(pdev)) {
2647 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2648 			FL("dp_tx_pdev_attach failed"));
2649 		goto fail1;
2650 	}
2651 
2652 	/* Setup per PDEV REO rings if configured */
2653 	if (wlan_cfg_per_pdev_rx_ring(soc->wlan_cfg_ctx)) {
2654 		if (dp_srng_setup(soc, &soc->reo_dest_ring[pdev_id], REO_DST,
2655 			pdev_id, pdev_id, REO_DST_RING_SIZE)) {
2656 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2657 				FL("dp_srng_setup failed for reo_dest_ring"));
2658 			goto fail1;
2659 		}
2660 		soc->num_reo_dest_rings++;
2661 
2662 	}
2663 	if (dp_srng_setup(soc, &pdev->rx_refill_buf_ring, RXDMA_BUF, 0, pdev_id,
2664 		RXDMA_REFILL_RING_SIZE)) {
2665 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2666 			 FL("dp_srng_setup failed rx refill ring"));
2667 		goto fail1;
2668 	}
2669 
2670 	if (dp_rxdma_ring_setup(soc, pdev)) {
2671 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2672 			 FL("RXDMA ring config failed"));
2673 		goto fail1;
2674 	}
2675 
2676 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
2677 		int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, pdev_id);
2678 
2679 		if (dp_srng_setup(soc, &pdev->rxdma_mon_buf_ring[mac_id],
2680 			RXDMA_MONITOR_BUF, 0, mac_for_pdev,
2681 			RXDMA_MONITOR_BUF_RING_SIZE)) {
2682 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2683 			  FL("dp_srng_setup failed for rxdma_mon_buf_ring"));
2684 			goto fail1;
2685 		}
2686 
2687 		if (dp_srng_setup(soc, &pdev->rxdma_mon_dst_ring[mac_id],
2688 			RXDMA_MONITOR_DST, 0, mac_for_pdev,
2689 			RXDMA_MONITOR_DST_RING_SIZE)) {
2690 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2691 			  FL("dp_srng_setup failed for rxdma_mon_dst_ring"));
2692 			goto fail1;
2693 		}
2694 
2695 
2696 		if (dp_srng_setup(soc, &pdev->rxdma_mon_status_ring[mac_id],
2697 			RXDMA_MONITOR_STATUS, 0, mac_for_pdev,
2698 			RXDMA_MONITOR_STATUS_RING_SIZE)) {
2699 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2700 			 FL("dp_srng_setup failed for rxdma_mon_status_ring"));
2701 			goto fail1;
2702 		}
2703 
2704 		if (dp_srng_setup(soc, &pdev->rxdma_mon_desc_ring[mac_id],
2705 			RXDMA_MONITOR_DESC, 0, mac_for_pdev,
2706 			RXDMA_MONITOR_DESC_RING_SIZE)) {
2707 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2708 			  "dp_srng_setup failed for rxdma_mon_desc_ring\n");
2709 			goto fail1;
2710 		}
2711 	}
2712 
2713 	if (wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx)) {
2714 		if (dp_srng_setup(soc, &pdev->rxdma_err_dst_ring[0], RXDMA_DST,
2715 				  0, pdev_id, RXDMA_ERR_DST_RING_SIZE)) {
2716 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2717 				FL("dp_srng_setup failed for rxdma_err_dst_ring"));
2718 			goto fail1;
2719 		}
2720 	}
2721 
2722 	if (dp_setup_ipa_rx_refill_buf_ring(soc, pdev))
2723 		goto fail1;
2724 
2725 	if (dp_ipa_ring_resource_setup(soc, pdev))
2726 		goto fail1;
2727 
2728 	if (dp_ipa_uc_attach(soc, pdev) != QDF_STATUS_SUCCESS) {
2729 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2730 			FL("dp_ipa_uc_attach failed"));
2731 		goto fail1;
2732 	}
2733 
2734 	/* Rx specific init */
2735 	if (dp_rx_pdev_attach(pdev)) {
2736 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2737 			FL("dp_rx_pdev_attach failed"));
2738 		goto fail0;
2739 	}
2740 	DP_STATS_INIT(pdev);
2741 
2742 	/* Monitor filter init */
2743 	pdev->mon_filter_mode = MON_FILTER_ALL;
2744 	pdev->fp_mgmt_filter = FILTER_MGMT_ALL;
2745 	pdev->fp_ctrl_filter = FILTER_CTRL_ALL;
2746 	pdev->fp_data_filter = FILTER_DATA_ALL;
2747 	pdev->mo_mgmt_filter = FILTER_MGMT_ALL;
2748 	pdev->mo_ctrl_filter = FILTER_CTRL_ALL;
2749 	pdev->mo_data_filter = FILTER_DATA_ALL;
2750 
2751 #ifndef CONFIG_WIN
2752 	/* MCL */
2753 	dp_local_peer_id_pool_init(pdev);
2754 #endif
2755 	dp_dscp_tid_map_setup(pdev);
2756 
2757 	/* Rx monitor mode specific init */
2758 	if (dp_rx_pdev_mon_attach(pdev)) {
2759 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
2760 				"dp_rx_pdev_mon_attach failed\n");
2761 		goto fail1;
2762 	}
2763 
2764 	if (dp_wdi_event_attach(pdev)) {
2765 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
2766 				"dp_wdi_event_attach failed\n");
2767 		goto fail1;
2768 	}
2769 
2770 	/* set the reo destination during initialization */
2771 	pdev->reo_dest = pdev->pdev_id + 1;
2772 
2773 	/*
2774 	 * initialize ppdu tlv list
2775 	 */
2776 	TAILQ_INIT(&pdev->ppdu_info_list);
2777 	pdev->tlv_count = 0;
2778 	pdev->list_depth = 0;
2779 
2780 	return (struct cdp_pdev *)pdev;
2781 
2782 fail1:
2783 	dp_pdev_detach_wifi3((struct cdp_pdev *)pdev, 0);
2784 
2785 fail0:
2786 	return NULL;
2787 }
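
/*
 * Note on the error handling above: the fail1 branches funnel into
 * dp_pdev_detach_wifi3() and therefore rely on the detach path tolerating
 * a partially initialised pdev, while fail0 returns NULL without any
 * unwinding.
 */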
2788 
2789 /*
2790 * dp_rxdma_ring_cleanup() - clean up the RX DMA rings
2791 * @soc: data path SoC handle
2792 * @pdev: Physical device handle
2793 *
2794 * Return: void
2795 */
2796 #ifdef QCA_HOST2FW_RXBUF_RING
2797 static void dp_rxdma_ring_cleanup(struct dp_soc *soc,
2798 	 struct dp_pdev *pdev)
2799 {
2800 	int max_mac_rings =
2801 		 wlan_cfg_get_num_mac_rings(pdev->wlan_cfg_ctx);
2802 	int i;
2803 
2804 	max_mac_rings = max_mac_rings < MAX_RX_MAC_RINGS ?
2805 				max_mac_rings : MAX_RX_MAC_RINGS;
2806 	for (i = 0; i < max_mac_rings; i++)
2807 		dp_srng_cleanup(soc, &pdev->rx_mac_buf_ring[i],
2808 			 RXDMA_BUF, 1);
2809 
2810 	qdf_timer_free(&soc->mon_reap_timer);
2811 }
2812 #else
2813 static void dp_rxdma_ring_cleanup(struct dp_soc *soc,
2814 	 struct dp_pdev *pdev)
2815 {
2816 }
2817 #endif
2818 
2819 /*
2820  * dp_neighbour_peers_detach() - Detach neighbour peers(nac clients)
2821  * @pdev: device object
2822  *
2823  * Return: void
2824  */
2825 static void dp_neighbour_peers_detach(struct dp_pdev *pdev)
2826 {
2827 	struct dp_neighbour_peer *peer = NULL;
2828 	struct dp_neighbour_peer *temp_peer = NULL;
2829 
2830 	TAILQ_FOREACH_SAFE(peer, &pdev->neighbour_peers_list,
2831 			neighbour_peer_list_elem, temp_peer) {
2832 		/* delete this peer from the list */
2833 		TAILQ_REMOVE(&pdev->neighbour_peers_list,
2834 				peer, neighbour_peer_list_elem);
2835 		qdf_mem_free(peer);
2836 	}
2837 
2838 	qdf_spinlock_destroy(&pdev->neighbour_peer_mutex);
2839 }
2840 
2841 /**
2842 * dp_htt_ppdu_stats_detach() - detach stats resources
2843 * @pdev: Datapath PDEV handle
2844 *
2845 * Return: void
2846 */
2847 static void dp_htt_ppdu_stats_detach(struct dp_pdev *pdev)
2848 {
2849 	struct ppdu_info *ppdu_info, *ppdu_info_next;
2850 
2851 	TAILQ_FOREACH_SAFE(ppdu_info, &pdev->ppdu_info_list,
2852 			ppdu_info_list_elem, ppdu_info_next) {
2853 		if (!ppdu_info)
2854 			break;
2855 		qdf_assert_always(ppdu_info->nbuf);
2856 		qdf_nbuf_free(ppdu_info->nbuf);
2857 		qdf_mem_free(ppdu_info);
2858 	}
2859 }
2860 
2861 /*
2862 * dp_pdev_detach_wifi3() - detach txrx pdev
2863 * @txrx_pdev: Datapath PDEV handle
2864 * @force: Force detach
2865 *
2866 */
2867 static void dp_pdev_detach_wifi3(struct cdp_pdev *txrx_pdev, int force)
2868 {
2869 	struct dp_pdev *pdev = (struct dp_pdev *)txrx_pdev;
2870 	struct dp_soc *soc = pdev->soc;
2871 	qdf_nbuf_t curr_nbuf, next_nbuf;
2872 	int mac_id;
2873 
2874 	dp_wdi_event_detach(pdev);
2875 
2876 	dp_tx_pdev_detach(pdev);
2877 
2878 	if (wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
2879 		dp_srng_cleanup(soc, &soc->tcl_data_ring[pdev->pdev_id],
2880 			TCL_DATA, pdev->pdev_id);
2881 		dp_srng_cleanup(soc, &soc->tx_comp_ring[pdev->pdev_id],
2882 			WBM2SW_RELEASE, pdev->pdev_id);
2883 	}
2884 
2885 	dp_pktlogmod_exit(pdev);
2886 
2887 	dp_rx_pdev_detach(pdev);
2888 
2889 	dp_rx_pdev_mon_detach(pdev);
2890 
2891 	dp_neighbour_peers_detach(pdev);
2892 	qdf_spinlock_destroy(&pdev->tx_mutex);
2893 	qdf_spinlock_destroy(&pdev->vdev_list_lock);
2894 
2895 	dp_ipa_uc_detach(soc, pdev);
2896 
2897 	dp_cleanup_ipa_rx_refill_buf_ring(soc, pdev);
2898 
2899 	/* Cleanup per PDEV REO rings if configured */
2900 	if (wlan_cfg_per_pdev_rx_ring(soc->wlan_cfg_ctx)) {
2901 		dp_srng_cleanup(soc, &soc->reo_dest_ring[pdev->pdev_id],
2902 			REO_DST, pdev->pdev_id);
2903 	}
2904 
2905 	dp_srng_cleanup(soc, &pdev->rx_refill_buf_ring, RXDMA_BUF, 0);
2906 
2907 	dp_rxdma_ring_cleanup(soc, pdev);
2908 
2909 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
2910 		dp_srng_cleanup(soc, &pdev->rxdma_mon_buf_ring[mac_id],
2911 			RXDMA_MONITOR_BUF, 0);
2912 
2913 		dp_srng_cleanup(soc, &pdev->rxdma_mon_dst_ring[mac_id],
2914 			RXDMA_MONITOR_DST, 0);
2915 
2916 		dp_srng_cleanup(soc, &pdev->rxdma_mon_status_ring[mac_id],
2917 			RXDMA_MONITOR_STATUS, 0);
2918 
2919 		dp_srng_cleanup(soc, &pdev->rxdma_mon_desc_ring[mac_id],
2920 			RXDMA_MONITOR_DESC, 0);
2921 
2922 		dp_srng_cleanup(soc, &pdev->rxdma_err_dst_ring[mac_id],
2923 			RXDMA_DST, 0);
2924 	}
2925 
2926 	curr_nbuf = pdev->invalid_peer_head_msdu;
2927 	while (curr_nbuf) {
2928 		next_nbuf = qdf_nbuf_next(curr_nbuf);
2929 		qdf_nbuf_free(curr_nbuf);
2930 		curr_nbuf = next_nbuf;
2931 	}
2932 
2933 	dp_htt_ppdu_stats_detach(pdev);
2934 
2935 	soc->pdev_list[pdev->pdev_id] = NULL;
2936 	soc->pdev_count--;
2937 	wlan_cfg_pdev_detach(pdev->wlan_cfg_ctx);
2938 	qdf_mem_free(pdev->dp_txrx_handle);
2939 	qdf_mem_free(pdev);
2940 }
2941 
2942 /*
2943  * dp_reo_desc_freelist_destroy() - Flush REO descriptors from deferred freelist
2944  * @soc: DP SOC handle
2945  */
2946 static inline void dp_reo_desc_freelist_destroy(struct dp_soc *soc)
2947 {
2948 	struct reo_desc_list_node *desc;
2949 	struct dp_rx_tid *rx_tid;
2950 
2951 	qdf_spin_lock_bh(&soc->reo_desc_freelist_lock);
2952 	while (qdf_list_remove_front(&soc->reo_desc_freelist,
2953 		(qdf_list_node_t **)&desc) == QDF_STATUS_SUCCESS) {
2954 		rx_tid = &desc->rx_tid;
2955 		qdf_mem_unmap_nbytes_single(soc->osdev,
2956 			rx_tid->hw_qdesc_paddr,
2957 			QDF_DMA_BIDIRECTIONAL,
2958 			rx_tid->hw_qdesc_alloc_size);
2959 		qdf_mem_free(rx_tid->hw_qdesc_vaddr_unaligned);
2960 		qdf_mem_free(desc);
2961 	}
2962 	qdf_spin_unlock_bh(&soc->reo_desc_freelist_lock);
2963 	qdf_list_destroy(&soc->reo_desc_freelist);
2964 	qdf_spinlock_destroy(&soc->reo_desc_freelist_lock);
2965 }
2966 
2967 /*
2968  * dp_soc_detach_wifi3() - Detach txrx SOC
2969  * @txrx_soc: DP SOC handle, struct cdp_soc_t is first element of struct dp_soc.
2970  */
2971 static void dp_soc_detach_wifi3(void *txrx_soc)
2972 {
2973 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
2974 	int i;
2975 
2976 	qdf_atomic_set(&soc->cmn_init_done, 0);
2977 
2978 	qdf_flush_work(&soc->htt_stats.work);
2979 	qdf_disable_work(&soc->htt_stats.work);
2980 
2981 	/* Free pending htt stats messages */
2982 	qdf_nbuf_queue_free(&soc->htt_stats.msg);
2983 
2984 	dp_free_inact_timer(soc);
2985 
2986 	for (i = 0; i < MAX_PDEV_CNT; i++) {
2987 		if (soc->pdev_list[i])
2988 			dp_pdev_detach_wifi3(
2989 				(struct cdp_pdev *)soc->pdev_list[i], 1);
2990 	}
2991 
2992 	dp_peer_find_detach(soc);
2993 
2994 	/* TBD: Call Tx and Rx cleanup functions to free buffers and
2995 	 * SW descriptors
2996 	 */
2997 
2998 	/* Free the ring memories */
2999 	/* Common rings */
3000 	dp_srng_cleanup(soc, &soc->wbm_desc_rel_ring, SW2WBM_RELEASE, 0);
3001 
3002 	dp_tx_soc_detach(soc);
3003 	/* Tx data rings */
3004 	if (!wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
3005 		for (i = 0; i < soc->num_tcl_data_rings; i++) {
3006 			dp_srng_cleanup(soc, &soc->tcl_data_ring[i],
3007 				TCL_DATA, i);
3008 			dp_srng_cleanup(soc, &soc->tx_comp_ring[i],
3009 				WBM2SW_RELEASE, i);
3010 		}
3011 	}
3012 
3013 	/* TCL command and status rings */
3014 	dp_srng_cleanup(soc, &soc->tcl_cmd_ring, TCL_CMD, 0);
3015 	dp_srng_cleanup(soc, &soc->tcl_status_ring, TCL_STATUS, 0);
3016 
3017 	/* Rx data rings */
3018 	if (!wlan_cfg_per_pdev_rx_ring(soc->wlan_cfg_ctx)) {
3019 		soc->num_reo_dest_rings =
3020 			wlan_cfg_num_reo_dest_rings(soc->wlan_cfg_ctx);
3021 		for (i = 0; i < soc->num_reo_dest_rings; i++) {
3022 			/* TODO: Get number of rings and ring sizes
3023 			 * from wlan_cfg
3024 			 */
3025 			dp_srng_cleanup(soc, &soc->reo_dest_ring[i],
3026 				REO_DST, i);
3027 		}
3028 	}
3029 	/* REO reinjection ring */
3030 	dp_srng_cleanup(soc, &soc->reo_reinject_ring, REO_REINJECT, 0);
3031 
3032 	/* Rx release ring */
3033 	dp_srng_cleanup(soc, &soc->rx_rel_ring, WBM2SW_RELEASE, 0);
3034 
3035 	/* Rx exception ring */
3036 	/* TODO: Better to store ring_type and ring_num in
3037 	 * dp_srng during setup
3038 	 */
3039 	dp_srng_cleanup(soc, &soc->reo_exception_ring, REO_EXCEPTION, 0);
3040 
3041 	/* REO command and status rings */
3042 	dp_srng_cleanup(soc, &soc->reo_cmd_ring, REO_CMD, 0);
3043 	dp_srng_cleanup(soc, &soc->reo_status_ring, REO_STATUS, 0);
3044 	dp_hw_link_desc_pool_cleanup(soc);
3045 
3046 	qdf_spinlock_destroy(&soc->peer_ref_mutex);
3047 	qdf_spinlock_destroy(&soc->htt_stats.lock);
3048 
3049 	htt_soc_detach(soc->htt_handle);
3050 
3051 	dp_reo_cmdlist_destroy(soc);
3052 	qdf_spinlock_destroy(&soc->rx.reo_cmd_lock);
3053 	dp_reo_desc_freelist_destroy(soc);
3054 
3055 	wlan_cfg_soc_detach(soc->wlan_cfg_ctx);
3056 
3057 	dp_soc_wds_detach(soc);
3058 	qdf_spinlock_destroy(&soc->ast_lock);
3059 
3060 	qdf_mem_free(soc);
3061 }
3062 
3063 /*
3064  * dp_rxdma_ring_config() - configure the RX DMA rings
3065  *
3066  * This function is used to configure the MAC rings.
3067  * On MCL, the host provides buffers in the Host2FW ring;
3068  * FW refills (copies) buffers to the ring and updates the
3069  * ring_idx in a register
3070  *
3071  * @soc: data path SoC handle
3072  *
3073  * Return: void
3074  */
3075 #ifdef QCA_HOST2FW_RXBUF_RING
3076 static void dp_rxdma_ring_config(struct dp_soc *soc)
3077 {
3078 	int i;
3079 
3080 	for (i = 0; i < MAX_PDEV_CNT; i++) {
3081 		struct dp_pdev *pdev = soc->pdev_list[i];
3082 
3083 		if (pdev) {
3084 			int mac_id;
3085 			bool dbs_enable = 0;
3086 			int max_mac_rings =
3087 				 wlan_cfg_get_num_mac_rings
3088 				(pdev->wlan_cfg_ctx);
3089 
3090 			htt_srng_setup(soc->htt_handle, 0,
3091 				 pdev->rx_refill_buf_ring.hal_srng,
3092 				 RXDMA_BUF);
3093 
3094 			if (pdev->rx_refill_buf_ring2.hal_srng)
3095 				htt_srng_setup(soc->htt_handle, 0,
3096 					pdev->rx_refill_buf_ring2.hal_srng,
3097 					RXDMA_BUF);
3098 
3099 			if (soc->cdp_soc.ol_ops->
3100 				is_hw_dbs_2x2_capable) {
3101 				dbs_enable = soc->cdp_soc.ol_ops->
3102 					is_hw_dbs_2x2_capable(soc->ctrl_psoc);
3103 			}
3104 
3105 			if (dbs_enable) {
3106 				QDF_TRACE(QDF_MODULE_ID_TXRX,
3107 				QDF_TRACE_LEVEL_ERROR,
3108 				FL("DBS enabled max_mac_rings %d\n"),
3109 					 max_mac_rings);
3110 			} else {
3111 				max_mac_rings = 1;
3112 				QDF_TRACE(QDF_MODULE_ID_TXRX,
3113 					 QDF_TRACE_LEVEL_ERROR,
3114 					 FL("DBS disabled, max_mac_rings %d\n"),
3115 					 max_mac_rings);
3116 			}
3117 
3118 			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
3119 					 FL("pdev_id %d max_mac_rings %d\n"),
3120 					 pdev->pdev_id, max_mac_rings);
3121 
3122 			for (mac_id = 0; mac_id < max_mac_rings; mac_id++) {
3123 				int mac_for_pdev = dp_get_mac_id_for_pdev(
3124 							mac_id, pdev->pdev_id);
3125 
3126 				QDF_TRACE(QDF_MODULE_ID_TXRX,
3127 					 QDF_TRACE_LEVEL_ERROR,
3128 					 FL("mac_id %d\n"), mac_for_pdev);
3129 				htt_srng_setup(soc->htt_handle, mac_for_pdev,
3130 					 pdev->rx_mac_buf_ring[mac_id]
3131 						.hal_srng,
3132 					 RXDMA_BUF);
3133 				htt_srng_setup(soc->htt_handle, mac_for_pdev,
3134 					pdev->rxdma_err_dst_ring[mac_id]
3135 						.hal_srng,
3136 					RXDMA_DST);
3137 
3138 				/* Configure monitor mode rings */
3139 				htt_srng_setup(soc->htt_handle, mac_for_pdev,
3140 				   pdev->rxdma_mon_buf_ring[mac_id].hal_srng,
3141 				   RXDMA_MONITOR_BUF);
3142 
3143 				htt_srng_setup(soc->htt_handle, mac_for_pdev,
3144 				   pdev->rxdma_mon_dst_ring[mac_id].hal_srng,
3145 				   RXDMA_MONITOR_DST);
3146 
3147 				htt_srng_setup(soc->htt_handle, mac_for_pdev,
3148 				  pdev->rxdma_mon_status_ring[mac_id].hal_srng,
3149 				  RXDMA_MONITOR_STATUS);
3150 
3151 				htt_srng_setup(soc->htt_handle, mac_for_pdev,
3152 				  pdev->rxdma_mon_desc_ring[mac_id].hal_srng,
3153 				  RXDMA_MONITOR_DESC);
3154 			}
3155 		}
3156 	}
3157 
3158 	/*
3159 	 * Timer to reap rxdma status rings.
3160 	 * Needed until we enable ppdu end interrupts
3161 	 */
3162 	qdf_timer_init(soc->osdev, &soc->mon_reap_timer,
3163 			dp_service_mon_rings, (void *)soc,
3164 			QDF_TIMER_TYPE_WAKE_APPS);
3165 	soc->reap_timer_init = 1;
3166 }
3167 #else
3168 /* This is only for WIN */
3169 static void dp_rxdma_ring_config(struct dp_soc *soc)
3170 {
3171 	int i;
3172 	int mac_id;
3173 
3174 	for (i = 0; i < MAX_PDEV_CNT; i++) {
3175 		struct dp_pdev *pdev = soc->pdev_list[i];
3176 
3177 		if (pdev == NULL)
3178 			continue;
3179 
3180 		for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
3181 			int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, i);
3182 
3183 			htt_srng_setup(soc->htt_handle, mac_for_pdev,
3184 				pdev->rx_refill_buf_ring.hal_srng, RXDMA_BUF);
3185 
3186 			htt_srng_setup(soc->htt_handle, mac_for_pdev,
3187 				pdev->rxdma_mon_buf_ring[mac_id].hal_srng,
3188 				RXDMA_MONITOR_BUF);
3189 			htt_srng_setup(soc->htt_handle, mac_for_pdev,
3190 				pdev->rxdma_mon_dst_ring[mac_id].hal_srng,
3191 				RXDMA_MONITOR_DST);
3192 			htt_srng_setup(soc->htt_handle, mac_for_pdev,
3193 				pdev->rxdma_mon_status_ring[mac_id].hal_srng,
3194 				RXDMA_MONITOR_STATUS);
3195 			htt_srng_setup(soc->htt_handle, mac_for_pdev,
3196 				pdev->rxdma_mon_desc_ring[mac_id].hal_srng,
3197 				RXDMA_MONITOR_DESC);
3198 			htt_srng_setup(soc->htt_handle, mac_for_pdev,
3199 				pdev->rxdma_err_dst_ring[mac_id].hal_srng,
3200 				RXDMA_DST);
3201 		}
3202 	}
3203 }
3204 #endif
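
/*
 * Both variants above hand each ring's HAL SRNG to the target through
 * htt_srng_setup() so FW knows where to produce and consume entries.  The
 * QCA_HOST2FW_RXBUF_RING (MCL) variant additionally arms a reap timer for
 * the monitor status rings because, per the comment above, ppdu-end
 * interrupts are not enabled yet.
 */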
3205 
3206 /*
3207  * dp_soc_attach_target_wifi3() - SOC initialization in the target
3208  * @txrx_soc: Datapath SOC handle
3209  */
3210 static int dp_soc_attach_target_wifi3(struct cdp_soc_t *cdp_soc)
3211 {
3212 	struct dp_soc *soc = (struct dp_soc *)cdp_soc;
3213 
3214 	htt_soc_attach_target(soc->htt_handle);
3215 
3216 	dp_rxdma_ring_config(soc);
3217 
3218 	DP_STATS_INIT(soc);
3219 
3220 	/* initialize work queue for stats processing */
3221 	qdf_create_work(0, &soc->htt_stats.work, htt_t2h_stats_handler, soc);
3222 
3223 	return 0;
3224 }
3225 
3226 /*
3227  * dp_soc_get_nss_cfg_wifi3() - SOC get nss config
3228  * @txrx_soc: Datapath SOC handle
3229  */
3230 static int dp_soc_get_nss_cfg_wifi3(struct cdp_soc_t *cdp_soc)
3231 {
3232 	struct dp_soc *dsoc = (struct dp_soc *)cdp_soc;
3233 	return wlan_cfg_get_dp_soc_nss_cfg(dsoc->wlan_cfg_ctx);
3234 }
3235 /*
3236  * dp_soc_set_nss_cfg_wifi3() - SOC set nss config
3237  * @txrx_soc: Datapath SOC handle
3238  * @nss_cfg: nss config
3239  */
3240 static void dp_soc_set_nss_cfg_wifi3(struct cdp_soc_t *cdp_soc, int config)
3241 {
3242 	struct dp_soc *dsoc = (struct dp_soc *)cdp_soc;
3243 	struct wlan_cfg_dp_soc_ctxt *wlan_cfg_ctx = dsoc->wlan_cfg_ctx;
3244 
3245 	wlan_cfg_set_dp_soc_nss_cfg(wlan_cfg_ctx, config);
3246 
3247 	/*
3248 	 * TODO: masked out based on the per offloaded radio
3249 	 */
3250 	if (config == dp_nss_cfg_dbdc) {
3251 		wlan_cfg_set_num_tx_desc_pool(wlan_cfg_ctx, 0);
3252 		wlan_cfg_set_num_tx_ext_desc_pool(wlan_cfg_ctx, 0);
3253 		wlan_cfg_set_num_tx_desc(wlan_cfg_ctx, 0);
3254 		wlan_cfg_set_num_tx_ext_desc(wlan_cfg_ctx, 0);
3255 	}
3256 
3257 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3258 				FL("nss-wifi<0> nss config is enabled"));
3259 }
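
/*
 * Expected usage (an assumption based on how the value is consumed): the
 * control layer calls dp_soc_set_nss_cfg_wifi3() before pdev attach so
 * that dp_soc_cmn_setup()/dp_pdev_attach_wifi3() read the final NSS
 * bitmap; for dp_nss_cfg_dbdc the host Tx descriptor pools are sized to
 * zero above, presumably because NSS firmware owns the transmit path.
 */
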
3260 /*
3261 * dp_vdev_attach_wifi3() - attach txrx vdev
3262 * @txrx_pdev: Datapath PDEV handle
3263 * @vdev_mac_addr: MAC address of the virtual interface
3264 * @vdev_id: VDEV Id
3265 * @wlan_op_mode: VDEV operating mode
3266 *
3267 * Return: DP VDEV handle on success, NULL on failure
3268 */
3269 static struct cdp_vdev *dp_vdev_attach_wifi3(struct cdp_pdev *txrx_pdev,
3270 	uint8_t *vdev_mac_addr, uint8_t vdev_id, enum wlan_op_mode op_mode)
3271 {
3272 	struct dp_pdev *pdev = (struct dp_pdev *)txrx_pdev;
3273 	struct dp_soc *soc = pdev->soc;
3274 	struct dp_vdev *vdev = qdf_mem_malloc(sizeof(*vdev));
3275 
3276 	if (!vdev) {
3277 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3278 			FL("DP VDEV memory allocation failed"));
3279 		goto fail0;
3280 	}
3281 
3282 	vdev->pdev = pdev;
3283 	vdev->vdev_id = vdev_id;
3284 	vdev->opmode = op_mode;
3285 	vdev->osdev = soc->osdev;
3286 
3287 	vdev->osif_rx = NULL;
3288 	vdev->osif_rsim_rx_decap = NULL;
3289 	vdev->osif_get_key = NULL;
3290 	vdev->osif_rx_mon = NULL;
3291 	vdev->osif_tx_free_ext = NULL;
3292 	vdev->osif_vdev = NULL;
3293 
3294 	vdev->delete.pending = 0;
3295 	vdev->safemode = 0;
3296 	vdev->drop_unenc = 1;
3297 	vdev->sec_type = cdp_sec_type_none;
3298 #ifdef notyet
3299 	vdev->filters_num = 0;
3300 #endif
3301 
3302 	qdf_mem_copy(
3303 		&vdev->mac_addr.raw[0], vdev_mac_addr, OL_TXRX_MAC_ADDR_LEN);
3304 
3305 	vdev->tx_encap_type = wlan_cfg_pkt_type(soc->wlan_cfg_ctx);
3306 	vdev->rx_decap_type = wlan_cfg_pkt_type(soc->wlan_cfg_ctx);
3307 	vdev->dscp_tid_map_id = 0;
3308 	vdev->mcast_enhancement_en = 0;
3309 
3310 	/* TODO: Initialize default HTT meta data that will be used in
3311 	 * TCL descriptors for packets transmitted from this VDEV
3312 	 */
3313 
3314 	TAILQ_INIT(&vdev->peer_list);
3315 
3316 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
3317 	/* add this vdev into the pdev's list */
3318 	TAILQ_INSERT_TAIL(&pdev->vdev_list, vdev, vdev_list_elem);
3319 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
3320 	pdev->vdev_count++;
3321 
3322 	dp_tx_vdev_attach(vdev);
3323 
3324 
3325 	if ((soc->intr_mode == DP_INTR_POLL) &&
3326 			wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx) != 0) {
3327 		if (pdev->vdev_count == 1)
3328 			qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
3329 	}
3330 
3331 	dp_lro_hash_setup(soc);
3332 
3333 	/* LRO */
3334 	if (wlan_cfg_is_lro_enabled(soc->wlan_cfg_ctx) &&
3335 		wlan_op_mode_sta == vdev->opmode)
3336 		vdev->lro_enable = true;
3337 
3338 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
3339 		 "LRO: vdev_id %d lro_enable %d", vdev_id, vdev->lro_enable);
3340 
3341 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3342 		"Created vdev %pK (%pM)", vdev, vdev->mac_addr.raw);
3343 	DP_STATS_INIT(vdev);
3344 
3345 	if (wlan_op_mode_sta == vdev->opmode)
3346 		dp_peer_create_wifi3((struct cdp_vdev *)vdev,
3347 							vdev->mac_addr.raw);
3348 
3349 	return (struct cdp_vdev *)vdev;
3350 
3351 fail0:
3352 	return NULL;
3353 }
3354 
3355 /**
3356  * dp_vdev_register_wifi3() - Register VDEV operations from osif layer
3357  * @vdev: Datapath VDEV handle
3358  * @osif_vdev: OSIF vdev handle
3359  * @txrx_ops: Tx and Rx operations
3360  *
3361  * Return: void
3362  */
3363 static void dp_vdev_register_wifi3(struct cdp_vdev *vdev_handle,
3364 	void *osif_vdev,
3365 	struct ol_txrx_ops *txrx_ops)
3366 {
3367 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
3368 	vdev->osif_vdev = osif_vdev;
3369 	vdev->osif_rx = txrx_ops->rx.rx;
3370 	vdev->osif_rsim_rx_decap = txrx_ops->rx.rsim_rx_decap;
3371 	vdev->osif_get_key = txrx_ops->get_key;
3372 	vdev->osif_rx_mon = txrx_ops->rx.mon;
3373 	vdev->osif_tx_free_ext = txrx_ops->tx.tx_free_ext;
3374 #ifdef notyet
3375 #if ATH_SUPPORT_WAPI
3376 	vdev->osif_check_wai = txrx_ops->rx.wai_check;
3377 #endif
3378 #endif
3379 #ifdef UMAC_SUPPORT_PROXY_ARP
3380 	vdev->osif_proxy_arp = txrx_ops->proxy_arp;
3381 #endif
3382 	vdev->me_convert = txrx_ops->me_convert;
3383 
3384 	/* TODO: Enable the following once Tx code is integrated */
3385 	if (vdev->mesh_vdev)
3386 		txrx_ops->tx.tx = dp_tx_send_mesh;
3387 	else
3388 		txrx_ops->tx.tx = dp_tx_send;
3389 
3390 	txrx_ops->tx.tx_exception = dp_tx_send_exception;
3391 
3392 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW,
3393 		"DP Vdev Register success");
3394 }
3395 
3396 /**
3397  * dp_vdev_flush_peers() - Forcibly flush peers of vdev
3398  * @vdev: Datapath VDEV handle
3399  *
3400  * Return: void
3401  */
3402 static void dp_vdev_flush_peers(struct dp_vdev *vdev)
3403 {
3404 	struct dp_pdev *pdev = vdev->pdev;
3405 	struct dp_soc *soc = pdev->soc;
3406 	struct dp_peer *peer;
3407 	uint16_t *peer_ids;
3408 	uint8_t i = 0, j = 0;
3409 
3410 	peer_ids = qdf_mem_malloc(soc->max_peers * sizeof(peer_ids[0]));
3411 	if (!peer_ids) {
3412 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3413 			"DP alloc failure - unable to flush peers");
3414 		return;
3415 	}
3416 
3417 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
3418 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
3419 		for (i = 0; i < MAX_NUM_PEER_ID_PER_PEER; i++)
3420 			if (peer->peer_ids[i] != HTT_INVALID_PEER)
3421 				if (j < soc->max_peers)
3422 					peer_ids[j++] = peer->peer_ids[i];
3423 	}
3424 	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
3425 
3426 	for (i = 0; i < j ; i++)
3427 		dp_rx_peer_unmap_handler(soc, peer_ids[i]);
3428 
3429 	qdf_mem_free(peer_ids);
3430 
3431 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
3432 		FL("Flushed peers for vdev object %pK "), vdev);
3433 }
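
/*
 * The peer IDs are snapshotted under peer_ref_mutex and the unmap handler
 * is invoked only after dropping the lock, presumably because
 * dp_rx_peer_unmap_handler() may itself release peer references.
 */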
3434 
3435 /*
3436  * dp_vdev_detach_wifi3() - Detach txrx vdev
3437  * @txrx_vdev:		Datapath VDEV handle
3438  * @callback:		Callback OL_IF on completion of detach
3439  * @cb_context:	Callback context
3440  *
3441  */
3442 static void dp_vdev_detach_wifi3(struct cdp_vdev *vdev_handle,
3443 	ol_txrx_vdev_delete_cb callback, void *cb_context)
3444 {
3445 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
3446 	struct dp_pdev *pdev = vdev->pdev;
3447 	struct dp_soc *soc = pdev->soc;
3448 
3449 	/* preconditions */
3450 	qdf_assert(vdev);
3451 
3452 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
3453 	/* remove the vdev from its parent pdev's list */
3454 	TAILQ_REMOVE(&pdev->vdev_list, vdev, vdev_list_elem);
3455 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
3456 
3457 	if (wlan_op_mode_sta == vdev->opmode)
3458 		dp_peer_delete_wifi3(vdev->vap_bss_peer, 0);
3459 
3460 	/*
3461 	 * If Target is hung, flush all peers before detaching vdev
3462 	 * this will free all references held due to missing
3463 	 * unmap commands from Target
3464 	 */
3465 	if (hif_get_target_status(soc->hif_handle) == TARGET_STATUS_RESET)
3466 		dp_vdev_flush_peers(vdev);
3467 
3468 	/*
3469 	 * Use peer_ref_mutex while accessing peer_list, in case
3470 	 * a peer is in the process of being removed from the list.
3471 	 */
3472 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
3473 	/* check that the vdev has no peers allocated */
3474 	if (!TAILQ_EMPTY(&vdev->peer_list)) {
3475 		/* debug print - will be removed later */
3476 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_WARN,
3477 			FL("not deleting vdev object %pK (%pM)"
3478 			" until deletion finishes for all its peers"),
3479 			vdev, vdev->mac_addr.raw);
3480 		/* indicate that the vdev needs to be deleted */
3481 		vdev->delete.pending = 1;
3482 		vdev->delete.callback = callback;
3483 		vdev->delete.context = cb_context;
3484 		qdf_spin_unlock_bh(&soc->peer_ref_mutex);
3485 		return;
3486 	}
3487 	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
3488 
3489 	dp_tx_vdev_detach(vdev);
3490 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
3491 		FL("deleting vdev object %pK (%pM)"), vdev, vdev->mac_addr.raw);
3492 
3493 	qdf_mem_free(vdev);
3494 
3495 	if (callback)
3496 		callback(cb_context);
3497 }
3498 
3499 /*
3500  * dp_peer_create_wifi3() - attach txrx peer
3501  * @txrx_vdev: Datapath VDEV handle
3502  * @peer_mac_addr: Peer MAC address
3503  *
3504  * Return: DP peer handle on success, NULL on failure
3505  */
3506 static void *dp_peer_create_wifi3(struct cdp_vdev *vdev_handle,
3507 		uint8_t *peer_mac_addr)
3508 {
3509 	struct dp_peer *peer;
3510 	int i;
3511 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
3512 	struct dp_pdev *pdev;
3513 	struct dp_soc *soc;
3514 
3515 	/* preconditions */
3516 	qdf_assert(vdev);
3517 	qdf_assert(peer_mac_addr);
3518 
3519 	pdev = vdev->pdev;
3520 	soc = pdev->soc;
3521 
3522 	peer = dp_peer_find_hash_find(pdev->soc, peer_mac_addr,
3523 					0, vdev->vdev_id);
3524 
3525 	if (peer) {
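		/*
		 * An existing peer was found in the hash table; re-attach it by
		 * clearing any pending delete and resetting its AST entry list.
		 */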
3526 		peer->delete_in_progress = false;
3527 
3528 		qdf_spin_lock_bh(&soc->ast_lock);
3529 		TAILQ_INIT(&peer->ast_entry_list);
3530 		qdf_spin_unlock_bh(&soc->ast_lock);
3531 
3532 		/*
3533 		 * On peer create, the peer ref count decrements since a new peer is
3534 		 * not getting created; the earlier reference is reused, and
3535 		 * peer_unref_delete will take care of incrementing the count.
3536 		 */
3537 		if (soc->cdp_soc.ol_ops->peer_unref_delete) {
3538 			soc->cdp_soc.ol_ops->peer_unref_delete(pdev->osif_pdev,
3539 				vdev->vdev_id, peer->mac_addr.raw);
3540 		}
3541 
3542 		DP_STATS_INIT(peer);
3543 		return (void *)peer;
3544 	}
3545 
3546 #ifdef notyet
3547 	peer = (struct dp_peer *)qdf_mempool_alloc(soc->osdev,
3548 		soc->mempool_ol_ath_peer);
3549 #else
3550 	peer = (struct dp_peer *)qdf_mem_malloc(sizeof(*peer));
3551 #endif
3552 
3553 	if (!peer)
3554 		return NULL; /* failure */
3555 
3556 	qdf_mem_zero(peer, sizeof(struct dp_peer));
3557 
3558 	TAILQ_INIT(&peer->ast_entry_list);
3559 
3560 	/* store provided params */
3561 	peer->vdev = vdev;
3562 
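	/* Add the peer's own MAC address as a static AST entry */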
3563 	dp_peer_add_ast(soc, peer, peer_mac_addr, CDP_TXRX_AST_TYPE_STATIC, 0);
3564 
3565 	qdf_spinlock_create(&peer->peer_info_lock);
3566 
3567 	qdf_mem_copy(
3568 		&peer->mac_addr.raw[0], peer_mac_addr, OL_TXRX_MAC_ADDR_LEN);
3569 
3570 	/* TODO: See if rx_opt_proc is really required */
3571 	peer->rx_opt_proc = soc->rx_opt_proc;
3572 
3573 	/* initialize the peer_id */
3574 	for (i = 0; i < MAX_NUM_PEER_ID_PER_PEER; i++)
3575 		peer->peer_ids[i] = HTT_INVALID_PEER;
3576 
3577 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
3578 
3579 	qdf_atomic_init(&peer->ref_cnt);
3580 
3581 	/* keep one reference for attach */
3582 	qdf_atomic_inc(&peer->ref_cnt);
3583 
3584 	/* add this peer into the vdev's list */
3585 	if (wlan_op_mode_sta == vdev->opmode)
3586 		TAILQ_INSERT_HEAD(&vdev->peer_list, peer, peer_list_elem);
3587 	else
3588 		TAILQ_INSERT_TAIL(&vdev->peer_list, peer, peer_list_elem);
3589 
3590 	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
3591 
3592 	/* TODO: See if hash based search is required */
3593 	dp_peer_find_hash_add(soc, peer);
3594 
3595 	/* Initialize the peer state */
3596 	peer->state = OL_TXRX_PEER_STATE_DISC;
3597 
3598 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
3599 		"vdev %pK created peer %pK (%pM) ref_cnt: %d",
3600 		vdev, peer, peer->mac_addr.raw,
3601 		qdf_atomic_read(&peer->ref_cnt));
3602 	/*
3603 	 * For every peer MAP message, search and set if bss_peer
3604 	 */
3605 	if (memcmp(peer->mac_addr.raw, vdev->mac_addr.raw, 6) == 0) {
3606 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
3607 			"vdev bss_peer!!!!");
3608 		peer->bss_peer = 1;
3609 		vdev->vap_bss_peer = peer;
3610 	}
3611 
3612 
3613 #ifndef CONFIG_WIN
3614 	dp_local_peer_id_alloc(pdev, peer);
3615 #endif
3616 	DP_STATS_INIT(peer);
3617 	return (void *)peer;
3618 }
3619 
3620 /*
3621  * dp_peer_setup_wifi3() - initialize the peer
3622  * @vdev_hdl: virtual device object
3623  * @peer_hdl: Peer object
3624  *
3625  * Return: void
3626  */
3627 static void dp_peer_setup_wifi3(struct cdp_vdev *vdev_hdl, void *peer_hdl)
3628 {
3629 	struct dp_peer *peer = (struct dp_peer *)peer_hdl;
3630 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_hdl;
3631 	struct dp_pdev *pdev;
3632 	struct dp_soc *soc;
3633 	bool hash_based = 0;
3634 	enum cdp_host_reo_dest_ring reo_dest;
3635 
3636 	/* preconditions */
3637 	qdf_assert(vdev);
3638 	qdf_assert(peer);
3639 
3640 	pdev = vdev->pdev;
3641 	soc = pdev->soc;
3642 
3643 	peer->last_assoc_rcvd = 0;
3644 	peer->last_disassoc_rcvd = 0;
3645 	peer->last_deauth_rcvd = 0;
3646 
3647 	/*
3648 	 * hash based steering is disabled for Radios which are offloaded
3649 	 * to NSS
3650 	 */
3651 	if (!wlan_cfg_get_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx))
3652 		hash_based = wlan_cfg_is_rx_hash_enabled(soc->wlan_cfg_ctx);
3653 
3654 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
3655 		FL("hash based steering for pdev: %d is %d\n"),
3656 		pdev->pdev_id, hash_based);
3657 
3658 	/*
3659 	 * The assignment below ensures the proper reo_dest ring is chosen
3660 	 * for cases where the Toeplitz hash cannot be generated (e.g. non-TCP/UDP)
3661 	 */
3662 	reo_dest = pdev->reo_dest;
3663 
3664 	if (soc->cdp_soc.ol_ops->peer_set_default_routing) {
3665 		/* TODO: Check the destination ring number to be passed to FW */
3666 		soc->cdp_soc.ol_ops->peer_set_default_routing(
3667 			pdev->osif_pdev, peer->mac_addr.raw,
3668 			 peer->vdev->vdev_id, hash_based, reo_dest);
3669 	}
3670 
3671 	dp_peer_rx_init(pdev, peer);
3672 	return;
3673 }
3674 
3675 /*
3676  * dp_set_vdev_tx_encap_type() - set the encap type of the vdev
3677  * @vdev_handle: virtual device object
3678  * @val: packet type (enum htt_cmn_pkt_type)
3679  *
3680  * Return: void
3681  */
3682 static void dp_set_vdev_tx_encap_type(struct cdp_vdev *vdev_handle,
3683 	 enum htt_cmn_pkt_type val)
3684 {
3685 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
3686 	vdev->tx_encap_type = val;
3687 }
3688 
3689 /*
3690  * dp_set_vdev_rx_decap_type() - set the decap type of the vdev
3691  * @vdev_handle: virtual device object
3692  * @val: packet type (enum htt_cmn_pkt_type)
3693  *
3694  * Return: void
3695  */
3696 static void dp_set_vdev_rx_decap_type(struct cdp_vdev *vdev_handle,
3697 	 enum htt_cmn_pkt_type val)
3698 {
3699 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
3700 	vdev->rx_decap_type = val;
3701 }
3702 
3703 /*
3704  * dp_set_pdev_reo_dest() - set the reo destination ring for this pdev
3705  * @pdev_handle: physical device object
3706  * @val: reo destination ring index (1 - 4)
3707  *
3708  * Return: void
3709  */
3710 static void dp_set_pdev_reo_dest(struct cdp_pdev *pdev_handle,
3711 	 enum cdp_host_reo_dest_ring val)
3712 {
3713 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
3714 
3715 	if (pdev)
3716 		pdev->reo_dest = val;
3717 }
3718 
3719 /*
3720  * dp_get_pdev_reo_dest() - get the reo destination for this pdev
3721  * @pdev_handle: physical device object
3722  *
3723  * Return: reo destination ring index
3724  */
3725 static enum cdp_host_reo_dest_ring
3726 dp_get_pdev_reo_dest(struct cdp_pdev *pdev_handle)
3727 {
3728 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
3729 
3730 	if (pdev)
3731 		return pdev->reo_dest;
3732 	else
3733 		return cdp_host_reo_dest_ring_unknown;
3734 }
3735 
3736 #ifdef QCA_SUPPORT_SON
3737 static void dp_son_peer_authorize(struct dp_peer *peer)
3738 {
3739 	struct dp_soc *soc;
3740 	soc = peer->vdev->pdev->soc;
3741 	peer->peer_bs_inact_flag = 0;
3742 	peer->peer_bs_inact = soc->pdev_bs_inact_reload;
3743 	return;
3744 }
3745 #else
3746 static void dp_son_peer_authorize(struct dp_peer *peer)
3747 {
3748 	return;
3749 }
3750 #endif
3751 /*
3752  * dp_set_filter_neighbour_peers() - set filter neighbour peers for smart mesh
3753  * @pdev_handle: device object
3754  * @val: value to be set
3755  *
3756  * Return: 0 on success
3757  */
3758 static int dp_set_filter_neighbour_peers(struct cdp_pdev *pdev_handle,
3759 	 uint32_t val)
3760 {
3761 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
3762 
3763 	/* Enable/Disable smart mesh filtering. This flag will be checked
3764 	 * during rx processing to decide whether packets are from NAC clients.
3765 	 */
3766 	pdev->filter_neighbour_peers = val;
3767 	return 0;
3768 }
3769 
3770 /*
3771  * dp_update_filter_neighbour_peers() - set neighbour peers(nac clients)
3772  * address for smart mesh filtering
3773  * @pdev_handle: device object
3774  * @cmd: Add/Del command
3775  * @macaddr: nac client mac address
3776  *
3777  * Return: 1 on success, 0 on failure
3778  */
3779 static int dp_update_filter_neighbour_peers(struct cdp_pdev *pdev_handle,
3780 	 uint32_t cmd, uint8_t *macaddr)
3781 {
3782 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
3783 	struct dp_neighbour_peer *peer = NULL;
3784 
3785 	if (!macaddr)
3786 		goto fail0;
3787 
3788 	/* Store address of NAC (neighbour peer) which will be checked
3789 	 * against TA of received packets.
3790 	 */
3791 	if (cmd == DP_NAC_PARAM_ADD) {
3792 		peer = (struct dp_neighbour_peer *) qdf_mem_malloc(
3793 				sizeof(*peer));
3794 
3795 		if (!peer) {
3796 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3797 				FL("DP neighbour peer node memory allocation failed"));
3798 			goto fail0;
3799 		}
3800 
3801 		qdf_mem_copy(&peer->neighbour_peers_macaddr.raw[0],
3802 			macaddr, DP_MAC_ADDR_LEN);
3803 
3804 
3805 		qdf_spin_lock_bh(&pdev->neighbour_peer_mutex);
3806 		/* add this neighbour peer into the list */
3807 		TAILQ_INSERT_TAIL(&pdev->neighbour_peers_list, peer,
3808 				neighbour_peer_list_elem);
3809 		qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex);
3810 
3811 		return 1;
3812 
3813 	} else if (cmd == DP_NAC_PARAM_DEL) {
3814 		qdf_spin_lock_bh(&pdev->neighbour_peer_mutex);
3815 		TAILQ_FOREACH(peer, &pdev->neighbour_peers_list,
3816 				neighbour_peer_list_elem) {
3817 			if (!qdf_mem_cmp(&peer->neighbour_peers_macaddr.raw[0],
3818 				macaddr, DP_MAC_ADDR_LEN)) {
3819 				/* delete this peer from the list */
3820 				TAILQ_REMOVE(&pdev->neighbour_peers_list,
3821 					peer, neighbour_peer_list_elem);
3822 				qdf_mem_free(peer);
3823 				break;
3824 			}
3825 		}
3826 		qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex);
3827 
3828 		return 1;
3829 
3830 	}
3831 
3832 fail0:
3833 	return 0;
3834 }
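
/*
 * Illustrative usage (hypothetical caller, not part of this file): a
 * control-path handler adding a NAC client MAC for smart mesh filtering
 * could invoke:
 *
 *	dp_update_filter_neighbour_peers(pdev_handle, DP_NAC_PARAM_ADD, nac_mac);
 *
 * and later remove the same address with DP_NAC_PARAM_DEL.
 */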
3835 
3836 /*
3837  * dp_get_sec_type() - Get the security type
3838  * @peer:		Datapath peer handle
3839  * @sec_idx:    Security id (mcast, ucast)
3840  *
3841  * Return: security type
3842  */
3843 static int dp_get_sec_type(struct cdp_peer *peer, uint8_t sec_idx)
3844 {
3845 	struct dp_peer *dpeer = (struct dp_peer *)peer;
3846 
3847 	return dpeer->security[sec_idx].sec_type;
3848 }
3849 
3850 /*
3851  * dp_peer_authorize() - authorize txrx peer
3852  * @peer_handle:		Datapath peer handle
3853  * @authorize: authorization flag (non-zero to authorize the peer)
3854  *
3855  */
3856 static void dp_peer_authorize(struct cdp_peer *peer_handle, uint32_t authorize)
3857 {
3858 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
3859 	struct dp_soc *soc;
3860 
3861 	if (peer != NULL) {
3862 		soc = peer->vdev->pdev->soc;
3863 		qdf_spin_lock_bh(&soc->peer_ref_mutex);
3864 		dp_son_peer_authorize(peer);
3865 		peer->authorize = authorize ? 1 : 0;
3866 		qdf_spin_unlock_bh(&soc->peer_ref_mutex);
3867 	}
3868 }
3869 
3870 #ifdef QCA_SUPPORT_SON
3871 /*
3872  * dp_txrx_update_inact_threshold() - Update inact timer threshold
3873  * @pdev_handle: Device handle
3874  * @new_threshold : updated threshold value
3875  *
3876  */
3877 static void
3878 dp_txrx_update_inact_threshold(struct cdp_pdev *pdev_handle,
3879 			       u_int16_t new_threshold)
3880 {
3881 	struct dp_vdev *vdev;
3882 	struct dp_peer *peer;
3883 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
3884 	struct dp_soc *soc = pdev->soc;
3885 	u_int16_t old_threshold = soc->pdev_bs_inact_reload;
3886 
3887 	if (old_threshold == new_threshold)
3888 		return;
3889 
3890 	soc->pdev_bs_inact_reload = new_threshold;
3891 
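	/*
	 * Rescale each authorized AP-vdev peer's remaining inactivity budget:
	 * time already spent inactive under the old threshold carries over,
	 * and peers that have already exceeded the new threshold are marked
	 * inactive immediately.
	 */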
3892 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
3893 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
3894 	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
3895 		if (vdev->opmode != wlan_op_mode_ap)
3896 			continue;
3897 
3898 		TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
3899 			if (!peer->authorize)
3900 				continue;
3901 
3902 			if (old_threshold - peer->peer_bs_inact >=
3903 					new_threshold) {
3904 				dp_mark_peer_inact((void *)peer, true);
3905 				peer->peer_bs_inact = 0;
3906 			} else {
3907 				peer->peer_bs_inact = new_threshold -
3908 					(old_threshold - peer->peer_bs_inact);
3909 			}
3910 		}
3911 	}
3912 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
3913 	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
3914 }
3915 
3916 /**
3917  * dp_txrx_reset_inact_count(): Reset inact count
3918  * @pdev_handle - device handle
3919  *
3920  * Return: void
3921  */
3922 static void
3923 dp_txrx_reset_inact_count(struct cdp_pdev *pdev_handle)
3924 {
3925 	struct dp_vdev *vdev = NULL;
3926 	struct dp_peer *peer = NULL;
3927 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
3928 	struct dp_soc *soc = pdev->soc;
3929 
3930 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
3931 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
3932 	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
3933 		if (vdev->opmode != wlan_op_mode_ap)
3934 			continue;
3935 
3936 		TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
3937 			if (!peer->authorize)
3938 				continue;
3939 
3940 			peer->peer_bs_inact = soc->pdev_bs_inact_reload;
3941 		}
3942 	}
3943 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
3944 	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
3945 }
3946 
3947 /**
3948  * dp_set_inact_params(): set inactivity params
3949  * @pdev_handle - device handle
3950  * @inact_check_interval - inactivity interval
3951  * @inact_normal - Inactivity normal
3952  * @inact_overload - Inactivity overload
3953  *
3954  * Return: bool
3955  */
3956 bool dp_set_inact_params(struct cdp_pdev *pdev_handle,
3957 			 u_int16_t inact_check_interval,
3958 			 u_int16_t inact_normal, u_int16_t inact_overload)
3959 {
3960 	struct dp_soc *soc;
3961 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
3962 
3963 	if (!pdev)
3964 		return false;
3965 
3966 	soc = pdev->soc;
3967 	if (!soc)
3968 		return false;
3969 
3970 	soc->pdev_bs_inact_interval = inact_check_interval;
3971 	soc->pdev_bs_inact_normal = inact_normal;
3972 	soc->pdev_bs_inact_overload = inact_overload;
3973 
3974 	dp_txrx_update_inact_threshold((struct cdp_pdev *)pdev,
3975 					soc->pdev_bs_inact_normal);
3976 
3977 	return true;
3978 }
3979 
3980 /**
3981  * dp_start_inact_timer(): Inactivity timer start
3982  * @pdev_handle - device handle
3983  * @enable - Inactivity timer start/stop
3984  *
3985  * Return: bool
3986  */
3987 bool dp_start_inact_timer(struct cdp_pdev *pdev_handle, bool enable)
3988 {
3989 	struct dp_soc *soc;
3990 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
3991 
3992 	if (!pdev)
3993 		return false;
3994 
3995 	soc = pdev->soc;
3996 	if (!soc)
3997 		return false;
3998 
3999 	if (enable) {
4000 		dp_txrx_reset_inact_count((struct cdp_pdev *)pdev);
4001 		qdf_timer_mod(&soc->pdev_bs_inact_timer,
4002 			      soc->pdev_bs_inact_interval * 1000);
4003 	} else {
4004 		qdf_timer_stop(&soc->pdev_bs_inact_timer);
4005 	}
4006 
4007 	return true;
4008 }
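
/*
 * Illustrative usage (example values only, not taken from this driver): the
 * control path would typically set the inactivity parameters and then arm
 * the timer, e.g.
 *
 *	dp_set_inact_params(pdev_handle, 1, 10, 20);
 *	dp_start_inact_timer(pdev_handle, true);
 *
 * The check interval is in seconds (it is multiplied by 1000 when the timer
 * is armed); inact_normal/inact_overload select the per-peer reload value
 * used under normal and overload conditions respectively.
 */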
4009 
4010 /**
4011  * dp_set_overload(): Set inactivity overload
4012  * @pdev_handle - device handle
4013  * @overload - overload status
4014  *
4015  * Return: void
4016  */
4017 void dp_set_overload(struct cdp_pdev *pdev_handle, bool overload)
4018 {
4019 	struct dp_soc *soc;
4020 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
4021 
4022 	if (!pdev)
4023 		return;
4024 
4025 	soc = pdev->soc;
4026 	if (!soc)
4027 		return;
4028 
4029 	dp_txrx_update_inact_threshold((struct cdp_pdev *)pdev,
4030 			overload ? soc->pdev_bs_inact_overload :
4031 			soc->pdev_bs_inact_normal);
4032 }
4033 
4034 /**
4035  * dp_peer_is_inact(): check whether peer is inactive
4036  * @peer_handle - datapath peer handle
4037  *
4038  * Return: bool
4039  */
4040 bool dp_peer_is_inact(void *peer_handle)
4041 {
4042 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
4043 
4044 	if (!peer)
4045 		return false;
4046 
4047 	return peer->peer_bs_inact_flag == 1;
4048 }
4049 
4050 /**
4051  * dp_init_inact_timer: initialize the inact timer
4052  * @soc - SOC handle
4053  *
4054  * Return: void
4055  */
4056 void dp_init_inact_timer(struct dp_soc *soc)
4057 {
4058 	qdf_timer_init(soc->osdev, &soc->pdev_bs_inact_timer,
4059 		dp_txrx_peer_find_inact_timeout_handler,
4060 		(void *)soc, QDF_TIMER_TYPE_WAKE_APPS);
4061 }
4062 
4063 #else
4064 
4065 bool dp_set_inact_params(struct cdp_pdev *pdev, u_int16_t inact_check_interval,
4066 			 u_int16_t inact_normal, u_int16_t inact_overload)
4067 {
4068 	return false;
4069 }
4070 
4071 bool dp_start_inact_timer(struct cdp_pdev *pdev, bool enable)
4072 {
4073 	return false;
4074 }
4075 
4076 void dp_set_overload(struct cdp_pdev *pdev, bool overload)
4077 {
4078 	return;
4079 }
4080 
4081 void dp_init_inact_timer(struct dp_soc *soc)
4082 {
4083 	return;
4084 }
4085 
4086 bool dp_peer_is_inact(void *peer)
4087 {
4088 	return false;
4089 }
4090 #endif
4091 
4092 /*
4093  * dp_peer_unref_delete() - unref and delete peer
4094  * @peer_handle:		Datapath peer handle
4095  *
4096  */
4097 void dp_peer_unref_delete(void *peer_handle)
4098 {
4099 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
4100 	struct dp_peer *bss_peer = NULL;
4101 	struct dp_vdev *vdev = peer->vdev;
4102 	struct dp_pdev *pdev = vdev->pdev;
4103 	struct dp_soc *soc = pdev->soc;
4104 	struct dp_peer *tmppeer;
4105 	int found = 0;
4106 	uint16_t peer_id;
4107 	uint16_t vdev_id;
4108 
4109 	/*
4110 	 * Hold the lock all the way from checking if the peer ref count
4111 	 * is zero until the peer references are removed from the hash
4112 	 * table and vdev list (if the peer ref count is zero).
4113 	 * This protects against a new HL tx operation starting to use the
4114 	 * peer object just after this function concludes it's done being used.
4115 	 * Furthermore, the lock needs to be held while checking whether the
4116 	 * vdev's list of peers is empty, to make sure that list is not modified
4117 	 * concurrently with the empty check.
4118 	 */
4119 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
4120 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
4121 		  "%s: peer %pK ref_cnt(before decrement): %d\n", __func__,
4122 		  peer, qdf_atomic_read(&peer->ref_cnt));
4123 	if (qdf_atomic_dec_and_test(&peer->ref_cnt)) {
4124 		peer_id = peer->peer_ids[0];
4125 		vdev_id = vdev->vdev_id;
4126 
4127 		/*
4128 		 * Make sure that the reference to the peer in
4129 		 * peer object map is removed
4130 		 */
4131 		if (peer_id != HTT_INVALID_PEER)
4132 			soc->peer_id_to_obj_map[peer_id] = NULL;
4133 
4134 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
4135 			"Deleting peer %pK (%pM)", peer, peer->mac_addr.raw);
4136 
4137 		/* remove the reference to the peer from the hash table */
4138 		dp_peer_find_hash_remove(soc, peer);
4139 
4140 		TAILQ_FOREACH(tmppeer, &peer->vdev->peer_list, peer_list_elem) {
4141 			if (tmppeer == peer) {
4142 				found = 1;
4143 				break;
4144 			}
4145 		}
4146 		if (found) {
4147 			TAILQ_REMOVE(&peer->vdev->peer_list, peer,
4148 				peer_list_elem);
4149 		} else {
4150 			/*Ignoring the remove operation as peer not found*/
4151 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_WARN,
4152 				"peer %pK not found in vdev (%pK)->peer_list:%pK",
4153 				peer, vdev, &peer->vdev->peer_list);
4154 		}
4155 
4156 		/* cleanup the peer data */
4157 		dp_peer_cleanup(vdev, peer);
4158 
4159 		/* check whether the parent vdev has no peers left */
4160 		if (TAILQ_EMPTY(&vdev->peer_list)) {
4161 			/*
4162 			 * Now that there are no references to the peer, we can
4163 			 * release the peer reference lock.
4164 			 */
4165 			qdf_spin_unlock_bh(&soc->peer_ref_mutex);
4166 			/*
4167 			 * Check if the parent vdev was waiting for its peers
4168 			 * to be deleted, in order for it to be deleted too.
4169 			 */
4170 			if (vdev->delete.pending) {
4171 				ol_txrx_vdev_delete_cb vdev_delete_cb =
4172 					vdev->delete.callback;
4173 				void *vdev_delete_context =
4174 					vdev->delete.context;
4175 
4176 				QDF_TRACE(QDF_MODULE_ID_DP,
4177 					QDF_TRACE_LEVEL_INFO_HIGH,
4178 					FL("deleting vdev object %pK (%pM)"
4179 					" - its last peer is done"),
4180 					vdev, vdev->mac_addr.raw);
4181 				/* all peers are gone, go ahead and delete it */
4182 				dp_tx_flow_pool_unmap_handler(pdev, vdev_id,
4183 								FLOW_TYPE_VDEV,
4184 								vdev_id);
4185 				dp_tx_vdev_detach(vdev);
4186 				QDF_TRACE(QDF_MODULE_ID_DP,
4187 					QDF_TRACE_LEVEL_INFO_HIGH,
4188 					FL("deleting vdev object %pK (%pM)"),
4189 					vdev, vdev->mac_addr.raw);
4190 
4191 				qdf_mem_free(vdev);
4192 				vdev = NULL;
4193 				if (vdev_delete_cb)
4194 					vdev_delete_cb(vdev_delete_context);
4195 			}
4196 		} else {
4197 			qdf_spin_unlock_bh(&soc->peer_ref_mutex);
4198 		}
4199 
4200 		if (vdev) {
4201 			if (vdev->vap_bss_peer == peer) {
4202 				vdev->vap_bss_peer = NULL;
4203 			}
4204 		}
4205 
4206 		if (soc->cdp_soc.ol_ops->peer_unref_delete) {
4207 			soc->cdp_soc.ol_ops->peer_unref_delete(pdev->osif_pdev,
4208 					vdev_id, peer->mac_addr.raw);
4209 		}
4210 
4211 		if (!vdev || !vdev->vap_bss_peer) {
4212 			goto free_peer;
4213 		}
4214 
4215 #ifdef notyet
4216 		qdf_mempool_free(soc->osdev, soc->mempool_ol_ath_peer, peer);
4217 #else
4218 		bss_peer = vdev->vap_bss_peer;
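		/*
		 * Fold the departing peer's accumulated stats into the bss peer
		 * so they are not lost when the peer memory is freed below.
		 */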
4219 		DP_UPDATE_STATS(bss_peer, peer);
4220 
4221 free_peer:
4222 		qdf_mem_free(peer);
4223 
4224 #endif
4225 	} else {
4226 		qdf_spin_unlock_bh(&soc->peer_ref_mutex);
4227 	}
4228 }
4229 
4230 /*
4231  * dp_peer_delete_wifi3() - Detach txrx peer
4232  * @peer_handle: Datapath peer handle
4233  * @bitmap: bitmap indicating special handling of request.
4234  *
4235  */
4236 static void dp_peer_delete_wifi3(void *peer_handle, uint32_t bitmap)
4237 {
4238 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
4239 
4240 	/* redirect the peer's rx delivery function to point to a
4241 	 * discard func
4242 	 */
4243 
4244 	peer->rx_opt_proc = dp_rx_discard;
4245 
4246 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
4247 		FL("peer %pK (%pM)"),  peer, peer->mac_addr.raw);
4248 
4249 #ifndef CONFIG_WIN
4250 	dp_local_peer_id_free(peer->vdev->pdev, peer);
4251 #endif
4252 	qdf_spinlock_destroy(&peer->peer_info_lock);
4253 
4254 	/*
4255 	 * Remove the reference added during peer_attach.
4256 	 * The peer will still be left allocated until the
4257 	 * PEER_UNMAP message arrives to remove the other
4258 	 * reference, added by the PEER_MAP message.
4259 	 */
4260 	dp_peer_unref_delete(peer_handle);
4261 }
4262 
4263 /*
4264  * dp_get_vdev_mac_addr_wifi3() - get vdev mac address
4265  * @pvdev:		Datapath VDEV handle
4266  *
4267  */
4268 static uint8_t *dp_get_vdev_mac_addr_wifi3(struct cdp_vdev *pvdev)
4269 {
4270 	struct dp_vdev *vdev = (struct dp_vdev *)pvdev;
4271 	return vdev->mac_addr.raw;
4272 }
4273 
4274 /*
4275  * dp_vdev_set_wds() - Enable/Disable WDS on the vdev
4276  * @vdev_handle: DP VDEV handle
4277  * @val: WDS enable/disable value
4278  *
4279  * Return: 0 on success
4280  */
4281 static int dp_vdev_set_wds(void *vdev_handle, uint32_t val)
4282 {
4283 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
4284 
4285 	vdev->wds_enabled = val;
4286 	return 0;
4287 }
4288 
4289 /*
4290  * dp_get_vdev_from_vdev_id_wifi3() - get vdev from vdev_id
4291  * @dev:		Datapath PDEV handle
4292  * @vdev_id:	vdev id to look up
4293  */
4294 static struct cdp_vdev *dp_get_vdev_from_vdev_id_wifi3(struct cdp_pdev *dev,
4295 						uint8_t vdev_id)
4296 {
4297 	struct dp_pdev *pdev = (struct dp_pdev *)dev;
4298 	struct dp_vdev *vdev = NULL;
4299 
4300 	if (qdf_unlikely(!pdev))
4301 		return NULL;
4302 
4303 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
4304 	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
4305 		if (vdev->vdev_id == vdev_id)
4306 			break;
4307 	}
4308 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
4309 
4310 	return (struct cdp_vdev *)vdev;
4311 }
4312 
4313 static int dp_get_opmode(struct cdp_vdev *vdev_handle)
4314 {
4315 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
4316 
4317 	return vdev->opmode;
4318 }
4319 
4320 static struct cdp_cfg *dp_get_ctrl_pdev_from_vdev_wifi3(struct cdp_vdev *pvdev)
4321 {
4322 	struct dp_vdev *vdev = (struct dp_vdev *)pvdev;
4323 	struct dp_pdev *pdev = vdev->pdev;
4324 
4325 	return (struct cdp_cfg *)pdev->wlan_cfg_ctx;
4326 }
4327 
4328 /**
4329  * dp_reset_monitor_mode() - Disable monitor mode
4330  * @pdev_handle: Datapath PDEV handle
4331  *
4332  * Return: 0 on success, not 0 on failure
4333  */
4334 static int dp_reset_monitor_mode(struct cdp_pdev *pdev_handle)
4335 {
4336 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
4337 	struct htt_rx_ring_tlv_filter htt_tlv_filter;
4338 	struct dp_soc *soc = pdev->soc;
4339 	uint8_t pdev_id;
4340 	int mac_id;
4341 
4342 	pdev_id = pdev->pdev_id;
4343 	soc = pdev->soc;
4344 
4345 	qdf_spin_lock_bh(&pdev->mon_lock);
4346 
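	/*
	 * A zeroed TLV filter disables every RXDMA monitor TLV, so the ring
	 * config calls below effectively stop monitor buffer and status ring
	 * delivery for this pdev.
	 */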
4347 	qdf_mem_set(&(htt_tlv_filter), sizeof(htt_tlv_filter), 0x0);
4348 
4349 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
4350 		int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, pdev_id);
4351 
4352 		htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
4353 			pdev->rxdma_mon_buf_ring[mac_id].hal_srng,
4354 			RXDMA_MONITOR_BUF, RX_BUFFER_SIZE, &htt_tlv_filter);
4355 
4356 		htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
4357 			pdev->rxdma_mon_status_ring[mac_id].hal_srng,
4358 			RXDMA_MONITOR_STATUS, RX_BUFFER_SIZE, &htt_tlv_filter);
4359 	}
4360 
4361 	pdev->monitor_vdev = NULL;
4362 
4363 	qdf_spin_unlock_bh(&pdev->mon_lock);
4364 
4365 	return 0;
4366 }
4367 
4368 /**
4369  * dp_set_nac() - set peer_nac
4370  * @peer_handle: Datapath PEER handle
4371  *
4372  * Return: void
4373  */
4374 static void dp_set_nac(struct cdp_peer *peer_handle)
4375 {
4376 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
4377 
4378 	peer->nac = 1;
4379 }
4380 
4381 /**
4382  * dp_get_tx_pending() - read pending tx
4383  * @pdev_handle: Datapath PDEV handle
4384  *
4385  * Return: outstanding tx
4386  */
4387 static int dp_get_tx_pending(struct cdp_pdev *pdev_handle)
4388 {
4389 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
4390 
4391 	return qdf_atomic_read(&pdev->num_tx_outstanding);
4392 }
4393 
4394 /**
4395  * dp_get_peer_mac_from_peer_id() - get peer mac
4396  * @pdev_handle: Datapath PDEV handle
4397  * @peer_id: Peer ID
4398  * @peer_mac: MAC addr of PEER
4399  *
4400  * Return: void
4401  */
4402 static void dp_get_peer_mac_from_peer_id(struct cdp_pdev *pdev_handle,
4403 	uint32_t peer_id, uint8_t *peer_mac)
4404 {
4405 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
4406 	struct dp_peer *peer;
4407 
4408 	if (pdev && peer_mac) {
4409 		peer = dp_peer_find_by_id(pdev->soc, (uint16_t)peer_id);
4410 		if (peer && peer->mac_addr.raw) {
4411 			qdf_mem_copy(peer_mac, peer->mac_addr.raw,
4412 					DP_MAC_ADDR_LEN);
4413 		}
4414 	}
4415 }
4416 
4417 /**
4418  * dp_vdev_set_monitor_mode() - Set DP VDEV to monitor mode
4419  * @vdev_handle: Datapath VDEV handle
4420  * @smart_monitor: Flag to denote if its smart monitor mode
4421  *
4422  * Return: 0 on success, not 0 on failure
4423  */
4424 static int dp_vdev_set_monitor_mode(struct cdp_vdev *vdev_handle,
4425 		uint8_t smart_monitor)
4426 {
4427 	/* Many monitor VAPs can exist in a system but only one can be up at
4428 	 * any time
4429 	 */
4430 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
4431 	struct dp_pdev *pdev;
4432 	struct htt_rx_ring_tlv_filter htt_tlv_filter;
4433 	struct dp_soc *soc;
4434 	uint8_t pdev_id;
4435 	int mac_id;
4436 
4437 	qdf_assert(vdev);
4438 
4439 	pdev = vdev->pdev;
4440 	pdev_id = pdev->pdev_id;
4441 	soc = pdev->soc;
4442 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_WARN,
4443 		"pdev=%pK, pdev_id=%d, soc=%pK vdev=%pK\n",
4444 		pdev, pdev_id, soc, vdev);
4445 
4446 	/*Check if current pdev's monitor_vdev exists */
4447 	if (pdev->monitor_vdev) {
4448 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
4449 			"vdev=%pK\n", vdev);
4450 		qdf_assert(vdev);
4451 	}
4452 
4453 	pdev->monitor_vdev = vdev;
4454 
4455 	/* If smart monitor mode, do not configure monitor ring */
4456 	if (smart_monitor)
4457 		return QDF_STATUS_SUCCESS;
4458 
4459 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
4460 		"MODE[%x] FP[%02x|%02x|%02x] MO[%02x|%02x|%02x]\n",
4461 		pdev->mon_filter_mode, pdev->fp_mgmt_filter,
4462 		pdev->fp_ctrl_filter, pdev->fp_data_filter,
4463 		pdev->mo_mgmt_filter, pdev->mo_ctrl_filter,
4464 		pdev->mo_data_filter);
4465 
4466 	qdf_mem_set(&(htt_tlv_filter), sizeof(htt_tlv_filter), 0x0);
4467 
4468 	htt_tlv_filter.mpdu_start = 1;
4469 	htt_tlv_filter.msdu_start = 1;
4470 	htt_tlv_filter.packet = 1;
4471 	htt_tlv_filter.msdu_end = 1;
4472 	htt_tlv_filter.mpdu_end = 1;
4473 	htt_tlv_filter.packet_header = 1;
4474 	htt_tlv_filter.attention = 1;
4475 	htt_tlv_filter.ppdu_start = 0;
4476 	htt_tlv_filter.ppdu_end = 0;
4477 	htt_tlv_filter.ppdu_end_user_stats = 0;
4478 	htt_tlv_filter.ppdu_end_user_stats_ext = 0;
4479 	htt_tlv_filter.ppdu_end_status_done = 0;
4480 	htt_tlv_filter.header_per_msdu = 1;
4481 	htt_tlv_filter.enable_fp =
4482 		(pdev->mon_filter_mode & MON_FILTER_PASS) ? 1 : 0;
4483 	htt_tlv_filter.enable_md = 0;
4484 	htt_tlv_filter.enable_mo =
4485 		(pdev->mon_filter_mode & MON_FILTER_OTHER) ? 1 : 0;
4486 	htt_tlv_filter.fp_mgmt_filter = pdev->fp_mgmt_filter;
4487 	htt_tlv_filter.fp_ctrl_filter = pdev->fp_ctrl_filter;
4488 	htt_tlv_filter.fp_data_filter = pdev->fp_data_filter;
4489 	htt_tlv_filter.mo_mgmt_filter = pdev->mo_mgmt_filter;
4490 	htt_tlv_filter.mo_ctrl_filter = pdev->mo_ctrl_filter;
4491 	htt_tlv_filter.mo_data_filter = pdev->mo_data_filter;
4492 
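	/*
	 * Program the monitor destination (buffer) ring of each mac to deliver
	 * full packets with per-MSDU headers using the filter built above.
	 */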
4493 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
4494 		int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, pdev_id);
4495 
4496 		htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
4497 			pdev->rxdma_mon_buf_ring[mac_id].hal_srng,
4498 			RXDMA_MONITOR_BUF, RX_BUFFER_SIZE, &htt_tlv_filter);
4499 	}
4500 
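	/*
	 * Rebuild the TLV filter for the monitor status rings: only the PPDU
	 * start/end and user-stats TLVs are needed for status processing.
	 */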
4501 	qdf_mem_set(&(htt_tlv_filter), sizeof(htt_tlv_filter), 0x0);
4502 
4503 	htt_tlv_filter.mpdu_start = 1;
4504 	htt_tlv_filter.msdu_start = 0;
4505 	htt_tlv_filter.packet = 0;
4506 	htt_tlv_filter.msdu_end = 0;
4507 	htt_tlv_filter.mpdu_end = 0;
4508 	htt_tlv_filter.attention = 0;
4509 	htt_tlv_filter.ppdu_start = 1;
4510 	htt_tlv_filter.ppdu_end = 1;
4511 	htt_tlv_filter.ppdu_end_user_stats = 1;
4512 	htt_tlv_filter.ppdu_end_user_stats_ext = 1;
4513 	htt_tlv_filter.ppdu_end_status_done = 1;
4514 	htt_tlv_filter.enable_fp = 1;
4515 	htt_tlv_filter.enable_md = 0;
4516 	htt_tlv_filter.enable_mo = 1;
4517 	if (pdev->mcopy_mode) {
4518 		htt_tlv_filter.packet_header = 1;
4519 	}
4520 	htt_tlv_filter.fp_mgmt_filter = FILTER_MGMT_ALL;
4521 	htt_tlv_filter.fp_ctrl_filter = FILTER_CTRL_ALL;
4522 	htt_tlv_filter.fp_data_filter = FILTER_DATA_ALL;
4523 	htt_tlv_filter.mo_mgmt_filter = FILTER_MGMT_ALL;
4524 	htt_tlv_filter.mo_ctrl_filter = FILTER_CTRL_ALL;
4525 	htt_tlv_filter.mo_data_filter = FILTER_DATA_ALL;
4526 
4527 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
4528 		int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id,
4529 						pdev->pdev_id);
4530 
4531 		htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
4532 			pdev->rxdma_mon_status_ring[mac_id].hal_srng,
4533 			RXDMA_MONITOR_STATUS, RX_BUFFER_SIZE, &htt_tlv_filter);
4534 	}
4535 
4536 	return QDF_STATUS_SUCCESS;
4537 }
4538 
4539 /**
4540  * dp_pdev_set_advance_monitor_filter() - Set DP PDEV monitor filter
4541  * @pdev_handle: Datapath PDEV handle
4542  * @filter_val: monitor mode filter configuration (mode and FP/MO filters)
4543  * Return: 0 on success, not 0 on failure
4544  */
4545 static int dp_pdev_set_advance_monitor_filter(struct cdp_pdev *pdev_handle,
4546 	struct cdp_monitor_filter *filter_val)
4547 {
4548 	/* Many monitor VAPs can exist in a system but only one can be up at
4549 	 * any time
4550 	 */
4551 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
4552 	struct dp_vdev *vdev = pdev->monitor_vdev;
4553 	struct htt_rx_ring_tlv_filter htt_tlv_filter;
4554 	struct dp_soc *soc;
4555 	uint8_t pdev_id;
4556 	int mac_id;
4557 
4558 	pdev_id = pdev->pdev_id;
4559 	soc = pdev->soc;
4560 
4561 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_WARN,
4562 		"pdev=%pK, pdev_id=%d, soc=%pK vdev=%pK\n",
4563 		pdev, pdev_id, soc, vdev);
4564 
4565 	/*Check if current pdev's monitor_vdev exists */
4566 	if (!pdev->monitor_vdev) {
4567 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
4568 			"vdev=%pK\n", vdev);
4569 		qdf_assert(vdev);
4570 	}
4571 
4572 	/* update filter mode, type in pdev structure */
4573 	pdev->mon_filter_mode = filter_val->mode;
4574 	pdev->fp_mgmt_filter = filter_val->fp_mgmt;
4575 	pdev->fp_ctrl_filter = filter_val->fp_ctrl;
4576 	pdev->fp_data_filter = filter_val->fp_data;
4577 	pdev->mo_mgmt_filter = filter_val->mo_mgmt;
4578 	pdev->mo_ctrl_filter = filter_val->mo_ctrl;
4579 	pdev->mo_data_filter = filter_val->mo_data;
4580 
4581 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
4582 		"MODE[%x] FP[%02x|%02x|%02x] MO[%02x|%02x|%02x]\n",
4583 		pdev->mon_filter_mode, pdev->fp_mgmt_filter,
4584 		pdev->fp_ctrl_filter, pdev->fp_data_filter,
4585 		pdev->mo_mgmt_filter, pdev->mo_ctrl_filter,
4586 		pdev->mo_data_filter);
4587 
4588 	qdf_mem_set(&(htt_tlv_filter), sizeof(htt_tlv_filter), 0x0);
4589 
4590 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
4591 		int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, pdev_id);
4592 
4593 		htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
4594 			pdev->rxdma_mon_buf_ring[mac_id].hal_srng,
4595 			RXDMA_MONITOR_BUF, RX_BUFFER_SIZE, &htt_tlv_filter);
4596 
4597 		htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
4598 			pdev->rxdma_mon_status_ring[mac_id].hal_srng,
4599 			RXDMA_MONITOR_STATUS, RX_BUFFER_SIZE, &htt_tlv_filter);
4600 	}
4601 
4602 	htt_tlv_filter.mpdu_start = 1;
4603 	htt_tlv_filter.msdu_start = 1;
4604 	htt_tlv_filter.packet = 1;
4605 	htt_tlv_filter.msdu_end = 1;
4606 	htt_tlv_filter.mpdu_end = 1;
4607 	htt_tlv_filter.packet_header = 1;
4608 	htt_tlv_filter.attention = 1;
4609 	htt_tlv_filter.ppdu_start = 0;
4610 	htt_tlv_filter.ppdu_end = 0;
4611 	htt_tlv_filter.ppdu_end_user_stats = 0;
4612 	htt_tlv_filter.ppdu_end_user_stats_ext = 0;
4613 	htt_tlv_filter.ppdu_end_status_done = 0;
4614 	htt_tlv_filter.header_per_msdu = 1;
4615 	htt_tlv_filter.enable_fp =
4616 		(pdev->mon_filter_mode & MON_FILTER_PASS) ? 1 : 0;
4617 	htt_tlv_filter.enable_md = 0;
4618 	htt_tlv_filter.enable_mo =
4619 		(pdev->mon_filter_mode & MON_FILTER_OTHER) ? 1 : 0;
4620 	htt_tlv_filter.fp_mgmt_filter = pdev->fp_mgmt_filter;
4621 	htt_tlv_filter.fp_ctrl_filter = pdev->fp_ctrl_filter;
4622 	htt_tlv_filter.fp_data_filter = pdev->fp_data_filter;
4623 	htt_tlv_filter.mo_mgmt_filter = pdev->mo_mgmt_filter;
4624 	htt_tlv_filter.mo_ctrl_filter = pdev->mo_ctrl_filter;
4625 	htt_tlv_filter.mo_data_filter = pdev->mo_data_filter;
4626 
4627 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
4628 		int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, pdev_id);
4629 
4630 		htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
4631 			pdev->rxdma_mon_buf_ring[mac_id].hal_srng,
4632 			RXDMA_MONITOR_BUF, RX_BUFFER_SIZE, &htt_tlv_filter);
4633 	}
4634 
4635 	qdf_mem_set(&(htt_tlv_filter), sizeof(htt_tlv_filter), 0x0);
4636 
4637 	htt_tlv_filter.mpdu_start = 1;
4638 	htt_tlv_filter.msdu_start = 0;
4639 	htt_tlv_filter.packet = 0;
4640 	htt_tlv_filter.msdu_end = 0;
4641 	htt_tlv_filter.mpdu_end = 0;
4642 	htt_tlv_filter.attention = 0;
4643 	htt_tlv_filter.ppdu_start = 1;
4644 	htt_tlv_filter.ppdu_end = 1;
4645 	htt_tlv_filter.ppdu_end_user_stats = 1;
4646 	htt_tlv_filter.ppdu_end_user_stats_ext = 1;
4647 	htt_tlv_filter.ppdu_end_status_done = 1;
4648 	htt_tlv_filter.enable_fp = 1;
4649 	htt_tlv_filter.enable_md = 0;
4650 	htt_tlv_filter.enable_mo = 1;
4651 	if (pdev->mcopy_mode) {
4652 		htt_tlv_filter.packet_header = 1;
4653 	}
4654 	htt_tlv_filter.fp_mgmt_filter = FILTER_MGMT_ALL;
4655 	htt_tlv_filter.fp_ctrl_filter = FILTER_CTRL_ALL;
4656 	htt_tlv_filter.fp_data_filter = FILTER_DATA_ALL;
4657 	htt_tlv_filter.mo_mgmt_filter = FILTER_MGMT_ALL;
4658 	htt_tlv_filter.mo_ctrl_filter = FILTER_CTRL_ALL;
4659 	htt_tlv_filter.mo_data_filter = FILTER_DATA_ALL;
4660 
4661 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
4662 		int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id,
4663 						pdev->pdev_id);
4664 
4665 		htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
4666 			pdev->rxdma_mon_status_ring[mac_id].hal_srng,
4667 			RXDMA_MONITOR_STATUS, RX_BUFFER_SIZE, &htt_tlv_filter);
4668 	}
4669 
4670 	return QDF_STATUS_SUCCESS;
4671 }
4672 
4673 /**
4674  * dp_get_pdev_id_frm_pdev() - get pdev_id
4675  * @pdev_handle: Datapath PDEV handle
4676  *
4677  * Return: pdev_id
4678  */
4679 static
4680 uint8_t dp_get_pdev_id_frm_pdev(struct cdp_pdev *pdev_handle)
4681 {
4682 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
4683 
4684 	return pdev->pdev_id;
4685 }
4686 
4687 /**
4688  * dp_vdev_get_filter_ucast_data() - get DP VDEV monitor ucast filter
4689  * @vdev_handle: Datapath VDEV handle
4690  * Return: true on ucast filter flag set
4691  */
4692 static bool dp_vdev_get_filter_ucast_data(struct cdp_vdev *vdev_handle)
4693 {
4694 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
4695 	struct dp_pdev *pdev;
4696 
4697 	pdev = vdev->pdev;
4698 
4699 	if ((pdev->fp_data_filter & FILTER_DATA_UCAST) ||
4700 	    (pdev->mo_data_filter & FILTER_DATA_UCAST))
4701 		return true;
4702 
4703 	return false;
4704 }
4705 
4706 /**
4707  * dp_vdev_get_filter_mcast_data() - get DP VDEV monitor mcast filter
4708  * @vdev_handle: Datapath VDEV handle
4709  * Return: true on mcast filter flag set
4710  */
4711 static bool dp_vdev_get_filter_mcast_data(struct cdp_vdev *vdev_handle)
4712 {
4713 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
4714 	struct dp_pdev *pdev;
4715 
4716 	pdev = vdev->pdev;
4717 
4718 	if ((pdev->fp_data_filter & FILTER_DATA_MCAST) ||
4719 	    (pdev->mo_data_filter & FILTER_DATA_MCAST))
4720 		return true;
4721 
4722 	return false;
4723 }
4724 
4725 /**
4726  * dp_vdev_get_filter_non_data() - get DP VDEV monitor non_data filter
4727  * @vdev_handle: Datapath VDEV handle
4728  * Return: true on non data filter flag set
4729  */
4730 static bool dp_vdev_get_filter_non_data(struct cdp_vdev *vdev_handle)
4731 {
4732 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
4733 	struct dp_pdev *pdev;
4734 
4735 	pdev = vdev->pdev;
4736 
4737 	if ((pdev->fp_mgmt_filter & FILTER_MGMT_ALL) ||
4738 	    (pdev->mo_mgmt_filter & FILTER_MGMT_ALL)) {
4739 		if ((pdev->fp_ctrl_filter & FILTER_CTRL_ALL) ||
4740 		    (pdev->mo_ctrl_filter & FILTER_CTRL_ALL)) {
4741 			return true;
4742 		}
4743 	}
4744 
4745 	return false;
4746 }
4747 
4748 #ifdef MESH_MODE_SUPPORT
4749 void dp_peer_set_mesh_mode(struct cdp_vdev *vdev_hdl, uint32_t val)
4750 {
4751 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_hdl;
4752 
4753 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
4754 		FL("val %d"), val);
4755 	vdev->mesh_vdev = val;
4756 }
4757 
4758 /*
4759  * dp_peer_set_mesh_rx_filter() - to set the mesh rx filter
4760  * @vdev_hdl: virtual device object
4761  * @val: value to be set
4762  *
4763  * Return: void
4764  */
4765 void dp_peer_set_mesh_rx_filter(struct cdp_vdev *vdev_hdl, uint32_t val)
4766 {
4767 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_hdl;
4768 
4769 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
4770 		FL("val %d"), val);
4771 	vdev->mesh_rx_filter = val;
4772 }
4773 #endif
4774 
4775 /*
4776  * dp_aggregate_pdev_ctrl_frames_stats() - aggregate control-frame peer stats
4777  * Current scope is the BAR received count
4778  *
4779  * @pdev: DP_PDEV handle
4780  *
4781  * Return: void
4782  */
4783 #define STATS_PROC_TIMEOUT        (HZ/1000)
4784 
4785 static void
4786 dp_aggregate_pdev_ctrl_frames_stats(struct dp_pdev *pdev)
4787 {
4788 	struct dp_vdev *vdev;
4789 	struct dp_peer *peer;
4790 	uint32_t waitcnt;
4791 
4792 	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
4793 		TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
4794 			if (!peer) {
4795 				QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
4796 					FL("DP Invalid Peer reference"));
4797 				return;
4798 			}
4799 
4800 			if (peer->delete_in_progress) {
4801 				QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
4802 					FL("DP Peer deletion in progress"));
4803 				continue;
4804 			}
4805 
4806 			qdf_atomic_inc(&peer->ref_cnt);
4807 			waitcnt = 0;
4808 			dp_peer_rxtid_stats(peer, dp_rx_bar_stats_cb, pdev);
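			/*
			 * Poll until the REO stats callback (dp_rx_bar_stats_cb)
			 * signals completion via stats_cmd_complete, waiting at
			 * most 10 timeout periods.
			 */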
4809 			while (!(qdf_atomic_read(&(pdev->stats_cmd_complete)))
4810 				&& waitcnt < 10) {
4811 				schedule_timeout_interruptible(
4812 						STATS_PROC_TIMEOUT);
4813 				waitcnt++;
4814 			}
4815 			qdf_atomic_set(&(pdev->stats_cmd_complete), 0);
4816 			dp_peer_unref_delete(peer);
4817 		}
4818 	}
4819 }
4820 
4821 /**
4822  * dp_rx_bar_stats_cb(): BAR received stats callback
4823  * @soc: SOC handle
4824  * @cb_ctxt: Call back context
4825  * @reo_status: Reo status
4826  *
4827  * return: void
4828  */
4829 void dp_rx_bar_stats_cb(struct dp_soc *soc, void *cb_ctxt,
4830 	union hal_reo_status *reo_status)
4831 {
4832 	struct dp_pdev *pdev = (struct dp_pdev *)cb_ctxt;
4833 	struct hal_reo_queue_status *queue_status = &(reo_status->queue_status);
4834 
4835 	if (queue_status->header.status != HAL_REO_CMD_SUCCESS) {
4836 		DP_TRACE_STATS(FATAL, "REO stats failure %d \n",
4837 			queue_status->header.status);
4838 		qdf_atomic_set(&(pdev->stats_cmd_complete), 1);
4839 		return;
4840 	}
4841 
4842 	pdev->stats.rx.bar_recv_cnt += queue_status->bar_rcvd_cnt;
4843 	qdf_atomic_set(&(pdev->stats_cmd_complete), 1);
4844 
4845 }
4846 
4847 /**
4848  * dp_aggregate_vdev_stats(): Consolidate stats at VDEV level
4849  * @vdev: DP VDEV handle
4850  *
4851  * return: void
4852  */
4853 void dp_aggregate_vdev_stats(struct dp_vdev *vdev)
4854 {
4855 	struct dp_peer *peer = NULL;
4856 	struct dp_soc *soc = vdev->pdev->soc;
4857 
4858 	qdf_mem_set(&(vdev->stats.tx), sizeof(vdev->stats.tx), 0x0);
4859 	qdf_mem_set(&(vdev->stats.rx), sizeof(vdev->stats.rx), 0x0);
4860 
4861 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem)
4862 		DP_UPDATE_STATS(vdev, peer);
4863 
4864 	if (soc->cdp_soc.ol_ops->update_dp_stats)
4865 		soc->cdp_soc.ol_ops->update_dp_stats(vdev->pdev->osif_pdev,
4866 			&vdev->stats, (uint16_t) vdev->vdev_id,
4867 			UPDATE_VDEV_STATS);
4868 
4869 }
4870 
4871 /**
4872  * dp_aggregate_pdev_stats(): Consolidate stats at PDEV level
4873  * @pdev: DP PDEV handle
4874  *
4875  * return: void
4876  */
4877 static inline void dp_aggregate_pdev_stats(struct dp_pdev *pdev)
4878 {
4879 	struct dp_vdev *vdev = NULL;
4880 	struct dp_soc *soc = pdev->soc;
4881 
4882 	qdf_mem_set(&(pdev->stats.tx), sizeof(pdev->stats.tx), 0x0);
4883 	qdf_mem_set(&(pdev->stats.rx), sizeof(pdev->stats.rx), 0x0);
4884 	qdf_mem_set(&(pdev->stats.tx_i), sizeof(pdev->stats.tx_i), 0x0);
4885 
4886 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
4887 	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
4888 
4889 		dp_aggregate_vdev_stats(vdev);
4890 		DP_UPDATE_STATS(pdev, vdev);
4891 
4892 		DP_STATS_AGGR_PKT(pdev, vdev, tx_i.nawds_mcast);
4893 
4894 		DP_STATS_AGGR_PKT(pdev, vdev, tx_i.rcvd);
4895 		DP_STATS_AGGR_PKT(pdev, vdev, tx_i.processed);
4896 		DP_STATS_AGGR_PKT(pdev, vdev, tx_i.reinject_pkts);
4897 		DP_STATS_AGGR_PKT(pdev, vdev, tx_i.inspect_pkts);
4898 		DP_STATS_AGGR_PKT(pdev, vdev, tx_i.raw.raw_pkt);
4899 		DP_STATS_AGGR(pdev, vdev, tx_i.raw.dma_map_error);
4900 		DP_STATS_AGGR_PKT(pdev, vdev, tx_i.tso.tso_pkt);
4901 		DP_STATS_AGGR(pdev, vdev, tx_i.tso.dropped_host);
4902 		DP_STATS_AGGR(pdev, vdev, tx_i.tso.dropped_target);
4903 		DP_STATS_AGGR(pdev, vdev, tx_i.sg.dropped_host);
4904 		DP_STATS_AGGR(pdev, vdev, tx_i.sg.dropped_target);
4905 		DP_STATS_AGGR_PKT(pdev, vdev, tx_i.sg.sg_pkt);
4906 		DP_STATS_AGGR_PKT(pdev, vdev, tx_i.mcast_en.mcast_pkt);
4907 		DP_STATS_AGGR(pdev, vdev,
4908 				tx_i.mcast_en.dropped_map_error);
4909 		DP_STATS_AGGR(pdev, vdev,
4910 				tx_i.mcast_en.dropped_self_mac);
4911 		DP_STATS_AGGR(pdev, vdev,
4912 				tx_i.mcast_en.dropped_send_fail);
4913 		DP_STATS_AGGR(pdev, vdev, tx_i.mcast_en.ucast);
4914 		DP_STATS_AGGR(pdev, vdev, tx_i.dropped.dma_error);
4915 		DP_STATS_AGGR(pdev, vdev, tx_i.dropped.ring_full);
4916 		DP_STATS_AGGR(pdev, vdev, tx_i.dropped.enqueue_fail);
4917 		DP_STATS_AGGR(pdev, vdev, tx_i.dropped.desc_na);
4918 		DP_STATS_AGGR(pdev, vdev, tx_i.dropped.res_full);
4919 		DP_STATS_AGGR(pdev, vdev, tx_i.cce_classified);
4920 		DP_STATS_AGGR(pdev, vdev, tx_i.cce_classified_raw);
4921 		DP_STATS_AGGR(pdev, vdev, tx_i.mesh.exception_fw);
4922 		DP_STATS_AGGR(pdev, vdev, tx_i.mesh.completion_fw);
4923 
4924 		pdev->stats.tx_i.dropped.dropped_pkt.num =
4925 			pdev->stats.tx_i.dropped.dma_error +
4926 			pdev->stats.tx_i.dropped.ring_full +
4927 			pdev->stats.tx_i.dropped.enqueue_fail +
4928 			pdev->stats.tx_i.dropped.desc_na +
4929 			pdev->stats.tx_i.dropped.res_full;
4930 
4931 		pdev->stats.tx.last_ack_rssi =
4932 			vdev->stats.tx.last_ack_rssi;
4933 		pdev->stats.tx_i.tso.num_seg =
4934 			vdev->stats.tx_i.tso.num_seg;
4935 	}
4936 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
4937 	if (soc->cdp_soc.ol_ops->update_dp_stats)
4938 		soc->cdp_soc.ol_ops->update_dp_stats(pdev->osif_pdev,
4939 				&pdev->stats, pdev->pdev_id, UPDATE_PDEV_STATS);
4940 
4941 }
4942 
4943 /**
4944  * dp_vdev_getstats() - get vdev packet level stats
4945  * @vdev_handle: Datapath VDEV handle
4946  * @stats: cdp network device stats structure
4947  *
4948  * Return: void
4949  */
4950 static void dp_vdev_getstats(void *vdev_handle,
4951 		struct cdp_dev_stats *stats)
4952 {
4953 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
4954 
4955 	dp_aggregate_vdev_stats(vdev);
4956 }
4957 
4958 
4959 /**
4960  * dp_pdev_getstats() - get pdev packet level stats
4961  * @pdev_handle: Datapath PDEV handle
4962  * @stats: cdp network device stats structure
4963  *
4964  * Return: void
4965  */
4966 static void dp_pdev_getstats(void *pdev_handle,
4967 		struct cdp_dev_stats *stats)
4968 {
4969 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
4970 
4971 	dp_aggregate_pdev_stats(pdev);
4972 
4973 	stats->tx_packets = pdev->stats.tx_i.rcvd.num;
4974 	stats->tx_bytes = pdev->stats.tx_i.rcvd.bytes;
4975 
4976 	stats->tx_errors = pdev->stats.tx.tx_failed +
4977 		pdev->stats.tx_i.dropped.dropped_pkt.num;
4978 	stats->tx_dropped = stats->tx_errors;
4979 
4980 	stats->rx_packets = pdev->stats.rx.unicast.num +
4981 		pdev->stats.rx.multicast.num +
4982 		pdev->stats.rx.bcast.num;
4983 	stats->rx_bytes = pdev->stats.rx.unicast.bytes +
4984 		pdev->stats.rx.multicast.bytes +
4985 		pdev->stats.rx.bcast.bytes;
4986 }
4987 
4988 /**
4989  * dp_get_device_stats() - get interface level packet stats
4990  * @handle: device handle
4991  * @stats: cdp network device stats structure
4992  * @type: device type pdev/vdev
4993  *
4994  * Return: void
4995  */
4996 static void dp_get_device_stats(void *handle,
4997 		struct cdp_dev_stats *stats, uint8_t type)
4998 {
4999 	switch (type) {
5000 	case UPDATE_VDEV_STATS:
5001 		dp_vdev_getstats(handle, stats);
5002 		break;
5003 	case UPDATE_PDEV_STATS:
5004 		dp_pdev_getstats(handle, stats);
5005 		break;
5006 	default:
5007 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
5008 			"apstats cannot be updated for this input "
5009 			"type %d\n", type);
5010 		break;
5011 	}
5012 
5013 }
5014 
5015 
5016 /**
5017  * dp_print_pdev_tx_stats(): Print Pdev level TX stats
5018  * @pdev: DP_PDEV Handle
5019  *
5020  * Return:void
5021  */
5022 static inline void
5023 dp_print_pdev_tx_stats(struct dp_pdev *pdev)
5024 {
5025 	uint8_t index = 0;
5026 	DP_PRINT_STATS("PDEV Tx Stats:\n");
5027 	DP_PRINT_STATS("Received From Stack:");
5028 	DP_PRINT_STATS("	Packets = %d",
5029 			pdev->stats.tx_i.rcvd.num);
5030 	DP_PRINT_STATS("	Bytes = %llu",
5031 			pdev->stats.tx_i.rcvd.bytes);
5032 	DP_PRINT_STATS("Processed:");
5033 	DP_PRINT_STATS("	Packets = %d",
5034 			pdev->stats.tx_i.processed.num);
5035 	DP_PRINT_STATS("	Bytes = %llu",
5036 			pdev->stats.tx_i.processed.bytes);
5037 	DP_PRINT_STATS("Total Completions:");
5038 	DP_PRINT_STATS("	Packets = %u",
5039 			pdev->stats.tx.comp_pkt.num);
5040 	DP_PRINT_STATS("	Bytes = %llu",
5041 			pdev->stats.tx.comp_pkt.bytes);
5042 	DP_PRINT_STATS("Successful Completions:");
5043 	DP_PRINT_STATS("	Packets = %u",
5044 			pdev->stats.tx.tx_success.num);
5045 	DP_PRINT_STATS("	Bytes = %llu",
5046 			pdev->stats.tx.tx_success.bytes);
5047 	DP_PRINT_STATS("Dropped:");
5048 	DP_PRINT_STATS("	Total = %d",
5049 			pdev->stats.tx_i.dropped.dropped_pkt.num);
5050 	DP_PRINT_STATS("	Dma_map_error = %d",
5051 			pdev->stats.tx_i.dropped.dma_error);
5052 	DP_PRINT_STATS("	Ring Full = %d",
5053 			pdev->stats.tx_i.dropped.ring_full);
5054 	DP_PRINT_STATS("	Descriptor Not available = %d",
5055 			pdev->stats.tx_i.dropped.desc_na);
5056 	DP_PRINT_STATS("	HW enqueue failed= %d",
5057 			pdev->stats.tx_i.dropped.enqueue_fail);
5058 	DP_PRINT_STATS("	Resources Full = %d",
5059 			pdev->stats.tx_i.dropped.res_full);
5060 	DP_PRINT_STATS("	FW removed = %d",
5061 			pdev->stats.tx.dropped.fw_rem);
5062 	DP_PRINT_STATS("	FW removed transmitted = %d",
5063 			pdev->stats.tx.dropped.fw_rem_tx);
5064 	DP_PRINT_STATS("	FW removed untransmitted = %d",
5065 			pdev->stats.tx.dropped.fw_rem_notx);
5066 	DP_PRINT_STATS("	FW removed untransmitted fw_reason1 = %d",
5067 			pdev->stats.tx.dropped.fw_reason1);
5068 	DP_PRINT_STATS("	FW removed untransmitted fw_reason2 = %d",
5069 			pdev->stats.tx.dropped.fw_reason2);
5070 	DP_PRINT_STATS("	FW removed untransmitted fw_reason3 = %d",
5071 			pdev->stats.tx.dropped.fw_reason3);
5072 	DP_PRINT_STATS("	Aged Out from msdu/mpdu queues = %d",
5073 			pdev->stats.tx.dropped.age_out);
5074 	DP_PRINT_STATS("Scatter Gather:");
5075 	DP_PRINT_STATS("	Packets = %d",
5076 			pdev->stats.tx_i.sg.sg_pkt.num);
5077 	DP_PRINT_STATS("	Bytes = %llu",
5078 			pdev->stats.tx_i.sg.sg_pkt.bytes);
5079 	DP_PRINT_STATS("	Dropped By Host = %d",
5080 			pdev->stats.tx_i.sg.dropped_host);
5081 	DP_PRINT_STATS("	Dropped By Target = %d",
5082 			pdev->stats.tx_i.sg.dropped_target);
5083 	DP_PRINT_STATS("TSO:");
5084 	DP_PRINT_STATS("	Number of Segments = %d",
5085 			pdev->stats.tx_i.tso.num_seg);
5086 	DP_PRINT_STATS("	Packets = %d",
5087 			pdev->stats.tx_i.tso.tso_pkt.num);
5088 	DP_PRINT_STATS("	Bytes = %llu",
5089 			pdev->stats.tx_i.tso.tso_pkt.bytes);
5090 	DP_PRINT_STATS("	Dropped By Host = %d",
5091 			pdev->stats.tx_i.tso.dropped_host);
5092 	DP_PRINT_STATS("Mcast Enhancement:");
5093 	DP_PRINT_STATS("	Packets = %d",
5094 			pdev->stats.tx_i.mcast_en.mcast_pkt.num);
5095 	DP_PRINT_STATS("	Bytes = %llu",
5096 			pdev->stats.tx_i.mcast_en.mcast_pkt.bytes);
5097 	DP_PRINT_STATS("	Dropped: Map Errors = %d",
5098 			pdev->stats.tx_i.mcast_en.dropped_map_error);
5099 	DP_PRINT_STATS("	Dropped: Self Mac = %d",
5100 			pdev->stats.tx_i.mcast_en.dropped_self_mac);
5101 	DP_PRINT_STATS("	Dropped: Send Fail = %d",
5102 			pdev->stats.tx_i.mcast_en.dropped_send_fail);
5103 	DP_PRINT_STATS("	Unicast sent = %d",
5104 			pdev->stats.tx_i.mcast_en.ucast);
5105 	DP_PRINT_STATS("Raw:");
5106 	DP_PRINT_STATS("	Packets = %d",
5107 			pdev->stats.tx_i.raw.raw_pkt.num);
5108 	DP_PRINT_STATS("	Bytes = %llu",
5109 			pdev->stats.tx_i.raw.raw_pkt.bytes);
5110 	DP_PRINT_STATS("	DMA map error = %d",
5111 			pdev->stats.tx_i.raw.dma_map_error);
5112 	DP_PRINT_STATS("Reinjected:");
5113 	DP_PRINT_STATS("	Packets = %d",
5114 			pdev->stats.tx_i.reinject_pkts.num);
5115 	DP_PRINT_STATS("	Bytes = %llu\n",
5116 				pdev->stats.tx_i.reinject_pkts.bytes);
5117 	DP_PRINT_STATS("Inspected:");
5118 	DP_PRINT_STATS("	Packets = %d",
5119 			pdev->stats.tx_i.inspect_pkts.num);
5120 	DP_PRINT_STATS("	Bytes = %llu",
5121 			pdev->stats.tx_i.inspect_pkts.bytes);
5122 	DP_PRINT_STATS("Nawds Multicast:");
5123 	DP_PRINT_STATS("	Packets = %d",
5124 			pdev->stats.tx_i.nawds_mcast.num);
5125 	DP_PRINT_STATS("	Bytes = %llu",
5126 			pdev->stats.tx_i.nawds_mcast.bytes);
5127 	DP_PRINT_STATS("CCE Classified:");
5128 	DP_PRINT_STATS("	CCE Classified Packets: %u",
5129 			pdev->stats.tx_i.cce_classified);
5130 	DP_PRINT_STATS("	RAW CCE Classified Packets: %u",
5131 			pdev->stats.tx_i.cce_classified_raw);
5132 	DP_PRINT_STATS("Mesh stats:");
5133 	DP_PRINT_STATS("	frames to firmware: %u",
5134 			pdev->stats.tx_i.mesh.exception_fw);
5135 	DP_PRINT_STATS("	completions from fw: %u",
5136 			pdev->stats.tx_i.mesh.completion_fw);
5137 	DP_PRINT_STATS("PPDU stats counter");
5138 	for (index = 0; index < CDP_PPDU_STATS_MAX_TAG; index++) {
5139 		DP_PRINT_STATS("	Tag[%d] = %llu", index,
5140 				pdev->stats.ppdu_stats_counter[index]);
5141 	}
5142 }
5143 
5144 /**
5145  * dp_print_pdev_rx_stats(): Print Pdev level RX stats
5146  * @pdev: DP_PDEV Handle
5147  *
5148  * Return: void
5149  */
5150 static inline void
5151 dp_print_pdev_rx_stats(struct dp_pdev *pdev)
5152 {
5153 	DP_PRINT_STATS("PDEV Rx Stats:\n");
5154 	DP_PRINT_STATS("Received From HW (Per Rx Ring):");
5155 	DP_PRINT_STATS("	Packets = %d %d %d %d",
5156 			pdev->stats.rx.rcvd_reo[0].num,
5157 			pdev->stats.rx.rcvd_reo[1].num,
5158 			pdev->stats.rx.rcvd_reo[2].num,
5159 			pdev->stats.rx.rcvd_reo[3].num);
5160 	DP_PRINT_STATS("	Bytes = %llu %llu %llu %llu",
5161 			pdev->stats.rx.rcvd_reo[0].bytes,
5162 			pdev->stats.rx.rcvd_reo[1].bytes,
5163 			pdev->stats.rx.rcvd_reo[2].bytes,
5164 			pdev->stats.rx.rcvd_reo[3].bytes);
5165 	DP_PRINT_STATS("Replenished:");
5166 	DP_PRINT_STATS("	Packets = %d",
5167 			pdev->stats.replenish.pkts.num);
5168 	DP_PRINT_STATS("	Bytes = %llu",
5169 			pdev->stats.replenish.pkts.bytes);
5170 	DP_PRINT_STATS("	Buffers Added To Freelist = %d",
5171 			pdev->stats.buf_freelist);
5172 	DP_PRINT_STATS("	Low threshold intr = %d",
5173 			pdev->stats.replenish.low_thresh_intrs);
5174 	DP_PRINT_STATS("Dropped:");
5175 	DP_PRINT_STATS("	msdu_not_done = %d",
5176 			pdev->stats.dropped.msdu_not_done);
5177 	DP_PRINT_STATS("        mon_rx_drop = %d",
5178 			pdev->stats.dropped.mon_rx_drop);
5179 	DP_PRINT_STATS("Sent To Stack:");
5180 	DP_PRINT_STATS("	Packets = %d",
5181 			pdev->stats.rx.to_stack.num);
5182 	DP_PRINT_STATS("	Bytes = %llu",
5183 			pdev->stats.rx.to_stack.bytes);
5184 	DP_PRINT_STATS("Multicast/Broadcast:");
5185 	DP_PRINT_STATS("	Packets = %d",
5186 			(pdev->stats.rx.multicast.num +
5187 			pdev->stats.rx.bcast.num));
5188 	DP_PRINT_STATS("	Bytes = %llu",
5189 			(pdev->stats.rx.multicast.bytes +
5190 			pdev->stats.rx.bcast.bytes));
5191 	DP_PRINT_STATS("Errors:");
5192 	DP_PRINT_STATS("	Rxdma Ring Un-initialized = %d",
5193 			pdev->stats.replenish.rxdma_err);
5194 	DP_PRINT_STATS("	Desc Alloc Failed: = %d",
5195 			pdev->stats.err.desc_alloc_fail);
5196 	DP_PRINT_STATS("IP checksum error = %d",
5197 		       pdev->stats.err.ip_csum_err);
5198 	DP_PRINT_STATS("TCP/UDP checksum error = %d",
5199 		       pdev->stats.err.tcp_udp_csum_err);
5200 
5201 	/* Get bar_recv_cnt */
5202 	dp_aggregate_pdev_ctrl_frames_stats(pdev);
5203 	DP_PRINT_STATS("BAR Received Count: = %d",
5204 			pdev->stats.rx.bar_recv_cnt);
5205 
5206 }
5207 
5208 /**
5209  * dp_print_pdev_rx_mon_stats(): Print Pdev level RX monitor stats
5210  * @pdev: DP_PDEV Handle
5211  *
5212  * Return: void
5213  */
5214 static inline void
5215 dp_print_pdev_rx_mon_stats(struct dp_pdev *pdev)
5216 {
5217 	struct cdp_pdev_mon_stats *rx_mon_stats;
5218 
5219 	rx_mon_stats = &pdev->rx_mon_stats;
5220 
5221 	DP_PRINT_STATS("PDEV Rx Monitor Stats:\n");
5222 
5223 	dp_rx_mon_print_dbg_ppdu_stats(rx_mon_stats);
5224 
5225 	DP_PRINT_STATS("status_ppdu_done_cnt = %d",
5226 		       rx_mon_stats->status_ppdu_done);
5227 	DP_PRINT_STATS("dest_ppdu_done_cnt = %d",
5228 		       rx_mon_stats->dest_ppdu_done);
5229 	DP_PRINT_STATS("dest_mpdu_done_cnt = %d",
5230 		       rx_mon_stats->dest_mpdu_done);
5231 	DP_PRINT_STATS("dest_mpdu_drop_cnt = %d",
5232 		       rx_mon_stats->dest_mpdu_drop);
5233 }
5234 
5235 /**
5236  * dp_print_soc_tx_stats(): Print SOC level  stats
5237  * @soc DP_SOC Handle
5238  *
5239  * Return: void
5240  */
5241 static inline void
5242 dp_print_soc_tx_stats(struct dp_soc *soc)
5243 {
5244 	DP_PRINT_STATS("SOC Tx Stats:\n");
5245 	DP_PRINT_STATS("Tx Descriptors In Use = %d",
5246 			soc->stats.tx.desc_in_use);
5247 	DP_PRINT_STATS("Invalid peer:");
5248 	DP_PRINT_STATS("	Packets = %d",
5249 			soc->stats.tx.tx_invalid_peer.num);
5250 	DP_PRINT_STATS("	Bytes = %llu",
5251 			soc->stats.tx.tx_invalid_peer.bytes);
5252 	DP_PRINT_STATS("Packets dropped due to TCL ring full = %d %d %d",
5253 			soc->stats.tx.tcl_ring_full[0],
5254 			soc->stats.tx.tcl_ring_full[1],
5255 			soc->stats.tx.tcl_ring_full[2]);
5256 
5257 }
5258 /**
5259  * dp_print_soc_rx_stats: Print SOC level Rx stats
5260  * @soc: DP_SOC Handle
5261  *
5262  * Return:void
5263  */
5264 static inline void
5265 dp_print_soc_rx_stats(struct dp_soc *soc)
5266 {
5267 	uint32_t i;
5268 	char reo_error[DP_REO_ERR_LENGTH];
5269 	char rxdma_error[DP_RXDMA_ERR_LENGTH];
5270 	uint8_t index = 0;
5271 
5272 	DP_PRINT_STATS("SOC Rx Stats:\n");
5273 	DP_PRINT_STATS("Errors:\n");
5274 	DP_PRINT_STATS("Rx Decrypt Errors = %d",
5275 			(soc->stats.rx.err.rxdma_error[HAL_RXDMA_ERR_DECRYPT] +
5276 			soc->stats.rx.err.rxdma_error[HAL_RXDMA_ERR_TKIP_MIC]));
5277 	DP_PRINT_STATS("Invalid RBM = %d",
5278 			soc->stats.rx.err.invalid_rbm);
5279 	DP_PRINT_STATS("Invalid Vdev = %d",
5280 			soc->stats.rx.err.invalid_vdev);
5281 	DP_PRINT_STATS("Invalid Pdev = %d",
5282 			soc->stats.rx.err.invalid_pdev);
5283 	DP_PRINT_STATS("Invalid Peer = %d",
5284 			soc->stats.rx.err.rx_invalid_peer.num);
5285 	DP_PRINT_STATS("HAL Ring Access Fail = %d",
5286 			soc->stats.rx.err.hal_ring_access_fail);
5287 
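	/* Build space-separated strings of the per-error-code counters below */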
5288 	for (i = 0; i < HAL_RXDMA_ERR_MAX; i++) {
5289 		index += qdf_snprint(&rxdma_error[index],
5290 				DP_RXDMA_ERR_LENGTH - index,
5291 				" %d", soc->stats.rx.err.rxdma_error[i]);
5292 	}
5293 	DP_PRINT_STATS("RXDMA Error (0-31):%s",
5294 			rxdma_error);
5295 
5296 	index = 0;
5297 	for (i = 0; i < HAL_REO_ERR_MAX; i++) {
5298 		index += qdf_snprint(&reo_error[index],
5299 				DP_REO_ERR_LENGTH - index,
5300 				" %d", soc->stats.rx.err.reo_error[i]);
5301 	}
5302 	DP_PRINT_STATS("REO Error(0-14):%s",
5303 			reo_error);
5304 }
5305 
5306 
5307 /**
5308  * dp_print_ring_stat_from_hal(): Print hal level ring stats
5309  * @soc: DP_SOC handle
5310  * @srng: DP_SRNG handle
5311  * @ring_name: SRNG name
5312  *
5313  * Return: void
5314  */
5315 static inline void
5316 dp_print_ring_stat_from_hal(struct dp_soc *soc,  struct dp_srng *srng,
5317 	char *ring_name)
5318 {
5319 	uint32_t tailp;
5320 	uint32_t headp;
5321 
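	/* Rings that were never initialized have a NULL hal_srng; skip those */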
5322 	if (srng->hal_srng != NULL) {
5323 		hal_api_get_tphp(soc->hal_soc, srng->hal_srng, &tailp, &headp);
5324 		DP_PRINT_STATS("%s : Head pointer = %d  Tail Pointer = %d\n",
5325 				ring_name, headp, tailp);
5326 	}
5327 }
5328 
5329 /**
5330  * dp_print_ring_stats(): Print tail and head pointer
5331  * @pdev: DP_PDEV handle
5332  *
5333  * Return:void
5334  */
5335 static inline void
5336 dp_print_ring_stats(struct dp_pdev *pdev)
5337 {
5338 	uint32_t i;
5339 	char ring_name[STR_MAXLEN + 1];
5340 	int mac_id;
5341 
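	/* Dump head/tail pointers for the SoC-common rings first, then the per-pdev and per-MAC rings */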
5342 	dp_print_ring_stat_from_hal(pdev->soc,
5343 			&pdev->soc->reo_exception_ring,
5344 			"Reo Exception Ring");
5345 	dp_print_ring_stat_from_hal(pdev->soc,
5346 			&pdev->soc->reo_reinject_ring,
5347 			"Reo Inject Ring");
5348 	dp_print_ring_stat_from_hal(pdev->soc,
5349 			&pdev->soc->reo_cmd_ring,
5350 			"Reo Command Ring");
5351 	dp_print_ring_stat_from_hal(pdev->soc,
5352 			&pdev->soc->reo_status_ring,
5353 			"Reo Status Ring");
5354 	dp_print_ring_stat_from_hal(pdev->soc,
5355 			&pdev->soc->rx_rel_ring,
5356 			"Rx Release ring");
5357 	dp_print_ring_stat_from_hal(pdev->soc,
5358 			&pdev->soc->tcl_cmd_ring,
5359 			"Tcl command Ring");
5360 	dp_print_ring_stat_from_hal(pdev->soc,
5361 			&pdev->soc->tcl_status_ring,
5362 			"Tcl Status Ring");
5363 	dp_print_ring_stat_from_hal(pdev->soc,
5364 			&pdev->soc->wbm_desc_rel_ring,
5365 			"Wbm Desc Rel Ring");
5366 	for (i = 0; i < MAX_REO_DEST_RINGS; i++) {
5367 		snprintf(ring_name, STR_MAXLEN, "Reo Dest Ring %d", i);
5368 		dp_print_ring_stat_from_hal(pdev->soc,
5369 				&pdev->soc->reo_dest_ring[i],
5370 				ring_name);
5371 	}
5372 	for (i = 0; i < pdev->soc->num_tcl_data_rings; i++) {
5373 		snprintf(ring_name, STR_MAXLEN, "Tcl Data Ring %d", i);
5374 		dp_print_ring_stat_from_hal(pdev->soc,
5375 				&pdev->soc->tcl_data_ring[i],
5376 				ring_name);
5377 	}
5378 	for (i = 0; i < MAX_TCL_DATA_RINGS; i++) {
5379 		snprintf(ring_name, STR_MAXLEN, "Tx Comp Ring %d", i);
5380 		dp_print_ring_stat_from_hal(pdev->soc,
5381 				&pdev->soc->tx_comp_ring[i],
5382 				ring_name);
5383 	}
5384 	dp_print_ring_stat_from_hal(pdev->soc,
5385 			&pdev->rx_refill_buf_ring,
5386 			"Rx Refill Buf Ring");
5387 
5388 	dp_print_ring_stat_from_hal(pdev->soc,
5389 			&pdev->rx_refill_buf_ring2,
5390 			"Second Rx Refill Buf Ring");
5391 
5392 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
5393 		dp_print_ring_stat_from_hal(pdev->soc,
5394 				&pdev->rxdma_mon_buf_ring[mac_id],
5395 				"Rxdma Mon Buf Ring");
5396 		dp_print_ring_stat_from_hal(pdev->soc,
5397 				&pdev->rxdma_mon_dst_ring[mac_id],
5398 				"Rxdma Mon Dst Ring");
5399 		dp_print_ring_stat_from_hal(pdev->soc,
5400 				&pdev->rxdma_mon_status_ring[mac_id],
5401 				"Rxdma Mon Status Ring");
5402 		dp_print_ring_stat_from_hal(pdev->soc,
5403 				&pdev->rxdma_mon_desc_ring[mac_id],
5404 				"Rxdma mon desc Ring");
5405 	}
5406 
5407 	for (i = 0; i < MAX_RX_MAC_RINGS; i++) {
5408 		snprintf(ring_name, STR_MAXLEN, "Rxdma err dst ring %d", i);
5409 		dp_print_ring_stat_from_hal(pdev->soc,
5410 			&pdev->rxdma_err_dst_ring[i],
5411 			ring_name);
5412 	}
5413 
5414 	for (i = 0; i < MAX_RX_MAC_RINGS; i++) {
5415 		snprintf(ring_name, STR_MAXLEN, "Rx mac buf ring %d", i);
5416 		dp_print_ring_stat_from_hal(pdev->soc,
5417 				&pdev->rx_mac_buf_ring[i],
5418 				ring_name);
5419 	}
5420 }
5421 
5422 /**
5423  * dp_txrx_host_stats_clr(): Reinitialize the txrx stats
5424  * @vdev: DP_VDEV handle
5425  *
5426  * Return:void
5427  */
5428 static inline void
5429 dp_txrx_host_stats_clr(struct dp_vdev *vdev)
5430 {
5431 	struct dp_peer *peer = NULL;
5432 	struct dp_soc *soc = (struct dp_soc *)vdev->pdev->soc;
5433 
5434 	DP_STATS_CLR(vdev->pdev);
5435 	DP_STATS_CLR(vdev->pdev->soc);
5436 	DP_STATS_CLR(vdev);
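	/* Clear each peer's stats and push the zeroed copies to the control path so cached stats stay in sync */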
5437 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
5438 		if (!peer)
5439 			return;
5440 		DP_STATS_CLR(peer);
5441 
5442 		if (soc->cdp_soc.ol_ops->update_dp_stats) {
5443 			soc->cdp_soc.ol_ops->update_dp_stats(
5444 					vdev->pdev->osif_pdev,
5445 					&peer->stats,
5446 					peer->peer_ids[0],
5447 					UPDATE_PEER_STATS);
5448 		}
5449 
5450 	}
5451 
5452 	if (soc->cdp_soc.ol_ops->update_dp_stats)
5453 		soc->cdp_soc.ol_ops->update_dp_stats(vdev->pdev->osif_pdev,
5454 				&vdev->stats, (uint16_t)vdev->vdev_id,
5455 				UPDATE_VDEV_STATS);
5456 }
5457 
5458 /**
5459  * dp_print_rx_rates(): Print Rx rate stats
5460  * @vdev: DP_VDEV handle
5461  *
5462  * Return:void
5463  */
5464 static inline void
5465 dp_print_rx_rates(struct dp_vdev *vdev)
5466 {
5467 	struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;
5468 	uint8_t i, mcs, pkt_type;
5469 	uint8_t index = 0;
5470 	char nss[DP_NSS_LENGTH];
5471 
5472 	DP_PRINT_STATS("Rx Rate Info:\n");
5473 
5474 	for (pkt_type = 0; pkt_type < DOT11_MAX; pkt_type++) {
5475 		index = 0;
5476 		for (mcs = 0; mcs < MAX_MCS; mcs++) {
5477 			if (!dp_rate_string[pkt_type][mcs].valid)
5478 				continue;
5479 
5480 			DP_PRINT_STATS("	%s = %d",
5481 					dp_rate_string[pkt_type][mcs].mcs_type,
5482 					pdev->stats.rx.pkt_type[pkt_type].
5483 					mcs_count[mcs]);
5484 		}
5485 
5486 		DP_PRINT_STATS("\n");
5487 	}
5488 
5489 	index = 0;
5490 	for (i = 0; i < SS_COUNT; i++) {
5491 		index += qdf_snprint(&nss[index], DP_NSS_LENGTH - index,
5492 				" %d", pdev->stats.rx.nss[i]);
5493 	}
5494 	DP_PRINT_STATS("NSS(1-8) = %s",
5495 			nss);
5496 
5497 	DP_PRINT_STATS("SGI ="
5498 			" 0.8us %d,"
5499 			" 0.4us %d,"
5500 			" 1.6us %d,"
5501 			" 3.2us %d,",
5502 			pdev->stats.rx.sgi_count[0],
5503 			pdev->stats.rx.sgi_count[1],
5504 			pdev->stats.rx.sgi_count[2],
5505 			pdev->stats.rx.sgi_count[3]);
5506 	DP_PRINT_STATS("BW Counts = 20MHZ %d, 40MHZ %d, 80MHZ %d, 160MHZ %d",
5507 			pdev->stats.rx.bw[0], pdev->stats.rx.bw[1],
5508 			pdev->stats.rx.bw[2], pdev->stats.rx.bw[3]);
5509 	DP_PRINT_STATS("Reception Type ="
5510 			" SU: %d,"
5511 			" MU_MIMO:%d,"
5512 			" MU_OFDMA:%d,"
5513 			" MU_OFDMA_MIMO:%d\n",
5514 			pdev->stats.rx.reception_type[0],
5515 			pdev->stats.rx.reception_type[1],
5516 			pdev->stats.rx.reception_type[2],
5517 			pdev->stats.rx.reception_type[3]);
5518 	DP_PRINT_STATS("Aggregation:\n");
5519 	DP_PRINT_STATS("Number of Msdu's Part of Ampdus = %d",
5520 			pdev->stats.rx.ampdu_cnt);
5521 	DP_PRINT_STATS("Number of Msdu's With No Mpdu Level Aggregation : %d",
5522 			pdev->stats.rx.non_ampdu_cnt);
5523 	DP_PRINT_STATS("Number of Msdu's Part of Amsdu: %d",
5524 			pdev->stats.rx.amsdu_cnt);
5525 	DP_PRINT_STATS("Number of Msdu's With No Msdu Level Aggregation: %d",
5526 			pdev->stats.rx.non_amsdu_cnt);
5527 }
5528 
5529 /**
5530  * dp_print_tx_rates(): Print tx rates
5531  * @vdev: DP_VDEV handle
5532  *
5533  * Return:void
5534  */
5535 static inline void
5536 dp_print_tx_rates(struct dp_vdev *vdev)
5537 {
5538 	struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;
5539 	uint8_t mcs, pkt_type;
5540 	uint32_t index;
5541 
5542 	DP_PRINT_STATS("Tx Rate Info:\n");
5543 
5544 	for (pkt_type = 0; pkt_type < DOT11_MAX; pkt_type++) {
5545 		index = 0;
5546 		for (mcs = 0; mcs < MAX_MCS; mcs++) {
5547 			if (!dp_rate_string[pkt_type][mcs].valid)
5548 				continue;
5549 
5550 			DP_PRINT_STATS("	%s = %d",
5551 					dp_rate_string[pkt_type][mcs].mcs_type,
5552 					pdev->stats.tx.pkt_type[pkt_type].
5553 					mcs_count[mcs]);
5554 		}
5555 
5556 		DP_PRINT_STATS("\n");
5557 	}
5558 
5559 	DP_PRINT_STATS("SGI ="
5560 			" 0.8us %d"
5561 			" 0.4us %d"
5562 			" 1.6us %d"
5563 			" 3.2us %d",
5564 			pdev->stats.tx.sgi_count[0],
5565 			pdev->stats.tx.sgi_count[1],
5566 			pdev->stats.tx.sgi_count[2],
5567 			pdev->stats.tx.sgi_count[3]);
5568 
5569 	DP_PRINT_STATS("BW Counts = 20MHZ %d, 40MHZ %d, 80MHZ %d, 160MHZ %d",
5570 			pdev->stats.tx.bw[0], pdev->stats.tx.bw[1],
5571 			pdev->stats.tx.bw[2], pdev->stats.tx.bw[3]);
5572 
5573 	DP_PRINT_STATS("OFDMA = %d", pdev->stats.tx.ofdma);
5574 	DP_PRINT_STATS("STBC = %d", pdev->stats.tx.stbc);
5575 	DP_PRINT_STATS("LDPC = %d", pdev->stats.tx.ldpc);
5576 	DP_PRINT_STATS("Retries = %d", pdev->stats.tx.retries);
5577 	DP_PRINT_STATS("Last ack rssi = %d\n", pdev->stats.tx.last_ack_rssi);
5578 
5579 	DP_PRINT_STATS("Aggregation:\n");
5580 	DP_PRINT_STATS("Number of Msdu's Part of Amsdu = %d",
5581 			pdev->stats.tx.amsdu_cnt);
5582 	DP_PRINT_STATS("Number of Msdu's With No Msdu Level Aggregation = %d",
5583 			pdev->stats.tx.non_amsdu_cnt);
5584 }
5585 
5586 /**
5587  * dp_print_peer_stats(): Print peer stats
5588  * @peer: DP_PEER handle
5589  *
5590  * Return: void
5591  */
5592 static inline void dp_print_peer_stats(struct dp_peer *peer)
5593 {
5594 	uint8_t i, mcs, pkt_type;
5595 	uint32_t index;
5596 	char nss[DP_NSS_LENGTH];
5597 	DP_PRINT_STATS("Node Tx Stats:\n");
5598 	DP_PRINT_STATS("Total Packet Completions = %d",
5599 			peer->stats.tx.comp_pkt.num);
5600 	DP_PRINT_STATS("Total Bytes Completions = %llu",
5601 			peer->stats.tx.comp_pkt.bytes);
5602 	DP_PRINT_STATS("Success Packets = %d",
5603 			peer->stats.tx.tx_success.num);
5604 	DP_PRINT_STATS("Success Bytes = %llu",
5605 			peer->stats.tx.tx_success.bytes);
5606 	DP_PRINT_STATS("Unicast Success Packets = %d",
5607 			peer->stats.tx.ucast.num);
5608 	DP_PRINT_STATS("Unicast Success Bytes = %llu",
5609 			peer->stats.tx.ucast.bytes);
5610 	DP_PRINT_STATS("Multicast Success Packets = %d",
5611 			peer->stats.tx.mcast.num);
5612 	DP_PRINT_STATS("Multicast Success Bytes = %llu",
5613 			peer->stats.tx.mcast.bytes);
5614 	DP_PRINT_STATS("Broadcast Success Packets = %d",
5615 			peer->stats.tx.bcast.num);
5616 	DP_PRINT_STATS("Broadcast Success Bytes = %llu",
5617 			peer->stats.tx.bcast.bytes);
5618 	DP_PRINT_STATS("Packets Failed = %d",
5619 			peer->stats.tx.tx_failed);
5620 	DP_PRINT_STATS("Packets In OFDMA = %d",
5621 			peer->stats.tx.ofdma);
5622 	DP_PRINT_STATS("Packets In STBC = %d",
5623 			peer->stats.tx.stbc);
5624 	DP_PRINT_STATS("Packets In LDPC = %d",
5625 			peer->stats.tx.ldpc);
5626 	DP_PRINT_STATS("Packet Retries = %d",
5627 			peer->stats.tx.retries);
5628 	DP_PRINT_STATS("MSDU's Part of AMSDU = %d",
5629 			peer->stats.tx.amsdu_cnt);
5630 	DP_PRINT_STATS("Last Packet RSSI = %d",
5631 			peer->stats.tx.last_ack_rssi);
5632 	DP_PRINT_STATS("Dropped At FW: Removed = %d",
5633 			peer->stats.tx.dropped.fw_rem);
5634 	DP_PRINT_STATS("Dropped At FW: Removed transmitted = %d",
5635 			peer->stats.tx.dropped.fw_rem_tx);
5636 	DP_PRINT_STATS("Dropped At FW: Removed Untransmitted = %d",
5637 			peer->stats.tx.dropped.fw_rem_notx);
5638 	DP_PRINT_STATS("Dropped : Age Out = %d",
5639 			peer->stats.tx.dropped.age_out);
5640 	DP_PRINT_STATS("NAWDS : ");
5641 	DP_PRINT_STATS("	Nawds multicast Drop Tx Packet = %d",
5642 			peer->stats.tx.nawds_mcast_drop);
5643 	DP_PRINT_STATS("	Nawds multicast  Tx Packet Count = %d",
5644 			peer->stats.tx.nawds_mcast.num);
5645 	DP_PRINT_STATS("	Nawds multicast  Tx Packet Bytes = %llu",
5646 			peer->stats.tx.nawds_mcast.bytes);
5647 
5648 	DP_PRINT_STATS("Rate Info:");
5649 
5650 	for (pkt_type = 0; pkt_type < DOT11_MAX; pkt_type++) {
5651 		index = 0;
5652 		for (mcs = 0; mcs < MAX_MCS; mcs++) {
5653 			if (!dp_rate_string[pkt_type][mcs].valid)
5654 				continue;
5655 
5656 			DP_PRINT_STATS("	%s = %d",
5657 					dp_rate_string[pkt_type][mcs].mcs_type,
5658 					peer->stats.tx.pkt_type[pkt_type].
5659 					mcs_count[mcs]);
5660 		}
5661 
5662 		DP_PRINT_STATS("\n");
5663 	}
5664 
5665 	DP_PRINT_STATS("SGI = "
5666 			" 0.8us %d"
5667 			" 0.4us %d"
5668 			" 1.6us %d"
5669 			" 3.2us %d",
5670 			peer->stats.tx.sgi_count[0],
5671 			peer->stats.tx.sgi_count[1],
5672 			peer->stats.tx.sgi_count[2],
5673 			peer->stats.tx.sgi_count[3]);
5674 	DP_PRINT_STATS("Excess Retries per AC ");
5675 	DP_PRINT_STATS("	 Best effort = %d",
5676 			peer->stats.tx.excess_retries_per_ac[0]);
5677 	DP_PRINT_STATS("	 Background= %d",
5678 			peer->stats.tx.excess_retries_per_ac[1]);
5679 	DP_PRINT_STATS("	 Video = %d",
5680 			peer->stats.tx.excess_retries_per_ac[2]);
5681 	DP_PRINT_STATS("	 Voice = %d",
5682 			peer->stats.tx.excess_retries_per_ac[3]);
5683 	DP_PRINT_STATS("BW Counts = 20MHZ %d 40MHZ %d 80MHZ %d 160MHZ %d\n",
5684 			peer->stats.tx.bw[2], peer->stats.tx.bw[3],
5685 			peer->stats.tx.bw[4], peer->stats.tx.bw[5]);
5686 
5687 	index = 0;
5688 	for (i = 0; i < SS_COUNT; i++) {
5689 		index += qdf_snprint(&nss[index], DP_NSS_LENGTH - index,
5690 				" %d", peer->stats.tx.nss[i]);
5691 	}
5692 	DP_PRINT_STATS("NSS(1-8) = %s",
5693 			nss);
5694 
5695 	DP_PRINT_STATS("Aggregation:");
5696 	DP_PRINT_STATS("	Number of Msdu's Part of Amsdu = %d",
5697 			peer->stats.tx.amsdu_cnt);
5698 	DP_PRINT_STATS("	Number of Msdu's With No Msdu Level Aggregation = %d\n",
5699 			peer->stats.tx.non_amsdu_cnt);
5700 
5701 	DP_PRINT_STATS("Node Rx Stats:");
5702 	DP_PRINT_STATS("Packets Sent To Stack = %d",
5703 			peer->stats.rx.to_stack.num);
5704 	DP_PRINT_STATS("Bytes Sent To Stack = %llu",
5705 			peer->stats.rx.to_stack.bytes);
5706 	for (i = 0; i <  CDP_MAX_RX_RINGS; i++) {
5707 		DP_PRINT_STATS("Ring Id = %d", i);
5708 		DP_PRINT_STATS("	Packets Received = %d",
5709 				peer->stats.rx.rcvd_reo[i].num);
5710 		DP_PRINT_STATS("	Bytes Received = %llu",
5711 				peer->stats.rx.rcvd_reo[i].bytes);
5712 	}
5713 	DP_PRINT_STATS("Multicast Packets Received = %d",
5714 			peer->stats.rx.multicast.num);
5715 	DP_PRINT_STATS("Multicast Bytes Received = %llu",
5716 			peer->stats.rx.multicast.bytes);
5717 	DP_PRINT_STATS("Broadcast Packets Received = %d",
5718 			peer->stats.rx.bcast.num);
5719 	DP_PRINT_STATS("Broadcast Bytes Received = %llu",
5720 			peer->stats.rx.bcast.bytes);
5721 	DP_PRINT_STATS("Intra BSS Packets Received = %d",
5722 			peer->stats.rx.intra_bss.pkts.num);
5723 	DP_PRINT_STATS("Intra BSS Bytes Received = %llu",
5724 			peer->stats.rx.intra_bss.pkts.bytes);
5725 	DP_PRINT_STATS("Raw Packets Received = %d",
5726 			peer->stats.rx.raw.num);
5727 	DP_PRINT_STATS("Raw Bytes Received = %llu",
5728 			peer->stats.rx.raw.bytes);
5729 	DP_PRINT_STATS("Errors: MIC Errors = %d",
5730 			peer->stats.rx.err.mic_err);
5731 	DP_PRINT_STATS("Errors: Decryption Errors = %d",
5732 			peer->stats.rx.err.decrypt_err);
5733 	DP_PRINT_STATS("Msdu's Received With No Mpdu Level Aggregation = %d",
5734 			peer->stats.rx.non_ampdu_cnt);
5735 	DP_PRINT_STATS("Msdu's Received As Part of Ampdu = %d",
5736 			peer->stats.rx.ampdu_cnt);
5737 	DP_PRINT_STATS("Msdu's Received Not Part of Amsdu's = %d",
5738 			peer->stats.rx.non_amsdu_cnt);
5739 	DP_PRINT_STATS("MSDUs Received As Part of Amsdu = %d",
5740 			peer->stats.rx.amsdu_cnt);
5741 	DP_PRINT_STATS("NAWDS : ");
5742 	DP_PRINT_STATS("	Nawds multicast Drop Rx Packet = %d",
5743 			peer->stats.rx.nawds_mcast_drop);
5744 	DP_PRINT_STATS("SGI ="
5745 			" 0.8us %d"
5746 			" 0.4us %d"
5747 			" 1.6us %d"
5748 			" 3.2us %d",
5749 			peer->stats.rx.sgi_count[0],
5750 			peer->stats.rx.sgi_count[1],
5751 			peer->stats.rx.sgi_count[2],
5752 			peer->stats.rx.sgi_count[3]);
5753 	DP_PRINT_STATS("BW Counts = 20MHZ %d 40MHZ %d 80MHZ %d 160MHZ %d",
5754 			peer->stats.rx.bw[0], peer->stats.rx.bw[1],
5755 			peer->stats.rx.bw[2], peer->stats.rx.bw[3]);
5756 	DP_PRINT_STATS("Reception Type ="
5757 			" SU %d,"
5758 			" MU_MIMO %d,"
5759 			" MU_OFDMA %d,"
5760 			" MU_OFDMA_MIMO %d",
5761 			peer->stats.rx.reception_type[0],
5762 			peer->stats.rx.reception_type[1],
5763 			peer->stats.rx.reception_type[2],
5764 			peer->stats.rx.reception_type[3]);
5765 
5766 
5767 	for (pkt_type = 0; pkt_type < DOT11_MAX; pkt_type++) {
5768 		index = 0;
5769 		for (mcs = 0; mcs < MAX_MCS; mcs++) {
5770 			if (!dp_rate_string[pkt_type][mcs].valid)
5771 				continue;
5772 
5773 			DP_PRINT_STATS("	%s = %d",
5774 					dp_rate_string[pkt_type][mcs].mcs_type,
5775 					peer->stats.rx.pkt_type[pkt_type].
5776 					mcs_count[mcs]);
5777 		}
5778 
5779 		DP_PRINT_STATS("\n");
5780 	}
5781 
5782 	index = 0;
5783 	for (i = 0; i < SS_COUNT; i++) {
5784 		index += qdf_snprint(&nss[index], DP_NSS_LENGTH - index,
5785 				" %d", peer->stats.rx.nss[i]);
5786 	}
5787 	DP_PRINT_STATS("NSS(1-8) = %s",
5788 			nss);
5789 
5790 	DP_PRINT_STATS("Aggregation:");
5791 	DP_PRINT_STATS("	Msdu's Part of Ampdu = %d",
5792 			peer->stats.rx.ampdu_cnt);
5793 	DP_PRINT_STATS("	Msdu's With No Mpdu Level Aggregation = %d",
5794 			peer->stats.rx.non_ampdu_cnt);
5795 	DP_PRINT_STATS("	Msdu's Part of Amsdu = %d",
5796 			peer->stats.rx.amsdu_cnt);
5797 	DP_PRINT_STATS("	Msdu's With No Msdu Level Aggregation = %d",
5798 			peer->stats.rx.non_amsdu_cnt);
5799 }
5800 
5801 /**
5802  * dp_print_host_stats()- Function to print the stats aggregated at host
5803  * @vdev_handle: DP_VDEV handle
5804  * @type: host stats type
5805  *
5806  * Available Stat types
5807  * TXRX_CLEAR_STATS  : Clear the stats
5808  * TXRX_RX_RATE_STATS: Print Rx Rate Info
5809  * TXRX_TX_RATE_STATS: Print Tx Rate Info
5810  * TXRX_TX_HOST_STATS: Print Tx Stats
5811  * TXRX_RX_HOST_STATS: Print Rx Stats
5812  * TXRX_AST_STATS: Print AST Stats
5813  * TXRX_SRNG_PTR_STATS: Print SRNG ring pointer stats
5814  *
5815  * Return: 0 on success, print error message in case of failure
5816  */
5817 static int
5818 dp_print_host_stats(struct cdp_vdev *vdev_handle, enum cdp_host_txrx_stats type)
5819 {
5820 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
5821 	struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;
5822 
5823 	dp_aggregate_pdev_stats(pdev);
5824 
5825 	switch (type) {
5826 	case TXRX_CLEAR_STATS:
5827 		dp_txrx_host_stats_clr(vdev);
5828 		break;
5829 	case TXRX_RX_RATE_STATS:
5830 		dp_print_rx_rates(vdev);
5831 		break;
5832 	case TXRX_TX_RATE_STATS:
5833 		dp_print_tx_rates(vdev);
5834 		break;
5835 	case TXRX_TX_HOST_STATS:
5836 		dp_print_pdev_tx_stats(pdev);
5837 		dp_print_soc_tx_stats(pdev->soc);
5838 		break;
5839 	case TXRX_RX_HOST_STATS:
5840 		dp_print_pdev_rx_stats(pdev);
5841 		dp_print_soc_rx_stats(pdev->soc);
5842 		break;
5843 	case TXRX_AST_STATS:
5844 		dp_print_ast_stats(pdev->soc);
5845 		dp_print_peer_table(vdev);
5846 		break;
5847 	case TXRX_SRNG_PTR_STATS:
5848 		dp_print_ring_stats(pdev);
5849 		break;
5850 	case TXRX_RX_MON_STATS:
5851 		dp_print_pdev_rx_mon_stats(pdev);
5852 		break;
5853 	default:
5854 		DP_TRACE(FATAL, "Wrong Input For TxRx Host Stats");
5855 		break;
5856 	}
5857 	return 0;
5858 }
5859 
5860 /*
5861  * dp_get_host_peer_stats()- function to print peer stats
5862  * @pdev_handle: DP_PDEV handle
5863  * @mac_addr: mac address of the peer
5864  *
5865  * Return: void
5866  */
5867 static void
5868 dp_get_host_peer_stats(struct cdp_pdev *pdev_handle, char *mac_addr)
5869 {
5870 	struct dp_peer *peer;
5871 	uint8_t local_id;
5872 	peer = (struct dp_peer *)dp_find_peer_by_addr(pdev_handle, mac_addr,
5873 			&local_id);
5874 
5875 	if (!peer) {
5876 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
5877 			"%s: Invalid peer\n", __func__);
5878 		return;
5879 	}
5880 
5881 	dp_print_peer_stats(peer);
5882 	dp_peer_rxtid_stats(peer, dp_rx_tid_stats_cb, NULL);
5883 	return;
5884 }
5885 
5886 /*
5887  * dp_ppdu_ring_reset()- Reset PPDU Stats ring
5888  * @pdev: DP_PDEV handle
5889  *
5890  * Return: void
5891  */
5892 static void
5893 dp_ppdu_ring_reset(struct dp_pdev *pdev)
5894 {
5895 	struct htt_rx_ring_tlv_filter htt_tlv_filter;
5896 	int mac_id;
5897 
5898 	qdf_mem_set(&(htt_tlv_filter), sizeof(htt_tlv_filter), 0x0);
5899 
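	/* Program an all-zero TLV filter to stop PPDU status reporting on every MAC's status ring */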
5900 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
5901 		int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id,
5902 							pdev->pdev_id);
5903 
5904 		htt_h2t_rx_ring_cfg(pdev->soc->htt_handle, mac_for_pdev,
5905 			pdev->rxdma_mon_status_ring[mac_id].hal_srng,
5906 			RXDMA_MONITOR_STATUS, RX_BUFFER_SIZE, &htt_tlv_filter);
5907 	}
5908 }
5909 
5910 /*
5911  * dp_ppdu_ring_cfg()- Configure PPDU Stats ring
5912  * @pdev: DP_PDEV handle
5913  *
5914  * Return: void
5915  */
5916 static void
5917 dp_ppdu_ring_cfg(struct dp_pdev *pdev)
5918 {
5919 	struct htt_rx_ring_tlv_filter htt_tlv_filter = {0};
5920 	int mac_id;
5921 
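	/* Enable only the PPDU-level TLVs needed for PPDU stats; per-MSDU TLVs stay disabled unless M-copy needs packet headers */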
5922 	htt_tlv_filter.mpdu_start = 1;
5923 	htt_tlv_filter.msdu_start = 0;
5924 	htt_tlv_filter.packet = 0;
5925 	htt_tlv_filter.msdu_end = 0;
5926 	htt_tlv_filter.mpdu_end = 0;
5927 	htt_tlv_filter.attention = 0;
5928 	htt_tlv_filter.ppdu_start = 1;
5929 	htt_tlv_filter.ppdu_end = 1;
5930 	htt_tlv_filter.ppdu_end_user_stats = 1;
5931 	htt_tlv_filter.ppdu_end_user_stats_ext = 1;
5932 	htt_tlv_filter.ppdu_end_status_done = 1;
5933 	htt_tlv_filter.enable_fp = 1;
5934 	htt_tlv_filter.enable_md = 0;
5935 	if (pdev->mcopy_mode) {
5936 		htt_tlv_filter.packet_header = 1;
5937 		htt_tlv_filter.enable_mo = 1;
5938 	}
5939 	htt_tlv_filter.fp_mgmt_filter = FILTER_MGMT_ALL;
5940 	htt_tlv_filter.fp_ctrl_filter = FILTER_CTRL_ALL;
5941 	htt_tlv_filter.fp_data_filter = FILTER_DATA_ALL;
5942 	htt_tlv_filter.mo_mgmt_filter = FILTER_MGMT_ALL;
5943 	htt_tlv_filter.mo_ctrl_filter = FILTER_CTRL_ALL;
5944 	htt_tlv_filter.mo_data_filter = FILTER_DATA_ALL;
5945 
5946 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
5947 		int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id,
5948 						pdev->pdev_id);
5949 
5950 		htt_h2t_rx_ring_cfg(pdev->soc->htt_handle, mac_for_pdev,
5951 			pdev->rxdma_mon_status_ring[mac_id].hal_srng,
5952 			RXDMA_MONITOR_STATUS, RX_BUFFER_SIZE, &htt_tlv_filter);
5953 	}
5954 }
5955 
5956 /*
5957  * dp_config_debug_sniffer()- API to enable/disable debug sniffer
5958  * @pdev_handle: DP_PDEV handle
5959  * @val: user provided value
5960  *
5961  * Return: void
5962  */
5963 static void
5964 dp_config_debug_sniffer(struct cdp_pdev *pdev_handle, int val)
5965 {
5966 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
5967 
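	/* val: 0 = disable sniffer/M-copy, 1 = tx sniffer, 2 = M-copy mode */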
5968 	switch (val) {
5969 	case 0:
5970 		pdev->tx_sniffer_enable = 0;
5971 		pdev->mcopy_mode = 0;
5972 
5973 		if (!pdev->pktlog_ppdu_stats && !pdev->enhanced_stats_en) {
5974 			dp_h2t_cfg_stats_msg_send(pdev, 0, pdev->pdev_id);
5975 			dp_ppdu_ring_reset(pdev);
5976 		} else if (pdev->enhanced_stats_en) {
5977 			dp_h2t_cfg_stats_msg_send(pdev,
5978 				DP_PPDU_STATS_CFG_ENH_STATS, pdev->pdev_id);
5979 		}
5980 		break;
5981 
5982 	case 1:
5983 		pdev->tx_sniffer_enable = 1;
5984 		pdev->mcopy_mode = 0;
5985 
5986 		if (!pdev->pktlog_ppdu_stats)
5987 			dp_h2t_cfg_stats_msg_send(pdev,
5988 				DP_PPDU_STATS_CFG_SNIFFER, pdev->pdev_id);
5989 		break;
5990 	case 2:
5991 		pdev->mcopy_mode = 1;
5992 		pdev->tx_sniffer_enable = 0;
5993 		if (!pdev->enhanced_stats_en)
5994 			dp_ppdu_ring_cfg(pdev);
5995 
5996 		if (!pdev->pktlog_ppdu_stats)
5997 			dp_h2t_cfg_stats_msg_send(pdev,
5998 				DP_PPDU_STATS_CFG_SNIFFER, pdev->pdev_id);
5999 		break;
6000 	default:
6001 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
6002 			"Invalid value\n");
6003 		break;
6004 	}
6005 }
6006 
6007 /*
6008  * dp_enable_enhanced_stats()- API to enable enhanced statistics
6009  * @pdev_handle: DP_PDEV handle
6010  *
6011  * Return: void
6012  */
6013 static void
6014 dp_enable_enhanced_stats(struct cdp_pdev *pdev_handle)
6015 {
6016 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
6017 	pdev->enhanced_stats_en = 1;
6018 
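	/* M-copy mode has already configured the PPDU status ring; avoid reprogramming it */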
6019 	if (!pdev->mcopy_mode)
6020 		dp_ppdu_ring_cfg(pdev);
6021 
6022 	if (!pdev->pktlog_ppdu_stats && !pdev->tx_sniffer_enable && !pdev->mcopy_mode)
6023 		dp_h2t_cfg_stats_msg_send(pdev, DP_PPDU_STATS_CFG_ENH_STATS, pdev->pdev_id);
6024 }
6025 
6026 /*
6027  * dp_disable_enhanced_stats()- API to disable enhanced statistics
6028  * @pdev_handle: DP_PDEV handle
6029  *
6030  * Return: void
6031  */
6032 static void
6033 dp_disable_enhanced_stats(struct cdp_pdev *pdev_handle)
6034 {
6035 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
6036 
6037 	pdev->enhanced_stats_en = 0;
6038 
6039 	if (!pdev->pktlog_ppdu_stats && !pdev->tx_sniffer_enable && !pdev->mcopy_mode)
6040 		dp_h2t_cfg_stats_msg_send(pdev, 0, pdev->pdev_id);
6041 
6042 	if (!pdev->mcopy_mode)
6043 		dp_ppdu_ring_reset(pdev);
6044 }
6045 
6046 /*
6047  * dp_get_fw_peer_stats()- function to request peer stats from FW
6048  * @pdev_handle: DP_PDEV handle
6049  * @mac_addr: mac address of the peer
6050  * @cap: Type of htt stats requested
6051  *
6052  * Currently supporting MAC ID based requests only
6053  *	1: HTT_PEER_STATS_REQ_MODE_NO_QUERY
6054  *	2: HTT_PEER_STATS_REQ_MODE_QUERY_TQM
6055  *	3: HTT_PEER_STATS_REQ_MODE_FLUSH_TQM
6056  *
6057  * Return: void
6058  */
6059 static void
6060 dp_get_fw_peer_stats(struct cdp_pdev *pdev_handle, uint8_t *mac_addr,
6061 		uint32_t cap)
6062 {
6063 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
6064 	int i;
6065 	uint32_t config_param0 = 0;
6066 	uint32_t config_param1 = 0;
6067 	uint32_t config_param2 = 0;
6068 	uint32_t config_param3 = 0;
6069 
6070 	HTT_DBG_EXT_STATS_PEER_INFO_IS_MAC_ADDR_SET(config_param0, 1);
6071 	config_param0 |= (1 << (cap + 1));
6072 
6073 	for (i = 0; i < HTT_PEER_STATS_MAX_TLV; i++) {
6074 		config_param1 |= (1 << i);
6075 	}
6076 
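	/* Pack the peer MAC address: bytes 0-3 into config_param2, bytes 4-5 into config_param3 */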
6077 	config_param2 |= (mac_addr[0] & 0x000000ff);
6078 	config_param2 |= ((mac_addr[1] << 8) & 0x0000ff00);
6079 	config_param2 |= ((mac_addr[2] << 16) & 0x00ff0000);
6080 	config_param2 |= ((mac_addr[3] << 24) & 0xff000000);
6081 
6082 	config_param3 |= (mac_addr[4] & 0x000000ff);
6083 	config_param3 |= ((mac_addr[5] << 8) & 0x0000ff00);
6084 
6085 	dp_h2t_ext_stats_msg_send(pdev, HTT_DBG_EXT_STATS_PEER_INFO,
6086 			config_param0, config_param1, config_param2,
6087 			config_param3, 0, 0, 0);
6088 
6089 }
6090 
6091 /* This struct definition will be removed from here
6092  * once it gets added to FW headers */
6093 struct httstats_cmd_req {
6094 	uint32_t	config_param0;
6095 	uint32_t	config_param1;
6096 	uint32_t	config_param2;
6097 	uint32_t	config_param3;
6098 	int		cookie;
6099 	u_int8_t	stats_id;
6100 };
6101 
6102 /*
6103  * dp_get_htt_stats: function to process the httstats request
6104  * @pdev_handle: DP pdev handle
6105  * @data: pointer to request data
6106  * @data_len: length for request data
6107  *
6108  * return: void
6109  */
6110 static void
6111 dp_get_htt_stats(struct cdp_pdev *pdev_handle, void *data, uint32_t data_len)
6112 {
6113 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
6114 	struct httstats_cmd_req *req = (struct httstats_cmd_req *)data;
6115 
6116 	QDF_ASSERT(data_len == sizeof(struct httstats_cmd_req));
6117 	dp_h2t_ext_stats_msg_send(pdev, req->stats_id,
6118 				req->config_param0, req->config_param1,
6119 				req->config_param2, req->config_param3,
6120 				req->cookie, 0, 0);
6121 }
6122 /*
6123  * dp_set_pdev_param: function to set parameters in pdev
6124  * @pdev_handle: DP pdev handle
6125  * @param: parameter type to be set
6126  * @val: value of parameter to be set
6127  *
6128  * return: void
6129  */
6130 static void dp_set_pdev_param(struct cdp_pdev *pdev_handle,
6131 		enum cdp_pdev_param_type param, uint8_t val)
6132 {
6133 	switch (param) {
6134 	case CDP_CONFIG_DEBUG_SNIFFER:
6135 		dp_config_debug_sniffer(pdev_handle, val);
6136 		break;
6137 	default:
6138 		break;
6139 	}
6140 }
6141 
6142 /*
6143  * dp_set_vdev_param: function to set parameters in vdev
6144  * @param: parameter type to be set
6145  * @val: value of parameter to be set
6146  *
6147  * return: void
6148  */
6149 static void dp_set_vdev_param(struct cdp_vdev *vdev_handle,
6150 		enum cdp_vdev_param_type param, uint32_t val)
6151 {
6152 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
6153 	switch (param) {
6154 	case CDP_ENABLE_WDS:
6155 		vdev->wds_enabled = val;
6156 		break;
6157 	case CDP_ENABLE_NAWDS:
6158 		vdev->nawds_enabled = val;
6159 		break;
6160 	case CDP_ENABLE_MCAST_EN:
6161 		vdev->mcast_enhancement_en = val;
6162 		break;
6163 	case CDP_ENABLE_PROXYSTA:
6164 		vdev->proxysta_vdev = val;
6165 		break;
6166 	case CDP_UPDATE_TDLS_FLAGS:
6167 		vdev->tdls_link_connected = val;
6168 		break;
6169 	case CDP_CFG_WDS_AGING_TIMER:
6170 		if (val == 0)
6171 			qdf_timer_stop(&vdev->pdev->soc->wds_aging_timer);
6172 		else if (val != vdev->wds_aging_timer_val)
6173 			qdf_timer_mod(&vdev->pdev->soc->wds_aging_timer, val);
6174 
6175 		vdev->wds_aging_timer_val = val;
6176 		break;
6177 	case CDP_ENABLE_AP_BRIDGE:
6178 		if (wlan_op_mode_sta != vdev->opmode)
6179 			vdev->ap_bridge_enabled = val;
6180 		else
6181 			vdev->ap_bridge_enabled = false;
6182 		break;
6183 	case CDP_ENABLE_CIPHER:
6184 		vdev->sec_type = val;
6185 		break;
6186 	case CDP_ENABLE_QWRAP_ISOLATION:
6187 		vdev->isolation_vdev = val;
6188 		break;
6189 	default:
6190 		break;
6191 	}
6192 
6193 	dp_tx_vdev_update_search_flags(vdev);
6194 }
6195 
6196 /**
6197  * dp_peer_set_nawds: set nawds bit in peer
6198  * @peer_handle: pointer to peer
6199  * @value: enable/disable nawds
6200  *
6201  * return: void
6202  */
6203 static void dp_peer_set_nawds(struct cdp_peer *peer_handle, uint8_t value)
6204 {
6205 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
6206 	peer->nawds_enabled = value;
6207 }
6208 
6209 /*
6210  * dp_set_vdev_dscp_tid_map_wifi3(): Update Map ID selected for particular vdev
6211  * @vdev_handle: DP_VDEV handle
6212  * @map_id:ID of map that needs to be updated
6213  *
6214  * Return: void
6215  */
6216 static void dp_set_vdev_dscp_tid_map_wifi3(struct cdp_vdev *vdev_handle,
6217 		uint8_t map_id)
6218 {
6219 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
6220 	vdev->dscp_tid_map_id = map_id;
6221 	return;
6222 }
6223 
6224 /*
6225  * dp_txrx_stats_publish(): publish pdev stats into a buffer
6226  * @pdev_handle: DP_PDEV handle
6227  * @buf: to hold pdev_stats
6228  *
6229  * Return: int
6230  */
6231 static int
6232 dp_txrx_stats_publish(struct cdp_pdev *pdev_handle, void *buf)
6233 {
6234 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
6235 	struct cdp_pdev_stats *buffer = (struct cdp_pdev_stats *) buf;
6236 	struct cdp_txrx_stats_req req = {0,};
6237 
6238 	dp_aggregate_pdev_stats(pdev);
6239 	req.stats = HTT_DBG_EXT_STATS_PDEV_TX;
6240 	req.cookie_val = 1;
6241 	dp_h2t_ext_stats_msg_send(pdev, req.stats, req.param0,
6242 				req.param1, req.param2, req.param3, 0,
6243 				req.cookie_val, 0);
6244 
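	/* Allow time for the FW stats response to update pdev->stats before issuing the next request */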
6245 	msleep(DP_MAX_SLEEP_TIME);
6246 
6247 	req.stats = HTT_DBG_EXT_STATS_PDEV_RX;
6248 	req.cookie_val = 1;
6249 	dp_h2t_ext_stats_msg_send(pdev, req.stats, req.param0,
6250 				req.param1, req.param2, req.param3, 0,
6251 				req.cookie_val, 0);
6252 
6253 	msleep(DP_MAX_SLEEP_TIME);
6254 	qdf_mem_copy(buffer, &pdev->stats, sizeof(pdev->stats));
6255 
6256 	return TXRX_STATS_LEVEL;
6257 }
6258 
6259 /**
6260  * dp_set_pdev_dscp_tid_map_wifi3(): update dscp tid map in pdev
6261  * @pdev: DP_PDEV handle
6262  * @map_id: ID of map that needs to be updated
6263  * @tos: index value in map
6264  * @tid: tid value passed by the user
6265  *
6266  * Return: void
6267  */
6268 static void dp_set_pdev_dscp_tid_map_wifi3(struct cdp_pdev *pdev_handle,
6269 		uint8_t map_id, uint8_t tos, uint8_t tid)
6270 {
6271 	uint8_t dscp;
6272 	struct dp_pdev *pdev = (struct dp_pdev *) pdev_handle;
6273 	dscp = (tos >> DP_IP_DSCP_SHIFT) & DP_IP_DSCP_MASK;
6274 	pdev->dscp_tid_map[map_id][dscp] = tid;
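	/* Only map ids within the HW DSCP-TID table range are programmed into hardware */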
6275 	if (map_id < HAL_MAX_HW_DSCP_TID_MAPS)
6276 		hal_tx_update_dscp_tid(pdev->soc->hal_soc, tid,
6277 			map_id, dscp);
6278 	return;
6279 }
6280 
6281 /**
6282  * dp_fw_stats_process(): Process TxRX FW stats request
6283  * @vdev_handle: DP VDEV handle
6284  * @req: stats request
6285  *
6286  * return: int
6287  */
6288 static int dp_fw_stats_process(struct cdp_vdev *vdev_handle,
6289 		struct cdp_txrx_stats_req *req)
6290 {
6291 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
6292 	struct dp_pdev *pdev = NULL;
6293 	uint32_t stats = req->stats;
6294 	uint8_t mac_id = req->mac_id;
6295 
6296 	if (!vdev) {
6297 		DP_TRACE(NONE, "VDEV not found");
6298 		return 1;
6299 	}
6300 	pdev = vdev->pdev;
6301 
6302 	/*
6303 	 * For the HTT_DBG_EXT_STATS_RESET command, FW expects config_param0
6304 	 * through config_param3 to be set according to the rule below:
6305 	 *
6306 	 * PARAM:
6307 	 *   - config_param0 : start_offset (stats type)
6308 	 *   - config_param1 : stats bmask from start offset
6309 	 *   - config_param2 : stats bmask from start offset + 32
6310 	 *   - config_param3 : stats bmask from start offset + 64
6311 	 */
6312 	if (req->stats == CDP_TXRX_STATS_0) {
6313 		req->param0 = HTT_DBG_EXT_STATS_PDEV_TX;
6314 		req->param1 = 0xFFFFFFFF;
6315 		req->param2 = 0xFFFFFFFF;
6316 		req->param3 = 0xFFFFFFFF;
6317 	}
6318 
6319 	return dp_h2t_ext_stats_msg_send(pdev, stats, req->param0,
6320 				req->param1, req->param2, req->param3,
6321 				0, 0, mac_id);
6322 }
6323 
6324 /**
6325  * dp_txrx_stats_request - function to map to firmware and host stats
6326  * @vdev: virtual handle
6327  * @req: stats request
6328  *
6329  * Return: integer
6330  */
6331 static int dp_txrx_stats_request(struct cdp_vdev *vdev,
6332 		struct cdp_txrx_stats_req *req)
6333 {
6334 	int host_stats;
6335 	int fw_stats;
6336 	enum cdp_stats stats;
6337 
6338 	if (!vdev || !req) {
6339 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
6340 				"Invalid vdev/req instance");
6341 		return 0;
6342 	}
6343 
6344 	stats = req->stats;
6345 	if (stats >= CDP_TXRX_MAX_STATS)
6346 		return 0;
6347 
6348 	/*
6349 	 * DP_CURR_FW_STATS_AVAIL: no of FW stats currently available
6350 	 *			has to be updated if new FW HTT stats added
6351 	 */
6352 	if (stats > CDP_TXRX_STATS_HTT_MAX)
6353 		stats = stats + DP_CURR_FW_STATS_AVAIL - DP_HTT_DBG_EXT_STATS_MAX;
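	/* Translate the CDP stats id into separate FW and host stats ids via the mapping table */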
6354 	fw_stats = dp_stats_mapping_table[stats][STATS_FW];
6355 	host_stats = dp_stats_mapping_table[stats][STATS_HOST];
6356 
6357 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
6358 		 "stats: %u fw_stats_type: %d host_stats_type: %d",
6359 		  stats, fw_stats, host_stats);
6360 
6361 	if (fw_stats != TXRX_FW_STATS_INVALID) {
6362 		/* update request with FW stats type */
6363 		req->stats = fw_stats;
6364 		return dp_fw_stats_process(vdev, req);
6365 	}
6366 
6367 	if ((host_stats != TXRX_HOST_STATS_INVALID) &&
6368 			(host_stats <= TXRX_HOST_STATS_MAX))
6369 		return dp_print_host_stats(vdev, host_stats);
6370 	else
6371 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
6372 				"Wrong Input for TxRx Stats");
6373 
6374 	return 0;
6375 }
6376 
6377 /*
6378  * dp_print_napi_stats(): NAPI stats
6379  * @soc - soc handle
6380  */
6381 static void dp_print_napi_stats(struct dp_soc *soc)
6382 {
6383 	hif_print_napi_stats(soc->hif_handle);
6384 }
6385 
6386 /*
6387  * dp_print_per_ring_stats(): Packet count per ring
6388  * @soc - soc handle
6389  */
6390 static void dp_print_per_ring_stats(struct dp_soc *soc)
6391 {
6392 	uint8_t ring;
6393 	uint16_t core;
6394 	uint64_t total_packets;
6395 
6396 	DP_TRACE(FATAL, "Reo packets per ring:");
6397 	for (ring = 0; ring < MAX_REO_DEST_RINGS; ring++) {
6398 		total_packets = 0;
6399 		DP_TRACE(FATAL, "Packets on ring %u:", ring);
6400 		for (core = 0; core < NR_CPUS; core++) {
6401 			DP_TRACE(FATAL, "Packets arriving on core %u: %llu",
6402 				core, soc->stats.rx.ring_packets[core][ring]);
6403 			total_packets += soc->stats.rx.ring_packets[core][ring];
6404 		}
6405 		DP_TRACE(FATAL, "Total packets on ring %u: %llu",
6406 			ring, total_packets);
6407 	}
6408 }
6409 
6410 /*
6411  * dp_txrx_path_stats() - Function to display Tx/Rx path statistics
6412  * @soc - soc handle
6413  *
6414  * return: none
6415  */
6416 static void dp_txrx_path_stats(struct dp_soc *soc)
6417 {
6418 	uint8_t error_code;
6419 	uint8_t loop_pdev;
6420 	struct dp_pdev *pdev;
6421 	uint8_t i;
6422 
6423 	for (loop_pdev = 0; loop_pdev < soc->pdev_count; loop_pdev++) {
6424 
6425 		pdev = soc->pdev_list[loop_pdev];
6426 		dp_aggregate_pdev_stats(pdev);
6427 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
6428 			"Tx path Statistics:");
6429 
6430 		DP_TRACE(FATAL, "from stack: %u msdus (%llu bytes)",
6431 			pdev->stats.tx_i.rcvd.num,
6432 			pdev->stats.tx_i.rcvd.bytes);
6433 		DP_TRACE(FATAL, "processed from host: %u msdus (%llu bytes)",
6434 			pdev->stats.tx_i.processed.num,
6435 			pdev->stats.tx_i.processed.bytes);
6436 		DP_TRACE(FATAL, "successfully transmitted: %u msdus (%llu bytes)",
6437 			pdev->stats.tx.tx_success.num,
6438 			pdev->stats.tx.tx_success.bytes);
6439 
6440 		DP_TRACE(FATAL, "Dropped in host:");
6441 		DP_TRACE(FATAL, "Total packets dropped: %u,",
6442 			pdev->stats.tx_i.dropped.dropped_pkt.num);
6443 		DP_TRACE(FATAL, "Descriptor not available: %u",
6444 			pdev->stats.tx_i.dropped.desc_na);
6445 		DP_TRACE(FATAL, "Ring full: %u",
6446 			pdev->stats.tx_i.dropped.ring_full);
6447 		DP_TRACE(FATAL, "Enqueue fail: %u",
6448 			pdev->stats.tx_i.dropped.enqueue_fail);
6449 		DP_TRACE(FATAL, "DMA Error: %u",
6450 			pdev->stats.tx_i.dropped.dma_error);
6451 
6452 		DP_TRACE(FATAL, "Dropped in hardware:");
6453 		DP_TRACE(FATAL, "total packets dropped: %u",
6454 			pdev->stats.tx.tx_failed);
6455 		DP_TRACE(FATAL, "mpdu age out: %u",
6456 			pdev->stats.tx.dropped.age_out);
6457 		DP_TRACE(FATAL, "firmware removed: %u",
6458 			pdev->stats.tx.dropped.fw_rem);
6459 		DP_TRACE(FATAL, "firmware removed tx: %u",
6460 			pdev->stats.tx.dropped.fw_rem_tx);
6461 		DP_TRACE(FATAL, "firmware removed notx %u",
6462 			pdev->stats.tx.dropped.fw_rem_notx);
6463 		DP_TRACE(FATAL, "peer_invalid: %u",
6464 			pdev->soc->stats.tx.tx_invalid_peer.num);
6465 
6466 
6467 		DP_TRACE(FATAL, "Tx packets sent per interrupt:");
6468 		DP_TRACE(FATAL, "Single Packet: %u",
6469 			pdev->stats.tx_comp_histogram.pkts_1);
6470 		DP_TRACE(FATAL, "2-20 Packets:  %u",
6471 			pdev->stats.tx_comp_histogram.pkts_2_20);
6472 		DP_TRACE(FATAL, "21-40 Packets: %u",
6473 			pdev->stats.tx_comp_histogram.pkts_21_40);
6474 		DP_TRACE(FATAL, "41-60 Packets: %u",
6475 			pdev->stats.tx_comp_histogram.pkts_41_60);
6476 		DP_TRACE(FATAL, "61-80 Packets: %u",
6477 			pdev->stats.tx_comp_histogram.pkts_61_80);
6478 		DP_TRACE(FATAL, "81-100 Packets: %u",
6479 			pdev->stats.tx_comp_histogram.pkts_81_100);
6480 		DP_TRACE(FATAL, "101-200 Packets: %u",
6481 			pdev->stats.tx_comp_histogram.pkts_101_200);
6482 		DP_TRACE(FATAL, "   201+ Packets: %u",
6483 			pdev->stats.tx_comp_histogram.pkts_201_plus);
6484 
6485 		DP_TRACE(FATAL, "Rx path statistics");
6486 
6487 		DP_TRACE(FATAL, "delivered %u msdus ( %llu bytes),",
6488 			pdev->stats.rx.to_stack.num,
6489 			pdev->stats.rx.to_stack.bytes);
6490 		for (i = 0; i <  CDP_MAX_RX_RINGS; i++)
6491 			DP_TRACE(FATAL, "received on reo[%d] %u msdus ( %llu bytes),",
6492 					i, pdev->stats.rx.rcvd_reo[i].num,
6493 					pdev->stats.rx.rcvd_reo[i].bytes);
6494 		DP_TRACE(FATAL, "intra-bss packets %u msdus ( %llu bytes),",
6495 			pdev->stats.rx.intra_bss.pkts.num,
6496 			pdev->stats.rx.intra_bss.pkts.bytes);
6497 		DP_TRACE(FATAL, "intra-bss fails %u msdus ( %llu bytes),",
6498 			pdev->stats.rx.intra_bss.fail.num,
6499 			pdev->stats.rx.intra_bss.fail.bytes);
6500 		DP_TRACE(FATAL, "raw packets %u msdus ( %llu bytes),",
6501 			pdev->stats.rx.raw.num,
6502 			pdev->stats.rx.raw.bytes);
6503 		DP_TRACE(FATAL, "dropped: error %u msdus",
6504 			pdev->stats.rx.err.mic_err);
6505 		DP_TRACE(FATAL, "peer invalid %u",
6506 			pdev->soc->stats.rx.err.rx_invalid_peer.num);
6507 
6508 		DP_TRACE(FATAL, "Reo Statistics");
6509 		DP_TRACE(FATAL, "rbm error: %u msdus",
6510 			pdev->soc->stats.rx.err.invalid_rbm);
6511 		DP_TRACE(FATAL, "hal ring access fail: %u msdus",
6512 			pdev->soc->stats.rx.err.hal_ring_access_fail);
6513 
6514 		DP_TRACE(FATAL, "Reo errors");
6515 
6516 		for (error_code = 0; error_code < HAL_REO_ERR_MAX;
6517 				error_code++) {
6518 			DP_TRACE(FATAL, "Reo error number (%u): %u msdus",
6519 				error_code,
6520 				pdev->soc->stats.rx.err.reo_error[error_code]);
6521 		}
6522 
6523 		for (error_code = 0; error_code < HAL_RXDMA_ERR_MAX;
6524 				error_code++) {
6525 			DP_TRACE(FATAL, "Rxdma error number (%u): %u msdus",
6526 				error_code,
6527 				pdev->soc->stats.rx.err
6528 				.rxdma_error[error_code]);
6529 		}
6530 
6531 		DP_TRACE(FATAL, "Rx packets reaped per interrupt:");
6532 		DP_TRACE(FATAL, "Single Packet: %u",
6533 			 pdev->stats.rx_ind_histogram.pkts_1);
6534 		DP_TRACE(FATAL, "2-20 Packets:  %u",
6535 			 pdev->stats.rx_ind_histogram.pkts_2_20);
6536 		DP_TRACE(FATAL, "21-40 Packets: %u",
6537 			 pdev->stats.rx_ind_histogram.pkts_21_40);
6538 		DP_TRACE(FATAL, "41-60 Packets: %u",
6539 			 pdev->stats.rx_ind_histogram.pkts_41_60);
6540 		DP_TRACE(FATAL, "61-80 Packets: %u",
6541 			 pdev->stats.rx_ind_histogram.pkts_61_80);
6542 		DP_TRACE(FATAL, "81-100 Packets: %u",
6543 			 pdev->stats.rx_ind_histogram.pkts_81_100);
6544 		DP_TRACE(FATAL, "101-200 Packets: %u",
6545 			 pdev->stats.rx_ind_histogram.pkts_101_200);
6546 		DP_TRACE(FATAL, "   201+ Packets: %u",
6547 			 pdev->stats.rx_ind_histogram.pkts_201_plus);
6548 
6549 		DP_TRACE_STATS(ERROR, "%s: tso_enable: %u lro_enable: %u rx_hash: %u napi_enable: %u",
6550 			__func__,
6551 			pdev->soc->wlan_cfg_ctx->tso_enabled,
6552 			pdev->soc->wlan_cfg_ctx->lro_enabled,
6553 			pdev->soc->wlan_cfg_ctx->rx_hash,
6554 			pdev->soc->wlan_cfg_ctx->napi_enabled);
6555 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
6556 		DP_TRACE_STATS(ERROR, "%s: Tx flow stop queue: %u tx flow start queue offset: %u",
6557 			__func__,
6558 			pdev->soc->wlan_cfg_ctx->tx_flow_stop_queue_threshold,
6559 			pdev->soc->wlan_cfg_ctx->tx_flow_start_queue_offset);
6560 #endif
6561 	}
6562 }
6563 
6564 /*
6565  * dp_txrx_dump_stats() - Dump statistics
6566  * @value - Statistics option
6567  */
6568 static QDF_STATUS dp_txrx_dump_stats(void *psoc, uint16_t value,
6569 				     enum qdf_stats_verbosity_level level)
6570 {
6571 	struct dp_soc *soc =
6572 		(struct dp_soc *)psoc;
6573 	QDF_STATUS status = QDF_STATUS_SUCCESS;
6574 
6575 	if (!soc) {
6576 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
6577 			"%s: soc is NULL", __func__);
6578 		return QDF_STATUS_E_INVAL;
6579 	}
6580 
6581 	switch (value) {
6582 	case CDP_TXRX_PATH_STATS:
6583 		dp_txrx_path_stats(soc);
6584 		break;
6585 
6586 	case CDP_RX_RING_STATS:
6587 		dp_print_per_ring_stats(soc);
6588 		break;
6589 
6590 	case CDP_TXRX_TSO_STATS:
6591 		/* TODO: NOT IMPLEMENTED */
6592 		break;
6593 
6594 	case CDP_DUMP_TX_FLOW_POOL_INFO:
6595 		cdp_dump_flow_pool_info((struct cdp_soc_t *)soc);
6596 		break;
6597 
6598 	case CDP_DP_NAPI_STATS:
6599 		dp_print_napi_stats(soc);
6600 		break;
6601 
6602 	case CDP_TXRX_DESC_STATS:
6603 		/* TODO: NOT IMPLEMENTED */
6604 		break;
6605 
6606 	default:
6607 		status = QDF_STATUS_E_INVAL;
6608 		break;
6609 	}
6610 
6611 	return status;
6612 
6613 }
6614 
6615 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
6616 /**
6617  * dp_update_flow_control_parameters() - API to store flow control
6618  *                            config parameters
6619  * @soc: soc handle
6620  * @params: ini parameter handle
6621  *
6622  * Return: void
6623  */
6624 static inline
6625 void dp_update_flow_control_parameters(struct dp_soc *soc,
6626 				struct cdp_config_params *params)
6627 {
6628 	soc->wlan_cfg_ctx->tx_flow_stop_queue_threshold =
6629 					params->tx_flow_stop_queue_threshold;
6630 	soc->wlan_cfg_ctx->tx_flow_start_queue_offset =
6631 					params->tx_flow_start_queue_offset;
6632 }
6633 #else
6634 static inline
6635 void dp_update_flow_control_parameters(struct dp_soc *soc,
6636 				struct cdp_config_params *params)
6637 {
6638 }
6639 #endif
6640 
6641 /**
6642  * dp_update_config_parameters() - API to store datapath
6643  *                            config parameters
6644  * @soc: soc handle
6645  * @params: ini parameter handle
6646  *
6647  * Return: status
6648  */
6649 static
6650 QDF_STATUS dp_update_config_parameters(struct cdp_soc *psoc,
6651 				struct cdp_config_params *params)
6652 {
6653 	struct dp_soc *soc = (struct dp_soc *)psoc;
6654 
6655 	if (!(soc)) {
6656 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
6657 				"%s: Invalid handle", __func__);
6658 		return QDF_STATUS_E_INVAL;
6659 	}
6660 
6661 	soc->wlan_cfg_ctx->tso_enabled = params->tso_enable;
6662 	soc->wlan_cfg_ctx->lro_enabled = params->lro_enable;
6663 	soc->wlan_cfg_ctx->rx_hash = params->flow_steering_enable;
6664 	soc->wlan_cfg_ctx->tcp_udp_checksumoffload =
6665 				params->tcp_udp_checksumoffload;
6666 	soc->wlan_cfg_ctx->napi_enabled = params->napi_enable;
6667 
6668 	dp_update_flow_control_parameters(soc, params);
6669 
6670 	return QDF_STATUS_SUCCESS;
6671 }
6672 
6673 /**
6674  * dp_txrx_set_wds_rx_policy() - API to set the WDS rx policy
6675  *                            on a vdev
6676  * @vdev_handle - datapath vdev handle
6677  * @val - WDS rx policy flags
6678  *
6679  * Return: void
6680  */
6681 #ifdef WDS_VENDOR_EXTENSION
6682 void
6683 dp_txrx_set_wds_rx_policy(
6684 		struct cdp_vdev *vdev_handle,
6685 		u_int32_t val)
6686 {
6687 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
6688 	struct dp_peer *peer;
6689 	if (vdev->opmode == wlan_op_mode_ap) {
6690 		/* for ap, set it on bss_peer */
6691 		TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
6692 			if (peer->bss_peer) {
6693 				peer->wds_ecm.wds_rx_filter = 1;
6694 				peer->wds_ecm.wds_rx_ucast_4addr = (val & WDS_POLICY_RX_UCAST_4ADDR) ? 1:0;
6695 				peer->wds_ecm.wds_rx_mcast_4addr = (val & WDS_POLICY_RX_MCAST_4ADDR) ? 1:0;
6696 				break;
6697 			}
6698 		}
6699 	} else if (vdev->opmode == wlan_op_mode_sta) {
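		/* STA mode: apply the policy on the first peer, which is expected to be the BSS (AP) peer */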
6700 		peer = TAILQ_FIRST(&vdev->peer_list);
6701 		peer->wds_ecm.wds_rx_filter = 1;
6702 		peer->wds_ecm.wds_rx_ucast_4addr = (val & WDS_POLICY_RX_UCAST_4ADDR) ? 1:0;
6703 		peer->wds_ecm.wds_rx_mcast_4addr = (val & WDS_POLICY_RX_MCAST_4ADDR) ? 1:0;
6704 	}
6705 }
6706 
6707 /**
6708  * dp_txrx_peer_wds_tx_policy_update() - API to set tx wds policy
6709  *
6710  * @peer_handle - datapath peer handle
6711  * @wds_tx_ucast: policy for unicast transmission
6712  * @wds_tx_mcast: policy for multicast transmission
6713  *
6714  * Return: void
6715  */
6716 void
6717 dp_txrx_peer_wds_tx_policy_update(struct cdp_peer *peer_handle,
6718 		int wds_tx_ucast, int wds_tx_mcast)
6719 {
6720 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
6721 	if (wds_tx_ucast || wds_tx_mcast) {
6722 		peer->wds_enabled = 1;
6723 		peer->wds_ecm.wds_tx_ucast_4addr = wds_tx_ucast;
6724 		peer->wds_ecm.wds_tx_mcast_4addr = wds_tx_mcast;
6725 	} else {
6726 		peer->wds_enabled = 0;
6727 		peer->wds_ecm.wds_tx_ucast_4addr = 0;
6728 		peer->wds_ecm.wds_tx_mcast_4addr = 0;
6729 	}
6730 
6731 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
6732 			FL("Policy Update set to: "
6733 				"peer->wds_enabled %d "
6734 				"peer->wds_ecm.wds_tx_ucast_4addr %d "
6735 				"peer->wds_ecm.wds_tx_mcast_4addr %d\n"),
6736 				peer->wds_enabled, peer->wds_ecm.wds_tx_ucast_4addr,
6737 				peer->wds_ecm.wds_tx_mcast_4addr);
6738 	return;
6739 }
6740 #endif
6741 
6742 static struct cdp_wds_ops dp_ops_wds = {
6743 	.vdev_set_wds = dp_vdev_set_wds,
6744 #ifdef WDS_VENDOR_EXTENSION
6745 	.txrx_set_wds_rx_policy = dp_txrx_set_wds_rx_policy,
6746 	.txrx_wds_peer_tx_policy_update = dp_txrx_peer_wds_tx_policy_update,
6747 #endif
6748 };
6749 
6750 /*
6751  * dp_peer_delete_ast_entries(): Delete all AST entries for a peer
6752  * @soc - datapath soc handle
6753  * @peer - datapath peer handle
6754  *
6755  * Delete the AST entries belonging to a peer
6756  */
6757 #ifdef FEATURE_AST
6758 static inline void dp_peer_delete_ast_entries(struct dp_soc *soc,
6759 		struct dp_peer *peer)
6760 {
6761 	struct dp_ast_entry *ast_entry, *temp_ast_entry;
6762 
6763 	qdf_spin_lock_bh(&soc->ast_lock);
6764 	DP_PEER_ITERATE_ASE_LIST(peer, ast_entry, temp_ast_entry)
6765 		dp_peer_del_ast(soc, ast_entry);
6766 
6767 	qdf_spin_unlock_bh(&soc->ast_lock);
6768 }
6769 #else
6770 static inline void dp_peer_delete_ast_entries(struct dp_soc *soc,
6771 		struct dp_peer *peer)
6772 {
6773 }
6774 #endif
6775 
6776 /*
6777  * dp_txrx_data_tx_cb_set(): set the callback for non standard tx
6778  * @vdev_handle - datapath vdev handle
6779  * @callback - callback function
6780  * @ctxt: callback context
6781  *
6782  */
6783 static void
6784 dp_txrx_data_tx_cb_set(struct cdp_vdev *vdev_handle,
6785 		       ol_txrx_data_tx_cb callback, void *ctxt)
6786 {
6787 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
6788 
6789 	vdev->tx_non_std_data_callback.func = callback;
6790 	vdev->tx_non_std_data_callback.ctxt = ctxt;
6791 }
6792 
6793 /**
6794  * dp_pdev_get_dp_txrx_handle() - get dp handle from pdev
6795  * @pdev_hdl: datapath pdev handle
6796  *
6797  * Return: opaque pointer to dp txrx handle
6798  */
6799 static void *dp_pdev_get_dp_txrx_handle(struct cdp_pdev *pdev_hdl)
6800 {
6801 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_hdl;
6802 
6803 	return pdev->dp_txrx_handle;
6804 }
6805 
6806 /**
6807  * dp_pdev_set_dp_txrx_handle() - set dp handle in pdev
6808  * @pdev_hdl: datapath pdev handle
6809  * @dp_txrx_hdl: opaque pointer for dp_txrx_handle
6810  *
6811  * Return: void
6812  */
6813 static void
6814 dp_pdev_set_dp_txrx_handle(struct cdp_pdev *pdev_hdl, void *dp_txrx_hdl)
6815 {
6816 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_hdl;
6817 
6818 	pdev->dp_txrx_handle = dp_txrx_hdl;
6819 }
6820 
6821 /**
6822  * dp_soc_get_dp_txrx_handle() - get context for external-dp from dp soc
6823  * @soc_handle: datapath soc handle
6824  *
6825  * Return: opaque pointer to external dp (non-core DP)
6826  */
6827 static void *dp_soc_get_dp_txrx_handle(struct cdp_soc *soc_handle)
6828 {
6829 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
6830 
6831 	return soc->external_txrx_handle;
6832 }
6833 
6834 /**
6835  * dp_soc_set_dp_txrx_handle() - set external dp handle in soc
6836  * @soc_handle: datapath soc handle
6837  * @txrx_handle: opaque pointer to external dp (non-core DP)
6838  *
6839  * Return: void
6840  */
6841 static void
6842 dp_soc_set_dp_txrx_handle(struct cdp_soc *soc_handle, void *txrx_handle)
6843 {
6844 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
6845 
6846 	soc->external_txrx_handle = txrx_handle;
6847 }
6848 
6849 #ifdef FEATURE_AST
6850 static void dp_peer_teardown_wifi3(struct cdp_vdev *vdev_hdl, void *peer_hdl)
6851 {
6852 	struct dp_vdev *vdev = (struct dp_vdev *) vdev_hdl;
6853 	struct dp_peer *peer = (struct dp_peer *) peer_hdl;
6854 	struct dp_soc *soc = (struct dp_soc *) vdev->pdev->soc;
6855 
6856 	/*
6857 	 * For BSS peer, new peer is not created on alloc_node if the
6858 	 * peer with same address already exists , instead refcnt is
6859 	 * increased for existing peer. Correspondingly in delete path,
6860 	 * only refcnt is decreased; and peer is only deleted , when all
6861 	 * references are deleted. So delete_in_progress should not be set
6862 	 * for bss_peer, unless only 2 reference remains (peer map reference
6863 	 * and peer hash table reference).
6864 	 */
6865 	if (peer->bss_peer && (qdf_atomic_read(&peer->ref_cnt) > 2)) {
6866 		return;
6867 	}
6868 
6869 	peer->delete_in_progress = true;
6870 	dp_peer_delete_ast_entries(soc, peer);
6871 }
6872 #endif
6873 
6874 #ifdef ATH_SUPPORT_NAC_RSSI
6875 static QDF_STATUS dp_config_for_nac_rssi(struct cdp_vdev *vdev_handle,
6876 		enum cdp_nac_param_cmd cmd, char *bssid, char *client_macaddr,
6877 		uint8_t chan_num)
6878 {
6879 
6880 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
6881 	struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;
6882 	struct dp_soc *soc = (struct dp_soc *) vdev->pdev->soc;
6883 
6884 	pdev->nac_rssi_filtering = 1;
6885 	/* Store address of NAC (neighbour peer) which will be checked
6886 	 * against TA of received packets.
6887 	 */
6888 
6889 	if (cmd == CDP_NAC_PARAM_ADD) {
6890 		qdf_mem_copy(vdev->cdp_nac_rssi.client_mac,
6891 				client_macaddr, DP_MAC_ADDR_LEN);
6892 		vdev->cdp_nac_rssi_enabled = 1;
6893 	} else if (cmd == CDP_NAC_PARAM_DEL) {
6894 		if (!qdf_mem_cmp(vdev->cdp_nac_rssi.client_mac,
6895 			client_macaddr, DP_MAC_ADDR_LEN)) {
6896 				/* delete this peer from the list */
6897 			qdf_mem_zero(vdev->cdp_nac_rssi.client_mac,
6898 				DP_MAC_ADDR_LEN);
6899 		}
6900 		vdev->cdp_nac_rssi_enabled = 0;
6901 	}
6902 
6903 	if (soc->cdp_soc.ol_ops->config_bssid_in_fw_for_nac_rssi)
6904 		soc->cdp_soc.ol_ops->config_bssid_in_fw_for_nac_rssi
6905 			(vdev->pdev->osif_pdev, vdev->vdev_id, cmd, bssid);
6906 
6907 	return QDF_STATUS_SUCCESS;
6908 }
6909 #endif
6910 
6911 static QDF_STATUS dp_peer_map_attach_wifi3(struct cdp_soc_t  *soc_hdl,
6912 		uint32_t max_peers)
6913 {
6914 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
6915 
6916 	soc->max_peers = max_peers;
6917 
6918 	qdf_print("%s max_peers %u\n", __func__, max_peers);
6919 
6920 	if (dp_peer_find_attach(soc))
6921 		return QDF_STATUS_E_FAILURE;
6922 
6923 	return QDF_STATUS_SUCCESS;
6924 }
6925 
6926 static struct cdp_cmn_ops dp_ops_cmn = {
6927 	.txrx_soc_attach_target = dp_soc_attach_target_wifi3,
6928 	.txrx_vdev_attach = dp_vdev_attach_wifi3,
6929 	.txrx_vdev_detach = dp_vdev_detach_wifi3,
6930 	.txrx_pdev_attach = dp_pdev_attach_wifi3,
6931 	.txrx_pdev_detach = dp_pdev_detach_wifi3,
6932 	.txrx_peer_create = dp_peer_create_wifi3,
6933 	.txrx_peer_setup = dp_peer_setup_wifi3,
6934 #ifdef FEATURE_AST
6935 	.txrx_peer_teardown = dp_peer_teardown_wifi3,
6936 #else
6937 	.txrx_peer_teardown = NULL,
6938 #endif
6939 	.txrx_peer_add_ast = dp_peer_add_ast_wifi3,
6940 	.txrx_peer_del_ast = dp_peer_del_ast_wifi3,
6941 	.txrx_peer_update_ast = dp_peer_update_ast_wifi3,
6942 	.txrx_peer_ast_hash_find = dp_peer_ast_hash_find_wifi3,
6943 	.txrx_peer_ast_get_pdev_id = dp_peer_ast_get_pdev_id_wifi3,
6944 	.txrx_peer_ast_get_next_hop = dp_peer_ast_get_next_hop_wifi3,
6945 	.txrx_peer_ast_set_type = dp_peer_ast_set_type_wifi3,
6946 	.txrx_peer_delete = dp_peer_delete_wifi3,
6947 	.txrx_vdev_register = dp_vdev_register_wifi3,
6948 	.txrx_soc_detach = dp_soc_detach_wifi3,
6949 	.txrx_get_vdev_mac_addr = dp_get_vdev_mac_addr_wifi3,
6950 	.txrx_get_vdev_from_vdev_id = dp_get_vdev_from_vdev_id_wifi3,
6951 	.txrx_get_ctrl_pdev_from_vdev = dp_get_ctrl_pdev_from_vdev_wifi3,
6952 	.txrx_ath_getstats = dp_get_device_stats,
6953 	.addba_requestprocess = dp_addba_requestprocess_wifi3,
6954 	.addba_responsesetup = dp_addba_responsesetup_wifi3,
6955 	.delba_process = dp_delba_process_wifi3,
6956 	.set_addba_response = dp_set_addba_response,
6957 	.get_peer_mac_addr_frm_id = dp_get_peer_mac_addr_frm_id,
6958 	.flush_cache_rx_queue = NULL,
6959 	/* TODO: get API's for dscp-tid need to be added*/
6960 	.set_vdev_dscp_tid_map = dp_set_vdev_dscp_tid_map_wifi3,
6961 	.set_pdev_dscp_tid_map = dp_set_pdev_dscp_tid_map_wifi3,
6962 	.txrx_stats_request = dp_txrx_stats_request,
6963 	.txrx_set_monitor_mode = dp_vdev_set_monitor_mode,
6964 	.txrx_get_pdev_id_frm_pdev = dp_get_pdev_id_frm_pdev,
6965 	.txrx_set_nac = dp_set_nac,
6966 	.txrx_get_tx_pending = dp_get_tx_pending,
6967 	.txrx_set_pdev_tx_capture = dp_config_debug_sniffer,
6968 	.txrx_get_peer_mac_from_peer_id = dp_get_peer_mac_from_peer_id,
6969 	.display_stats = dp_txrx_dump_stats,
6970 	.txrx_soc_set_nss_cfg = dp_soc_set_nss_cfg_wifi3,
6971 	.txrx_soc_get_nss_cfg = dp_soc_get_nss_cfg_wifi3,
6972 #ifdef DP_INTR_POLL_BASED
6973 	.txrx_intr_attach = dp_soc_interrupt_attach_wrapper,
6974 #else
6975 	.txrx_intr_attach = dp_soc_interrupt_attach,
6976 #endif
6977 	.txrx_intr_detach = dp_soc_interrupt_detach,
6978 	.set_pn_check = dp_set_pn_check_wifi3,
6979 	.update_config_parameters = dp_update_config_parameters,
6980 	/* TODO: Add other functions */
6981 	.txrx_data_tx_cb_set = dp_txrx_data_tx_cb_set,
6982 	.get_dp_txrx_handle = dp_pdev_get_dp_txrx_handle,
6983 	.set_dp_txrx_handle = dp_pdev_set_dp_txrx_handle,
6984 	.get_soc_dp_txrx_handle = dp_soc_get_dp_txrx_handle,
6985 	.set_soc_dp_txrx_handle = dp_soc_set_dp_txrx_handle,
6986 	.tx_send = dp_tx_send,
6987 	.txrx_peer_reset_ast = dp_wds_reset_ast_wifi3,
6988 	.txrx_peer_reset_ast_table = dp_wds_reset_ast_table_wifi3,
6989 	.txrx_peer_flush_ast_table = dp_wds_flush_ast_table_wifi3,
6990 	.txrx_peer_map_attach = dp_peer_map_attach_wifi3,
6991 };
6992 
6993 static struct cdp_ctrl_ops dp_ops_ctrl = {
6994 	.txrx_peer_authorize = dp_peer_authorize,
6995 #ifdef QCA_SUPPORT_SON
6996 	.txrx_set_inact_params = dp_set_inact_params,
6997 	.txrx_start_inact_timer = dp_start_inact_timer,
6998 	.txrx_set_overload = dp_set_overload,
6999 	.txrx_peer_is_inact = dp_peer_is_inact,
7000 	.txrx_mark_peer_inact = dp_mark_peer_inact,
7001 #endif
7002 	.txrx_set_vdev_rx_decap_type = dp_set_vdev_rx_decap_type,
7003 	.txrx_set_tx_encap_type = dp_set_vdev_tx_encap_type,
7004 #ifdef MESH_MODE_SUPPORT
7005 	.txrx_set_mesh_mode  = dp_peer_set_mesh_mode,
7006 	.txrx_set_mesh_rx_filter = dp_peer_set_mesh_rx_filter,
7007 #endif
7008 	.txrx_set_vdev_param = dp_set_vdev_param,
7009 	.txrx_peer_set_nawds = dp_peer_set_nawds,
7010 	.txrx_set_pdev_reo_dest = dp_set_pdev_reo_dest,
7011 	.txrx_get_pdev_reo_dest = dp_get_pdev_reo_dest,
7012 	.txrx_set_filter_neighbour_peers = dp_set_filter_neighbour_peers,
7013 	.txrx_update_filter_neighbour_peers =
7014 		dp_update_filter_neighbour_peers,
7015 	.txrx_get_sec_type = dp_get_sec_type,
7016 	/* TODO: Add other functions */
7017 	.txrx_wdi_event_sub = dp_wdi_event_sub,
7018 	.txrx_wdi_event_unsub = dp_wdi_event_unsub,
7019 #ifdef WDI_EVENT_ENABLE
7020 	.txrx_get_pldev = dp_get_pldev,
7021 #endif
7022 	.txrx_set_pdev_param = dp_set_pdev_param,
7023 #ifdef ATH_SUPPORT_NAC_RSSI
7024 	.txrx_vdev_config_for_nac_rssi = dp_config_for_nac_rssi,
7025 #endif
7026 };
7027 
7028 static struct cdp_me_ops dp_ops_me = {
7029 #ifdef ATH_SUPPORT_IQUE
7030 	.tx_me_alloc_descriptor = dp_tx_me_alloc_descriptor,
7031 	.tx_me_free_descriptor = dp_tx_me_free_descriptor,
7032 	.tx_me_convert_ucast = dp_tx_me_send_convert_ucast,
7033 #endif
7034 };
7035 
7036 static struct cdp_mon_ops dp_ops_mon = {
7037 	.txrx_monitor_set_filter_ucast_data = NULL,
7038 	.txrx_monitor_set_filter_mcast_data = NULL,
7039 	.txrx_monitor_set_filter_non_data = NULL,
7040 	.txrx_monitor_get_filter_ucast_data = dp_vdev_get_filter_ucast_data,
7041 	.txrx_monitor_get_filter_mcast_data = dp_vdev_get_filter_mcast_data,
7042 	.txrx_monitor_get_filter_non_data = dp_vdev_get_filter_non_data,
7043 	.txrx_reset_monitor_mode = dp_reset_monitor_mode,
7044 	/* Added support for HK advance filter */
7045 	.txrx_set_advance_monitor_filter = dp_pdev_set_advance_monitor_filter,
7046 };
7047 
7048 static struct cdp_host_stats_ops dp_ops_host_stats = {
7049 	.txrx_per_peer_stats = dp_get_host_peer_stats,
7050 	.get_fw_peer_stats = dp_get_fw_peer_stats,
7051 	.get_htt_stats = dp_get_htt_stats,
7052 	.txrx_enable_enhanced_stats = dp_enable_enhanced_stats,
7053 	.txrx_disable_enhanced_stats = dp_disable_enhanced_stats,
7054 	.txrx_stats_publish = dp_txrx_stats_publish,
7055 	/* TODO */
7056 };
7057 
7058 static struct cdp_raw_ops dp_ops_raw = {
7059 	/* TODO */
7060 };
7061 
7062 #ifdef CONFIG_WIN
7063 static struct cdp_pflow_ops dp_ops_pflow = {
7064 	/* TODO */
7065 };
7066 #endif /* CONFIG_WIN */
7067 
7068 #ifdef FEATURE_RUNTIME_PM
7069 /**
7070  * dp_runtime_suspend() - ensure DP is ready to runtime suspend
7071  * @opaque_pdev: DP pdev context
7072  *
7073  * DP is ready to runtime suspend if there are no pending TX packets.
7074  *
7075  * Return: QDF_STATUS
7076  */
7077 static QDF_STATUS dp_runtime_suspend(struct cdp_pdev *opaque_pdev)
7078 {
7079 	struct dp_pdev *pdev = (struct dp_pdev *)opaque_pdev;
7080 	struct dp_soc *soc = pdev->soc;
7081 
7082 	/* Call DP TX flow control API to check if there are any
7083 	 * pending packets */
7084 
7085 	if (soc->intr_mode == DP_INTR_POLL)
7086 		qdf_timer_stop(&soc->int_timer);
7087 
7088 	return QDF_STATUS_SUCCESS;
7089 }
7090 
7091 /**
7092  * dp_runtime_resume() - ensure DP is ready to runtime resume
7093  * @opaque_pdev: DP pdev context
7094  *
7095  * Resume DP for runtime PM.
7096  *
7097  * Return: QDF_STATUS
7098  */
7099 static QDF_STATUS dp_runtime_resume(struct cdp_pdev *opaque_pdev)
7100 {
7101 	struct dp_pdev *pdev = (struct dp_pdev *)opaque_pdev;
7102 	struct dp_soc *soc = pdev->soc;
7103 	void *hal_srng;
7104 	int i;
7105 
7106 	if (soc->intr_mode == DP_INTR_POLL)
7107 		qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
7108 
7109 	for (i = 0; i < MAX_TCL_DATA_RINGS; i++) {
7110 		hal_srng = soc->tcl_data_ring[i].hal_srng;
7111 		if (hal_srng) {
7112 			/* We actually only need to acquire the lock */
7113 			hal_srng_access_start(soc->hal_soc, hal_srng);
7114 			/* Update SRC ring head pointer for HW to send
7115 			 * all pending packets */
7116 			hal_srng_access_end(soc->hal_soc, hal_srng);
7117 		}
7118 	}
7119 
7120 	return QDF_STATUS_SUCCESS;
7121 }
7122 #endif /* FEATURE_RUNTIME_PM */
7123 
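/**
 * dp_bus_suspend() - prepare DP for bus suspend
 * @opaque_pdev: DP pdev context
 *
 * Stop the interrupt poll timer when running in polled mode so that no
 * ring processing is scheduled while the bus is suspended.
 *
 * Return: QDF_STATUS
 */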
7124 static QDF_STATUS dp_bus_suspend(struct cdp_pdev *opaque_pdev)
7125 {
7126 	struct dp_pdev *pdev = (struct dp_pdev *)opaque_pdev;
7127 	struct dp_soc *soc = pdev->soc;
7128 
7129 	if (soc->intr_mode == DP_INTR_POLL)
7130 		qdf_timer_stop(&soc->int_timer);
7131 
7132 	return QDF_STATUS_SUCCESS;
7133 }
7134 
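/**
 * dp_bus_resume() - resume DP after bus resume
 * @opaque_pdev: DP pdev context
 *
 * Restart the interrupt poll timer when running in polled mode.
 *
 * Return: QDF_STATUS
 */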
7135 static QDF_STATUS dp_bus_resume(struct cdp_pdev *opaque_pdev)
7136 {
7137 	struct dp_pdev *pdev = (struct dp_pdev *)opaque_pdev;
7138 	struct dp_soc *soc = pdev->soc;
7139 
7140 	if (soc->intr_mode == DP_INTR_POLL)
7141 		qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
7142 
7143 	return QDF_STATUS_SUCCESS;
7144 }
7145 
7146 #ifndef CONFIG_WIN
7147 static struct cdp_misc_ops dp_ops_misc = {
7148 	.tx_non_std = dp_tx_non_std,
7149 	.get_opmode = dp_get_opmode,
7150 #ifdef FEATURE_RUNTIME_PM
7151 	.runtime_suspend = dp_runtime_suspend,
7152 	.runtime_resume = dp_runtime_resume,
7153 #endif /* FEATURE_RUNTIME_PM */
7154 	.pkt_log_init = dp_pkt_log_init,
7155 	.pkt_log_con_service = dp_pkt_log_con_service,
7156 };
7157 
7158 static struct cdp_flowctl_ops dp_ops_flowctl = {
7159 	/* WIFI 3.0 DP: implement as required */
7160 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
7161 	.flow_pool_map_handler = dp_tx_flow_pool_map,
7162 	.flow_pool_unmap_handler = dp_tx_flow_pool_unmap,
7163 	.register_pause_cb = dp_txrx_register_pause_cb,
7164 	.dump_flow_pool_info = dp_tx_dump_flow_pool_info,
7165 #endif /* QCA_LL_TX_FLOW_CONTROL_V2 */
7166 };
7167 
7168 static struct cdp_lflowctl_ops dp_ops_l_flowctl = {
7169 	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
7170 };
7171 
7172 #ifdef IPA_OFFLOAD
7173 static struct cdp_ipa_ops dp_ops_ipa = {
7174 	.ipa_get_resource = dp_ipa_get_resource,
7175 	.ipa_set_doorbell_paddr = dp_ipa_set_doorbell_paddr,
7176 	.ipa_op_response = dp_ipa_op_response,
7177 	.ipa_register_op_cb = dp_ipa_register_op_cb,
7178 	.ipa_get_stat = dp_ipa_get_stat,
7179 	.ipa_tx_data_frame = dp_tx_send_ipa_data_frame,
7180 	.ipa_enable_autonomy = dp_ipa_enable_autonomy,
7181 	.ipa_disable_autonomy = dp_ipa_disable_autonomy,
7182 	.ipa_setup = dp_ipa_setup,
7183 	.ipa_cleanup = dp_ipa_cleanup,
7184 	.ipa_setup_iface = dp_ipa_setup_iface,
7185 	.ipa_cleanup_iface = dp_ipa_cleanup_iface,
7186 	.ipa_enable_pipes = dp_ipa_enable_pipes,
7187 	.ipa_disable_pipes = dp_ipa_disable_pipes,
7188 	.ipa_set_perf_level = dp_ipa_set_perf_level
7189 };
7190 #endif
7191 
7192 static struct cdp_bus_ops dp_ops_bus = {
7193 	.bus_suspend = dp_bus_suspend,
7194 	.bus_resume = dp_bus_resume
7195 };
7196 
7197 static struct cdp_ocb_ops dp_ops_ocb = {
7198 	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
7199 };
7200 
7201 
7202 static struct cdp_throttle_ops dp_ops_throttle = {
7203 	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
7204 };
7205 
7206 static struct cdp_mob_stats_ops dp_ops_mob_stats = {
7207 	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
7208 };
7209 
7210 static struct cdp_cfg_ops dp_ops_cfg = {
7211 	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
7212 };
7213 
7214 /**
7215  * dp_wrapper_peer_get_ref_by_addr() - wrapper to get a reference to a peer
7216  * @dev: physical device instance
7217  * @peer_mac_addr: peer mac address
7218  * @local_id: local id for the peer
7219  * @debug_id: to track enum peer access
7220  *
7221  * Return: peer instance pointer
7222  */
7223 static inline void *
7224 dp_wrapper_peer_get_ref_by_addr(struct cdp_pdev *dev, u8 *peer_mac_addr,
7225 				u8 *local_id,
7226 				enum peer_debug_id_type debug_id)
7227 {
7228 	/*
7229 	 * Currently this function does not implement the "get ref"
7230 	 * functionality and is mapped to dp_find_peer_by_addr which does not
7231 	 * increment the peer ref count. So the peer state is uncertain after
7232 	 * calling this API. The functionality needs to be implemented.
7233 	 * Accordingly the corresponding release_ref function is NULL.
7234 	 */
7235 	return dp_find_peer_by_addr(dev, peer_mac_addr, local_id);
7236 }
7237 
7238 static struct cdp_peer_ops dp_ops_peer = {
7239 	.register_peer = dp_register_peer,
7240 	.clear_peer = dp_clear_peer,
7241 	.find_peer_by_addr = dp_find_peer_by_addr,
7242 	.find_peer_by_addr_and_vdev = dp_find_peer_by_addr_and_vdev,
7243 	.peer_get_ref_by_addr = dp_wrapper_peer_get_ref_by_addr,
7244 	.peer_release_ref = NULL,
7245 	.local_peer_id = dp_local_peer_id,
7246 	.peer_find_by_local_id = dp_peer_find_by_local_id,
7247 	.peer_state_update = dp_peer_state_update,
7248 	.get_vdevid = dp_get_vdevid,
7249 	.get_vdev_by_sta_id = dp_get_vdev_by_sta_id,
7250 	.peer_get_peer_mac_addr = dp_peer_get_peer_mac_addr,
7251 	.get_vdev_for_peer = dp_get_vdev_for_peer,
7252 	.get_peer_state = dp_get_peer_state,
7253 	.last_assoc_received = dp_get_last_assoc_received,
7254 	.last_disassoc_received = dp_get_last_disassoc_received,
7255 	.last_deauth_received = dp_get_last_deauth_received,
7256 };
7257 #endif
7258 
7259 static struct cdp_ops dp_txrx_ops = {
7260 	.cmn_drv_ops = &dp_ops_cmn,
7261 	.ctrl_ops = &dp_ops_ctrl,
7262 	.me_ops = &dp_ops_me,
7263 	.mon_ops = &dp_ops_mon,
7264 	.host_stats_ops = &dp_ops_host_stats,
7265 	.wds_ops = &dp_ops_wds,
7266 	.raw_ops = &dp_ops_raw,
7267 #ifdef CONFIG_WIN
7268 	.pflow_ops = &dp_ops_pflow,
7269 #endif /* CONFIG_WIN */
7270 #ifndef CONFIG_WIN
7271 	.misc_ops = &dp_ops_misc,
7272 	.cfg_ops = &dp_ops_cfg,
7273 	.flowctl_ops = &dp_ops_flowctl,
7274 	.l_flowctl_ops = &dp_ops_l_flowctl,
7275 #ifdef IPA_OFFLOAD
7276 	.ipa_ops = &dp_ops_ipa,
7277 #endif
7278 	.bus_ops = &dp_ops_bus,
7279 	.ocb_ops = &dp_ops_ocb,
7280 	.peer_ops = &dp_ops_peer,
7281 	.throttle_ops = &dp_ops_throttle,
7282 	.mob_stats_ops = &dp_ops_mob_stats,
7283 #endif
7284 };
7285 
7286 /*
7287  * dp_soc_set_txrx_ring_map() - fill the TX (TCL) ring map for each
7288  * interrupt context with the default CPU ring map
7289  * @soc: DP SOC handle
7290  *
7291  * Return: void
7292  */
7292 static void dp_soc_set_txrx_ring_map(struct dp_soc *soc)
7293 {
7294 	uint32_t i;
7295 	for (i = 0; i < WLAN_CFG_INT_NUM_CONTEXTS; i++) {
7296 		soc->tx_ring_map[i] = dp_cpu_ring_map[DP_DEFAULT_MAP][i];
7297 	}
7298 }
7299 
7300 /*
7301  * dp_soc_attach_wifi3() - Attach txrx SOC
7302  * @ctrl_psoc:	Opaque SOC handle from control plane
7303  * @hif_handle:	Opaque HIF handle
7304  * @htc_handle:	Opaque HTC handle
7305  * @qdf_osdev:	QDF device
7306  *
7307  * Return: DP SOC handle on success, NULL on failure
7308  */
7309 /*
7310  * Local prototype added to temporarily address warning caused by
7311  * -Wmissing-prototypes. A more correct solution, namely to expose
7312  * a prototype in an appropriate header file, will come later.
7313  */
7314 void *dp_soc_attach_wifi3(void *ctrl_psoc, void *hif_handle,
7315 	HTC_HANDLE htc_handle, qdf_device_t qdf_osdev,
7316 	struct ol_if_ops *ol_ops);
7317 void *dp_soc_attach_wifi3(void *ctrl_psoc, void *hif_handle,
7318 	HTC_HANDLE htc_handle, qdf_device_t qdf_osdev,
7319 	struct ol_if_ops *ol_ops)
7320 {
7321 	struct dp_soc *soc = qdf_mem_malloc(sizeof(*soc));
7322 
7323 	if (!soc) {
7324 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
7325 			FL("DP SOC memory allocation failed"));
7326 		goto fail0;
7327 	}
7328 
7329 	soc->cdp_soc.ops = &dp_txrx_ops;
7330 	soc->cdp_soc.ol_ops = ol_ops;
7331 	soc->ctrl_psoc = ctrl_psoc;
7332 	soc->osdev = qdf_osdev;
7333 	soc->hif_handle = hif_handle;
7334 
7335 	soc->hal_soc = hif_get_hal_handle(hif_handle);
7336 	soc->htt_handle = htt_soc_attach(soc, ctrl_psoc, htc_handle,
7337 		soc->hal_soc, qdf_osdev);
7338 	if (!soc->htt_handle) {
7339 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
7340 			FL("HTT attach failed"));
7341 		goto fail1;
7342 	}
7343 
7344 	soc->wlan_cfg_ctx = wlan_cfg_soc_attach();
7345 	if (!soc->wlan_cfg_ctx) {
7346 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
7347 				FL("wlan_cfg_soc_attach failed"));
7348 		goto fail2;
7349 	}
7350 
7351 	wlan_cfg_set_rx_hash(soc->wlan_cfg_ctx, rx_hash);
7352 	soc->cce_disable = false;
7353 
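	/* Pick up control-plane overrides for max peer id and CCE disable,
	 * when the get_dp_cfg_param callback is provided
	 */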
7354 	if (soc->cdp_soc.ol_ops->get_dp_cfg_param) {
7355 		int ret = soc->cdp_soc.ol_ops->get_dp_cfg_param(soc->ctrl_psoc,
7356 				CDP_CFG_MAX_PEER_ID);
7357 
7358 		if (ret != -EINVAL) {
7359 			wlan_cfg_set_max_peer_id(soc->wlan_cfg_ctx, ret);
7360 		}
7361 
7362 		ret = soc->cdp_soc.ol_ops->get_dp_cfg_param(soc->ctrl_psoc,
7363 				CDP_CFG_CCE_DISABLE);
7364 		if (ret == 1)
7365 			soc->cce_disable = true;
7366 	}
7367 
7368 	qdf_spinlock_create(&soc->peer_ref_mutex);
7369 
7370 	qdf_spinlock_create(&soc->reo_desc_freelist_lock);
7371 	qdf_list_create(&soc->reo_desc_freelist, REO_DESC_FREELIST_SIZE);
7372 
7373 	/* fill the tx/rx cpu ring map */
7374 	dp_soc_set_txrx_ring_map(soc);
7375 
7376 	qdf_spinlock_create(&soc->htt_stats.lock);
7377 	/* initialize work queue for stats processing */
7378 	qdf_create_work(0, &soc->htt_stats.work, htt_t2h_stats_handler, soc);
7379 
7380 	/* Initialize inactivity timer for wifison */
7381 	dp_init_inact_timer(soc);
7382 
7383 	return (void *)soc;
7384 
7385 fail2:
7386 	htt_soc_detach(soc->htt_handle);
7387 fail1:
7388 	qdf_mem_free(soc);
7389 fail0:
7390 	return NULL;
7391 }
7392 
7393 /*
7394  * dp_get_pdev_for_mac_id() -  Return pdev for mac_id
7395  *
7396  * @soc: handle to DP soc
7397  * @mac_id: MAC id
7398  *
7399  * Return: pdev corresponding to the MAC id
7400  */
7401 void *dp_get_pdev_for_mac_id(struct dp_soc *soc, uint32_t mac_id)
7402 {
7403 	if (wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx))
7404 		return soc->pdev_list[mac_id];
7405 
7406 	/* Typically for MCL, as there is only one PDEV */
7407 	return soc->pdev_list[0];
7408 }
7409 
7410 /*
7411  * dp_is_hw_dbs_enable() - Check if hardware DBS (2x2) is supported
7412  * @soc:		DP SoC context
7413  * @max_mac_rings:	Pointer to number of MAC rings, trimmed to 1 if no DBS
7414  *
7415  * Return: None
7416  */
7417 static
7418 void dp_is_hw_dbs_enable(struct dp_soc *soc,
7419 				int *max_mac_rings)
7420 {
7421 	bool dbs_enable = false;
7422 	if (soc->cdp_soc.ol_ops->is_hw_dbs_2x2_capable)
7423 		dbs_enable = soc->cdp_soc.ol_ops->
7424 		is_hw_dbs_2x2_capable(soc->ctrl_psoc);
7425 
7426 	*max_mac_rings = dbs_enable ? *max_mac_rings : 1;
7427 }
7428 
7429 /*
7430  * dp_set_pktlog_wifi3() - enable/disable packet log for the given WDI event
7431  * @pdev: Datapath PDEV handle
7432  * @event: which event's notifications are being subscribed to
7433  * @enable: WDI event subscribe or not (true or false)
7434  *
7435  * Return: 0 on success
7436  */
7437 #ifdef WDI_EVENT_ENABLE
7438 int dp_set_pktlog_wifi3(struct dp_pdev *pdev, uint32_t event,
7439 	bool enable)
7440 {
7441 	struct dp_soc *soc = pdev->soc;
7442 	struct htt_rx_ring_tlv_filter htt_tlv_filter = {0};
7443 	int max_mac_rings = wlan_cfg_get_num_mac_rings
7444 					(pdev->wlan_cfg_ctx);
7445 	uint8_t mac_id = 0;
7446 
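	/* dp_is_hw_dbs_enable() trims max_mac_rings to 1 when DBS is not
	 * supported by the target
	 */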
7447 	dp_is_hw_dbs_enable(soc, &max_mac_rings);
7448 
7449 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
7450 			FL("Max_mac_rings %d\n"),
7451 			max_mac_rings);
7452 
7453 	if (enable) {
7454 		switch (event) {
7455 		case WDI_EVENT_RX_DESC:
7456 			if (pdev->monitor_vdev) {
7457 				/* Nothing needs to be done if monitor mode is
7458 				 * enabled
7459 				 */
7460 				return 0;
7461 			}
7462 			if (pdev->rx_pktlog_mode != DP_RX_PKTLOG_FULL) {
7463 				pdev->rx_pktlog_mode = DP_RX_PKTLOG_FULL;
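				/* Full pktlog: subscribe to all RX status
				 * TLVs, including the packet header, on the
				 * monitor status ring
				 */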
7464 				htt_tlv_filter.mpdu_start = 1;
7465 				htt_tlv_filter.msdu_start = 1;
7466 				htt_tlv_filter.msdu_end = 1;
7467 				htt_tlv_filter.mpdu_end = 1;
7468 				htt_tlv_filter.packet_header = 1;
7469 				htt_tlv_filter.attention = 1;
7470 				htt_tlv_filter.ppdu_start = 1;
7471 				htt_tlv_filter.ppdu_end = 1;
7472 				htt_tlv_filter.ppdu_end_user_stats = 1;
7473 				htt_tlv_filter.ppdu_end_user_stats_ext = 1;
7474 				htt_tlv_filter.ppdu_end_status_done = 1;
7475 				htt_tlv_filter.enable_fp = 1;
7476 				htt_tlv_filter.fp_mgmt_filter = FILTER_MGMT_ALL;
7477 				htt_tlv_filter.fp_ctrl_filter = FILTER_CTRL_ALL;
7478 				htt_tlv_filter.fp_data_filter = FILTER_DATA_ALL;
7479 				htt_tlv_filter.mo_mgmt_filter = FILTER_MGMT_ALL;
7480 				htt_tlv_filter.mo_ctrl_filter = FILTER_CTRL_ALL;
7481 				htt_tlv_filter.mo_data_filter = FILTER_DATA_ALL;
7482 
7483 				for (mac_id = 0; mac_id < max_mac_rings;
7484 								mac_id++) {
7485 					int mac_for_pdev =
7486 						dp_get_mac_id_for_pdev(mac_id,
7487 								pdev->pdev_id);
7488 
7489 					htt_h2t_rx_ring_cfg(soc->htt_handle,
7490 					 mac_for_pdev,
7491 					 pdev->rxdma_mon_status_ring[mac_id]
7492 					 .hal_srng,
7493 					 RXDMA_MONITOR_STATUS,
7494 					 RX_BUFFER_SIZE,
7495 					 &htt_tlv_filter);
7496 
7497 				}
7498 
7499 				if (soc->reap_timer_init)
7500 					qdf_timer_mod(&soc->mon_reap_timer,
7501 					DP_INTR_POLL_TIMER_MS);
7502 			}
7503 			break;
7504 
7505 		case WDI_EVENT_LITE_RX:
7506 			if (pdev->monitor_vdev) {
7507 				/* Nothing needs to be done if monitor mode is
7508 				 * enabled
7509 				 */
7510 				return 0;
7511 			}
7512 
7513 			if (pdev->rx_pktlog_mode != DP_RX_PKTLOG_LITE) {
7514 				pdev->rx_pktlog_mode = DP_RX_PKTLOG_LITE;
7515 
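				/* Lite pktlog: subscribe to PPDU status TLVs
				 * and mpdu_start only, without packet headers
				 */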
7516 				htt_tlv_filter.ppdu_start = 1;
7517 				htt_tlv_filter.ppdu_end = 1;
7518 				htt_tlv_filter.ppdu_end_user_stats = 1;
7519 				htt_tlv_filter.ppdu_end_user_stats_ext = 1;
7520 				htt_tlv_filter.ppdu_end_status_done = 1;
7521 				htt_tlv_filter.mpdu_start = 1;
7522 				htt_tlv_filter.enable_fp = 1;
7523 				htt_tlv_filter.fp_mgmt_filter = FILTER_MGMT_ALL;
7524 				htt_tlv_filter.fp_ctrl_filter = FILTER_CTRL_ALL;
7525 				htt_tlv_filter.fp_data_filter = FILTER_DATA_ALL;
7526 				htt_tlv_filter.mo_mgmt_filter = FILTER_MGMT_ALL;
7527 				htt_tlv_filter.mo_ctrl_filter = FILTER_CTRL_ALL;
7528 				htt_tlv_filter.mo_data_filter = FILTER_DATA_ALL;
7529 
7530 				for (mac_id = 0; mac_id < max_mac_rings;
7531 								mac_id++) {
7532 					int mac_for_pdev =
7533 						dp_get_mac_id_for_pdev(mac_id,
7534 								pdev->pdev_id);
7535 
7536 					htt_h2t_rx_ring_cfg(soc->htt_handle,
7537 					mac_for_pdev,
7538 					pdev->rxdma_mon_status_ring[mac_id]
7539 					.hal_srng,
7540 					RXDMA_MONITOR_STATUS,
7541 					RX_BUFFER_SIZE_PKTLOG_LITE,
7542 					&htt_tlv_filter);
7543 				}
7544 
7545 				if (soc->reap_timer_init)
7546 					qdf_timer_mod(&soc->mon_reap_timer,
7547 					DP_INTR_POLL_TIMER_MS);
7548 			}
7549 			break;
7550 
7551 		case WDI_EVENT_LITE_T2H:
7552 			if (pdev->monitor_vdev) {
7553 				/* Nothing needs to be done if monitor mode is
7554 				 * enabled
7555 				 */
7556 				return 0;
7557 			}
7558 
7559 			for (mac_id = 0; mac_id < max_mac_rings; mac_id++) {
7560 				int mac_for_pdev = dp_get_mac_id_for_pdev(
7561 							mac_id,	pdev->pdev_id);
7562 
7563 				pdev->pktlog_ppdu_stats = true;
7564 				dp_h2t_cfg_stats_msg_send(pdev,
7565 					DP_PPDU_TXLITE_STATS_BITMASK_CFG,
7566 					mac_for_pdev);
7567 			}
7568 			break;
7569 
7570 		default:
7571 			/* Nothing needs to be done for other pktlog types */
7572 			break;
7573 		}
7574 	} else {
7575 		switch (event) {
7576 		case WDI_EVENT_RX_DESC:
7577 		case WDI_EVENT_LITE_RX:
7578 			if (pdev->monitor_vdev) {
7579 				/* Nothing needs to be done if monitor mode is
7580 				 * enabled
7581 				 */
7582 				return 0;
7583 			}
7584 			if (pdev->rx_pktlog_mode != DP_RX_PKTLOG_DISABLED) {
7585 				pdev->rx_pktlog_mode = DP_RX_PKTLOG_DISABLED;
7586 
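				/* htt_tlv_filter was zero-initialized above,
				 * so this ring config clears all previous TLV
				 * subscriptions
				 */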
7587 				for (mac_id = 0; mac_id < max_mac_rings;
7588 								mac_id++) {
7589 					int mac_for_pdev =
7590 						dp_get_mac_id_for_pdev(mac_id,
7591 								pdev->pdev_id);
7592 
7593 					htt_h2t_rx_ring_cfg(soc->htt_handle,
7594 					  mac_for_pdev,
7595 					  pdev->rxdma_mon_status_ring[mac_id]
7596 					  .hal_srng,
7597 					  RXDMA_MONITOR_STATUS,
7598 					  RX_BUFFER_SIZE,
7599 					  &htt_tlv_filter);
7600 				}
7601 
7602 				if (soc->reap_timer_init)
7603 					qdf_timer_stop(&soc->mon_reap_timer);
7604 			}
7605 			break;
7606 		case WDI_EVENT_LITE_T2H:
7607 			if (pdev->monitor_vdev) {
7608 				/* Nothing needs to be done if monitor mode is
7609 				 * enabled
7610 				 */
7611 				return 0;
7612 			}
7613 			/* Pass value 0 to disable HTT_H2T_MSG_TYPE_PPDU_STATS_CFG
7614 			 * in FW. Once the proper macros are defined in the htt
7615 			 * header file, they will be used here instead.
7616 			 */
7617 			for (mac_id = 0; mac_id < max_mac_rings; mac_id++) {
7618 				int mac_for_pdev =
7619 						dp_get_mac_id_for_pdev(mac_id,
7620 								pdev->pdev_id);
7621 
7622 				pdev->pktlog_ppdu_stats = false;
7623 				if (!pdev->enhanced_stats_en && !pdev->tx_sniffer_enable && !pdev->mcopy_mode) {
7624 					dp_h2t_cfg_stats_msg_send(pdev, 0,
7625 								mac_for_pdev);
7626 				} else if (pdev->tx_sniffer_enable || pdev->mcopy_mode) {
7627 					dp_h2t_cfg_stats_msg_send(pdev, DP_PPDU_STATS_CFG_SNIFFER,
7628 								mac_for_pdev);
7629 				} else if (pdev->enhanced_stats_en) {
7630 					dp_h2t_cfg_stats_msg_send(pdev, DP_PPDU_STATS_CFG_ENH_STATS,
7631 								mac_for_pdev);
7632 				}
7633 			}
7634 
7635 			break;
7636 		default:
7637 			/* Nothing needs to be done for other pktlog types */
7638 			break;
7639 		}
7640 	}
7641 	return 0;
7642 }
7643 #endif
7644 
7645 #ifdef CONFIG_MCL
7646 /*
7647  * dp_service_mon_rings() - timer handler to reap the monitor rings;
7648  * required since ppdu end interrupts are not received
7649  * @arg: SoC Handle
7650  *
7651  * Return: None
7652  */
7654 static void dp_service_mon_rings(void *arg)
7655 {
7656 	struct dp_soc *soc = (struct dp_soc *) arg;
7657 	int ring = 0, work_done, mac_id;
7658 	struct dp_pdev *pdev = NULL;
7659 
7660 	for (ring = 0; ring < MAX_PDEV_CNT; ring++) {
7661 		pdev = soc->pdev_list[ring];
7662 		if (pdev == NULL)
7663 			continue;
7664 		for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
7665 			int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id,
7666 								pdev->pdev_id);
7667 			work_done = dp_mon_process(soc, mac_for_pdev,
7668 							QCA_NAPI_BUDGET);
7669 
7670 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
7671 				FL("Reaped %d descs from Monitor rings"),
7672 				work_done);
7673 		}
7674 	}
7675 
7676 	qdf_timer_mod(&soc->mon_reap_timer, DP_INTR_POLL_TIMER_MS);
7677 }
7678 
7679 #ifndef REMOVE_PKT_LOG
7680 /**
7681  * dp_pkt_log_init() - API to initialize packet log
7682  * @ppdev: physical device handle
7683  * @scn: HIF context
7684  *
7685  * Return: none
7686  */
7687 void dp_pkt_log_init(struct cdp_pdev *ppdev, void *scn)
7688 {
7689 	struct dp_pdev *handle = (struct dp_pdev *)ppdev;
7690 
7691 	if (handle->pkt_log_init) {
7692 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
7693 			 "%s: Packet log already initialized", __func__);
7694 		return;
7695 	}
7696 
7697 	pktlog_sethandle(&handle->pl_dev, scn);
7698 	pktlog_set_callback_regtype(PKTLOG_LITE_CALLBACK_REGISTRATION);
7699 
7700 	if (pktlogmod_init(scn)) {
7701 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
7702 			 "%s: pktlogmod_init failed", __func__);
7703 		handle->pkt_log_init = false;
7704 	} else {
7705 		handle->pkt_log_init = true;
7706 	}
7707 }
7708 
7709 /**
7710  * dp_pkt_log_con_service() - connect packet log service
7711  * @ppdev: physical device handle
7712  * @scn: device context
7713  *
7714  * Return: none
7715  */
7716 static void dp_pkt_log_con_service(struct cdp_pdev *ppdev, void *scn)
7717 {
7718 	struct dp_pdev *pdev = (struct dp_pdev *)ppdev;
7719 
7720 	dp_pkt_log_init((struct cdp_pdev *)pdev, scn);
7721 	pktlog_htc_attach();
7722 }
7723 
7724 /**
7725  * dp_pktlogmod_exit() - API to cleanup pktlog info
7726  * @handle: Pdev handle
7727  *
7728  * Return: none
7729  */
7730 static void dp_pktlogmod_exit(struct dp_pdev *handle)
7731 {
7732 	void *scn = (void *)handle->soc->hif_handle;
7733 
7734 	if (!scn) {
7735 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
7736 			 "%s: Invalid hif(scn) handle", __func__);
7737 		return;
7738 	}
7739 
7740 	pktlogmod_exit(scn);
7741 	handle->pkt_log_init = false;
7742 }
7743 #endif
7744 #else
7745 static void dp_pktlogmod_exit(struct dp_pdev *handle) { }
7746 #endif
7747 
7748