xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/dp_main.c (revision 92d87f51612f6c3b2285266215edee8911647c2f)
1 /*
2  * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include <qdf_types.h>
20 #include <qdf_lock.h>
21 #include <qdf_net_types.h>
22 #include <qdf_lro.h>
23 #include <qdf_module.h>
24 #include <hal_api.h>
25 #include <hif.h>
26 #include <htt.h>
27 #include <wdi_event.h>
28 #include <queue.h>
29 #include "dp_htt.h"
30 #include "dp_types.h"
31 #include "dp_internal.h"
32 #include "dp_tx.h"
33 #include "dp_tx_desc.h"
34 #include "dp_rx.h"
35 #include <cdp_txrx_handle.h>
36 #include <wlan_cfg.h>
37 #include "cdp_txrx_cmn_struct.h"
38 #include "cdp_txrx_stats_struct.h"
39 #include <qdf_util.h>
40 #include "dp_peer.h"
41 #include "dp_rx_mon.h"
42 #include "htt_stats.h"
43 #include "qdf_mem.h"   /* qdf_mem_malloc,free */
44 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
45 #include "cdp_txrx_flow_ctrl_v2.h"
46 #else
47 static inline void
48 cdp_dump_flow_pool_info(struct cdp_soc_t *soc)
49 {
50 	return;
51 }
52 #endif
53 #include "dp_ipa.h"
54 
55 #ifdef CONFIG_MCL
56 static void dp_service_mon_rings(void *arg);
57 #ifndef REMOVE_PKT_LOG
58 #include <pktlog_ac_api.h>
59 #include <pktlog_ac.h>
60 static void dp_pkt_log_con_service(struct cdp_pdev *ppdev, void *scn);
61 #endif
62 #endif
63 static void dp_pktlogmod_exit(struct dp_pdev *handle);
64 static void *dp_peer_create_wifi3(struct cdp_vdev *vdev_handle,
65 					uint8_t *peer_mac_addr);
66 static void dp_peer_delete_wifi3(void *peer_handle, uint32_t bitmap);
67 
68 #define DP_INTR_POLL_TIMER_MS	10
69 #define DP_WDS_AGING_TIMER_DEFAULT_MS	120000
70 #define DP_MCS_LENGTH (6*MAX_MCS)
71 #define DP_NSS_LENGTH (6*SS_COUNT)
72 #define DP_RXDMA_ERR_LENGTH (6*HAL_RXDMA_ERR_MAX)
73 #define DP_REO_ERR_LENGTH (6*HAL_REO_ERR_MAX)
74 #define DP_MAX_MCS_STRING_LEN 30
75 #define DP_CURR_FW_STATS_AVAIL 19
76 #define DP_HTT_DBG_EXT_STATS_MAX 256
77 #define DP_MAX_SLEEP_TIME 100
78 
79 #ifdef IPA_OFFLOAD
80 /* Exclude IPA rings from the interrupt context */
81 #define TX_RING_MASK_VAL	0xb
82 #define RX_RING_MASK_VAL	0x7
83 #else
84 #define TX_RING_MASK_VAL	0xF
85 #define RX_RING_MASK_VAL	0xF
86 #endif
87 
88 bool rx_hash = 1;
89 qdf_declare_param(rx_hash, bool);
90 
91 #define STR_MAXLEN	64
92 
93 #define DP_PPDU_STATS_CFG_ALL 0xFFFF
94 
95 /* PPDU stats mask sent to FW to enable enhanced stats */
96 #define DP_PPDU_STATS_CFG_ENH_STATS 0xE67
97 /* PPDU stats mask sent to FW to support debug sniffer feature */
98 #define DP_PPDU_STATS_CFG_SNIFFER 0x2FFF
99 /**
100  * default_dscp_tid_map - Default DSCP-TID mapping
101  *
102  * DSCP        TID
103  * 000000      0
104  * 001000      1
105  * 010000      2
106  * 011000      3
107  * 100000      4
108  * 101000      5
109  * 110000      6
110  * 111000      7
111  */
112 static uint8_t default_dscp_tid_map[DSCP_TID_MAP_MAX] = {
113 	0, 0, 0, 0, 0, 0, 0, 0,
114 	1, 1, 1, 1, 1, 1, 1, 1,
115 	2, 2, 2, 2, 2, 2, 2, 2,
116 	3, 3, 3, 3, 3, 3, 3, 3,
117 	4, 4, 4, 4, 4, 4, 4, 4,
118 	5, 5, 5, 5, 5, 5, 5, 5,
119 	6, 6, 6, 6, 6, 6, 6, 6,
120 	7, 7, 7, 7, 7, 7, 7, 7,
121 };
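/*
 * Example (illustrative only): the map above is indexed directly by the
 * 6-bit DSCP value, so a frame marked EF (DSCP 46 = 0b101110) falls in the
 * 40-47 block and maps to TID 5:
 *
 *	uint8_t tid = default_dscp_tid_map[46];	-> tid is 5
 */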
122 
123 /*
124  * struct dp_rate_debug
125  *
126  * @mcs_type: print string for a given mcs
127  * @valid: valid mcs rate?
128  */
129 struct dp_rate_debug {
130 	char mcs_type[DP_MAX_MCS_STRING_LEN];
131 	uint8_t valid;
132 };
133 
134 #define MCS_VALID 1
135 #define MCS_INVALID 0
136 
137 static const struct dp_rate_debug dp_rate_string[DOT11_MAX][MAX_MCS] = {
138 
139 	{
140 		{"OFDM 48 Mbps", MCS_VALID},
141 		{"OFDM 24 Mbps", MCS_VALID},
142 		{"OFDM 12 Mbps", MCS_VALID},
143 		{"OFDM 6 Mbps ", MCS_VALID},
144 		{"OFDM 54 Mbps", MCS_VALID},
145 		{"OFDM 36 Mbps", MCS_VALID},
146 		{"OFDM 18 Mbps", MCS_VALID},
147 		{"OFDM 9 Mbps ", MCS_VALID},
148 		{"INVALID ", MCS_INVALID},
149 		{"INVALID ", MCS_INVALID},
150 		{"INVALID ", MCS_INVALID},
151 		{"INVALID ", MCS_INVALID},
152 		{"INVALID ", MCS_VALID},
153 	},
154 	{
155 		{"CCK 11 Mbps Long  ", MCS_VALID},
156 		{"CCK 5.5 Mbps Long ", MCS_VALID},
157 		{"CCK 2 Mbps Long   ", MCS_VALID},
158 		{"CCK 1 Mbps Long   ", MCS_VALID},
159 		{"CCK 11 Mbps Short ", MCS_VALID},
160 		{"CCK 5.5 Mbps Short", MCS_VALID},
161 		{"CCK 2 Mbps Short  ", MCS_VALID},
162 		{"INVALID ", MCS_INVALID},
163 		{"INVALID ", MCS_INVALID},
164 		{"INVALID ", MCS_INVALID},
165 		{"INVALID ", MCS_INVALID},
166 		{"INVALID ", MCS_INVALID},
167 		{"INVALID ", MCS_VALID},
168 	},
169 	{
170 		{"HT MCS 0 (BPSK 1/2)  ", MCS_VALID},
171 		{"HT MCS 1 (QPSK 1/2)  ", MCS_VALID},
172 		{"HT MCS 2 (QPSK 3/4)  ", MCS_VALID},
173 		{"HT MCS 3 (16-QAM 1/2)", MCS_VALID},
174 		{"HT MCS 4 (16-QAM 3/4)", MCS_VALID},
175 		{"HT MCS 5 (64-QAM 2/3)", MCS_VALID},
176 		{"HT MCS 6 (64-QAM 3/4)", MCS_VALID},
177 		{"HT MCS 7 (64-QAM 5/6)", MCS_VALID},
178 		{"INVALID ", MCS_INVALID},
179 		{"INVALID ", MCS_INVALID},
180 		{"INVALID ", MCS_INVALID},
181 		{"INVALID ", MCS_INVALID},
182 		{"INVALID ", MCS_VALID},
183 	},
184 	{
185 		{"VHT MCS 0 (BPSK 1/2)     ", MCS_VALID},
186 		{"VHT MCS 1 (QPSK 1/2)     ", MCS_VALID},
187 		{"VHT MCS 2 (QPSK 3/4)     ", MCS_VALID},
188 		{"VHT MCS 3 (16-QAM 1/2)   ", MCS_VALID},
189 		{"VHT MCS 4 (16-QAM 3/4)   ", MCS_VALID},
190 		{"VHT MCS 5 (64-QAM 2/3)   ", MCS_VALID},
191 		{"VHT MCS 6 (64-QAM 3/4)   ", MCS_VALID},
192 		{"VHT MCS 7 (64-QAM 5/6)   ", MCS_VALID},
193 		{"VHT MCS 8 (256-QAM 3/4)  ", MCS_VALID},
194 		{"VHT MCS 9 (256-QAM 5/6)  ", MCS_VALID},
195 		{"VHT MCS 10 (1024-QAM 3/4)", MCS_VALID},
196 		{"VHT MCS 11 (1024-QAM 5/6)", MCS_VALID},
197 		{"INVALID ", MCS_VALID},
198 	},
199 	{
200 		{"HE MCS 0 (BPSK 1/2)     ", MCS_VALID},
201 		{"HE MCS 1 (QPSK 1/2)     ", MCS_VALID},
202 		{"HE MCS 2 (QPSK 3/4)     ", MCS_VALID},
203 		{"HE MCS 3 (16-QAM 1/2)   ", MCS_VALID},
204 		{"HE MCS 4 (16-QAM 3/4)   ", MCS_VALID},
205 		{"HE MCS 5 (64-QAM 2/3)   ", MCS_VALID},
206 		{"HE MCS 6 (64-QAM 3/4)   ", MCS_VALID},
207 		{"HE MCS 7 (64-QAM 5/6)   ", MCS_VALID},
208 		{"HE MCS 8 (256-QAM 3/4)  ", MCS_VALID},
209 		{"HE MCS 9 (256-QAM 5/6)  ", MCS_VALID},
210 		{"HE MCS 10 (1024-QAM 3/4)", MCS_VALID},
211 		{"HE MCS 11 (1024-QAM 5/6)", MCS_VALID},
212 		{"INVALID ", MCS_VALID},
213 	}
214 };
215 
216 /**
217  * @brief Cpu ring map types
218  */
219 enum dp_cpu_ring_map_types {
220 	DP_DEFAULT_MAP,
221 	DP_NSS_FIRST_RADIO_OFFLOADED_MAP,
222 	DP_NSS_SECOND_RADIO_OFFLOADED_MAP,
223 	DP_NSS_ALL_RADIO_OFFLOADED_MAP,
224 	DP_CPU_RING_MAP_MAX
225 };
226 
227 /**
228  * @brief Cpu to tx ring map
229  */
230 static uint8_t dp_cpu_ring_map[DP_CPU_RING_MAP_MAX][WLAN_CFG_INT_NUM_CONTEXTS] = {
231 	{0x0, 0x1, 0x2, 0x0},
232 	{0x1, 0x2, 0x1, 0x2},
233 	{0x0, 0x2, 0x0, 0x2},
234 	{0x2, 0x2, 0x2, 0x2}
235 };
236 
237 /**
238  * @brief Select the type of statistics
239  */
240 enum dp_stats_type {
241 	STATS_FW = 0,
242 	STATS_HOST = 1,
243 	STATS_TYPE_MAX = 2,
244 };
245 
246 /**
247  * @brief General Firmware statistics options
248  *
249  */
250 enum dp_fw_stats {
251 	TXRX_FW_STATS_INVALID	= -1,
252 };
253 
254 /**
255  * dp_stats_mapping_table - Firmware and Host statistics
256  * currently supported
257  */
258 const int dp_stats_mapping_table[][STATS_TYPE_MAX] = {
259 	{HTT_DBG_EXT_STATS_RESET, TXRX_HOST_STATS_INVALID},
260 	{HTT_DBG_EXT_STATS_PDEV_TX, TXRX_HOST_STATS_INVALID},
261 	{HTT_DBG_EXT_STATS_PDEV_RX, TXRX_HOST_STATS_INVALID},
262 	{HTT_DBG_EXT_STATS_PDEV_TX_HWQ, TXRX_HOST_STATS_INVALID},
263 	{HTT_DBG_EXT_STATS_PDEV_TX_SCHED, TXRX_HOST_STATS_INVALID},
264 	{HTT_DBG_EXT_STATS_PDEV_ERROR, TXRX_HOST_STATS_INVALID},
265 	{HTT_DBG_EXT_STATS_PDEV_TQM, TXRX_HOST_STATS_INVALID},
266 	{HTT_DBG_EXT_STATS_TQM_CMDQ, TXRX_HOST_STATS_INVALID},
267 	{HTT_DBG_EXT_STATS_TX_DE_INFO, TXRX_HOST_STATS_INVALID},
268 	{HTT_DBG_EXT_STATS_PDEV_TX_RATE, TXRX_HOST_STATS_INVALID},
269 	{HTT_DBG_EXT_STATS_PDEV_RX_RATE, TXRX_HOST_STATS_INVALID},
270 	{TXRX_FW_STATS_INVALID, TXRX_HOST_STATS_INVALID},
271 	{HTT_DBG_EXT_STATS_TX_SELFGEN_INFO, TXRX_HOST_STATS_INVALID},
272 	{HTT_DBG_EXT_STATS_TX_MU_HWQ, TXRX_HOST_STATS_INVALID},
273 	{HTT_DBG_EXT_STATS_RING_IF_INFO, TXRX_HOST_STATS_INVALID},
274 	{HTT_DBG_EXT_STATS_SRNG_INFO, TXRX_HOST_STATS_INVALID},
275 	{HTT_DBG_EXT_STATS_SFM_INFO, TXRX_HOST_STATS_INVALID},
276 	{HTT_DBG_EXT_STATS_PDEV_TX_MU, TXRX_HOST_STATS_INVALID},
277 	{HTT_DBG_EXT_STATS_ACTIVE_PEERS_LIST, TXRX_HOST_STATS_INVALID},
278 	/* Last ENUM for HTT FW STATS */
279 	{DP_HTT_DBG_EXT_STATS_MAX, TXRX_HOST_STATS_INVALID},
280 	{TXRX_FW_STATS_INVALID, TXRX_CLEAR_STATS},
281 	{TXRX_FW_STATS_INVALID, TXRX_RX_RATE_STATS},
282 	{TXRX_FW_STATS_INVALID, TXRX_TX_RATE_STATS},
283 	{TXRX_FW_STATS_INVALID, TXRX_TX_HOST_STATS},
284 	{TXRX_FW_STATS_INVALID, TXRX_RX_HOST_STATS},
285 	{TXRX_FW_STATS_INVALID, TXRX_AST_STATS},
286 	{TXRX_FW_STATS_INVALID, TXRX_SRNG_PTR_STATS},
287 	{TXRX_FW_STATS_INVALID, TXRX_RX_MON_STATS},
288 };
289 
290 static int dp_peer_add_ast_wifi3(struct cdp_soc_t *soc_hdl,
291 					struct cdp_peer *peer_hdl,
292 					uint8_t *mac_addr,
293 					enum cdp_txrx_ast_entry_type type,
294 					uint32_t flags)
295 {
296 
297 	return dp_peer_add_ast((struct dp_soc *)soc_hdl,
298 				(struct dp_peer *)peer_hdl,
299 				mac_addr,
300 				type,
301 				flags);
302 }
303 
304 static void dp_peer_del_ast_wifi3(struct cdp_soc_t *soc_hdl,
305 					 void *ast_entry_hdl)
306 {
307 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
308 	qdf_spin_lock_bh(&soc->ast_lock);
309 	dp_peer_del_ast((struct dp_soc *)soc_hdl,
310 			(struct dp_ast_entry *)ast_entry_hdl);
311 	qdf_spin_unlock_bh(&soc->ast_lock);
312 }
313 
314 
315 static int dp_peer_update_ast_wifi3(struct cdp_soc_t *soc_hdl,
316 						struct cdp_peer *peer_hdl,
317 						uint8_t *wds_macaddr,
318 						uint32_t flags)
319 {
320 	int status;
321 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
322 	struct dp_ast_entry  *ast_entry = NULL;
323 
324 	qdf_spin_lock_bh(&soc->ast_lock);
325 	ast_entry = dp_peer_ast_hash_find(soc, wds_macaddr);
326 
327 	status = dp_peer_update_ast(soc,
328 					(struct dp_peer *)peer_hdl,
329 					ast_entry,
330 					flags);
331 	qdf_spin_unlock_bh(&soc->ast_lock);
332 
333 	return status;
334 }
335 
336 /*
337  * dp_wds_reset_ast_wifi3() - Reset the is_active flag for an AST entry
338  * @soc_hdl: Datapath SOC handle
339  * @wds_macaddr: MAC address of the WDS entry to be reset
340  * @vdev_hdl: vdev handle
341  * Return: None
342  */
343 static void dp_wds_reset_ast_wifi3(struct cdp_soc_t *soc_hdl,
344 				    uint8_t *wds_macaddr, void *vdev_hdl)
345 {
346 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
347 	struct dp_ast_entry *ast_entry = NULL;
348 
349 	qdf_spin_lock_bh(&soc->ast_lock);
350 	ast_entry = dp_peer_ast_hash_find(soc, wds_macaddr);
351 
352 	if (ast_entry && (ast_entry->type != CDP_TXRX_AST_TYPE_STATIC)) {
353 		ast_entry->is_active = TRUE;
354 	}
355 	qdf_spin_unlock_bh(&soc->ast_lock);
356 }
357 
358 /*
359  * dp_wds_reset_ast_table_wifi3() - Reset the is_active flag for all AST entries
360  * @soc_hdl: Datapath SOC handle
361  * @vdev_hdl: vdev handle
362  *
363  * Return: None
364  */
365 static void dp_wds_reset_ast_table_wifi3(struct cdp_soc_t *soc_hdl,
366 					 void *vdev_hdl)
367 {
368 	struct dp_soc *soc = (struct dp_soc *) soc_hdl;
369 	struct dp_pdev *pdev;
370 	struct dp_vdev *vdev;
371 	struct dp_peer *peer;
372 	struct dp_ast_entry *ase, *temp_ase;
373 	int i;
374 
375 	qdf_spin_lock_bh(&soc->ast_lock);
376 
377 	for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) {
378 		pdev = soc->pdev_list[i];
379 		qdf_spin_lock_bh(&pdev->vdev_list_lock);
380 		DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
381 			DP_VDEV_ITERATE_PEER_LIST(vdev, peer) {
382 				DP_PEER_ITERATE_ASE_LIST(peer, ase, temp_ase) {
383 					if (ase->type ==
384 						CDP_TXRX_AST_TYPE_STATIC)
385 						continue;
386 					ase->is_active = TRUE;
387 				}
388 			}
389 		}
390 		qdf_spin_unlock_bh(&pdev->vdev_list_lock);
391 	}
392 
393 	qdf_spin_unlock_bh(&soc->ast_lock);
394 }
395 
396 /*
397  * dp_wds_flush_ast_table_wifi3() - Delete all WDS and HMWDS AST entries
398  * @soc_hdl:		Datapath SOC handle
399  *
400  * Return: None
401  */
402 static void dp_wds_flush_ast_table_wifi3(struct cdp_soc_t  *soc_hdl)
403 {
404 	struct dp_soc *soc = (struct dp_soc *) soc_hdl;
405 	struct dp_pdev *pdev;
406 	struct dp_vdev *vdev;
407 	struct dp_peer *peer;
408 	struct dp_ast_entry *ase, *temp_ase;
409 	int i;
410 
411 	qdf_spin_lock_bh(&soc->ast_lock);
412 
413 	for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) {
414 		pdev = soc->pdev_list[i];
415 		qdf_spin_lock_bh(&pdev->vdev_list_lock);
416 		DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
417 			DP_VDEV_ITERATE_PEER_LIST(vdev, peer) {
418 				DP_PEER_ITERATE_ASE_LIST(peer, ase, temp_ase) {
419 					if (ase->type ==
420 						CDP_TXRX_AST_TYPE_STATIC)
421 						continue;
422 					dp_peer_del_ast(soc, ase);
423 				}
424 			}
425 		}
426 		qdf_spin_unlock_bh(&pdev->vdev_list_lock);
427 	}
428 
429 	qdf_spin_unlock_bh(&soc->ast_lock);
430 }
431 
432 static void *dp_peer_ast_hash_find_wifi3(struct cdp_soc_t *soc_hdl,
433 						uint8_t *ast_mac_addr)
434 {
435 	struct dp_ast_entry *ast_entry;
436 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
437 	qdf_spin_lock_bh(&soc->ast_lock);
438 	ast_entry = dp_peer_ast_hash_find(soc, ast_mac_addr);
439 	qdf_spin_unlock_bh(&soc->ast_lock);
440 	return (void *)ast_entry;
441 }
442 
443 static uint8_t dp_peer_ast_get_pdev_id_wifi3(struct cdp_soc_t *soc_hdl,
444 							void *ast_entry_hdl)
445 {
446 	return dp_peer_ast_get_pdev_id((struct dp_soc *)soc_hdl,
447 					(struct dp_ast_entry *)ast_entry_hdl);
448 }
449 
450 static uint8_t dp_peer_ast_get_next_hop_wifi3(struct cdp_soc_t *soc_hdl,
451 							void *ast_entry_hdl)
452 {
453 	return dp_peer_ast_get_next_hop((struct dp_soc *)soc_hdl,
454 					(struct dp_ast_entry *)ast_entry_hdl);
455 }
456 
457 static void dp_peer_ast_set_type_wifi3(
458 					struct cdp_soc_t *soc_hdl,
459 					void *ast_entry_hdl,
460 					enum cdp_txrx_ast_entry_type type)
461 {
462 	dp_peer_ast_set_type((struct dp_soc *)soc_hdl,
463 				(struct dp_ast_entry *)ast_entry_hdl,
464 				type);
465 }
466 
467 
468 
469 /**
470  * dp_srng_find_ring_in_mask() - find which ext_group a ring belongs to
471  * @ring_num: ring num of the ring being queried
472  * @grp_mask: the grp_mask array for the ring type in question.
473  *
474  * The grp_mask array is indexed by group number and the bit fields correspond
475  * to ring numbers.  We are finding which interrupt group a ring belongs to.
476  *
477  * Return: the index in the grp_mask array with the ring number.
478  * -QDF_STATUS_E_NOENT if no entry is found
479  */
480 static int dp_srng_find_ring_in_mask(int ring_num, int *grp_mask)
481 {
482 	int ext_group_num;
483 	int mask = 1 << ring_num;
484 
485 	for (ext_group_num = 0; ext_group_num < WLAN_CFG_INT_NUM_CONTEXTS;
486 	     ext_group_num++) {
487 		if (mask & grp_mask[ext_group_num])
488 			return ext_group_num;
489 	}
490 
491 	return -QDF_STATUS_E_NOENT;
492 }
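/*
 * Example (illustrative, with an assumed 4-context group mask array of
 * {0x1, 0x6, 0x8, 0x0}): for ring 2 the function computes
 * mask = 1 << 2 = 0x4, which matches grp_mask[1] (0x6), so
 * dp_srng_find_ring_in_mask(2, grp_mask) returns ext_group 1.
 */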
493 
494 static int dp_srng_calculate_msi_group(struct dp_soc *soc,
495 				       enum hal_ring_type ring_type,
496 				       int ring_num)
497 {
498 	int *grp_mask;
499 
500 	switch (ring_type) {
501 	case WBM2SW_RELEASE:
502 		/* dp_tx_comp_handler - soc->tx_comp_ring */
503 		if (ring_num < 3)
504 			grp_mask = &soc->wlan_cfg_ctx->int_tx_ring_mask[0];
505 
506 		/* dp_rx_wbm_err_process - soc->rx_rel_ring */
507 		else if (ring_num == 3) {
508 			/* sw treats this as a separate ring type */
509 			grp_mask = &soc->wlan_cfg_ctx->
510 				int_rx_wbm_rel_ring_mask[0];
511 			ring_num = 0;
512 		} else {
513 			qdf_assert(0);
514 			return -QDF_STATUS_E_NOENT;
515 		}
516 	break;
517 
518 	case REO_EXCEPTION:
519 		/* dp_rx_err_process - &soc->reo_exception_ring */
520 		grp_mask = &soc->wlan_cfg_ctx->int_rx_err_ring_mask[0];
521 	break;
522 
523 	case REO_DST:
524 		/* dp_rx_process - soc->reo_dest_ring */
525 		grp_mask = &soc->wlan_cfg_ctx->int_rx_ring_mask[0];
526 	break;
527 
528 	case REO_STATUS:
529 		/* dp_reo_status_ring_handler - soc->reo_status_ring */
530 		grp_mask = &soc->wlan_cfg_ctx->int_reo_status_ring_mask[0];
531 	break;
532 
533 	/* dp_rx_mon_status_srng_process - pdev->rxdma_mon_status_ring*/
534 	case RXDMA_MONITOR_STATUS:
535 	/* dp_rx_mon_dest_process - pdev->rxdma_mon_dst_ring */
536 	case RXDMA_MONITOR_DST:
537 		/* dp_mon_process */
538 		grp_mask = &soc->wlan_cfg_ctx->int_rx_mon_ring_mask[0];
539 	break;
540 	case RXDMA_DST:
541 		/* dp_rxdma_err_process */
542 		grp_mask = &soc->wlan_cfg_ctx->int_rxdma2host_ring_mask[0];
543 	break;
544 
545 	case RXDMA_BUF:
546 		grp_mask = &soc->wlan_cfg_ctx->int_host2rxdma_ring_mask[0];
547 	break;
548 
549 	case RXDMA_MONITOR_BUF:
550 		/* TODO: support low_thresh interrupt */
551 		return -QDF_STATUS_E_NOENT;
552 	break;
553 
554 	case TCL_DATA:
555 	case TCL_CMD:
556 	case REO_CMD:
557 	case SW2WBM_RELEASE:
558 	case WBM_IDLE_LINK:
559 		/* normally empty SW_TO_HW rings */
560 		return -QDF_STATUS_E_NOENT;
561 	break;
562 
563 	case TCL_STATUS:
564 	case REO_REINJECT:
565 		/* misc unused rings */
566 		return -QDF_STATUS_E_NOENT;
567 	break;
568 
569 	case CE_SRC:
570 	case CE_DST:
571 	case CE_DST_STATUS:
572 		/* CE_rings - currently handled by hif */
573 	default:
574 		return -QDF_STATUS_E_NOENT;
575 	break;
576 	}
577 
578 	return dp_srng_find_ring_in_mask(ring_num, grp_mask);
579 }
580 
581 static void dp_srng_msi_setup(struct dp_soc *soc, struct hal_srng_params
582 			      *ring_params, int ring_type, int ring_num)
583 {
584 	int msi_group_number;
585 	int msi_data_count;
586 	int ret;
587 	uint32_t msi_data_start, msi_irq_start, addr_low, addr_high;
588 
589 	ret = pld_get_user_msi_assignment(soc->osdev->dev, "DP",
590 					    &msi_data_count, &msi_data_start,
591 					    &msi_irq_start);
592 
593 	if (ret)
594 		return;
595 
596 	msi_group_number = dp_srng_calculate_msi_group(soc, ring_type,
597 						       ring_num);
598 	if (msi_group_number < 0) {
599 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW,
600 			FL("ring not part of an ext_group; ring_type: %d,ring_num %d"),
601 			ring_type, ring_num);
602 		ring_params->msi_addr = 0;
603 		ring_params->msi_data = 0;
604 		return;
605 	}
606 
607 	if (msi_group_number > msi_data_count) {
608 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_WARN,
609 			FL("2 msi_groups will share an msi; msi_group_num %d"),
610 			msi_group_number);
611 
612 		QDF_ASSERT(0);
613 	}
614 
615 	pld_get_msi_address(soc->osdev->dev, &addr_low, &addr_high);
616 
617 	ring_params->msi_addr = addr_low;
618 	ring_params->msi_addr |= (qdf_dma_addr_t)(((uint64_t)addr_high) << 32);
619 	ring_params->msi_data = (msi_group_number % msi_data_count)
620 		+ msi_data_start;
621 	ring_params->flags |= HAL_SRNG_MSI_INTR;
622 }
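/*
 * Example (illustrative, assuming the platform grants 3 MSI vectors to the
 * "DP" user with msi_data_start = 1): interrupt groups 0, 1 and 2 get
 * msi_data values (group % 3) + 1 = 1, 2 and 3 respectively; a fourth
 * group would wrap back to 1 and share a vector.
 */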
623 
624 /**
625  * dp_print_ast_stats() - Dump AST table contents
626  * @soc: Datapath soc handle
627  *
628  * Return: void
629  */
630 #ifdef FEATURE_AST
631 static void dp_print_ast_stats(struct dp_soc *soc)
632 {
633 	uint8_t i;
634 	uint8_t num_entries = 0;
635 	struct dp_vdev *vdev;
636 	struct dp_pdev *pdev;
637 	struct dp_peer *peer;
638 	struct dp_ast_entry *ase, *tmp_ase;
639 	char type[5][10] = {"NONE", "STATIC", "WDS", "MEC", "HMWDS"};
640 
641 	DP_PRINT_STATS("AST Stats:");
642 	DP_PRINT_STATS("	Entries Added   = %d", soc->stats.ast.added);
643 	DP_PRINT_STATS("	Entries Deleted = %d", soc->stats.ast.deleted);
644 	DP_PRINT_STATS("	Entries Agedout = %d", soc->stats.ast.aged_out);
645 	DP_PRINT_STATS("AST Table:");
646 	for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) {
647 		pdev = soc->pdev_list[i];
648 		qdf_spin_lock_bh(&pdev->vdev_list_lock);
649 		DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
650 			DP_VDEV_ITERATE_PEER_LIST(vdev, peer) {
651 				DP_PEER_ITERATE_ASE_LIST(peer, ase, tmp_ase) {
652 					DP_PRINT_STATS("%6d mac_addr = %pM"
653 							" peer_mac_addr = %pM"
654 							" type = %s"
655 							" next_hop = %d"
656 							" is_active = %d"
657 							" is_bss = %d"
658 							" ast_idx = %d"
659 							" pdev_id = %d"
660 							" vdev_id = %d",
661 							++num_entries,
662 							ase->mac_addr.raw,
663 							ase->peer->mac_addr.raw,
664 							type[ase->type],
665 							ase->next_hop,
666 							ase->is_active,
667 							ase->is_bss,
668 							ase->ast_idx,
669 							ase->pdev_id,
670 							ase->vdev_id);
671 				}
672 			}
673 		}
674 		qdf_spin_unlock_bh(&pdev->vdev_list_lock);
675 	}
676 }
677 #else
678 static void dp_print_ast_stats(struct dp_soc *soc)
679 {
680 	DP_PRINT_STATS("AST Stats not available. Enable FEATURE_AST");
681 	return;
682 }
683 #endif
684 
685 static void dp_print_peer_table(struct dp_vdev *vdev)
686 {
687 	struct dp_peer *peer = NULL;
688 
689 	DP_PRINT_STATS("Dumping Peer Table  Stats:");
690 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
691 		if (!peer) {
692 			DP_PRINT_STATS("Invalid Peer");
693 			return;
694 		}
695 		DP_PRINT_STATS("    peer_mac_addr = %pM"
696 			" nawds_enabled = %d"
697 			" bss_peer = %d"
698 			" wapi = %d"
699 			" wds_enabled = %d"
700 			" delete in progress = %d",
701 			peer->mac_addr.raw,
702 			peer->nawds_enabled,
703 			peer->bss_peer,
704 			peer->wapi,
705 			peer->wds_enabled,
706 			peer->delete_in_progress);
707 	}
708 }
709 
710 /*
711  * dp_srng_setup() - Internal function to set up SRNG rings used by the data path
712  */
713 static int dp_srng_setup(struct dp_soc *soc, struct dp_srng *srng,
714 	int ring_type, int ring_num, int mac_id, uint32_t num_entries)
715 {
716 	void *hal_soc = soc->hal_soc;
717 	uint32_t entry_size = hal_srng_get_entrysize(hal_soc, ring_type);
718 	/* TODO: See if we should get align size from hal */
719 	uint32_t ring_base_align = 8;
720 	struct hal_srng_params ring_params;
721 	uint32_t max_entries = hal_srng_max_entries(hal_soc, ring_type);
722 
723 	/* TODO: Currently hal layer takes care of endianness related settings.
724 	 * See if these settings need to passed from DP layer
725 	 */
726 	ring_params.flags = 0;
727 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW,
728 		  FL("Ring type: %d, num:%d"), ring_type, ring_num);
729 
730 	num_entries = (num_entries > max_entries) ? max_entries : num_entries;
731 	srng->hal_srng = NULL;
732 	srng->alloc_size = (num_entries * entry_size) + ring_base_align - 1;
733 	srng->num_entries = num_entries;
734 	srng->base_vaddr_unaligned = qdf_mem_alloc_consistent(
735 		soc->osdev, soc->osdev->dev, srng->alloc_size,
736 		&(srng->base_paddr_unaligned));
737 
738 	if (!srng->base_vaddr_unaligned) {
739 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
740 			FL("alloc failed - ring_type: %d, ring_num %d"),
741 			ring_type, ring_num);
742 		return QDF_STATUS_E_NOMEM;
743 	}
744 
745 	ring_params.ring_base_vaddr = srng->base_vaddr_unaligned +
746 		((unsigned long)srng->base_vaddr_unaligned % ring_base_align);
747 	ring_params.ring_base_paddr = srng->base_paddr_unaligned +
748 		((unsigned long)(ring_params.ring_base_vaddr) -
749 		(unsigned long)srng->base_vaddr_unaligned);
750 	ring_params.num_entries = num_entries;
751 
752 	if (soc->intr_mode == DP_INTR_MSI) {
753 		dp_srng_msi_setup(soc, &ring_params, ring_type, ring_num);
754 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
755 			FL("Using MSI for ring_type: %d, ring_num %d"),
756 			ring_type, ring_num);
757 
758 	} else {
759 		ring_params.msi_data = 0;
760 		ring_params.msi_addr = 0;
761 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
762 			FL("Skipping MSI for ring_type: %d, ring_num %d"),
763 			ring_type, ring_num);
764 	}
765 
766 	/*
767 	 * Setup interrupt timer and batch counter thresholds for
768 	 * interrupt mitigation based on ring type
769 	 */
770 	if (ring_type == REO_DST) {
771 		ring_params.intr_timer_thres_us =
772 			wlan_cfg_get_int_timer_threshold_rx(soc->wlan_cfg_ctx);
773 		ring_params.intr_batch_cntr_thres_entries =
774 			wlan_cfg_get_int_batch_threshold_rx(soc->wlan_cfg_ctx);
775 	} else if (ring_type == WBM2SW_RELEASE && (ring_num < 3)) {
776 		ring_params.intr_timer_thres_us =
777 			wlan_cfg_get_int_timer_threshold_tx(soc->wlan_cfg_ctx);
778 		ring_params.intr_batch_cntr_thres_entries =
779 			wlan_cfg_get_int_batch_threshold_tx(soc->wlan_cfg_ctx);
780 	} else {
781 		ring_params.intr_timer_thres_us =
782 			wlan_cfg_get_int_timer_threshold_other(soc->wlan_cfg_ctx);
783 		ring_params.intr_batch_cntr_thres_entries =
784 			wlan_cfg_get_int_batch_threshold_other(soc->wlan_cfg_ctx);
785 	}
786 
787 	/* Enable low threshold interrupts for rx buffer rings (regular and
788 	 * monitor buffer rings).
789 	 * TODO: See if this is required for any other ring
790 	 */
791 	if ((ring_type == RXDMA_BUF) || (ring_type == RXDMA_MONITOR_BUF) ||
792 		(ring_type == RXDMA_MONITOR_STATUS)) {
793 		/* TODO: Setting low threshold to 1/8th of ring size
794 		 * see if this needs to be configurable
795 		 */
796 		ring_params.low_threshold = num_entries >> 3;
797 		ring_params.flags |= HAL_SRNG_LOW_THRES_INTR_ENABLE;
798 		ring_params.intr_timer_thres_us =
799 			wlan_cfg_get_int_timer_threshold_rx(soc->wlan_cfg_ctx);
800 		ring_params.intr_batch_cntr_thres_entries = 0;
801 	}
802 
803 	srng->hal_srng = hal_srng_setup(hal_soc, ring_type, ring_num,
804 		mac_id, &ring_params);
805 
806 	if (!srng->hal_srng) {
807 		qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
808 				srng->alloc_size,
809 				srng->base_vaddr_unaligned,
810 				srng->base_paddr_unaligned, 0);
811 		return QDF_STATUS_E_FAILURE;
811 	}
812 
813 	return 0;
814 }
815 
816 /**
817  * dp_srng_cleanup - Internal function to cleanup SRNG rings used by data path
818  * Any buffers allocated and attached to ring entries are expected to be freed
819  * before calling this function.
820  */
821 static void dp_srng_cleanup(struct dp_soc *soc, struct dp_srng *srng,
822 	int ring_type, int ring_num)
823 {
824 	if (!srng->hal_srng) {
825 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
826 			FL("Ring type: %d, num:%d not setup"),
827 			ring_type, ring_num);
828 		return;
829 	}
830 
831 	hal_srng_cleanup(soc->hal_soc, srng->hal_srng);
832 
833 	qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
834 				srng->alloc_size,
835 				srng->base_vaddr_unaligned,
836 				srng->base_paddr_unaligned, 0);
837 	srng->hal_srng = NULL;
838 }
839 
840 /* TODO: Need this interface from HIF */
841 void *hif_get_hal_handle(void *hif_handle);
842 
843 /*
844  * dp_service_srngs() - Top level interrupt handler for DP Ring interrupts
845  * @dp_ctx: DP interrupt context handle
846  * @dp_budget: Number of frames/descriptors that can be processed in one shot
847  *
848  * Return: remaining budget/quota for the soc device
849  */
850 static uint32_t dp_service_srngs(void *dp_ctx, uint32_t dp_budget)
851 {
852 	struct dp_intr *int_ctx = (struct dp_intr *)dp_ctx;
853 	struct dp_soc *soc = int_ctx->soc;
854 	int ring = 0;
855 	uint32_t work_done  = 0;
856 	int budget = dp_budget;
857 	uint8_t tx_mask = int_ctx->tx_ring_mask;
858 	uint8_t rx_mask = int_ctx->rx_ring_mask;
859 	uint8_t rx_err_mask = int_ctx->rx_err_ring_mask;
860 	uint8_t rx_wbm_rel_mask = int_ctx->rx_wbm_rel_ring_mask;
861 	uint8_t reo_status_mask = int_ctx->reo_status_ring_mask;
862 	uint32_t remaining_quota = dp_budget;
863 	struct dp_pdev *pdev = NULL;
864 	int mac_id;
865 
866 	/* Process Tx completion interrupts first to return back buffers */
867 	while (tx_mask) {
868 		if (tx_mask & 0x1) {
869 			work_done = dp_tx_comp_handler(soc,
870 					soc->tx_comp_ring[ring].hal_srng,
871 					remaining_quota);
872 
873 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
874 				"tx mask 0x%x ring %d, budget %d, work_done %d",
875 				tx_mask, ring, budget, work_done);
876 
877 			budget -= work_done;
878 			if (budget <= 0)
879 				goto budget_done;
880 
881 			remaining_quota = budget;
882 		}
883 		tx_mask = tx_mask >> 1;
884 		ring++;
885 	}
886 
887 
888 	/* Process REO Exception ring interrupt */
889 	if (rx_err_mask) {
890 		work_done = dp_rx_err_process(soc,
891 				soc->reo_exception_ring.hal_srng,
892 				remaining_quota);
893 
894 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
895 			"REO Exception Ring: work_done %d budget %d",
896 			work_done, budget);
897 
898 		budget -=  work_done;
899 		if (budget <= 0) {
900 			goto budget_done;
901 		}
902 		remaining_quota = budget;
903 	}
904 
905 	/* Process Rx WBM release ring interrupt */
906 	if (rx_wbm_rel_mask) {
907 		work_done = dp_rx_wbm_err_process(soc,
908 				soc->rx_rel_ring.hal_srng, remaining_quota);
909 
910 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
911 			"WBM Release Ring: work_done %d budget %d",
912 			work_done, budget);
913 
914 		budget -=  work_done;
915 		if (budget <= 0) {
916 			goto budget_done;
917 		}
918 		remaining_quota = budget;
919 	}
920 
921 	/* Process Rx interrupts */
922 	if (rx_mask) {
923 		for (ring = 0; ring < soc->num_reo_dest_rings; ring++) {
924 			if (rx_mask & (1 << ring)) {
925 				work_done = dp_rx_process(int_ctx,
926 					    soc->reo_dest_ring[ring].hal_srng,
927 					    remaining_quota);
928 
929 				QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
930 					"rx mask 0x%x ring %d, work_done %d budget %d",
931 					rx_mask, ring, work_done, budget);
932 
933 				budget -=  work_done;
934 				if (budget <= 0)
935 					goto budget_done;
936 				remaining_quota = budget;
937 			}
938 		}
939 		for (ring = 0; ring < MAX_RX_MAC_RINGS; ring++) {
940 			work_done = dp_rxdma_err_process(soc, ring,
941 						remaining_quota);
942 			budget -= work_done;
943 		}
944 	}
945 
946 	if (reo_status_mask)
947 		dp_reo_status_ring_handler(soc);
948 
949 	/* Process LMAC interrupts */
950 	for  (ring = 0 ; ring < MAX_PDEV_CNT; ring++) {
951 		pdev = soc->pdev_list[ring];
952 		if (pdev == NULL)
953 			continue;
954 		for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
955 			int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id,
956 								pdev->pdev_id);
957 
958 			if (int_ctx->rx_mon_ring_mask & (1 << mac_for_pdev)) {
959 				work_done = dp_mon_process(soc, mac_for_pdev,
960 						remaining_quota);
961 				budget -= work_done;
962 				if (budget <= 0)
963 					goto budget_done;
964 				remaining_quota = budget;
965 			}
966 
967 			if (int_ctx->rxdma2host_ring_mask &
968 					(1 << mac_for_pdev)) {
969 				work_done = dp_rxdma_err_process(soc,
970 							mac_for_pdev,
971 							remaining_quota);
972 				budget -=  work_done;
973 				if (budget <= 0)
974 					goto budget_done;
975 				remaining_quota = budget;
976 			}
977 
978 			if (int_ctx->host2rxdma_ring_mask &
979 						(1 << mac_for_pdev)) {
980 				union dp_rx_desc_list_elem_t *desc_list = NULL;
981 				union dp_rx_desc_list_elem_t *tail = NULL;
982 				struct dp_srng *rx_refill_buf_ring =
983 					&pdev->rx_refill_buf_ring;
984 
985 				DP_STATS_INC(pdev, replenish.low_thresh_intrs,
986 						1);
987 				dp_rx_buffers_replenish(soc, mac_for_pdev,
988 					rx_refill_buf_ring,
989 					&soc->rx_desc_buf[mac_for_pdev], 0,
990 					&desc_list, &tail);
991 			}
992 		}
993 	}
994 
995 	qdf_lro_flush(int_ctx->lro_ctx);
996 
997 budget_done:
998 	return dp_budget - budget;
999 }
1000 
1001 #ifdef DP_INTR_POLL_BASED
1002 /* dp_interrupt_timer() - timer poll for interrupts
1003  *
1004  * @arg: SoC Handle
1005  *
1006  * Return: none
1007  *
1008  */
1009 static void dp_interrupt_timer(void *arg)
1010 {
1011 	struct dp_soc *soc = (struct dp_soc *) arg;
1012 	int i;
1013 
1014 	if (qdf_atomic_read(&soc->cmn_init_done)) {
1015 		for (i = 0;
1016 			i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++)
1017 			dp_service_srngs(&soc->intr_ctx[i], 0xffff);
1018 
1019 		qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
1020 	}
1021 }
1022 
1023 /*
1024  * dp_soc_interrupt_attach_poll() - Register handlers for DP interrupts
1025  * @txrx_soc: DP SOC handle
1026  *
1027  * Host driver will register for "DP_NUM_INTERRUPT_CONTEXTS" number of NAPI
1028  * contexts. Each NAPI context will have a tx_ring_mask, rx_ring_mask, and
1029  * rx_monitor_ring mask to indicate the rings that are processed by the handler.
1030  *
1031  * Return: 0 for success. nonzero for failure.
1032  */
1033 static QDF_STATUS dp_soc_interrupt_attach_poll(void *txrx_soc)
1034 {
1035 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
1036 	int i;
1037 
1038 	soc->intr_mode = DP_INTR_POLL;
1039 
1040 	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
1041 		soc->intr_ctx[i].dp_intr_id = i;
1042 		soc->intr_ctx[i].tx_ring_mask =
1043 			wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, i);
1044 		soc->intr_ctx[i].rx_ring_mask =
1045 			wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, i);
1046 		soc->intr_ctx[i].rx_mon_ring_mask =
1047 			wlan_cfg_get_rx_mon_ring_mask(soc->wlan_cfg_ctx, i);
1048 		soc->intr_ctx[i].rx_err_ring_mask =
1049 			wlan_cfg_get_rx_err_ring_mask(soc->wlan_cfg_ctx, i);
1050 		soc->intr_ctx[i].rx_wbm_rel_ring_mask =
1051 			wlan_cfg_get_rx_wbm_rel_ring_mask(soc->wlan_cfg_ctx, i);
1052 		soc->intr_ctx[i].reo_status_ring_mask =
1053 			wlan_cfg_get_reo_status_ring_mask(soc->wlan_cfg_ctx, i);
1054 		soc->intr_ctx[i].rxdma2host_ring_mask =
1055 			wlan_cfg_get_rxdma2host_ring_mask(soc->wlan_cfg_ctx, i);
1056 		soc->intr_ctx[i].soc = soc;
1057 		soc->intr_ctx[i].lro_ctx = qdf_lro_init();
1058 	}
1059 
1060 	qdf_timer_init(soc->osdev, &soc->int_timer,
1061 			dp_interrupt_timer, (void *)soc,
1062 			QDF_TIMER_TYPE_WAKE_APPS);
1063 
1064 	return QDF_STATUS_SUCCESS;
1065 }
1066 
1067 #if defined(CONFIG_MCL)
1068 extern int con_mode_monitor;
1069 static QDF_STATUS dp_soc_interrupt_attach(void *txrx_soc);
1070 /*
1071  * dp_soc_interrupt_attach_wrapper() - Register handlers for DP interrupts
1072  * @txrx_soc: DP SOC handle
1073  *
1074  * Call the appropriate attach function based on the mode of operation.
1075  * This is a WAR for enabling monitor mode.
1076  *
1077  * Return: 0 for success. nonzero for failure.
1078  */
1079 static QDF_STATUS dp_soc_interrupt_attach_wrapper(void *txrx_soc)
1080 {
1081 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
1082 
1083 	if (!(soc->wlan_cfg_ctx->napi_enabled) ||
1084 	     con_mode_monitor == QDF_GLOBAL_MONITOR_MODE) {
1085 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
1086 				  "%s: Poll mode", __func__);
1087 		return dp_soc_interrupt_attach_poll(txrx_soc);
1088 	} else {
1089 
1090 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
1091 				  "%s: Interrupt  mode", __func__);
1092 		return dp_soc_interrupt_attach(txrx_soc);
1093 	}
1094 }
1095 #else
1096 static QDF_STATUS dp_soc_interrupt_attach_wrapper(void *txrx_soc)
1097 {
1098 	return dp_soc_interrupt_attach_poll(txrx_soc);
1099 }
1100 #endif
1101 #endif
1102 
1103 static void dp_soc_interrupt_map_calculate_integrated(struct dp_soc *soc,
1104 		int intr_ctx_num, int *irq_id_map, int *num_irq_r)
1105 {
1106 	int j;
1107 	int num_irq = 0;
1108 
1109 	int tx_mask =
1110 		wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
1111 	int rx_mask =
1112 		wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
1113 	int rx_mon_mask =
1114 		wlan_cfg_get_rx_mon_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
1115 	int rx_err_ring_mask = wlan_cfg_get_rx_err_ring_mask(
1116 					soc->wlan_cfg_ctx, intr_ctx_num);
1117 	int rx_wbm_rel_ring_mask = wlan_cfg_get_rx_wbm_rel_ring_mask(
1118 					soc->wlan_cfg_ctx, intr_ctx_num);
1119 	int reo_status_ring_mask = wlan_cfg_get_reo_status_ring_mask(
1120 					soc->wlan_cfg_ctx, intr_ctx_num);
1121 	int rxdma2host_ring_mask = wlan_cfg_get_rxdma2host_ring_mask(
1122 					soc->wlan_cfg_ctx, intr_ctx_num);
1123 	int host2rxdma_ring_mask = wlan_cfg_get_host2rxdma_ring_mask(
1124 					soc->wlan_cfg_ctx, intr_ctx_num);
1125 
1126 	for (j = 0; j < HIF_MAX_GRP_IRQ; j++) {
1127 
1128 		if (tx_mask & (1 << j)) {
1129 			irq_id_map[num_irq++] =
1130 				(wbm2host_tx_completions_ring1 - j);
1131 		}
1132 
1133 		if (rx_mask & (1 << j)) {
1134 			irq_id_map[num_irq++] =
1135 				(reo2host_destination_ring1 - j);
1136 		}
1137 
1138 		if (rxdma2host_ring_mask & (1 << j)) {
1139 			irq_id_map[num_irq++] =
1140 				rxdma2host_destination_ring_mac1 -
1141 				wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, j);
1142 		}
1143 
1144 		if (host2rxdma_ring_mask & (1 << j)) {
1145 			irq_id_map[num_irq++] =
1146 				host2rxdma_host_buf_ring_mac1 -
1147 				wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, j);
1148 		}
1149 
1150 		if (rx_mon_mask & (1 << j)) {
1151 			irq_id_map[num_irq++] =
1152 				ppdu_end_interrupts_mac1 -
1153 				wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, j);
1154 			irq_id_map[num_irq++] =
1155 				rxdma2host_monitor_status_ring_mac1 -
1156 				wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, j);
1157 		}
1158 
1159 		if (rx_wbm_rel_ring_mask & (1 << j))
1160 			irq_id_map[num_irq++] = wbm2host_rx_release;
1161 
1162 		if (rx_err_ring_mask & (1 << j))
1163 			irq_id_map[num_irq++] = reo2host_exception;
1164 
1165 		if (reo_status_ring_mask & (1 << j))
1166 			irq_id_map[num_irq++] = reo2host_status;
1167 
1168 	}
1169 	*num_irq_r = num_irq;
1170 }
1171 
1172 static void dp_soc_interrupt_map_calculate_msi(struct dp_soc *soc,
1173 		int intr_ctx_num, int *irq_id_map, int *num_irq_r,
1174 		int msi_vector_count, int msi_vector_start)
1175 {
1176 	int tx_mask = wlan_cfg_get_tx_ring_mask(
1177 					soc->wlan_cfg_ctx, intr_ctx_num);
1178 	int rx_mask = wlan_cfg_get_rx_ring_mask(
1179 					soc->wlan_cfg_ctx, intr_ctx_num);
1180 	int rx_mon_mask = wlan_cfg_get_rx_mon_ring_mask(
1181 					soc->wlan_cfg_ctx, intr_ctx_num);
1182 	int rx_err_ring_mask = wlan_cfg_get_rx_err_ring_mask(
1183 					soc->wlan_cfg_ctx, intr_ctx_num);
1184 	int rx_wbm_rel_ring_mask = wlan_cfg_get_rx_wbm_rel_ring_mask(
1185 					soc->wlan_cfg_ctx, intr_ctx_num);
1186 	int reo_status_ring_mask = wlan_cfg_get_reo_status_ring_mask(
1187 					soc->wlan_cfg_ctx, intr_ctx_num);
1188 	int rxdma2host_ring_mask = wlan_cfg_get_rxdma2host_ring_mask(
1189 					soc->wlan_cfg_ctx, intr_ctx_num);
1190 
1191 	unsigned int vector =
1192 		(intr_ctx_num % msi_vector_count) + msi_vector_start;
1193 	int num_irq = 0;
1194 
1195 	soc->intr_mode = DP_INTR_MSI;
1196 
1197 	if (tx_mask | rx_mask | rx_mon_mask | rx_err_ring_mask |
1198 	    rx_wbm_rel_ring_mask | reo_status_ring_mask | rxdma2host_ring_mask)
1199 		irq_id_map[num_irq++] =
1200 			pld_get_msi_irq(soc->osdev->dev, vector);
1201 
1202 	*num_irq_r = num_irq;
1203 }
1204 
1205 static void dp_soc_interrupt_map_calculate(struct dp_soc *soc, int intr_ctx_num,
1206 				    int *irq_id_map, int *num_irq)
1207 {
1208 	int msi_vector_count, ret;
1209 	uint32_t msi_base_data, msi_vector_start;
1210 
1211 	ret = pld_get_user_msi_assignment(soc->osdev->dev, "DP",
1212 					    &msi_vector_count,
1213 					    &msi_base_data,
1214 					    &msi_vector_start);
1215 	if (ret)
1216 		return dp_soc_interrupt_map_calculate_integrated(soc,
1217 				intr_ctx_num, irq_id_map, num_irq);
1218 
1219 	else
1220 		dp_soc_interrupt_map_calculate_msi(soc,
1221 				intr_ctx_num, irq_id_map, num_irq,
1222 				msi_vector_count, msi_vector_start);
1223 }
1224 
1225 /*
1226  * dp_soc_interrupt_attach() - Register handlers for DP interrupts
1227  * @txrx_soc: DP SOC handle
1228  *
1229  * Host driver will register for "DP_NUM_INTERRUPT_CONTEXTS" number of NAPI
1230  * contexts. Each NAPI context will have a tx_ring_mask, rx_ring_mask, and
1231  * rx_monitor_ring mask to indicate the rings that are processed by the handler.
1232  *
1233  * Return: 0 for success. nonzero for failure.
1234  */
1235 static QDF_STATUS dp_soc_interrupt_attach(void *txrx_soc)
1236 {
1237 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
1238 
1239 	int i = 0;
1240 	int num_irq = 0;
1241 
1242 	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
1243 		int ret = 0;
1244 
1245 		/* Map of IRQ ids registered with one interrupt context */
1246 		int irq_id_map[HIF_MAX_GRP_IRQ];
1247 
1248 		int tx_mask =
1249 			wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, i);
1250 		int rx_mask =
1251 			wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, i);
1252 		int rx_mon_mask =
1253 			wlan_cfg_get_rx_mon_ring_mask(soc->wlan_cfg_ctx, i);
1254 		int rx_err_ring_mask =
1255 			wlan_cfg_get_rx_err_ring_mask(soc->wlan_cfg_ctx, i);
1256 		int rx_wbm_rel_ring_mask =
1257 			wlan_cfg_get_rx_wbm_rel_ring_mask(soc->wlan_cfg_ctx, i);
1258 		int reo_status_ring_mask =
1259 			wlan_cfg_get_reo_status_ring_mask(soc->wlan_cfg_ctx, i);
1260 		int rxdma2host_ring_mask =
1261 			wlan_cfg_get_rxdma2host_ring_mask(soc->wlan_cfg_ctx, i);
1262 		int host2rxdma_ring_mask =
1263 			wlan_cfg_get_host2rxdma_ring_mask(soc->wlan_cfg_ctx, i);
1264 
1265 
1266 		soc->intr_ctx[i].dp_intr_id = i;
1267 		soc->intr_ctx[i].tx_ring_mask = tx_mask;
1268 		soc->intr_ctx[i].rx_ring_mask = rx_mask;
1269 		soc->intr_ctx[i].rx_mon_ring_mask = rx_mon_mask;
1270 		soc->intr_ctx[i].rx_err_ring_mask = rx_err_ring_mask;
1271 		soc->intr_ctx[i].rxdma2host_ring_mask = rxdma2host_ring_mask;
1272 		soc->intr_ctx[i].host2rxdma_ring_mask = host2rxdma_ring_mask;
1273 		soc->intr_ctx[i].rx_wbm_rel_ring_mask = rx_wbm_rel_ring_mask;
1274 		soc->intr_ctx[i].reo_status_ring_mask = reo_status_ring_mask;
1275 
1276 		soc->intr_ctx[i].soc = soc;
1277 
1278 		num_irq = 0;
1279 
1280 		dp_soc_interrupt_map_calculate(soc, i, &irq_id_map[0],
1281 					       &num_irq);
1282 
1283 		ret = hif_register_ext_group(soc->hif_handle,
1284 				num_irq, irq_id_map, dp_service_srngs,
1285 				&soc->intr_ctx[i], "dp_intr",
1286 				HIF_EXEC_NAPI_TYPE, QCA_NAPI_DEF_SCALE_BIN_SHIFT);
1287 
1288 		if (ret) {
1289 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1290 			FL("failed, ret = %d"), ret);
1291 
1292 			return QDF_STATUS_E_FAILURE;
1293 		}
1294 		soc->intr_ctx[i].lro_ctx = qdf_lro_init();
1295 	}
1296 
1297 	hif_configure_ext_group_interrupts(soc->hif_handle);
1298 
1299 	return QDF_STATUS_SUCCESS;
1300 }
1301 
1302 /*
1303  * dp_soc_interrupt_detach() - Deregister handlers and free resources allocated for DP interrupts
1304  * @txrx_soc: DP SOC handle
1305  *
1306  * Return: void
1307  */
1308 static void dp_soc_interrupt_detach(void *txrx_soc)
1309 {
1310 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
1311 	int i;
1312 
1313 	if (soc->intr_mode == DP_INTR_POLL) {
1314 		qdf_timer_stop(&soc->int_timer);
1315 		qdf_timer_free(&soc->int_timer);
1316 	} else {
1317 		hif_deregister_exec_group(soc->hif_handle, "dp_intr");
1318 	}
1319 
1320 	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
1321 		soc->intr_ctx[i].tx_ring_mask = 0;
1322 		soc->intr_ctx[i].rx_ring_mask = 0;
1323 		soc->intr_ctx[i].rx_mon_ring_mask = 0;
1324 		soc->intr_ctx[i].rx_err_ring_mask = 0;
1325 		soc->intr_ctx[i].rx_wbm_rel_ring_mask = 0;
1326 		soc->intr_ctx[i].reo_status_ring_mask = 0;
1327 		soc->intr_ctx[i].rxdma2host_ring_mask = 0;
1328 		soc->intr_ctx[i].host2rxdma_ring_mask = 0;
1329 
1330 		qdf_lro_deinit(soc->intr_ctx[i].lro_ctx);
1331 	}
1332 }
1333 
1334 #define AVG_MAX_MPDUS_PER_TID 128
1335 #define AVG_TIDS_PER_CLIENT 2
1336 #define AVG_FLOWS_PER_TID 2
1337 #define AVG_MSDUS_PER_FLOW 128
1338 #define AVG_MSDUS_PER_MPDU 4
1339 
1340 /*
1341  * Allocate and setup link descriptor pool that will be used by HW for
1342  * various link and queue descriptors and managed by WBM
1343  */
1344 static int dp_hw_link_desc_pool_setup(struct dp_soc *soc)
1345 {
1346 	int link_desc_size = hal_get_link_desc_size(soc->hal_soc);
1347 	int link_desc_align = hal_get_link_desc_align(soc->hal_soc);
1348 	uint32_t max_clients = wlan_cfg_get_max_clients(soc->wlan_cfg_ctx);
1349 	uint32_t num_mpdus_per_link_desc =
1350 		hal_num_mpdus_per_link_desc(soc->hal_soc);
1351 	uint32_t num_msdus_per_link_desc =
1352 		hal_num_msdus_per_link_desc(soc->hal_soc);
1353 	uint32_t num_mpdu_links_per_queue_desc =
1354 		hal_num_mpdu_links_per_queue_desc(soc->hal_soc);
1355 	uint32_t max_alloc_size = wlan_cfg_max_alloc_size(soc->wlan_cfg_ctx);
1356 	uint32_t total_link_descs, total_mem_size;
1357 	uint32_t num_mpdu_link_descs, num_mpdu_queue_descs;
1358 	uint32_t num_tx_msdu_link_descs, num_rx_msdu_link_descs;
1359 	uint32_t num_link_desc_banks;
1360 	uint32_t last_bank_size = 0;
1361 	uint32_t entry_size, num_entries;
1362 	int i;
1363 	uint32_t desc_id = 0;
1364 
1365 	/* Only Tx queue descriptors are allocated from the common link descriptor
1366 	 * pool. Rx queue descriptors (REO queue extension descriptors) are not
1367 	 * included here, because they are expected to be allocated contiguously
1368 	 * with the REO queue descriptors
1369 	 */
1370 	num_mpdu_link_descs = (max_clients * AVG_TIDS_PER_CLIENT *
1371 		AVG_MAX_MPDUS_PER_TID) / num_mpdus_per_link_desc;
1372 
1373 	num_mpdu_queue_descs = num_mpdu_link_descs /
1374 		num_mpdu_links_per_queue_desc;
1375 
1376 	num_tx_msdu_link_descs = (max_clients * AVG_TIDS_PER_CLIENT *
1377 		AVG_FLOWS_PER_TID * AVG_MSDUS_PER_FLOW) /
1378 		num_msdus_per_link_desc;
1379 
1380 	num_rx_msdu_link_descs = (max_clients * AVG_TIDS_PER_CLIENT *
1381 		AVG_MAX_MPDUS_PER_TID * AVG_MSDUS_PER_MPDU) / 6;
1382 
1383 	num_entries = num_mpdu_link_descs + num_mpdu_queue_descs +
1384 		num_tx_msdu_link_descs + num_rx_msdu_link_descs;
1385 
1386 	/* Round up to power of 2 */
1387 	total_link_descs = 1;
1388 	while (total_link_descs < num_entries)
1389 		total_link_descs <<= 1;
1390 
1391 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
1392 		FL("total_link_descs: %u, link_desc_size: %d"),
1393 		total_link_descs, link_desc_size);
1394 	total_mem_size =  total_link_descs * link_desc_size;
1395 
1396 	total_mem_size += link_desc_align;
1397 
1398 	if (total_mem_size <= max_alloc_size) {
1399 		num_link_desc_banks = 0;
1400 		last_bank_size = total_mem_size;
1401 	} else {
1402 		num_link_desc_banks = (total_mem_size) /
1403 			(max_alloc_size - link_desc_align);
1404 		last_bank_size = total_mem_size %
1405 			(max_alloc_size - link_desc_align);
1406 	}
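	/*
	 * Example (illustrative values only): with a total_mem_size of ~9 MB,
	 * a max_alloc_size of 4 MB and a link_desc_align of 128 bytes, the
	 * pool is split into 9 MB / (4 MB - 128) = 2 full banks, with
	 * last_bank_size holding the remaining ~1 MB in a third, partially
	 * filled bank.
	 */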
1407 
1408 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
1409 		FL("total_mem_size: %d, num_link_desc_banks: %u"),
1410 		total_mem_size, num_link_desc_banks);
1411 
1412 	for (i = 0; i < num_link_desc_banks; i++) {
1413 		soc->link_desc_banks[i].base_vaddr_unaligned =
1414 			qdf_mem_alloc_consistent(soc->osdev, soc->osdev->dev,
1415 			max_alloc_size,
1416 			&(soc->link_desc_banks[i].base_paddr_unaligned));
1417 		soc->link_desc_banks[i].size = max_alloc_size;
1418 
1419 		soc->link_desc_banks[i].base_vaddr = (void *)((unsigned long)(
1420 			soc->link_desc_banks[i].base_vaddr_unaligned) +
1421 			((unsigned long)(
1422 			soc->link_desc_banks[i].base_vaddr_unaligned) %
1423 			link_desc_align));
1424 
1425 		soc->link_desc_banks[i].base_paddr = (unsigned long)(
1426 			soc->link_desc_banks[i].base_paddr_unaligned) +
1427 			((unsigned long)(soc->link_desc_banks[i].base_vaddr) -
1428 			(unsigned long)(
1429 			soc->link_desc_banks[i].base_vaddr_unaligned));
1430 
1431 		if (!soc->link_desc_banks[i].base_vaddr_unaligned) {
1432 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1433 				FL("Link descriptor memory alloc failed"));
1434 			goto fail;
1435 		}
1436 	}
1437 
1438 	if (last_bank_size) {
1439 		/* Allocate the last bank in case the total memory required is not an
1440 		 * exact multiple of max_alloc_size
1441 		 */
1442 		soc->link_desc_banks[i].base_vaddr_unaligned =
1443 			qdf_mem_alloc_consistent(soc->osdev, soc->osdev->dev,
1444 			last_bank_size,
1445 			&(soc->link_desc_banks[i].base_paddr_unaligned));
1446 		soc->link_desc_banks[i].size = last_bank_size;
1447 
1448 		soc->link_desc_banks[i].base_vaddr = (void *)((unsigned long)
1449 			(soc->link_desc_banks[i].base_vaddr_unaligned) +
1450 			((unsigned long)(
1451 			soc->link_desc_banks[i].base_vaddr_unaligned) %
1452 			link_desc_align));
1453 
1454 		soc->link_desc_banks[i].base_paddr =
1455 			(unsigned long)(
1456 			soc->link_desc_banks[i].base_paddr_unaligned) +
1457 			((unsigned long)(soc->link_desc_banks[i].base_vaddr) -
1458 			(unsigned long)(
1459 			soc->link_desc_banks[i].base_vaddr_unaligned));
1460 	}
1461 
1462 
1463 	/* Allocate and setup link descriptor idle list for HW internal use */
1464 	entry_size = hal_srng_get_entrysize(soc->hal_soc, WBM_IDLE_LINK);
1465 	total_mem_size = entry_size * total_link_descs;
1466 
1467 	if (total_mem_size <= max_alloc_size) {
1468 		void *desc;
1469 
1470 		if (dp_srng_setup(soc, &soc->wbm_idle_link_ring,
1471 			WBM_IDLE_LINK, 0, 0, total_link_descs)) {
1472 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1473 				FL("Link desc idle ring setup failed"));
1474 			goto fail;
1475 		}
1476 
1477 		hal_srng_access_start_unlocked(soc->hal_soc,
1478 			soc->wbm_idle_link_ring.hal_srng);
1479 
1480 		for (i = 0; i < MAX_LINK_DESC_BANKS &&
1481 			soc->link_desc_banks[i].base_paddr; i++) {
1482 			uint32_t num_entries = (soc->link_desc_banks[i].size -
1483 				((unsigned long)(
1484 				soc->link_desc_banks[i].base_vaddr) -
1485 				(unsigned long)(
1486 				soc->link_desc_banks[i].base_vaddr_unaligned)))
1487 				/ link_desc_size;
1488 			unsigned long paddr = (unsigned long)(
1489 				soc->link_desc_banks[i].base_paddr);
1490 
1491 			while (num_entries && (desc = hal_srng_src_get_next(
1492 				soc->hal_soc,
1493 				soc->wbm_idle_link_ring.hal_srng))) {
1494 				hal_set_link_desc_addr(desc,
1495 					LINK_DESC_COOKIE(desc_id, i), paddr);
1496 				num_entries--;
1497 				desc_id++;
1498 				paddr += link_desc_size;
1499 			}
1500 		}
1501 		hal_srng_access_end_unlocked(soc->hal_soc,
1502 			soc->wbm_idle_link_ring.hal_srng);
1503 	} else {
1504 		uint32_t num_scatter_bufs;
1505 		uint32_t num_entries_per_buf;
1506 		uint32_t rem_entries;
1507 		uint8_t *scatter_buf_ptr;
1508 		uint16_t scatter_buf_num;
1509 
1510 		soc->wbm_idle_scatter_buf_size =
1511 			hal_idle_list_scatter_buf_size(soc->hal_soc);
1512 		num_entries_per_buf = hal_idle_scatter_buf_num_entries(
1513 			soc->hal_soc, soc->wbm_idle_scatter_buf_size);
1514 		num_scatter_bufs = hal_idle_list_num_scatter_bufs(
1515 					soc->hal_soc, total_mem_size,
1516 					soc->wbm_idle_scatter_buf_size);
1517 
1518 		if (num_scatter_bufs > MAX_IDLE_SCATTER_BUFS) {
1519 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1520 					FL("scatter bufs size out of bounds"));
1521 			goto fail;
1522 		}
1523 
1524 		for (i = 0; i < num_scatter_bufs; i++) {
1525 			soc->wbm_idle_scatter_buf_base_vaddr[i] =
1526 				qdf_mem_alloc_consistent(soc->osdev,
1527 							soc->osdev->dev,
1528 				soc->wbm_idle_scatter_buf_size,
1529 				&(soc->wbm_idle_scatter_buf_base_paddr[i]));
1530 			if (soc->wbm_idle_scatter_buf_base_vaddr[i] == NULL) {
1531 				QDF_TRACE(QDF_MODULE_ID_DP,
1532 						QDF_TRACE_LEVEL_ERROR,
1533 					FL("Scatter list memory alloc failed"));
1534 				goto fail;
1535 			}
1536 		}
1537 
1538 		/* Populate idle list scatter buffers with link descriptor
1539 		 * pointers
1540 		 */
1541 		scatter_buf_num = 0;
1542 		scatter_buf_ptr = (uint8_t *)(
1543 			soc->wbm_idle_scatter_buf_base_vaddr[scatter_buf_num]);
1544 		rem_entries = num_entries_per_buf;
1545 
1546 		for (i = 0; i < MAX_LINK_DESC_BANKS &&
1547 			soc->link_desc_banks[i].base_paddr; i++) {
1548 			uint32_t num_link_descs =
1549 				(soc->link_desc_banks[i].size -
1550 				((unsigned long)(
1551 				soc->link_desc_banks[i].base_vaddr) -
1552 				(unsigned long)(
1553 				soc->link_desc_banks[i].base_vaddr_unaligned)))
1554 				/ link_desc_size;
1555 			unsigned long paddr = (unsigned long)(
1556 				soc->link_desc_banks[i].base_paddr);
1557 
1558 			while (num_link_descs) {
1559 				hal_set_link_desc_addr((void *)scatter_buf_ptr,
1560 					LINK_DESC_COOKIE(desc_id, i), paddr);
1561 				num_link_descs--;
1562 				desc_id++;
1563 				paddr += link_desc_size;
1564 				rem_entries--;
1565 				if (rem_entries) {
1566 					scatter_buf_ptr += entry_size;
1567 				} else {
1568 					rem_entries = num_entries_per_buf;
1569 					scatter_buf_num++;
1570 
1571 					if (scatter_buf_num >= num_scatter_bufs)
1572 						break;
1573 
1574 					scatter_buf_ptr = (uint8_t *)(
1575 						soc->wbm_idle_scatter_buf_base_vaddr[
1576 						scatter_buf_num]);
1577 				}
1578 			}
1579 		}
1580 		/* Setup link descriptor idle list in HW */
1581 		hal_setup_link_idle_list(soc->hal_soc,
1582 			soc->wbm_idle_scatter_buf_base_paddr,
1583 			soc->wbm_idle_scatter_buf_base_vaddr,
1584 			num_scatter_bufs, soc->wbm_idle_scatter_buf_size,
1585 			(uint32_t)(scatter_buf_ptr -
1586 			(uint8_t *)(soc->wbm_idle_scatter_buf_base_vaddr[
1587 			scatter_buf_num-1])), total_link_descs);
1588 	}
1589 	return 0;
1590 
1591 fail:
1592 	if (soc->wbm_idle_link_ring.hal_srng) {
1593 		dp_srng_cleanup(soc, &soc->wbm_idle_link_ring,
1594 			WBM_IDLE_LINK, 0);
1595 	}
1596 
1597 	for (i = 0; i < MAX_IDLE_SCATTER_BUFS; i++) {
1598 		if (soc->wbm_idle_scatter_buf_base_vaddr[i]) {
1599 			qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
1600 				soc->wbm_idle_scatter_buf_size,
1601 				soc->wbm_idle_scatter_buf_base_vaddr[i],
1602 				soc->wbm_idle_scatter_buf_base_paddr[i], 0);
1603 			soc->wbm_idle_scatter_buf_base_vaddr[i] = NULL;
1604 		}
1605 	}
1606 
1607 	for (i = 0; i < MAX_LINK_DESC_BANKS; i++) {
1608 		if (soc->link_desc_banks[i].base_vaddr_unaligned) {
1609 			qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
1610 				soc->link_desc_banks[i].size,
1611 				soc->link_desc_banks[i].base_vaddr_unaligned,
1612 				soc->link_desc_banks[i].base_paddr_unaligned,
1613 				0);
1614 			soc->link_desc_banks[i].base_vaddr_unaligned = NULL;
1615 		}
1616 	}
1617 	return QDF_STATUS_E_FAILURE;
1618 }
1619 
1620 /*
1621  * Free the link descriptor pool that was set up for HW
1622  */
1623 static void dp_hw_link_desc_pool_cleanup(struct dp_soc *soc)
1624 {
1625 	int i;
1626 
1627 	if (soc->wbm_idle_link_ring.hal_srng) {
1628 		dp_srng_cleanup(soc, &soc->wbm_idle_link_ring,
1629 			WBM_IDLE_LINK, 0);
1630 	}
1631 
1632 	for (i = 0; i < MAX_IDLE_SCATTER_BUFS; i++) {
1633 		if (soc->wbm_idle_scatter_buf_base_vaddr[i]) {
1634 			qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
1635 				soc->wbm_idle_scatter_buf_size,
1636 				soc->wbm_idle_scatter_buf_base_vaddr[i],
1637 				soc->wbm_idle_scatter_buf_base_paddr[i], 0);
1638 			soc->wbm_idle_scatter_buf_base_vaddr[i] = NULL;
1639 		}
1640 	}
1641 
1642 	for (i = 0; i < MAX_LINK_DESC_BANKS; i++) {
1643 		if (soc->link_desc_banks[i].base_vaddr_unaligned) {
1644 			qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
1645 				soc->link_desc_banks[i].size,
1646 				soc->link_desc_banks[i].base_vaddr_unaligned,
1647 				soc->link_desc_banks[i].base_paddr_unaligned,
1648 				0);
1649 			soc->link_desc_banks[i].base_vaddr_unaligned = NULL;
1650 		}
1651 	}
1652 }
1653 
1654 /* TODO: Following should be configurable */
1655 #define WBM_RELEASE_RING_SIZE 64
1656 #define TCL_CMD_RING_SIZE 32
1657 #define TCL_STATUS_RING_SIZE 32
1658 #if defined(QCA_WIFI_QCA6290)
1659 #define REO_DST_RING_SIZE 1024
1660 #else
1661 #define REO_DST_RING_SIZE 2048
1662 #endif
1663 #define REO_REINJECT_RING_SIZE 32
1664 #define RX_RELEASE_RING_SIZE 1024
1665 #define REO_EXCEPTION_RING_SIZE 128
1666 #define REO_CMD_RING_SIZE 64
1667 #define REO_STATUS_RING_SIZE 128
1668 #define RXDMA_BUF_RING_SIZE 1024
1669 #define RXDMA_REFILL_RING_SIZE 4096
1670 #define RXDMA_MONITOR_BUF_RING_SIZE 4096
1671 #define RXDMA_MONITOR_DST_RING_SIZE 2048
1672 #define RXDMA_MONITOR_STATUS_RING_SIZE 1024
1673 #define RXDMA_MONITOR_DESC_RING_SIZE 4096
1674 #define RXDMA_ERR_DST_RING_SIZE 1024
1675 
1676 /*
1677  * dp_wds_aging_timer_fn() - Timer callback function for WDS aging
1678  * @soc_hdl: Datapath SOC handle
1679  *
1680  * This is a timer function used to age out stale AST nodes from
1681  * the AST table
1682  */
1683 #ifdef FEATURE_WDS
1684 static void dp_wds_aging_timer_fn(void *soc_hdl)
1685 {
1686 	struct dp_soc *soc = (struct dp_soc *) soc_hdl;
1687 	struct dp_pdev *pdev;
1688 	struct dp_vdev *vdev;
1689 	struct dp_peer *peer;
1690 	struct dp_ast_entry *ase, *temp_ase;
1691 	int i;
1692 
1693 	qdf_spin_lock_bh(&soc->ast_lock);
1694 
1695 	for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) {
1696 		pdev = soc->pdev_list[i];
1697 		qdf_spin_lock_bh(&pdev->vdev_list_lock);
1698 		DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
1699 			DP_VDEV_ITERATE_PEER_LIST(vdev, peer) {
1700 				DP_PEER_ITERATE_ASE_LIST(peer, ase, temp_ase) {
1701 					/*
1702 					 * Do not expire static ast entries
1703 					 * and HM WDS entries
1704 					 */
1705 					if (ase->type != CDP_TXRX_AST_TYPE_WDS)
1706 						continue;
1707 
1708 					if (ase->is_active) {
1709 						ase->is_active = FALSE;
1710 						continue;
1711 					}
1712 
1713 					DP_STATS_INC(soc, ast.aged_out, 1);
1714 					dp_peer_del_ast(soc, ase);
1715 				}
1716 			}
1717 		}
1718 		qdf_spin_unlock_bh(&pdev->vdev_list_lock);
1719 	}
1720 
1721 	qdf_spin_unlock_bh(&soc->ast_lock);
1722 
1723 	if (qdf_atomic_read(&soc->cmn_init_done))
1724 		qdf_timer_mod(&soc->wds_aging_timer, DP_WDS_AGING_TIMER_DEFAULT_MS);
1725 }
1726 
1727 
1728 /*
1729  * dp_soc_wds_attach() - Setup WDS timer and AST table
1730  * @soc:		Datapath SOC handle
1731  *
1732  * Return: None
1733  */
1734 static void dp_soc_wds_attach(struct dp_soc *soc)
1735 {
1736 	qdf_timer_init(soc->osdev, &soc->wds_aging_timer,
1737 			dp_wds_aging_timer_fn, (void *)soc,
1738 			QDF_TIMER_TYPE_WAKE_APPS);
1739 
1740 	qdf_timer_mod(&soc->wds_aging_timer, DP_WDS_AGING_TIMER_DEFAULT_MS);
1741 }
1742 
1743 /*
1744  * dp_soc_wds_detach() - Detach WDS data structures and timers
1745  * @txrx_soc: DP SOC handle
1746  *
1747  * Return: None
1748  */
1749 static void dp_soc_wds_detach(struct dp_soc *soc)
1750 {
1751 	qdf_timer_stop(&soc->wds_aging_timer);
1752 	qdf_timer_free(&soc->wds_aging_timer);
1753 }
1754 #else
1755 static void dp_soc_wds_attach(struct dp_soc *soc)
1756 {
1757 }
1758 
1759 static void dp_soc_wds_detach(struct dp_soc *soc)
1760 {
1761 }
1762 #endif
1763 
1764 /*
1765  * dp_soc_reset_cpu_ring_map() - Reset cpu ring map
1766  * @soc: Datapath soc handle
1767  *
1768  * This API resets the default cpu ring map based on the NSS offload config
1769  */
1770 
1771 static void dp_soc_reset_cpu_ring_map(struct dp_soc *soc)
1772 {
1773 	uint8_t i;
1774 	int nss_config = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
1775 
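	/*
	 * Pick one row of dp_cpu_ring_map per offload configuration: each
	 * interrupt context i is assigned the TCL ring it should transmit
	 * on. Illustrative reading, assuming the usual bitmap encoding of
	 * nss_config: 1 = only the first radio offloaded, 2 = only the
	 * second, anything else (e.g. 3) = all radios offloaded, which is
	 * presumably why the rows differ in which Tx rings they avoid.
	 */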
1776 	for (i = 0; i < WLAN_CFG_INT_NUM_CONTEXTS; i++) {
1777 		if (nss_config == 1) {
1778 			/*
1779 			 * Setting Tx ring map for one nss offloaded radio
1780 			 */
1781 			soc->tx_ring_map[i] = dp_cpu_ring_map[DP_NSS_FIRST_RADIO_OFFLOADED_MAP][i];
1782 		} else if (nss_config == 2) {
1783 			/*
1784 			 * Setting Tx ring for two nss offloaded radios
1785 			 */
1786 			soc->tx_ring_map[i] = dp_cpu_ring_map[DP_NSS_SECOND_RADIO_OFFLOADED_MAP][i];
1787 		} else {
1788 			/*
1789 			 * Setting Tx ring map for all nss offloaded radios
1790 			 */
1791 			soc->tx_ring_map[i] = dp_cpu_ring_map[DP_NSS_ALL_RADIO_OFFLOADED_MAP][i];
1792 		}
1793 	}
1794 }
1795 
1796 /*
1797  * dp_soc_ring_if_nss_offloaded() - find if ring is offloaded to NSS
1798  * @soc: DP soc handle
1799  * @ring_type: ring type
1800  * @ring_num: ring number
1801  *
1802  * Return: non-zero if the ring is offloaded to NSS, 0 otherwise
1803  */
1804 static uint8_t dp_soc_ring_if_nss_offloaded(struct dp_soc *soc, enum hal_ring_type ring_type, int ring_num)
1805 {
1806 	uint8_t nss_config = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
1807 	uint8_t status = 0;
1808 
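	/*
	 * nss_config is used here as a per-ring-instance bitmap: bit
	 * ring_num set means that ring belongs to an NSS offloaded radio.
	 * Illustrative example (not from the source): with nss_config = 0x1,
	 * dp_soc_ring_if_nss_offloaded(soc, REO_DST, 0) returns non-zero
	 * while the same query for ring 1 returns 0, so callers treat only
	 * ring 0 as NSS owned.
	 */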
1809 	switch (ring_type) {
1810 	case WBM2SW_RELEASE:
1811 	case REO_DST:
1812 	case RXDMA_BUF:
1813 		status = ((nss_config) & (1 << ring_num));
1814 		break;
1815 	default:
1816 		break;
1817 	}
1818 
1819 	return status;
1820 }
1821 
1822 /*
1823  * dp_soc_reset_intr_mask() - reset interrupt mask
1824  * @dp_soc - DP Soc handle
1825  *
1826  * Return: Return void
1827  */
1828 static void dp_soc_reset_intr_mask(struct dp_soc *soc)
1829 {
1830 	uint8_t j;
1831 	int *grp_mask = NULL;
1832 	int group_number, mask, num_ring;
1833 
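	/*
	 * For every ring that dp_soc_ring_if_nss_offloaded() reports as NSS
	 * owned, locate the interrupt group that services it and clear that
	 * ring's bit from the group mask, so the host no longer takes
	 * interrupts for rings it does not process.
	 */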
1834 	/* number of tx ring */
1835 	num_ring = wlan_cfg_num_tcl_data_rings(soc->wlan_cfg_ctx);
1836 
1837 	/*
1838 	 * group mask for tx completion  ring.
1839 	 */
1840 	grp_mask =  &soc->wlan_cfg_ctx->int_tx_ring_mask[0];
1841 
1842 	/* loop and reset the mask for only offloaded ring */
1843 	for (j = 0; j < num_ring; j++) {
1844 		if (!dp_soc_ring_if_nss_offloaded(soc, WBM2SW_RELEASE, j)) {
1845 			continue;
1846 		}
1847 
1848 		/*
1849 		 * Group number corresponding to tx offloaded ring.
1850 		 */
1851 		group_number = dp_srng_find_ring_in_mask(j, grp_mask);
1852 		if (group_number < 0) {
1853 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
1854 					FL("ring not part of any group; ring_type: %d,ring_num %d"),
1855 					WBM2SW_RELEASE, j);
1856 			return;
1857 		}
1858 
1859 		/* reset the tx mask for offloaded ring */
1860 		mask = wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, group_number);
1861 		mask &= (~(1 << j));
1862 
1863 		/*
1864 		 * reset the interrupt mask for offloaded ring.
1865 		 */
1866 		wlan_cfg_set_tx_ring_mask(soc->wlan_cfg_ctx, group_number, mask);
1867 	}
1868 
1869 	/* number of rx rings */
1870 	num_ring = wlan_cfg_num_reo_dest_rings(soc->wlan_cfg_ctx);
1871 
1872 	/*
1873 	 * group mask for reo destination ring.
1874 	 */
1875 	grp_mask = &soc->wlan_cfg_ctx->int_rx_ring_mask[0];
1876 
1877 	/* loop and reset the mask for only offloaded ring */
1878 	for (j = 0; j < num_ring; j++) {
1879 		if (!dp_soc_ring_if_nss_offloaded(soc, REO_DST, j)) {
1880 			continue;
1881 		}
1882 
1883 		/*
1884 		 * Group number corresponding to rx offloaded ring.
1885 		 */
1886 		group_number = dp_srng_find_ring_in_mask(j, grp_mask);
1887 		if (group_number < 0) {
1888 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
1889 					FL("ring not part of any group; ring_type: %d,ring_num %d"),
1890 					REO_DST, j);
1891 			return;
1892 		}
1893 
1894 		/* reset the rx mask for offloaded ring */
1895 		mask =  wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, group_number);
1896 		mask &= (~(1 << j));
1897 
1898 		/*
1899 		 * clear the interrupt mask bit for the rx ring offloaded to NSS.
1900 		 */
1901 		wlan_cfg_set_rx_ring_mask(soc->wlan_cfg_ctx, group_number, mask);
1902 	}
1903 
1904 	/*
1905 	 * group mask for Rx buffer refill ring
1906 	 */
1907 	grp_mask = &soc->wlan_cfg_ctx->int_host2rxdma_ring_mask[0];
1908 
1909 	/* loop and reset the mask for only offloaded ring */
1910 	for (j = 0; j < MAX_PDEV_CNT; j++) {
1911 		if (!dp_soc_ring_if_nss_offloaded(soc, RXDMA_BUF, j)) {
1912 			continue;
1913 		}
1914 
1915 		/*
1916 		 * Group number corresponding to rx offloaded ring.
1917 		 */
1918 		group_number = dp_srng_find_ring_in_mask(j, grp_mask);
1919 		if (group_number < 0) {
1920 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
1921 					FL("ring not part of any group; ring_type: %d,ring_num %d"),
1922 					RXDMA_BUF, j);
1923 			return;
1924 		}
1925 
1926 		/* reset the host2rxdma mask for offloaded ring */
1927 		mask =  wlan_cfg_get_host2rxdma_ring_mask(soc->wlan_cfg_ctx,
1928 				group_number);
1929 		mask &= (~(1 << j));
1930 
1931 		/*
1932 		 * clear the host2rxdma interrupt mask bit for the offloaded radio.
1933 		 */
1934 		wlan_cfg_set_host2rxdma_ring_mask(soc->wlan_cfg_ctx,
1935 			group_number, mask);
1936 	}
1937 }
1938 
1939 #ifdef IPA_OFFLOAD
1940 /**
1941  * dp_reo_remap_config() - configure reo remap register value based
1942  *                         on nss configuration.
1943  *		based on the offload_radio value, the remap configuration
1944  *		below gets applied.
1945  *		0 - both Radios handled by host (remap rings 1, 2, 3 & 4)
1946  *		1 - 1st Radio handled by NSS (remap rings 2, 3 & 4)
1947  *		2 - 2nd Radio handled by NSS (remap rings 1, 2 & 4)
1948  *		3 - both Radios handled by NSS (remap not required)
1949  *		4 - IPA OFFLOAD enabled (remap rings 1,2 & 3)
1950  *
1951  * @remap1: output parameter indicates reo remap 1 register value
1952  * @remap2: output parameter indicates reo remap 2 register value
1953  * Return: bool type, true if remap is configured else false.
1954  */
1955 static bool dp_reo_remap_config(struct dp_soc *soc,
1956 				uint32_t *remap1,
1957 				uint32_t *remap2)
1958 {
1959 
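	/*
	 * Sketch of the register layout, inferred from how the values below
	 * are built rather than from HW documentation: each destination slot
	 * is a 3-bit REO2SW ring number and eight slots are packed per
	 * register (the extra << 8 is presumably the field offset that
	 * hal_reo_setup() expects). With IPA enabled the pattern cycles
	 * through rings 1-3 only, leaving the fourth REO2SW ring to the IPA
	 * path.
	 */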
1960 	*remap1 = ((0x1 << 0) | (0x2 << 3) | (0x3 << 6) | (0x1 << 9) |
1961 		(0x2 << 12) | (0x3 << 15) | (0x1 << 18) | (0x2 << 21)) << 8;
1962 
1963 	*remap2 = ((0x3 << 0) | (0x1 << 3) | (0x2 << 6) | (0x3 << 9) |
1964 		(0x1 << 12) | (0x2 << 15) | (0x3 << 18) | (0x1 << 21)) << 8;
1965 
1966 	return true;
1967 }
1968 #else
1969 static bool dp_reo_remap_config(struct dp_soc *soc,
1970 				uint32_t *remap1,
1971 				uint32_t *remap2)
1972 {
1973 	uint8_t offload_radio = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
1974 
1975 	switch (offload_radio) {
1976 	case 0:
1977 		*remap1 = ((0x1 << 0) | (0x2 << 3) | (0x3 << 6) |
1978 			(0x4 << 9) | (0x1 << 12) | (0x2 << 15) |
1979 			(0x3 << 18) | (0x4 << 21)) << 8;
1980 
1981 		*remap2 = ((0x1 << 0) | (0x2 << 3) | (0x3 << 6) |
1982 			(0x4 << 9) | (0x1 << 12) | (0x2 << 15) |
1983 			(0x3 << 18) | (0x4 << 21)) << 8;
1984 		break;
1985 
1986 	case 1:
1987 		*remap1 = ((0x2 << 0) | (0x3 << 3) | (0x4 << 6) |
1988 			(0x2 << 9) | (0x3 << 12) | (0x4 << 15) |
1989 			(0x2 << 18) | (0x3 << 21)) << 8;
1990 
1991 		*remap2 = ((0x4 << 0) | (0x2 << 3) | (0x3 << 6) |
1992 			(0x4 << 9) | (0x2 << 12) | (0x3 << 15) |
1993 			(0x4 << 18) | (0x2 << 21)) << 8;
1994 		break;
1995 
1996 	case 2:
1997 		*remap1 = ((0x1 << 0) | (0x3 << 3) | (0x4 << 6) |
1998 			(0x1 << 9) | (0x3 << 12) | (0x4 << 15) |
1999 			(0x1 << 18) | (0x3 << 21)) << 8;
2000 
2001 		*remap2 = ((0x4 << 0) | (0x1 << 3) | (0x3 << 6) |
2002 			(0x4 << 9) | (0x1 << 12) | (0x3 << 15) |
2003 			(0x4 << 18) | (0x1 << 21)) << 8;
2004 		break;
2005 
2006 	case 3:
2007 		/* return false if both radios are offloaded to NSS */
2008 		return false;
2009 	}
2010 	return true;
2011 }
2012 #endif
2013 
2014 /*
2015  * dp_reo_frag_dst_set() - configure reo register to set the
2016  *                        fragment destination ring
2017  * @soc : Datapath soc
2018  * @frag_dst_ring : output parameter to set fragment destination ring
2019  *
2020  * Based on the offload_radio value, the fragment destination ring is selected as below:
2021  * 0 - TCL
2022  * 1 - SW1
2023  * 2 - SW2
2024  * 3 - SW3
2025  * 4 - SW4
2026  * 5 - Release
2027  * 6 - FW
2028  * 7 - alternate select
2029  *
2030  * return: void
2031  */
2032 static void dp_reo_frag_dst_set(struct dp_soc *soc, uint8_t *frag_dst_ring)
2033 {
2034 	uint8_t offload_radio = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
2035 
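	/*
	 * Only two cases are handled: with no NSS offload (case 0) fragments
	 * are steered to the REO exception ring so the host defrag path can
	 * reassemble them; with both radios offloaded (case 3) the
	 * "alternate select" destination is used, which presumably lets the
	 * offload path decide. Any other value is logged as an invalid
	 * configuration.
	 */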
2036 	switch (offload_radio) {
2037 	case 0:
2038 		*frag_dst_ring = HAL_SRNG_REO_EXCEPTION;
2039 		break;
2040 	case 3:
2041 		*frag_dst_ring = HAL_SRNG_REO_ALTERNATE_SELECT;
2042 		break;
2043 	default:
2044 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2045 				FL("dp_reo_frag_dst_set invalid offload radio config"));
2046 		break;
2047 	}
2048 }
2049 
2050 /*
2051  * dp_soc_cmn_setup() - Common SoC level initialization
2052  * @soc:		Datapath SOC handle
2053  *
2054  * This is an internal function used to setup common SOC data structures,
2055  * to be called from PDEV attach after receiving HW mode capabilities from FW
2056  */
2057 static int dp_soc_cmn_setup(struct dp_soc *soc)
2058 {
2059 	int i;
2060 	struct hal_reo_params reo_params;
2061 	int tx_ring_size;
2062 	int tx_comp_ring_size;
2063 
2064 	if (qdf_atomic_read(&soc->cmn_init_done))
2065 		return 0;
2066 
2067 	if (dp_hw_link_desc_pool_setup(soc))
2068 		goto fail1;
2069 
2070 	/* Setup SRNG rings */
2071 	/* Common rings */
2072 	if (dp_srng_setup(soc, &soc->wbm_desc_rel_ring, SW2WBM_RELEASE, 0, 0,
2073 		WBM_RELEASE_RING_SIZE)) {
2074 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2075 			FL("dp_srng_setup failed for wbm_desc_rel_ring"));
2076 		goto fail1;
2077 	}
2078 
2079 
2080 	soc->num_tcl_data_rings = 0;
2081 	/* Tx data rings */
2082 	if (!wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
2083 		soc->num_tcl_data_rings =
2084 			wlan_cfg_num_tcl_data_rings(soc->wlan_cfg_ctx);
2085 		tx_comp_ring_size =
2086 			wlan_cfg_tx_comp_ring_size(soc->wlan_cfg_ctx);
2087 		tx_ring_size =
2088 			wlan_cfg_tx_ring_size(soc->wlan_cfg_ctx);
2089 		for (i = 0; i < soc->num_tcl_data_rings; i++) {
2090 			if (dp_srng_setup(soc, &soc->tcl_data_ring[i],
2091 				TCL_DATA, i, 0, tx_ring_size)) {
2092 				QDF_TRACE(QDF_MODULE_ID_DP,
2093 					QDF_TRACE_LEVEL_ERROR,
2094 					FL("dp_srng_setup failed for tcl_data_ring[%d]"), i);
2095 				goto fail1;
2096 			}
2097 			/*
2098 			 * TBD: Set IPA WBM ring size with ini IPA UC tx buffer
2099 			 * count
2100 			 */
2101 			if (dp_srng_setup(soc, &soc->tx_comp_ring[i],
2102 				WBM2SW_RELEASE, i, 0, tx_comp_ring_size)) {
2103 				QDF_TRACE(QDF_MODULE_ID_DP,
2104 					QDF_TRACE_LEVEL_ERROR,
2105 					FL("dp_srng_setup failed for tx_comp_ring[%d]"), i);
2106 				goto fail1;
2107 			}
2108 		}
2109 	} else {
2110 		/* This will be incremented during per pdev ring setup */
2111 		soc->num_tcl_data_rings = 0;
2112 	}
2113 
2114 	if (dp_tx_soc_attach(soc)) {
2115 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2116 				FL("dp_tx_soc_attach failed"));
2117 		goto fail1;
2118 	}
2119 
2120 	/* TCL command and status rings */
2121 	if (dp_srng_setup(soc, &soc->tcl_cmd_ring, TCL_CMD, 0, 0,
2122 		TCL_CMD_RING_SIZE)) {
2123 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2124 			FL("dp_srng_setup failed for tcl_cmd_ring"));
2125 		goto fail1;
2126 	}
2127 
2128 	if (dp_srng_setup(soc, &soc->tcl_status_ring, TCL_STATUS, 0, 0,
2129 		TCL_STATUS_RING_SIZE)) {
2130 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2131 			FL("dp_srng_setup failed for tcl_status_ring"));
2132 		goto fail1;
2133 	}
2134 
2135 
2136 	/* TBD: call dp_tx_init to setup Tx SW descriptors and MSDU extension
2137 	 * descriptors
2138 	 */
2139 
2140 	/* Rx data rings */
2141 	if (!wlan_cfg_per_pdev_rx_ring(soc->wlan_cfg_ctx)) {
2142 		soc->num_reo_dest_rings =
2143 			wlan_cfg_num_reo_dest_rings(soc->wlan_cfg_ctx);
2144 		QDF_TRACE(QDF_MODULE_ID_DP,
2145 			QDF_TRACE_LEVEL_ERROR,
2146 			FL("num_reo_dest_rings %d\n"), soc->num_reo_dest_rings);
2147 		for (i = 0; i < soc->num_reo_dest_rings; i++) {
2148 			if (dp_srng_setup(soc, &soc->reo_dest_ring[i], REO_DST,
2149 				i, 0, REO_DST_RING_SIZE)) {
2150 				QDF_TRACE(QDF_MODULE_ID_DP,
2151 					QDF_TRACE_LEVEL_ERROR,
2152 					FL("dp_srng_setup failed for reo_dest_ring[%d]"), i);
2153 				goto fail1;
2154 			}
2155 		}
2156 	} else {
2157 		/* This will be incremented during per pdev ring setup */
2158 		soc->num_reo_dest_rings = 0;
2159 	}
2160 
2161 	/* LMAC RxDMA to SW Rings configuration */
2162 	if (!wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx)) {
2163 		/* Only valid for MCL */
2164 		struct dp_pdev *pdev = soc->pdev_list[0];
2165 
2166 		for (i = 0; i < MAX_RX_MAC_RINGS; i++) {
2167 			if (dp_srng_setup(soc, &pdev->rxdma_err_dst_ring[i],
2168 				RXDMA_DST, 0, i, RXDMA_ERR_DST_RING_SIZE)) {
2169 				QDF_TRACE(QDF_MODULE_ID_DP,
2170 					QDF_TRACE_LEVEL_ERROR,
2171 					FL("dp_srng_setup failed for rxdma_err_dst_ring"));
2172 				goto fail1;
2173 			}
2174 		}
2175 	}
2176 	/* TBD: call dp_rx_init to setup Rx SW descriptors */
2177 
2178 	/* REO reinjection ring */
2179 	if (dp_srng_setup(soc, &soc->reo_reinject_ring, REO_REINJECT, 0, 0,
2180 		REO_REINJECT_RING_SIZE)) {
2181 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2182 			FL("dp_srng_setup failed for reo_reinject_ring"));
2183 		goto fail1;
2184 	}
2185 
2186 
2187 	/* Rx release ring */
2188 	if (dp_srng_setup(soc, &soc->rx_rel_ring, WBM2SW_RELEASE, 3, 0,
2189 		RX_RELEASE_RING_SIZE)) {
2190 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2191 			FL("dp_srng_setup failed for rx_rel_ring"));
2192 		goto fail1;
2193 	}
2194 
2195 
2196 	/* Rx exception ring */
2197 	if (dp_srng_setup(soc, &soc->reo_exception_ring, REO_EXCEPTION, 0,
2198 		MAX_REO_DEST_RINGS, REO_EXCEPTION_RING_SIZE)) {
2199 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2200 			FL("dp_srng_setup failed for reo_exception_ring"));
2201 		goto fail1;
2202 	}
2203 
2204 
2205 	/* REO command and status rings */
2206 	if (dp_srng_setup(soc, &soc->reo_cmd_ring, REO_CMD, 0, 0,
2207 		REO_CMD_RING_SIZE)) {
2208 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2209 			FL("dp_srng_setup failed for reo_cmd_ring"));
2210 		goto fail1;
2211 	}
2212 
2213 	hal_reo_init_cmd_ring(soc->hal_soc, soc->reo_cmd_ring.hal_srng);
2214 	TAILQ_INIT(&soc->rx.reo_cmd_list);
2215 	qdf_spinlock_create(&soc->rx.reo_cmd_lock);
2216 
2217 	if (dp_srng_setup(soc, &soc->reo_status_ring, REO_STATUS, 0, 0,
2218 		REO_STATUS_RING_SIZE)) {
2219 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2220 			FL("dp_srng_setup failed for reo_status_ring"));
2221 		goto fail1;
2222 	}
2223 
2224 	qdf_spinlock_create(&soc->ast_lock);
2225 	dp_soc_wds_attach(soc);
2226 
2227 	/* Reset the cpu ring map if radio is NSS offloaded */
2228 	if (wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx)) {
2229 		dp_soc_reset_cpu_ring_map(soc);
2230 		dp_soc_reset_intr_mask(soc);
2231 	}
2232 
2233 	/* Setup HW REO */
2234 	qdf_mem_zero(&reo_params, sizeof(reo_params));
2235 
2236 	if (wlan_cfg_is_rx_hash_enabled(soc->wlan_cfg_ctx)) {
2237 
2238 		/*
2239 		 * Reo ring remap is not required if both radios
2240 		 * are offloaded to NSS
2241 		 */
2242 		if (!dp_reo_remap_config(soc,
2243 					&reo_params.remap1,
2244 					&reo_params.remap2))
2245 			goto out;
2246 
2247 		reo_params.rx_hash_enabled = true;
2248 	}
2249 
2250 	/* setup the global rx defrag waitlist */
2251 	TAILQ_INIT(&soc->rx.defrag.waitlist);
2252 	soc->rx.defrag.timeout_ms =
2253 		wlan_cfg_get_rx_defrag_min_timeout(soc->wlan_cfg_ctx);
2254 	soc->rx.flags.defrag_timeout_check =
2255 		wlan_cfg_get_defrag_timeout_check(soc->wlan_cfg_ctx);
2256 
2257 out:
2258 	/*
2259 	 * set the fragment destination ring
2260 	 */
2261 	dp_reo_frag_dst_set(soc, &reo_params.frag_dst_ring);
2262 
2263 	hal_reo_setup(soc->hal_soc, &reo_params);
2264 
2265 	qdf_atomic_set(&soc->cmn_init_done, 1);
2266 	qdf_nbuf_queue_init(&soc->htt_stats.msg);
2267 	return 0;
2268 fail1:
2269 	/*
2270 	 * Cleanup will be done as part of soc_detach, which will
2271 	 * be called on pdev attach failure
2272 	 */
2273 	return QDF_STATUS_E_FAILURE;
2274 }
2275 
2276 static void dp_pdev_detach_wifi3(struct cdp_pdev *txrx_pdev, int force);
2277 
2278 static void dp_lro_hash_setup(struct dp_soc *soc)
2279 {
2280 	struct cdp_lro_hash_config lro_hash;
2281 
2282 	if (!wlan_cfg_is_lro_enabled(soc->wlan_cfg_ctx) &&
2283 		!wlan_cfg_is_rx_hash_enabled(soc->wlan_cfg_ctx)) {
2284 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2285 			 FL("LRO disabled RX hash disabled"));
2286 		return;
2287 	}
2288 
2289 	qdf_mem_zero(&lro_hash, sizeof(lro_hash));
2290 
2291 	if (wlan_cfg_is_lro_enabled(soc->wlan_cfg_ctx)) {
2292 		lro_hash.lro_enable = 1;
2293 		lro_hash.tcp_flag = QDF_TCPHDR_ACK;
2294 		lro_hash.tcp_flag_mask = QDF_TCPHDR_FIN | QDF_TCPHDR_SYN |
2295 			 QDF_TCPHDR_RST | QDF_TCPHDR_ACK | QDF_TCPHDR_URG |
2296 			 QDF_TCPHDR_ECE | QDF_TCPHDR_CWR;
2297 	}
2298 
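	/*
	 * The Toeplitz seeds below are freshly randomised each time this
	 * setup runs (it is invoked from vdev attach in this file) and are
	 * handed to the target through the ol_ops->lro_hash_config callback,
	 * so RX hash/LRO flow steering does not rely on a fixed key.
	 */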
2299 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW, FL("lro/rx_hash config enabled"));
2300 	qdf_get_random_bytes(lro_hash.toeplitz_hash_ipv4,
2301 		 (sizeof(lro_hash.toeplitz_hash_ipv4[0]) *
2302 		 LRO_IPV4_SEED_ARR_SZ));
2303 	qdf_get_random_bytes(lro_hash.toeplitz_hash_ipv6,
2304 		 (sizeof(lro_hash.toeplitz_hash_ipv6[0]) *
2305 		 LRO_IPV6_SEED_ARR_SZ));
2306 
2307 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW,
2308 		 "lro_hash: lro_enable: 0x%x tcp_flag 0x%x tcp_flag_mask 0x%x",
2309 		 lro_hash.lro_enable, lro_hash.tcp_flag,
2310 		 lro_hash.tcp_flag_mask);
2311 
2312 	qdf_trace_hex_dump(QDF_MODULE_ID_DP,
2313 		 QDF_TRACE_LEVEL_ERROR,
2314 		 (void *)lro_hash.toeplitz_hash_ipv4,
2315 		 (sizeof(lro_hash.toeplitz_hash_ipv4[0]) *
2316 		 LRO_IPV4_SEED_ARR_SZ));
2317 
2318 	qdf_trace_hex_dump(QDF_MODULE_ID_DP,
2319 		 QDF_TRACE_LEVEL_ERROR,
2320 		 (void *)lro_hash.toeplitz_hash_ipv6,
2321 		 (sizeof(lro_hash.toeplitz_hash_ipv6[0]) *
2322 		 LRO_IPV6_SEED_ARR_SZ));
2323 
2324 	qdf_assert(soc->cdp_soc.ol_ops->lro_hash_config);
2325 
2326 	if (soc->cdp_soc.ol_ops->lro_hash_config)
2327 		(void)soc->cdp_soc.ol_ops->lro_hash_config
2328 			(soc->ctrl_psoc, &lro_hash);
2329 }
2330 
2331 /*
2332 * dp_rxdma_ring_setup() - configure the RX DMA rings
2333 * @soc: data path SoC handle
2334 * @pdev: Physical device handle
2335 *
2336 * Return: 0 - success, > 0 - failure
2337 */
2338 #ifdef QCA_HOST2FW_RXBUF_RING
2339 static int dp_rxdma_ring_setup(struct dp_soc *soc,
2340 	 struct dp_pdev *pdev)
2341 {
2342 	int max_mac_rings =
2343 		 wlan_cfg_get_num_mac_rings
2344 			(pdev->wlan_cfg_ctx);
2345 	int i;
2346 
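	/*
	 * One host2FW RX buffer ring is set up per MAC ring reported by the
	 * pdev cfg (typically two on a DBS capable target), each
	 * RXDMA_BUF_RING_SIZE entries deep. The FW side of these rings is
	 * configured later in dp_rxdma_ring_config().
	 */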
2347 	for (i = 0; i < max_mac_rings; i++) {
2348 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
2349 			 "%s: pdev_id %d mac_id %d\n",
2350 			 __func__, pdev->pdev_id, i);
2351 		if (dp_srng_setup(soc, &pdev->rx_mac_buf_ring[i],
2352 			 RXDMA_BUF, 1, i, RXDMA_BUF_RING_SIZE)) {
2353 			QDF_TRACE(QDF_MODULE_ID_DP,
2354 				 QDF_TRACE_LEVEL_ERROR,
2355 				 FL("failed rx mac ring setup"));
2356 			return QDF_STATUS_E_FAILURE;
2357 		}
2358 	}
2359 	return QDF_STATUS_SUCCESS;
2360 }
2361 #else
2362 static int dp_rxdma_ring_setup(struct dp_soc *soc,
2363 	 struct dp_pdev *pdev)
2364 {
2365 	return QDF_STATUS_SUCCESS;
2366 }
2367 #endif
2368 
2369 /**
2370  * dp_dscp_tid_map_setup(): Initialize the dscp-tid maps
2371  * @pdev - DP_PDEV handle
2372  *
2373  * Return: void
2374  */
2375 static inline void
2376 dp_dscp_tid_map_setup(struct dp_pdev *pdev)
2377 {
2378 	uint8_t map_id;
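	/*
	 * Every per-pdev map starts out as a copy of default_dscp_tid_map;
	 * only the first HAL_MAX_HW_DSCP_TID_MAPS of them are programmed
	 * into HW below, and a vdev later picks one of these maps through
	 * its dscp_tid_map_id.
	 */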
2379 	for (map_id = 0; map_id < DP_MAX_TID_MAPS; map_id++) {
2380 		qdf_mem_copy(pdev->dscp_tid_map[map_id], default_dscp_tid_map,
2381 				sizeof(default_dscp_tid_map));
2382 	}
2383 	for (map_id = 0; map_id < HAL_MAX_HW_DSCP_TID_MAPS; map_id++) {
2384 		hal_tx_set_dscp_tid_map(pdev->soc->hal_soc,
2385 				pdev->dscp_tid_map[map_id],
2386 				map_id);
2387 	}
2388 }
2389 
2390 #ifdef QCA_SUPPORT_SON
2391 /**
2392  * dp_mark_peer_inact(): Update peer inactivity status
2393  * @peer_handle - datapath peer handle
2394  * @inactive - inactivity status to set
2395  * Return: void
2396  */
2397 void dp_mark_peer_inact(void *peer_handle, bool inactive)
2398 {
2399 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
2400 	struct dp_pdev *pdev;
2401 	struct dp_soc *soc;
2402 	bool inactive_old;
2403 
2404 	if (!peer)
2405 		return;
2406 
2407 	pdev = peer->vdev->pdev;
2408 	soc = pdev->soc;
2409 
2410 	inactive_old = peer->peer_bs_inact_flag == 1;
2411 	if (!inactive)
2412 		peer->peer_bs_inact = soc->pdev_bs_inact_reload;
2413 	peer->peer_bs_inact_flag = inactive ? 1 : 0;
2414 
2415 	if (inactive_old != inactive) {
2416 		/**
2417 		 * Note: a node lookup can happen in RX datapath context
2418 		 * when a node changes from inactive to active (at most once
2419 		 * per inactivity timeout threshold)
2420 		 */
2421 		if (soc->cdp_soc.ol_ops->record_act_change) {
2422 			soc->cdp_soc.ol_ops->record_act_change(pdev->osif_pdev,
2423 					peer->mac_addr.raw, !inactive);
2424 		}
2425 	}
2426 }
2427 
2428 /**
2429  * dp_txrx_peer_find_inact_timeout_handler(): Inactivity timeout function
2430  *
2431  * Periodically checks the inactivity status
2432  */
2433 static os_timer_func(dp_txrx_peer_find_inact_timeout_handler)
2434 {
2435 	struct dp_pdev *pdev;
2436 	struct dp_vdev *vdev;
2437 	struct dp_peer *peer;
2438 	struct dp_soc *soc;
2439 	int i;
2440 
2441 	OS_GET_TIMER_ARG(soc, struct dp_soc *);
2442 
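	/*
	 * peer_bs_inact works as a countdown: it is reloaded to
	 * pdev_bs_inact_reload whenever the peer is seen active (via
	 * dp_mark_peer_inact(peer, false)) and decremented once per timer
	 * period below; reaching zero marks the peer inactive. Illustrative
	 * example, assuming a 1 s timer interval and a reload value of 10:
	 * a peer with no traffic is declared inactive after roughly 10 s.
	 */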
2443 	qdf_spin_lock(&soc->peer_ref_mutex);
2444 
2445 	for (i = 0; i < soc->pdev_count; i++) {
2446 	pdev = soc->pdev_list[i];
2447 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
2448 	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
2449 		if (vdev->opmode != wlan_op_mode_ap)
2450 			continue;
2451 
2452 		TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
2453 			if (!peer->authorize) {
2454 				/**
2455 				 * Inactivity check only interested in
2456 				 * connected node
2457 				 */
2458 				continue;
2459 			}
2460 			if (peer->peer_bs_inact > soc->pdev_bs_inact_reload) {
2461 				/**
2462 				 * This check ensures we do not wait extra long
2463 				 * due to the potential race condition
2464 				 */
2465 				peer->peer_bs_inact = soc->pdev_bs_inact_reload;
2466 			}
2467 			if (peer->peer_bs_inact > 0) {
2468 				/* Do not let it wrap around */
2469 				peer->peer_bs_inact--;
2470 			}
2471 			if (peer->peer_bs_inact == 0)
2472 				dp_mark_peer_inact(peer, true);
2473 		}
2474 	}
2475 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
2476 	}
2477 
2478 	qdf_spin_unlock(&soc->peer_ref_mutex);
2479 	qdf_timer_mod(&soc->pdev_bs_inact_timer,
2480 		      soc->pdev_bs_inact_interval * 1000);
2481 }
2482 
2483 
2484 /**
2485  * dp_free_inact_timer(): free inact timer
2486  * @soc - DP SOC handle
2487  *
2488  * Return: void
2489  */
2490 void dp_free_inact_timer(struct dp_soc *soc)
2491 {
2492 	qdf_timer_free(&soc->pdev_bs_inact_timer);
2493 }
2494 #else
2495 
2496 void dp_mark_peer_inact(void *peer, bool inactive)
2497 {
2498 	return;
2499 }
2500 
2501 void dp_free_inact_timer(struct dp_soc *soc)
2502 {
2503 	return;
2504 }
2505 
2506 #endif
2507 
2508 #ifdef IPA_OFFLOAD
2509 /**
2510  * dp_setup_ipa_rx_refill_buf_ring - Setup second Rx refill buffer ring
2511  * @soc: data path instance
2512  * @pdev: core txrx pdev context
2513  *
2514  * Return: QDF_STATUS_SUCCESS: success
2515  *         QDF_STATUS_E_FAILURE: Error return
2516  */
2517 static int dp_setup_ipa_rx_refill_buf_ring(struct dp_soc *soc,
2518 					   struct dp_pdev *pdev)
2519 {
2520 	/* Setup second Rx refill buffer ring */
2521 	if (dp_srng_setup(soc, &pdev->rx_refill_buf_ring2, RXDMA_BUF,
2522 			  IPA_RX_REFILL_BUF_RING_IDX,
2523 			  pdev->pdev_id, RXDMA_REFILL_RING_SIZE)) {
2524 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2525 			FL("dp_srng_setup failed second rx refill ring"));
2526 		return QDF_STATUS_E_FAILURE;
2527 	}
2528 	return QDF_STATUS_SUCCESS;
2529 }
2530 
2531 /**
2532  * dp_cleanup_ipa_rx_refill_buf_ring - Cleanup second Rx refill buffer ring
2533  * @soc: data path instance
2534  * @pdev: core txrx pdev context
2535  *
2536  * Return: void
2537  */
2538 static void dp_cleanup_ipa_rx_refill_buf_ring(struct dp_soc *soc,
2539 					      struct dp_pdev *pdev)
2540 {
2541 	dp_srng_cleanup(soc, &pdev->rx_refill_buf_ring2, RXDMA_BUF,
2542 			IPA_RX_REFILL_BUF_RING_IDX);
2543 }
2544 
2545 #else
2546 
2547 static int dp_setup_ipa_rx_refill_buf_ring(struct dp_soc *soc,
2548 					   struct dp_pdev *pdev)
2549 {
2550 	return QDF_STATUS_SUCCESS;
2551 }
2552 
2553 static void dp_cleanup_ipa_rx_refill_buf_ring(struct dp_soc *soc,
2554 					      struct dp_pdev *pdev)
2555 {
2556 }
2557 
2558 #endif
2559 
2560 /*
2561 * dp_pdev_attach_wifi3() - attach txrx pdev
2562 * @txrx_soc: Datapath SOC handle
2563 * @ctrl_pdev: Opaque PDEV object
2564 * @htc_handle: HTC handle for host-target interface
2565 * @qdf_osdev: QDF OS device
2566 * @pdev_id: PDEV ID
2567 *
2568 * Return: DP PDEV handle on success, NULL on failure
2569 */
2570 static struct cdp_pdev *dp_pdev_attach_wifi3(struct cdp_soc_t *txrx_soc,
2571 	struct cdp_cfg *ctrl_pdev,
2572 	HTC_HANDLE htc_handle, qdf_device_t qdf_osdev, uint8_t pdev_id)
2573 {
2574 	int tx_ring_size;
2575 	int tx_comp_ring_size;
2576 
2577 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
2578 	struct dp_pdev *pdev = qdf_mem_malloc(sizeof(*pdev));
2579 	int mac_id;
2580 
2581 	if (!pdev) {
2582 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2583 			FL("DP PDEV memory allocation failed"));
2584 		goto fail0;
2585 	}
2586 
2587 	pdev->wlan_cfg_ctx = wlan_cfg_pdev_attach();
2588 
2589 	if (!pdev->wlan_cfg_ctx) {
2590 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2591 			FL("pdev cfg_attach failed"));
2592 
2593 		qdf_mem_free(pdev);
2594 		goto fail0;
2595 	}
2596 
2597 	/*
2598 	 * set nss pdev config based on soc config
2599 	 */
2600 	wlan_cfg_set_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx,
2601 			(wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx) & (1 << pdev_id)));
2602 
2603 	pdev->soc = soc;
2604 	pdev->osif_pdev = ctrl_pdev;
2605 	pdev->pdev_id = pdev_id;
2606 	soc->pdev_list[pdev_id] = pdev;
2607 	soc->pdev_count++;
2608 
2609 	TAILQ_INIT(&pdev->vdev_list);
2610 	qdf_spinlock_create(&pdev->vdev_list_lock);
2611 	pdev->vdev_count = 0;
2612 
2613 	qdf_spinlock_create(&pdev->tx_mutex);
2614 	qdf_spinlock_create(&pdev->neighbour_peer_mutex);
2615 	TAILQ_INIT(&pdev->neighbour_peers_list);
2616 
2617 	if (dp_soc_cmn_setup(soc)) {
2618 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2619 			FL("dp_soc_cmn_setup failed"));
2620 		goto fail1;
2621 	}
2622 
2623 	/* Setup per PDEV TCL rings if configured */
2624 	if (wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
2625 		tx_ring_size =
2626 			wlan_cfg_tx_ring_size(soc->wlan_cfg_ctx);
2627 		tx_comp_ring_size =
2628 			wlan_cfg_tx_comp_ring_size(soc->wlan_cfg_ctx);
2629 
2630 		if (dp_srng_setup(soc, &soc->tcl_data_ring[pdev_id], TCL_DATA,
2631 			pdev_id, pdev_id, tx_ring_size)) {
2632 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2633 				FL("dp_srng_setup failed for tcl_data_ring"));
2634 			goto fail1;
2635 		}
2636 		if (dp_srng_setup(soc, &soc->tx_comp_ring[pdev_id],
2637 			WBM2SW_RELEASE, pdev_id, pdev_id, tx_comp_ring_size)) {
2638 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2639 				FL("dp_srng_setup failed for tx_comp_ring"));
2640 			goto fail1;
2641 		}
2642 		soc->num_tcl_data_rings++;
2643 	}
2644 
2645 	/* Tx specific init */
2646 	if (dp_tx_pdev_attach(pdev)) {
2647 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2648 			FL("dp_tx_pdev_attach failed"));
2649 		goto fail1;
2650 	}
2651 
2652 	/* Setup per PDEV REO rings if configured */
2653 	if (wlan_cfg_per_pdev_rx_ring(soc->wlan_cfg_ctx)) {
2654 		if (dp_srng_setup(soc, &soc->reo_dest_ring[pdev_id], REO_DST,
2655 			pdev_id, pdev_id, REO_DST_RING_SIZE)) {
2656 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2657 				FL("dp_srng_setup failed for reo_dest_ringn"));
2658 			goto fail1;
2659 		}
2660 		soc->num_reo_dest_rings++;
2661 
2662 	}
2663 	if (dp_srng_setup(soc, &pdev->rx_refill_buf_ring, RXDMA_BUF, 0, pdev_id,
2664 		RXDMA_REFILL_RING_SIZE)) {
2665 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2666 			 FL("dp_srng_setup failed rx refill ring"));
2667 		goto fail1;
2668 	}
2669 
2670 	if (dp_rxdma_ring_setup(soc, pdev)) {
2671 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2672 			 FL("RXDMA ring config failed"));
2673 		goto fail1;
2674 	}
2675 
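	/*
	 * Monitor mode needs its own buf/dst/status/desc rings for every
	 * LMAC instance owned by this pdev; dp_get_mac_id_for_pdev()
	 * translates the local mac_id into the global MAC index the FW
	 * expects (on MCL a single pdev can own both MACs).
	 */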
2676 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
2677 		int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, pdev_id);
2678 
2679 		if (dp_srng_setup(soc, &pdev->rxdma_mon_buf_ring[mac_id],
2680 			RXDMA_MONITOR_BUF, 0, mac_for_pdev,
2681 			RXDMA_MONITOR_BUF_RING_SIZE)) {
2682 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2683 			  FL("dp_srng_setup failed for rxdma_mon_buf_ring"));
2684 			goto fail1;
2685 		}
2686 
2687 		if (dp_srng_setup(soc, &pdev->rxdma_mon_dst_ring[mac_id],
2688 			RXDMA_MONITOR_DST, 0, mac_for_pdev,
2689 			RXDMA_MONITOR_DST_RING_SIZE)) {
2690 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2691 			  FL("dp_srng_setup failed for rxdma_mon_dst_ring"));
2692 			goto fail1;
2693 		}
2694 
2695 
2696 		if (dp_srng_setup(soc, &pdev->rxdma_mon_status_ring[mac_id],
2697 			RXDMA_MONITOR_STATUS, 0, mac_for_pdev,
2698 			RXDMA_MONITOR_STATUS_RING_SIZE)) {
2699 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2700 			 FL("dp_srng_setup failed for rxdma_mon_status_ring"));
2701 			goto fail1;
2702 		}
2703 
2704 		if (dp_srng_setup(soc, &pdev->rxdma_mon_desc_ring[mac_id],
2705 			RXDMA_MONITOR_DESC, 0, mac_for_pdev,
2706 			RXDMA_MONITOR_DESC_RING_SIZE)) {
2707 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2708 			  "dp_srng_setup failed for rxdma_mon_desc_ring\n");
2709 			goto fail1;
2710 		}
2711 	}
2712 
2713 	if (wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx)) {
2714 		if (dp_srng_setup(soc, &pdev->rxdma_err_dst_ring[0], RXDMA_DST,
2715 				  0, pdev_id, RXDMA_ERR_DST_RING_SIZE)) {
2716 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2717 				FL("dp_srng_setup failed for rxdma_err_dst_ring"));
2718 			goto fail1;
2719 		}
2720 	}
2721 
2722 	if (dp_setup_ipa_rx_refill_buf_ring(soc, pdev))
2723 		goto fail1;
2724 
2725 	if (dp_ipa_ring_resource_setup(soc, pdev))
2726 		goto fail1;
2727 
2728 	if (dp_ipa_uc_attach(soc, pdev) != QDF_STATUS_SUCCESS) {
2729 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2730 			FL("dp_ipa_uc_attach failed"));
2731 		goto fail1;
2732 	}
2733 
2734 	/* Rx specific init */
2735 	if (dp_rx_pdev_attach(pdev)) {
2736 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2737 			FL("dp_rx_pdev_attach failed"));
2738 		goto fail0;
2739 	}
2740 	DP_STATS_INIT(pdev);
2741 
2742 	/* Monitor filter init */
2743 	pdev->mon_filter_mode = MON_FILTER_ALL;
2744 	pdev->fp_mgmt_filter = FILTER_MGMT_ALL;
2745 	pdev->fp_ctrl_filter = FILTER_CTRL_ALL;
2746 	pdev->fp_data_filter = FILTER_DATA_ALL;
2747 	pdev->mo_mgmt_filter = FILTER_MGMT_ALL;
2748 	pdev->mo_ctrl_filter = FILTER_CTRL_ALL;
2749 	pdev->mo_data_filter = FILTER_DATA_ALL;
2750 
2751 #ifndef CONFIG_WIN
2752 	/* MCL */
2753 	dp_local_peer_id_pool_init(pdev);
2754 #endif
2755 	dp_dscp_tid_map_setup(pdev);
2756 
2757 	/* Rx monitor mode specific init */
2758 	if (dp_rx_pdev_mon_attach(pdev)) {
2759 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
2760 				"dp_rx_pdev_attach failed\n");
2761 		goto fail1;
2762 	}
2763 
2764 	if (dp_wdi_event_attach(pdev)) {
2765 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
2766 				"dp_wdi_evet_attach failed\n");
2767 		goto fail1;
2768 	}
2769 
2770 	/* set the reo destination during initialization */
2771 	pdev->reo_dest = pdev->pdev_id + 1;
2772 
2773 	/*
2774 	 * initialize ppdu tlv list
2775 	 */
2776 	TAILQ_INIT(&pdev->ppdu_info_list);
2777 	pdev->tlv_count = 0;
2778 	pdev->list_depth = 0;
2779 
2780 	return (struct cdp_pdev *)pdev;
2781 
2782 fail1:
2783 	dp_pdev_detach_wifi3((struct cdp_pdev *)pdev, 0);
2784 
2785 fail0:
2786 	return NULL;
2787 }
2788 
2789 /*
2790 * dp_rxdma_ring_cleanup() - clean up the RX DMA rings
2791 * @soc: data path SoC handle
2792 * @pdev: Physical device handle
2793 *
2794 * Return: void
2795 */
2796 #ifdef QCA_HOST2FW_RXBUF_RING
2797 static void dp_rxdma_ring_cleanup(struct dp_soc *soc,
2798 	 struct dp_pdev *pdev)
2799 {
2800 	int max_mac_rings =
2801 		 wlan_cfg_get_num_mac_rings(pdev->wlan_cfg_ctx);
2802 	int i;
2803 
2804 	max_mac_rings = max_mac_rings < MAX_RX_MAC_RINGS ?
2805 				max_mac_rings : MAX_RX_MAC_RINGS;
2806 	for (i = 0; i < MAX_RX_MAC_RINGS; i++)
2807 		dp_srng_cleanup(soc, &pdev->rx_mac_buf_ring[i],
2808 			 RXDMA_BUF, 1);
2809 
2810 	qdf_timer_free(&soc->mon_reap_timer);
2811 }
2812 #else
2813 static void dp_rxdma_ring_cleanup(struct dp_soc *soc,
2814 	 struct dp_pdev *pdev)
2815 {
2816 }
2817 #endif
2818 
2819 /*
2820  * dp_neighbour_peers_detach() - Detach neighbour peers(nac clients)
2821  * @pdev: device object
2822  *
2823  * Return: void
2824  */
2825 static void dp_neighbour_peers_detach(struct dp_pdev *pdev)
2826 {
2827 	struct dp_neighbour_peer *peer = NULL;
2828 	struct dp_neighbour_peer *temp_peer = NULL;
2829 
2830 	TAILQ_FOREACH_SAFE(peer, &pdev->neighbour_peers_list,
2831 			neighbour_peer_list_elem, temp_peer) {
2832 		/* delete this peer from the list */
2833 		TAILQ_REMOVE(&pdev->neighbour_peers_list,
2834 				peer, neighbour_peer_list_elem);
2835 		qdf_mem_free(peer);
2836 	}
2837 
2838 	qdf_spinlock_destroy(&pdev->neighbour_peer_mutex);
2839 }
2840 
2841 /**
2842 * dp_htt_ppdu_stats_detach() - detach stats resources
2843 * @pdev: Datapath PDEV handle
2844 *
2845 * Return: void
2846 */
2847 static void dp_htt_ppdu_stats_detach(struct dp_pdev *pdev)
2848 {
2849 	struct ppdu_info *ppdu_info, *ppdu_info_next;
2850 
2851 	TAILQ_FOREACH_SAFE(ppdu_info, &pdev->ppdu_info_list,
2852 			ppdu_info_list_elem, ppdu_info_next) {
2853 		if (!ppdu_info)
2854 			break;
2855 		qdf_assert_always(ppdu_info->nbuf);
2856 		qdf_nbuf_free(ppdu_info->nbuf);
2857 		qdf_mem_free(ppdu_info);
2858 	}
2859 }
2860 
2861 /*
2862 * dp_pdev_detach_wifi3() - detach txrx pdev
2863 * @txrx_pdev: Datapath PDEV handle
2864 * @force: Force detach
2865 *
2866 */
2867 static void dp_pdev_detach_wifi3(struct cdp_pdev *txrx_pdev, int force)
2868 {
2869 	struct dp_pdev *pdev = (struct dp_pdev *)txrx_pdev;
2870 	struct dp_soc *soc = pdev->soc;
2871 	qdf_nbuf_t curr_nbuf, next_nbuf;
2872 	int mac_id;
2873 
2874 	dp_wdi_event_detach(pdev);
2875 
2876 	dp_tx_pdev_detach(pdev);
2877 
2878 	if (wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
2879 		dp_srng_cleanup(soc, &soc->tcl_data_ring[pdev->pdev_id],
2880 			TCL_DATA, pdev->pdev_id);
2881 		dp_srng_cleanup(soc, &soc->tx_comp_ring[pdev->pdev_id],
2882 			WBM2SW_RELEASE, pdev->pdev_id);
2883 	}
2884 
2885 	dp_pktlogmod_exit(pdev);
2886 
2887 	dp_rx_pdev_detach(pdev);
2888 
2889 	dp_rx_pdev_mon_detach(pdev);
2890 
2891 	dp_neighbour_peers_detach(pdev);
2892 	qdf_spinlock_destroy(&pdev->tx_mutex);
2893 	qdf_spinlock_destroy(&pdev->vdev_list_lock);
2894 
2895 	dp_ipa_uc_detach(soc, pdev);
2896 
2897 	dp_cleanup_ipa_rx_refill_buf_ring(soc, pdev);
2898 
2899 	/* Cleanup per PDEV REO rings if configured */
2900 	if (wlan_cfg_per_pdev_rx_ring(soc->wlan_cfg_ctx)) {
2901 		dp_srng_cleanup(soc, &soc->reo_dest_ring[pdev->pdev_id],
2902 			REO_DST, pdev->pdev_id);
2903 	}
2904 
2905 	dp_srng_cleanup(soc, &pdev->rx_refill_buf_ring, RXDMA_BUF, 0);
2906 
2907 	dp_rxdma_ring_cleanup(soc, pdev);
2908 
2909 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
2910 		dp_srng_cleanup(soc, &pdev->rxdma_mon_buf_ring[mac_id],
2911 			RXDMA_MONITOR_BUF, 0);
2912 
2913 		dp_srng_cleanup(soc, &pdev->rxdma_mon_dst_ring[mac_id],
2914 			RXDMA_MONITOR_DST, 0);
2915 
2916 		dp_srng_cleanup(soc, &pdev->rxdma_mon_status_ring[mac_id],
2917 			RXDMA_MONITOR_STATUS, 0);
2918 
2919 		dp_srng_cleanup(soc, &pdev->rxdma_mon_desc_ring[mac_id],
2920 			RXDMA_MONITOR_DESC, 0);
2921 
2922 		dp_srng_cleanup(soc, &pdev->rxdma_err_dst_ring[mac_id],
2923 			RXDMA_DST, 0);
2924 	}
2925 
2926 	curr_nbuf = pdev->invalid_peer_head_msdu;
2927 	while (curr_nbuf) {
2928 		next_nbuf = qdf_nbuf_next(curr_nbuf);
2929 		qdf_nbuf_free(curr_nbuf);
2930 		curr_nbuf = next_nbuf;
2931 	}
2932 
2933 	dp_htt_ppdu_stats_detach(pdev);
2934 
2935 	soc->pdev_list[pdev->pdev_id] = NULL;
2936 	soc->pdev_count--;
2937 	wlan_cfg_pdev_detach(pdev->wlan_cfg_ctx);
2938 	qdf_mem_free(pdev->dp_txrx_handle);
2939 	qdf_mem_free(pdev);
2940 }
2941 
2942 /*
2943  * dp_reo_desc_freelist_destroy() - Flush REO descriptors from deferred freelist
2944  * @soc: DP SOC handle
2945  */
2946 static inline void dp_reo_desc_freelist_destroy(struct dp_soc *soc)
2947 {
2948 	struct reo_desc_list_node *desc;
2949 	struct dp_rx_tid *rx_tid;
2950 
2951 	qdf_spin_lock_bh(&soc->reo_desc_freelist_lock);
2952 	while (qdf_list_remove_front(&soc->reo_desc_freelist,
2953 		(qdf_list_node_t **)&desc) == QDF_STATUS_SUCCESS) {
2954 		rx_tid = &desc->rx_tid;
2955 		qdf_mem_unmap_nbytes_single(soc->osdev,
2956 			rx_tid->hw_qdesc_paddr,
2957 			QDF_DMA_BIDIRECTIONAL,
2958 			rx_tid->hw_qdesc_alloc_size);
2959 		qdf_mem_free(rx_tid->hw_qdesc_vaddr_unaligned);
2960 		qdf_mem_free(desc);
2961 	}
2962 	qdf_spin_unlock_bh(&soc->reo_desc_freelist_lock);
2963 	qdf_list_destroy(&soc->reo_desc_freelist);
2964 	qdf_spinlock_destroy(&soc->reo_desc_freelist_lock);
2965 }
2966 
2967 /*
2968  * dp_soc_detach_wifi3() - Detach txrx SOC
2969  * @txrx_soc: DP SOC handle, struct cdp_soc_t is first element of struct dp_soc.
2970  */
2971 static void dp_soc_detach_wifi3(void *txrx_soc)
2972 {
2973 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
2974 	int i;
2975 
2976 	qdf_atomic_set(&soc->cmn_init_done, 0);
2977 
2978 	qdf_flush_work(&soc->htt_stats.work);
2979 	qdf_disable_work(&soc->htt_stats.work);
2980 
2981 	/* Free pending htt stats messages */
2982 	qdf_nbuf_queue_free(&soc->htt_stats.msg);
2983 
2984 	dp_free_inact_timer(soc);
2985 
2986 	for (i = 0; i < MAX_PDEV_CNT; i++) {
2987 		if (soc->pdev_list[i])
2988 			dp_pdev_detach_wifi3(
2989 				(struct cdp_pdev *)soc->pdev_list[i], 1);
2990 	}
2991 
2992 	dp_peer_find_detach(soc);
2993 
2994 	/* TBD: Call Tx and Rx cleanup functions to free buffers and
2995 	 * SW descriptors
2996 	 */
2997 
2998 	/* Free the ring memories */
2999 	/* Common rings */
3000 	dp_srng_cleanup(soc, &soc->wbm_desc_rel_ring, SW2WBM_RELEASE, 0);
3001 
3002 	dp_tx_soc_detach(soc);
3003 	/* Tx data rings */
3004 	if (!wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
3005 		for (i = 0; i < soc->num_tcl_data_rings; i++) {
3006 			dp_srng_cleanup(soc, &soc->tcl_data_ring[i],
3007 				TCL_DATA, i);
3008 			dp_srng_cleanup(soc, &soc->tx_comp_ring[i],
3009 				WBM2SW_RELEASE, i);
3010 		}
3011 	}
3012 
3013 	/* TCL command and status rings */
3014 	dp_srng_cleanup(soc, &soc->tcl_cmd_ring, TCL_CMD, 0);
3015 	dp_srng_cleanup(soc, &soc->tcl_status_ring, TCL_STATUS, 0);
3016 
3017 	/* Rx data rings */
3018 	if (!wlan_cfg_per_pdev_rx_ring(soc->wlan_cfg_ctx)) {
3019 		soc->num_reo_dest_rings =
3020 			wlan_cfg_num_reo_dest_rings(soc->wlan_cfg_ctx);
3021 		for (i = 0; i < soc->num_reo_dest_rings; i++) {
3022 			/* TODO: Get number of rings and ring sizes
3023 			 * from wlan_cfg
3024 			 */
3025 			dp_srng_cleanup(soc, &soc->reo_dest_ring[i],
3026 				REO_DST, i);
3027 		}
3028 	}
3029 	/* REO reinjection ring */
3030 	dp_srng_cleanup(soc, &soc->reo_reinject_ring, REO_REINJECT, 0);
3031 
3032 	/* Rx release ring */
3033 	dp_srng_cleanup(soc, &soc->rx_rel_ring, WBM2SW_RELEASE, 0);
3034 
3035 	/* Rx exception ring */
3036 	/* TODO: Better to store ring_type and ring_num in
3037 	 * dp_srng during setup
3038 	 */
3039 	dp_srng_cleanup(soc, &soc->reo_exception_ring, REO_EXCEPTION, 0);
3040 
3041 	/* REO command and status rings */
3042 	dp_srng_cleanup(soc, &soc->reo_cmd_ring, REO_CMD, 0);
3043 	dp_srng_cleanup(soc, &soc->reo_status_ring, REO_STATUS, 0);
3044 	dp_hw_link_desc_pool_cleanup(soc);
3045 
3046 	qdf_spinlock_destroy(&soc->peer_ref_mutex);
3047 	qdf_spinlock_destroy(&soc->htt_stats.lock);
3048 
3049 	htt_soc_detach(soc->htt_handle);
3050 
3051 	dp_reo_cmdlist_destroy(soc);
3052 	qdf_spinlock_destroy(&soc->rx.reo_cmd_lock);
3053 	dp_reo_desc_freelist_destroy(soc);
3054 
3055 	wlan_cfg_soc_detach(soc->wlan_cfg_ctx);
3056 
3057 	dp_soc_wds_detach(soc);
3058 	qdf_spinlock_destroy(&soc->ast_lock);
3059 
3060 	qdf_mem_free(soc);
3061 }
3062 
3063 /*
3064  * dp_rxdma_ring_config() - configure the RX DMA rings
3065  *
3066  * This function is used to configure the MAC rings.
3067  * On MCL, the host provides buffers in the Host2FW ring;
3068  * FW refills (copies) the buffers into the RXDMA ring and updates
3069  * the ring_idx register
3070  *
3071  * @soc: data path SoC handle
3072  *
3073  * Return: void
3074  */
3075 #ifdef QCA_HOST2FW_RXBUF_RING
3076 static void dp_rxdma_ring_config(struct dp_soc *soc)
3077 {
3078 	int i;
3079 
3080 	for (i = 0; i < MAX_PDEV_CNT; i++) {
3081 		struct dp_pdev *pdev = soc->pdev_list[i];
3082 
3083 		if (pdev) {
3084 			int mac_id;
3085 			bool dbs_enable = 0;
3086 			int max_mac_rings =
3087 				 wlan_cfg_get_num_mac_rings
3088 				(pdev->wlan_cfg_ctx);
3089 
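			/*
			 * Each htt_srng_setup() call below sends the host
			 * SRNG parameters (base address, size, ring id) to
			 * the target over HTT so FW/HW can start producing
			 * or consuming on that ring; rings are advertised
			 * per MAC, with MAC 0 carrying the refill ring(s).
			 */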
3090 			htt_srng_setup(soc->htt_handle, 0,
3091 				 pdev->rx_refill_buf_ring.hal_srng,
3092 				 RXDMA_BUF);
3093 
3094 			if (pdev->rx_refill_buf_ring2.hal_srng)
3095 				htt_srng_setup(soc->htt_handle, 0,
3096 					pdev->rx_refill_buf_ring2.hal_srng,
3097 					RXDMA_BUF);
3098 
3099 			if (soc->cdp_soc.ol_ops->
3100 				is_hw_dbs_2x2_capable) {
3101 				dbs_enable = soc->cdp_soc.ol_ops->
3102 					is_hw_dbs_2x2_capable(soc->ctrl_psoc);
3103 			}
3104 
3105 			if (dbs_enable) {
3106 				QDF_TRACE(QDF_MODULE_ID_TXRX,
3107 				QDF_TRACE_LEVEL_ERROR,
3108 				FL("DBS enabled max_mac_rings %d\n"),
3109 					 max_mac_rings);
3110 			} else {
3111 				max_mac_rings = 1;
3112 				QDF_TRACE(QDF_MODULE_ID_TXRX,
3113 					 QDF_TRACE_LEVEL_ERROR,
3114 					 FL("DBS disabled, max_mac_rings %d\n"),
3115 					 max_mac_rings);
3116 			}
3117 
3118 			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
3119 					 FL("pdev_id %d max_mac_rings %d\n"),
3120 					 pdev->pdev_id, max_mac_rings);
3121 
3122 			for (mac_id = 0; mac_id < max_mac_rings; mac_id++) {
3123 				int mac_for_pdev = dp_get_mac_id_for_pdev(
3124 							mac_id, pdev->pdev_id);
3125 
3126 				QDF_TRACE(QDF_MODULE_ID_TXRX,
3127 					 QDF_TRACE_LEVEL_ERROR,
3128 					 FL("mac_id %d\n"), mac_for_pdev);
3129 				htt_srng_setup(soc->htt_handle, mac_for_pdev,
3130 					 pdev->rx_mac_buf_ring[mac_id]
3131 						.hal_srng,
3132 					 RXDMA_BUF);
3133 				htt_srng_setup(soc->htt_handle, mac_for_pdev,
3134 					pdev->rxdma_err_dst_ring[mac_id]
3135 						.hal_srng,
3136 					RXDMA_DST);
3137 
3138 				/* Configure monitor mode rings */
3139 				htt_srng_setup(soc->htt_handle, mac_for_pdev,
3140 				   pdev->rxdma_mon_buf_ring[mac_id].hal_srng,
3141 				   RXDMA_MONITOR_BUF);
3142 
3143 				htt_srng_setup(soc->htt_handle, mac_for_pdev,
3144 				   pdev->rxdma_mon_dst_ring[mac_id].hal_srng,
3145 				   RXDMA_MONITOR_DST);
3146 
3147 				htt_srng_setup(soc->htt_handle, mac_for_pdev,
3148 				  pdev->rxdma_mon_status_ring[mac_id].hal_srng,
3149 				  RXDMA_MONITOR_STATUS);
3150 
3151 				htt_srng_setup(soc->htt_handle, mac_for_pdev,
3152 				  pdev->rxdma_mon_desc_ring[mac_id].hal_srng,
3153 				  RXDMA_MONITOR_DESC);
3154 			}
3155 		}
3156 	}
3157 
3158 	/*
3159 	 * Timer to reap rxdma status rings.
3160 	 * Needed until we enable ppdu end interrupts
3161 	 */
3162 	qdf_timer_init(soc->osdev, &soc->mon_reap_timer,
3163 			dp_service_mon_rings, (void *)soc,
3164 			QDF_TIMER_TYPE_WAKE_APPS);
3165 	soc->reap_timer_init = 1;
3166 }
3167 #else
3168 /* This is only for WIN */
3169 static void dp_rxdma_ring_config(struct dp_soc *soc)
3170 {
3171 	int i;
3172 	int mac_id;
3173 
3174 	for (i = 0; i < MAX_PDEV_CNT; i++) {
3175 		struct dp_pdev *pdev = soc->pdev_list[i];
3176 
3177 		if (pdev == NULL)
3178 			continue;
3179 
3180 		for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
3181 			int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, i);
3182 
3183 			htt_srng_setup(soc->htt_handle, mac_for_pdev,
3184 				pdev->rx_refill_buf_ring.hal_srng, RXDMA_BUF);
3185 
3186 			htt_srng_setup(soc->htt_handle, mac_for_pdev,
3187 				pdev->rxdma_mon_buf_ring[mac_id].hal_srng,
3188 				RXDMA_MONITOR_BUF);
3189 			htt_srng_setup(soc->htt_handle, mac_for_pdev,
3190 				pdev->rxdma_mon_dst_ring[mac_id].hal_srng,
3191 				RXDMA_MONITOR_DST);
3192 			htt_srng_setup(soc->htt_handle, mac_for_pdev,
3193 				pdev->rxdma_mon_status_ring[mac_id].hal_srng,
3194 				RXDMA_MONITOR_STATUS);
3195 			htt_srng_setup(soc->htt_handle, mac_for_pdev,
3196 				pdev->rxdma_mon_desc_ring[mac_id].hal_srng,
3197 				RXDMA_MONITOR_DESC);
3198 			htt_srng_setup(soc->htt_handle, mac_for_pdev,
3199 				pdev->rxdma_err_dst_ring[mac_id].hal_srng,
3200 				RXDMA_DST);
3201 		}
3202 	}
3203 }
3204 #endif
3205 
3206 /*
3207  * dp_soc_attach_target_wifi3() - SOC initialization in the target
3208  * @txrx_soc: Datapath SOC handle
3209  */
3210 static int dp_soc_attach_target_wifi3(struct cdp_soc_t *cdp_soc)
3211 {
3212 	struct dp_soc *soc = (struct dp_soc *)cdp_soc;
3213 
3214 	htt_soc_attach_target(soc->htt_handle);
3215 
3216 	dp_rxdma_ring_config(soc);
3217 
3218 	DP_STATS_INIT(soc);
3219 
3220 	/* initialize work queue for stats processing */
3221 	qdf_create_work(0, &soc->htt_stats.work, htt_t2h_stats_handler, soc);
3222 
3223 	return 0;
3224 }
3225 
3226 /*
3227  * dp_soc_get_nss_cfg_wifi3() - SOC get nss config
3228  * @txrx_soc: Datapath SOC handle
3229  */
3230 static int dp_soc_get_nss_cfg_wifi3(struct cdp_soc_t *cdp_soc)
3231 {
3232 	struct dp_soc *dsoc = (struct dp_soc *)cdp_soc;
3233 	return wlan_cfg_get_dp_soc_nss_cfg(dsoc->wlan_cfg_ctx);
3234 }
3235 /*
3236  * dp_soc_set_nss_cfg_wifi3() - SOC set nss config
3237  * @txrx_soc: Datapath SOC handle
3238  * @nss_cfg: nss config
3239  */
3240 static void dp_soc_set_nss_cfg_wifi3(struct cdp_soc_t *cdp_soc, int config)
3241 {
3242 	struct dp_soc *dsoc = (struct dp_soc *)cdp_soc;
3243 	struct wlan_cfg_dp_soc_ctxt *wlan_cfg_ctx = dsoc->wlan_cfg_ctx;
3244 
3245 	wlan_cfg_set_dp_soc_nss_cfg(wlan_cfg_ctx, config);
3246 
3247 	/*
3248 	 * TODO: masked out based on the per offloaded radio
3249 	 */
3250 	if (config == dp_nss_cfg_dbdc) {
3251 		wlan_cfg_set_num_tx_desc_pool(wlan_cfg_ctx, 0);
3252 		wlan_cfg_set_num_tx_ext_desc_pool(wlan_cfg_ctx, 0);
3253 		wlan_cfg_set_num_tx_desc(wlan_cfg_ctx, 0);
3254 		wlan_cfg_set_num_tx_ext_desc(wlan_cfg_ctx, 0);
3255 	}
3256 
3257 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3258 				FL("nss-wifi<0> nss config is enabled"));
3259 }
3260 /*
3261 * dp_vdev_attach_wifi3() - attach txrx vdev
3262 * @txrx_pdev: Datapath PDEV handle
3263 * @vdev_mac_addr: MAC address of the virtual interface
3264 * @vdev_id: VDEV Id
3265 * @wlan_op_mode: VDEV operating mode
3266 *
3267 * Return: DP VDEV handle on success, NULL on failure
3268 */
3269 static struct cdp_vdev *dp_vdev_attach_wifi3(struct cdp_pdev *txrx_pdev,
3270 	uint8_t *vdev_mac_addr, uint8_t vdev_id, enum wlan_op_mode op_mode)
3271 {
3272 	struct dp_pdev *pdev = (struct dp_pdev *)txrx_pdev;
3273 	struct dp_soc *soc = pdev->soc;
3274 	struct dp_vdev *vdev = qdf_mem_malloc(sizeof(*vdev));
3275 
3276 	if (!vdev) {
3277 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3278 			FL("DP VDEV memory allocation failed"));
3279 		goto fail0;
3280 	}
3281 
3282 	vdev->pdev = pdev;
3283 	vdev->vdev_id = vdev_id;
3284 	vdev->opmode = op_mode;
3285 	vdev->osdev = soc->osdev;
3286 
3287 	vdev->osif_rx = NULL;
3288 	vdev->osif_rsim_rx_decap = NULL;
3289 	vdev->osif_get_key = NULL;
3290 	vdev->osif_rx_mon = NULL;
3291 	vdev->osif_tx_free_ext = NULL;
3292 	vdev->osif_vdev = NULL;
3293 
3294 	vdev->delete.pending = 0;
3295 	vdev->safemode = 0;
3296 	vdev->drop_unenc = 1;
3297 	vdev->sec_type = cdp_sec_type_none;
3298 #ifdef notyet
3299 	vdev->filters_num = 0;
3300 #endif
3301 
3302 	qdf_mem_copy(
3303 		&vdev->mac_addr.raw[0], vdev_mac_addr, OL_TXRX_MAC_ADDR_LEN);
3304 
3305 	vdev->tx_encap_type = wlan_cfg_pkt_type(soc->wlan_cfg_ctx);
3306 	vdev->rx_decap_type = wlan_cfg_pkt_type(soc->wlan_cfg_ctx);
3307 	vdev->dscp_tid_map_id = 0;
3308 	vdev->mcast_enhancement_en = 0;
3309 
3310 	/* TODO: Initialize default HTT meta data that will be used in
3311 	 * TCL descriptors for packets transmitted from this VDEV
3312 	 */
3313 
3314 	TAILQ_INIT(&vdev->peer_list);
3315 
3316 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
3317 	/* add this vdev into the pdev's list */
3318 	TAILQ_INSERT_TAIL(&pdev->vdev_list, vdev, vdev_list_elem);
3319 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
3320 	pdev->vdev_count++;
3321 
3322 	dp_tx_vdev_attach(vdev);
3323 
3324 
3325 	if ((soc->intr_mode == DP_INTR_POLL) &&
3326 			wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx) != 0) {
3327 		if (pdev->vdev_count == 1)
3328 			qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
3329 	}
3330 
3331 	dp_lro_hash_setup(soc);
3332 
3333 	/* LRO */
3334 	if (wlan_cfg_is_lro_enabled(soc->wlan_cfg_ctx) &&
3335 		wlan_op_mode_sta == vdev->opmode)
3336 		vdev->lro_enable = true;
3337 
3338 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
3339 		 "LRO: vdev_id %d lro_enable %d", vdev_id, vdev->lro_enable);
3340 
3341 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3342 		"Created vdev %pK (%pM)", vdev, vdev->mac_addr.raw);
3343 	DP_STATS_INIT(vdev);
3344 
3345 	if (wlan_op_mode_sta == vdev->opmode)
3346 		dp_peer_create_wifi3((struct cdp_vdev *)vdev,
3347 							vdev->mac_addr.raw);
3348 
3349 	return (struct cdp_vdev *)vdev;
3350 
3351 fail0:
3352 	return NULL;
3353 }
3354 
3355 /**
3356  * dp_vdev_register_wifi3() - Register VDEV operations from osif layer
3357  * @vdev: Datapath VDEV handle
3358  * @osif_vdev: OSIF vdev handle
3359  * @txrx_ops: Tx and Rx operations
3360  *
3361  * Return: void
3362  */
3363 static void dp_vdev_register_wifi3(struct cdp_vdev *vdev_handle,
3364 	void *osif_vdev,
3365 	struct ol_txrx_ops *txrx_ops)
3366 {
3367 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
3368 	vdev->osif_vdev = osif_vdev;
3369 	vdev->osif_rx = txrx_ops->rx.rx;
3370 	vdev->osif_rsim_rx_decap = txrx_ops->rx.rsim_rx_decap;
3371 	vdev->osif_get_key = txrx_ops->get_key;
3372 	vdev->osif_rx_mon = txrx_ops->rx.mon;
3373 	vdev->osif_tx_free_ext = txrx_ops->tx.tx_free_ext;
3374 #ifdef notyet
3375 #if ATH_SUPPORT_WAPI
3376 	vdev->osif_check_wai = txrx_ops->rx.wai_check;
3377 #endif
3378 #endif
3379 #ifdef UMAC_SUPPORT_PROXY_ARP
3380 	vdev->osif_proxy_arp = txrx_ops->proxy_arp;
3381 #endif
3382 	vdev->me_convert = txrx_ops->me_convert;
3383 
3384 	/* TODO: Enable the following once Tx code is integrated */
3385 	if (vdev->mesh_vdev)
3386 		txrx_ops->tx.tx = dp_tx_send_mesh;
3387 	else
3388 		txrx_ops->tx.tx = dp_tx_send;
3389 
3390 	txrx_ops->tx.tx_exception = dp_tx_send_exception;
3391 
3392 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW,
3393 		"DP Vdev Register success");
3394 }
3395 
3396 /**
3397  * dp_vdev_flush_peers() - Forcibly flush peers of vdev
3398  * @vdev: Datapath VDEV handle
3399  *
3400  * Return: void
3401  */
3402 static void dp_vdev_flush_peers(struct dp_vdev *vdev)
3403 {
3404 	struct dp_pdev *pdev = vdev->pdev;
3405 	struct dp_soc *soc = pdev->soc;
3406 	struct dp_peer *peer;
3407 	uint16_t *peer_ids;
3408 	uint8_t i = 0, j = 0;
3409 
3410 	peer_ids = qdf_mem_malloc(soc->max_peers * sizeof(peer_ids[0]));
3411 	if (!peer_ids) {
3412 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3413 			"DP alloc failure - unable to flush peers");
3414 		return;
3415 	}
3416 
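	/*
	 * Peer ids are collected under peer_ref_mutex first and the unmap
	 * handling is issued only after the lock is dropped:
	 * dp_rx_peer_unmap_handler can release the last peer reference and
	 * free the peer, which must not happen while the list is still
	 * being walked here.
	 */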
3417 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
3418 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
3419 		for (i = 0; i < MAX_NUM_PEER_ID_PER_PEER; i++)
3420 			if (peer->peer_ids[i] != HTT_INVALID_PEER)
3421 				if (j < soc->max_peers)
3422 					peer_ids[j++] = peer->peer_ids[i];
3423 	}
3424 	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
3425 
3426 	for (i = 0; i < j ; i++)
3427 		dp_rx_peer_unmap_handler(soc, peer_ids[i]);
3428 
3429 	qdf_mem_free(peer_ids);
3430 
3431 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
3432 		FL("Flushed peers for vdev object %pK "), vdev);
3433 }
3434 
3435 /*
3436  * dp_vdev_detach_wifi3() - Detach txrx vdev
3437  * @txrx_vdev:		Datapath VDEV handle
3438  * @callback:		Callback OL_IF on completion of detach
3439  * @cb_context:	Callback context
3440  *
3441  */
3442 static void dp_vdev_detach_wifi3(struct cdp_vdev *vdev_handle,
3443 	ol_txrx_vdev_delete_cb callback, void *cb_context)
3444 {
3445 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
3446 	struct dp_pdev *pdev = vdev->pdev;
3447 	struct dp_soc *soc = pdev->soc;
3448 
3449 	/* preconditions */
3450 	qdf_assert(vdev);
3451 
3452 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
3453 	/* remove the vdev from its parent pdev's list */
3454 	TAILQ_REMOVE(&pdev->vdev_list, vdev, vdev_list_elem);
3455 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
3456 
3457 	if (wlan_op_mode_sta == vdev->opmode)
3458 		dp_peer_delete_wifi3(vdev->vap_bss_peer, 0);
3459 
3460 	/*
3461 	 * If Target is hung, flush all peers before detaching vdev
3462 	 * this will free all references held due to missing
3463 	 * unmap commands from Target
3464 	 */
3465 	if (hif_get_target_status(soc->hif_handle) == TARGET_STATUS_RESET)
3466 		dp_vdev_flush_peers(vdev);
3467 
3468 	/*
3469 	 * Use peer_ref_mutex while accessing peer_list, in case
3470 	 * a peer is in the process of being removed from the list.
3471 	 */
3472 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
3473 	/* check that the vdev has no peers allocated */
3474 	if (!TAILQ_EMPTY(&vdev->peer_list)) {
3475 		/* debug print - will be removed later */
3476 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_WARN,
3477 			FL("not deleting vdev object %pK (%pM)"
3478 			"until deletion finishes for all its peers"),
3479 			vdev, vdev->mac_addr.raw);
3480 		/* indicate that the vdev needs to be deleted */
3481 		vdev->delete.pending = 1;
3482 		vdev->delete.callback = callback;
3483 		vdev->delete.context = cb_context;
3484 		qdf_spin_unlock_bh(&soc->peer_ref_mutex);
3485 		return;
3486 	}
3487 	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
3488 
3489 	dp_tx_vdev_detach(vdev);
3490 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
3491 		FL("deleting vdev object %pK (%pM)"), vdev, vdev->mac_addr.raw);
3492 
3493 	qdf_mem_free(vdev);
3494 
3495 	if (callback)
3496 		callback(cb_context);
3497 }
3498 
3499 /*
3500  * dp_peer_create_wifi3() - attach txrx peer
3501  * @txrx_vdev: Datapath VDEV handle
3502  * @peer_mac_addr: Peer MAC address
3503  *
3504  * Return: DP peer handle on success, NULL on failure
3505  */
3506 static void *dp_peer_create_wifi3(struct cdp_vdev *vdev_handle,
3507 		uint8_t *peer_mac_addr)
3508 {
3509 	struct dp_peer *peer;
3510 	int i;
3511 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
3512 	struct dp_pdev *pdev;
3513 	struct dp_soc *soc;
3514 
3515 	/* preconditions */
3516 	qdf_assert(vdev);
3517 	qdf_assert(peer_mac_addr);
3518 
3519 	pdev = vdev->pdev;
3520 	soc = pdev->soc;
3521 
3522 	peer = dp_peer_find_hash_find(pdev->soc, peer_mac_addr,
3523 					0, vdev->vdev_id);
3524 
3525 	if (peer) {
3526 		peer->delete_in_progress = false;
3527 
3528 		qdf_spin_lock_bh(&soc->ast_lock);
3529 		TAILQ_INIT(&peer->ast_entry_list);
3530 		qdf_spin_unlock_bh(&soc->ast_lock);
3531 
3532 		/*
3533 		 * On peer create, the ref count decrements since a new peer
3534 		 * is not getting created and the earlier reference is reused;
3535 		 * peer_unref_delete will take care of incrementing the count.
3536 		 */
3537 		if (soc->cdp_soc.ol_ops->peer_unref_delete) {
3538 			soc->cdp_soc.ol_ops->peer_unref_delete(pdev->osif_pdev,
3539 				vdev->vdev_id, peer->mac_addr.raw);
3540 		}
3541 
3542 		DP_STATS_INIT(peer);
3543 		return (void *)peer;
3544 	}
3545 
3546 #ifdef notyet
3547 	peer = (struct dp_peer *)qdf_mempool_alloc(soc->osdev,
3548 		soc->mempool_ol_ath_peer);
3549 #else
3550 	peer = (struct dp_peer *)qdf_mem_malloc(sizeof(*peer));
3551 #endif
3552 
3553 	if (!peer)
3554 		return NULL; /* failure */
3555 
3556 	qdf_mem_zero(peer, sizeof(struct dp_peer));
3557 
3558 	TAILQ_INIT(&peer->ast_entry_list);
3559 
3560 	/* store provided params */
3561 	peer->vdev = vdev;
3562 
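	/* Add a static AST entry for the peer's own MAC address */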
3563 	dp_peer_add_ast(soc, peer, peer_mac_addr, CDP_TXRX_AST_TYPE_STATIC, 0);
3564 
3565 	qdf_spinlock_create(&peer->peer_info_lock);
3566 
3567 	qdf_mem_copy(
3568 		&peer->mac_addr.raw[0], peer_mac_addr, OL_TXRX_MAC_ADDR_LEN);
3569 
3570 	/* TODO: See if rx_opt_proc is really required */
3571 	peer->rx_opt_proc = soc->rx_opt_proc;
3572 
3573 	/* initialize the peer_id */
3574 	for (i = 0; i < MAX_NUM_PEER_ID_PER_PEER; i++)
3575 		peer->peer_ids[i] = HTT_INVALID_PEER;
3576 
3577 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
3578 
3579 	qdf_atomic_init(&peer->ref_cnt);
3580 
3581 	/* keep one reference for attach */
3582 	qdf_atomic_inc(&peer->ref_cnt);
3583 
3584 	/* add this peer into the vdev's list */
3585 	if (wlan_op_mode_sta == vdev->opmode)
3586 		TAILQ_INSERT_HEAD(&vdev->peer_list, peer, peer_list_elem);
3587 	else
3588 		TAILQ_INSERT_TAIL(&vdev->peer_list, peer, peer_list_elem);
3589 
3590 	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
3591 
3592 	/* TODO: See if hash based search is required */
3593 	dp_peer_find_hash_add(soc, peer);
3594 
3595 	/* Initialize the peer state */
3596 	peer->state = OL_TXRX_PEER_STATE_DISC;
3597 
3598 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
3599 		"vdev %pK created peer %pK (%pM) ref_cnt: %d",
3600 		vdev, peer, peer->mac_addr.raw,
3601 		qdf_atomic_read(&peer->ref_cnt));
3602 	/*
3603 	 * For every peer MAP message, check and set bss_peer accordingly
3604 	 */
3605 	if (memcmp(peer->mac_addr.raw, vdev->mac_addr.raw, 6) == 0) {
3606 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
3607 			"vdev bss_peer!!!!");
3608 		peer->bss_peer = 1;
3609 		vdev->vap_bss_peer = peer;
3610 	}
3611 
3612 
3613 #ifndef CONFIG_WIN
3614 	dp_local_peer_id_alloc(pdev, peer);
3615 #endif
3616 	DP_STATS_INIT(peer);
3617 	return (void *)peer;
3618 }
3619 
3620 /*
3621  * dp_peer_setup_wifi3() - initialize the peer
3622  * @vdev_hdl: virtual device object
3623  * @peer_hdl: Peer object
3624  *
3625  * Return: void
3626  */
3627 static void dp_peer_setup_wifi3(struct cdp_vdev *vdev_hdl, void *peer_hdl)
3628 {
3629 	struct dp_peer *peer = (struct dp_peer *)peer_hdl;
3630 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_hdl;
3631 	struct dp_pdev *pdev;
3632 	struct dp_soc *soc;
3633 	bool hash_based = 0;
3634 	enum cdp_host_reo_dest_ring reo_dest;
3635 
3636 	/* preconditions */
3637 	qdf_assert(vdev);
3638 	qdf_assert(peer);
3639 
3640 	pdev = vdev->pdev;
3641 	soc = pdev->soc;
3642 
3643 	peer->last_assoc_rcvd = 0;
3644 	peer->last_disassoc_rcvd = 0;
3645 	peer->last_deauth_rcvd = 0;
3646 
3647 	/*
3648 	 * hash based steering is disabled for Radios which are offloaded
3649 	 * to NSS
3650 	 */
3651 	if (!wlan_cfg_get_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx))
3652 		hash_based = wlan_cfg_is_rx_hash_enabled(soc->wlan_cfg_ctx);
3653 
3654 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
3655 		FL("hash based steering for pdev: %d is %d\n"),
3656 		pdev->pdev_id, hash_based);
3657 
3658 	/*
3659 	 * Below line of code will ensure the proper reo_dest ring is chosen
3660 	 * for cases where toeplitz hash cannot be generated (ex: non TCP/UDP)
3661 	 */
3662 	reo_dest = pdev->reo_dest;
3663 
3664 	if (soc->cdp_soc.ol_ops->peer_set_default_routing) {
3665 		/* TODO: Check the destination ring number to be passed to FW */
3666 		soc->cdp_soc.ol_ops->peer_set_default_routing(
3667 			pdev->osif_pdev, peer->mac_addr.raw,
3668 			 peer->vdev->vdev_id, hash_based, reo_dest);
3669 	}
3670 
3671 	dp_peer_rx_init(pdev, peer);
3672 	return;
3673 }
3674 
3675 /*
3676  * dp_set_vdev_tx_encap_type() - set the encap type of the vdev
3677  * @vdev_handle: virtual device object
3678  * @val: encap type of the packet
3679  *
3680  * Return: void
3681  */
3682 static void dp_set_vdev_tx_encap_type(struct cdp_vdev *vdev_handle,
3683 	 enum htt_cmn_pkt_type val)
3684 {
3685 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
3686 	vdev->tx_encap_type = val;
3687 }
3688 
3689 /*
3690  * dp_set_vdev_rx_decap_type() - set the decap type of the vdev
3691  * @vdev_handle: virtual device object
3692  * @val: decap type of the packet
3693  *
3694  * Return: void
3695  */
3696 static void dp_set_vdev_rx_decap_type(struct cdp_vdev *vdev_handle,
3697 	 enum htt_cmn_pkt_type val)
3698 {
3699 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
3700 	vdev->rx_decap_type = val;
3701 }
3702 
3703 /*
3704  * dp_set_pdev_reo_dest() - set the reo destination ring for this pdev
3705  * @pdev_handle: physical device object
3706  * @val: reo destination ring index (1 - 4)
3707  *
3708  * Return: void
3709  */
3710 static void dp_set_pdev_reo_dest(struct cdp_pdev *pdev_handle,
3711 	 enum cdp_host_reo_dest_ring val)
3712 {
3713 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
3714 
3715 	if (pdev)
3716 		pdev->reo_dest = val;
3717 }
3718 
3719 /*
3720  * dp_get_pdev_reo_dest() - get the reo destination for this pdev
3721  * @pdev_handle: physical device object
3722  *
3723  * Return: reo destination ring index
3724  */
3725 static enum cdp_host_reo_dest_ring
3726 dp_get_pdev_reo_dest(struct cdp_pdev *pdev_handle)
3727 {
3728 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
3729 
3730 	if (pdev)
3731 		return pdev->reo_dest;
3732 	else
3733 		return cdp_host_reo_dest_ring_unknown;
3734 }
3735 
3736 #ifdef QCA_SUPPORT_SON
3737 static void dp_son_peer_authorize(struct dp_peer *peer)
3738 {
3739 	struct dp_soc *soc;
3740 	soc = peer->vdev->pdev->soc;
3741 	peer->peer_bs_inact_flag = 0;
3742 	peer->peer_bs_inact = soc->pdev_bs_inact_reload;
3743 	return;
3744 }
3745 #else
3746 static void dp_son_peer_authorize(struct dp_peer *peer)
3747 {
3748 	return;
3749 }
3750 #endif
3751 /*
3752  * dp_set_filter_neighbour_peers() - set filter neighbour peers for smart mesh
3753  * @pdev_handle: device object
3754  * @val: value to be set
3755  *
3756  * Return: 0 on success
3757  */
3758 static int dp_set_filter_neighbour_peers(struct cdp_pdev *pdev_handle,
3759 	 uint32_t val)
3760 {
3761 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
3762 
3763 	/* Enable/Disable smart mesh filtering. This flag will be checked
3764 	 * during rx processing to check if packets are from NAC clients.
3765 	 */
3766 	pdev->filter_neighbour_peers = val;
3767 	return 0;
3768 }
3769 
3770 /*
3771  * dp_update_filter_neighbour_peers() - set neighbour peers (nac clients)
3772  * address for smart mesh filtering
3773  * @pdev_handle: device object
3774  * @cmd: Add/Del command
3775  * @macaddr: nac client mac address
3776  *
3777  * Return: 1 on success, 0 on failure
3778  */
3779 static int dp_update_filter_neighbour_peers(struct cdp_pdev *pdev_handle,
3780 	 uint32_t cmd, uint8_t *macaddr)
3781 {
3782 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
3783 	struct dp_neighbour_peer *peer = NULL;
3784 
3785 	if (!macaddr)
3786 		goto fail0;
3787 
3788 	/* Store address of NAC (neighbour peer) which will be checked
3789 	 * against TA of received packets.
3790 	 */
3791 	if (cmd == DP_NAC_PARAM_ADD) {
3792 		peer = (struct dp_neighbour_peer *) qdf_mem_malloc(
3793 				sizeof(*peer));
3794 
3795 		if (!peer) {
3796 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3797 				FL("DP neighbour peer node memory allocation failed"));
3798 			goto fail0;
3799 		}
3800 
3801 		qdf_mem_copy(&peer->neighbour_peers_macaddr.raw[0],
3802 			macaddr, DP_MAC_ADDR_LEN);
3803 
3804 
3805 		qdf_spin_lock_bh(&pdev->neighbour_peer_mutex);
3806 		/* add this neighbour peer into the list */
3807 		TAILQ_INSERT_TAIL(&pdev->neighbour_peers_list, peer,
3808 				neighbour_peer_list_elem);
3809 		qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex);
3810 
3811 		return 1;
3812 
3813 	} else if (cmd == DP_NAC_PARAM_DEL) {
3814 		qdf_spin_lock_bh(&pdev->neighbour_peer_mutex);
3815 		TAILQ_FOREACH(peer, &pdev->neighbour_peers_list,
3816 				neighbour_peer_list_elem) {
3817 			if (!qdf_mem_cmp(&peer->neighbour_peers_macaddr.raw[0],
3818 				macaddr, DP_MAC_ADDR_LEN)) {
3819 				/* delete this peer from the list */
3820 				TAILQ_REMOVE(&pdev->neighbour_peers_list,
3821 					peer, neighbour_peer_list_elem);
3822 				qdf_mem_free(peer);
3823 				break;
3824 			}
3825 		}
3826 		qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex);
3827 
3828 		return 1;
3829 
3830 	}
3831 
3832 fail0:
3833 	return 0;
3834 }
3835 
3836 /*
3837  * dp_get_sec_type() - Get the security type
3838  * @peer:		Datapath peer handle
3839  * @sec_idx:    Security id (mcast, ucast)
3840  *
3841  * return sec_type: Security type
3842  */
3843 static int dp_get_sec_type(struct cdp_peer *peer, uint8_t sec_idx)
3844 {
3845 	struct dp_peer *dpeer = (struct dp_peer *)peer;
3846 
3847 	return dpeer->security[sec_idx].sec_type;
3848 }
3849 
3850 /*
3851  * dp_peer_authorize() - authorize txrx peer
3852  * @peer_handle:		Datapath peer handle
3853  * @authorize:		authorize flag (non-zero to authorize the peer)
3854  *
3855  */
3856 static void dp_peer_authorize(struct cdp_peer *peer_handle, uint32_t authorize)
3857 {
3858 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
3859 	struct dp_soc *soc;
3860 
3861 	if (peer != NULL) {
3862 		soc = peer->vdev->pdev->soc;
3863 		qdf_spin_lock_bh(&soc->peer_ref_mutex);
3864 		dp_son_peer_authorize(peer);
3865 		peer->authorize = authorize ? 1 : 0;
3866 		qdf_spin_unlock_bh(&soc->peer_ref_mutex);
3867 	}
3868 }
3869 
3870 #ifdef QCA_SUPPORT_SON
3871 /*
3872  * dp_txrx_update_inact_threshold() - Update inact timer threshold
3873  * @pdev_handle: Device handle
3874  * @new_threshold : updated threshold value
3875  *
3876  */
3877 static void
3878 dp_txrx_update_inact_threshold(struct cdp_pdev *pdev_handle,
3879 			       u_int16_t new_threshold)
3880 {
3881 	struct dp_vdev *vdev;
3882 	struct dp_peer *peer;
3883 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
3884 	struct dp_soc *soc = pdev->soc;
3885 	u_int16_t old_threshold = soc->pdev_bs_inact_reload;
3886 
3887 	if (old_threshold == new_threshold)
3888 		return;
3889 
3890 	soc->pdev_bs_inact_reload = new_threshold;
3891 
3892 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
3893 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
3894 	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
3895 		if (vdev->opmode != wlan_op_mode_ap)
3896 			continue;
3897 
3898 		TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
3899 			if (!peer->authorize)
3900 				continue;
3901 
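			/*
			 * Re-scale the remaining inactivity budget: if the
			 * time already consumed under the old threshold
			 * meets or exceeds the new threshold, mark the peer
			 * inactive right away.
			 */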
3902 			if (old_threshold - peer->peer_bs_inact >=
3903 					new_threshold) {
3904 				dp_mark_peer_inact((void *)peer, true);
3905 				peer->peer_bs_inact = 0;
3906 			} else {
3907 				peer->peer_bs_inact = new_threshold -
3908 					(old_threshold - peer->peer_bs_inact);
3909 			}
3910 		}
3911 	}
3912 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
3913 	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
3914 }
3915 
3916 /**
3917  * dp_txrx_reset_inact_count(): Reset inact count
3918  * @pdev_handle - device handle
3919  *
3920  * Return: void
3921  */
3922 static void
3923 dp_txrx_reset_inact_count(struct cdp_pdev *pdev_handle)
3924 {
3925 	struct dp_vdev *vdev = NULL;
3926 	struct dp_peer *peer = NULL;
3927 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
3928 	struct dp_soc *soc = pdev->soc;
3929 
3930 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
3931 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
3932 	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
3933 		if (vdev->opmode != wlan_op_mode_ap)
3934 			continue;
3935 
3936 		TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
3937 			if (!peer->authorize)
3938 				continue;
3939 
3940 			peer->peer_bs_inact = soc->pdev_bs_inact_reload;
3941 		}
3942 	}
3943 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
3944 	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
3945 }
3946 
3947 /**
3948  * dp_set_inact_params(): set inactivity params
3949  * @pdev_handle - device handle
3950  * @inact_check_interval - inactivity interval
3951  * @inact_normal - Inactivity normal
3952  * @inact_overload - Inactivity overload
3953  *
3954  * Return: bool
3955  */
3956 bool dp_set_inact_params(struct cdp_pdev *pdev_handle,
3957 			 u_int16_t inact_check_interval,
3958 			 u_int16_t inact_normal, u_int16_t inact_overload)
3959 {
3960 	struct dp_soc *soc;
3961 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
3962 
3963 	if (!pdev)
3964 		return false;
3965 
3966 	soc = pdev->soc;
3967 	if (!soc)
3968 		return false;
3969 
3970 	soc->pdev_bs_inact_interval = inact_check_interval;
3971 	soc->pdev_bs_inact_normal = inact_normal;
3972 	soc->pdev_bs_inact_overload = inact_overload;
3973 
3974 	dp_txrx_update_inact_threshold((struct cdp_pdev *)pdev,
3975 					soc->pdev_bs_inact_normal);
3976 
3977 	return true;
3978 }
3979 
3980 /**
3981  * dp_start_inact_timer(): Inactivity timer start
3982  * @pdev_handle - device handle
3983  * @enable - Inactivity timer start/stop
3984  *
3985  * Return: bool
3986  */
3987 bool dp_start_inact_timer(struct cdp_pdev *pdev_handle, bool enable)
3988 {
3989 	struct dp_soc *soc;
3990 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
3991 
3992 	if (!pdev)
3993 		return false;
3994 
3995 	soc = pdev->soc;
3996 	if (!soc)
3997 		return false;
3998 
3999 	if (enable) {
4000 		dp_txrx_reset_inact_count((struct cdp_pdev *)pdev);
4001 		qdf_timer_mod(&soc->pdev_bs_inact_timer,
4002 			      soc->pdev_bs_inact_interval * 1000);
4003 	} else {
4004 		qdf_timer_stop(&soc->pdev_bs_inact_timer);
4005 	}
4006 
4007 	return true;
4008 }
4009 
4010 /**
4011  * dp_set_overload(): Set inactivity overload
4012  * @pdev_handle - device handle
4013  * @overload - overload status
4014  *
4015  * Return: void
4016  */
4017 void dp_set_overload(struct cdp_pdev *pdev_handle, bool overload)
4018 {
4019 	struct dp_soc *soc;
4020 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
4021 
4022 	if (!pdev)
4023 		return;
4024 
4025 	soc = pdev->soc;
4026 	if (!soc)
4027 		return;
4028 
4029 	dp_txrx_update_inact_threshold((struct cdp_pdev *)pdev,
4030 			overload ? soc->pdev_bs_inact_overload :
4031 			soc->pdev_bs_inact_normal);
4032 }
4033 
4034 /**
4035  * dp_peer_is_inact(): check whether peer is inactive
4036  * @peer_handle - datapath peer handle
4037  *
4038  * Return: bool
4039  */
4040 bool dp_peer_is_inact(void *peer_handle)
4041 {
4042 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
4043 
4044 	if (!peer)
4045 		return false;
4046 
4047 	return peer->peer_bs_inact_flag == 1;
4048 }
4049 
4050 /**
4051  * dp_init_inact_timer: initialize the inact timer
4052  * @soc - SOC handle
4053  *
4054  * Return: void
4055  */
4056 void dp_init_inact_timer(struct dp_soc *soc)
4057 {
4058 	qdf_timer_init(soc->osdev, &soc->pdev_bs_inact_timer,
4059 		dp_txrx_peer_find_inact_timeout_handler,
4060 		(void *)soc, QDF_TIMER_TYPE_WAKE_APPS);
4061 }
4062 
4063 #else
4064 
4065 bool dp_set_inact_params(struct cdp_pdev *pdev, u_int16_t inact_check_interval,
4066 			 u_int16_t inact_normal, u_int16_t inact_overload)
4067 {
4068 	return false;
4069 }
4070 
4071 bool dp_start_inact_timer(struct cdp_pdev *pdev, bool enable)
4072 {
4073 	return false;
4074 }
4075 
4076 void dp_set_overload(struct cdp_pdev *pdev, bool overload)
4077 {
4078 	return;
4079 }
4080 
4081 void dp_init_inact_timer(struct dp_soc *soc)
4082 {
4083 	return;
4084 }
4085 
4086 bool dp_peer_is_inact(void *peer)
4087 {
4088 	return false;
4089 }
4090 #endif
4091 
4092 /*
4093  * dp_peer_unref_delete() - unref and delete peer
4094  * @peer_handle:		Datapath peer handle
4095  *
4096  */
4097 void dp_peer_unref_delete(void *peer_handle)
4098 {
4099 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
4100 	struct dp_peer *bss_peer = NULL;
4101 	struct dp_vdev *vdev = peer->vdev;
4102 	struct dp_pdev *pdev = vdev->pdev;
4103 	struct dp_soc *soc = pdev->soc;
4104 	struct dp_peer *tmppeer;
4105 	int found = 0;
4106 	uint16_t peer_id;
4107 	uint16_t vdev_id;
4108 
4109 	/*
4110 	 * Hold the lock all the way from checking if the peer ref count
4111 	 * is zero until the peer references are removed from the hash
4112 	 * table and vdev list (if the peer ref count is zero).
4113 	 * This protects against a new HL tx operation starting to use the
4114 	 * peer object just after this function concludes it's done being used.
4115 	 * Furthermore, the lock needs to be held while checking whether the
4116 	 * vdev's list of peers is empty, to make sure that list is not modified
4117 	 * concurrently with the empty check.
4118 	 */
4119 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
4120 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
4121 		  "%s: peer %pK ref_cnt(before decrement): %d\n", __func__,
4122 		  peer, qdf_atomic_read(&peer->ref_cnt));
4123 	if (qdf_atomic_dec_and_test(&peer->ref_cnt)) {
4124 		peer_id = peer->peer_ids[0];
4125 		vdev_id = vdev->vdev_id;
4126 
4127 		/*
4128 		 * Make sure that the reference to the peer in
4129 		 * peer object map is removed
4130 		 */
4131 		if (peer_id != HTT_INVALID_PEER)
4132 			soc->peer_id_to_obj_map[peer_id] = NULL;
4133 
4134 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
4135 			"Deleting peer %pK (%pM)", peer, peer->mac_addr.raw);
4136 
4137 		/* remove the reference to the peer from the hash table */
4138 		dp_peer_find_hash_remove(soc, peer);
4139 
4140 		TAILQ_FOREACH(tmppeer, &peer->vdev->peer_list, peer_list_elem) {
4141 			if (tmppeer == peer) {
4142 				found = 1;
4143 				break;
4144 			}
4145 		}
4146 		if (found) {
4147 			TAILQ_REMOVE(&peer->vdev->peer_list, peer,
4148 				peer_list_elem);
4149 		} else {
4150 			/* Ignoring the remove operation as peer not found */
4151 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_WARN,
4152 				"peer %pK not found in vdev (%pK)->peer_list:%pK",
4153 				peer, vdev, &peer->vdev->peer_list);
4154 		}
4155 
4156 		/* cleanup the peer data */
4157 		dp_peer_cleanup(vdev, peer);
4158 
4159 		/* check whether the parent vdev has no peers left */
4160 		if (TAILQ_EMPTY(&vdev->peer_list)) {
4161 			/*
4162 			 * Now that there are no references to the peer, we can
4163 			 * release the peer reference lock.
4164 			 */
4165 			qdf_spin_unlock_bh(&soc->peer_ref_mutex);
4166 			/*
4167 			 * Check if the parent vdev was waiting for its peers
4168 			 * to be deleted, in order for it to be deleted too.
4169 			 */
4170 			if (vdev->delete.pending) {
4171 				ol_txrx_vdev_delete_cb vdev_delete_cb =
4172 					vdev->delete.callback;
4173 				void *vdev_delete_context =
4174 					vdev->delete.context;
4175 
4176 				QDF_TRACE(QDF_MODULE_ID_DP,
4177 					QDF_TRACE_LEVEL_INFO_HIGH,
4178 					FL("deleting vdev object %pK (%pM)"
4179 					" - its last peer is done"),
4180 					vdev, vdev->mac_addr.raw);
4181 				/* all peers are gone, go ahead and delete it */
4182 				dp_tx_flow_pool_unmap_handler(pdev, vdev_id,
4183 								FLOW_TYPE_VDEV,
4184 								vdev_id);
4185 				dp_tx_vdev_detach(vdev);
4186 				QDF_TRACE(QDF_MODULE_ID_DP,
4187 					QDF_TRACE_LEVEL_INFO_HIGH,
4188 					FL("deleting vdev object %pK (%pM)"),
4189 					vdev, vdev->mac_addr.raw);
4190 
4191 				qdf_mem_free(vdev);
4192 				vdev = NULL;
4193 				if (vdev_delete_cb)
4194 					vdev_delete_cb(vdev_delete_context);
4195 			}
4196 		} else {
4197 			qdf_spin_unlock_bh(&soc->peer_ref_mutex);
4198 		}
4199 
4200 		if (vdev) {
4201 			if (vdev->vap_bss_peer == peer) {
4202 				vdev->vap_bss_peer = NULL;
4203 			}
4204 		}
4205 
4206 		if (soc->cdp_soc.ol_ops->peer_unref_delete) {
4207 			soc->cdp_soc.ol_ops->peer_unref_delete(pdev->osif_pdev,
4208 					vdev_id, peer->mac_addr.raw);
4209 		}
4210 
4211 		if (!vdev || !vdev->vap_bss_peer) {
4212 			goto free_peer;
4213 		}
4214 
4215 #ifdef notyet
4216 		qdf_mempool_free(soc->osdev, soc->mempool_ol_ath_peer, peer);
4217 #else
4218 		bss_peer = vdev->vap_bss_peer;
4219 		DP_UPDATE_STATS(bss_peer, peer);
4220 
4221 free_peer:
4222 		qdf_mem_free(peer);
4223 
4224 #endif
4225 	} else {
4226 		qdf_spin_unlock_bh(&soc->peer_ref_mutex);
4227 	}
4228 }
4229 
4230 /*
4231  * dp_peer_delete_wifi3() - Detach txrx peer
4232  * @peer_handle: Datapath peer handle
4233  * @bitmap: bitmap indicating special handling of request.
4234  *
4235  */
4236 static void dp_peer_delete_wifi3(void *peer_handle, uint32_t bitmap)
4237 {
4238 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
4239 
4240 	/* redirect the peer's rx delivery function to point to a
4241 	 * discard func
4242 	 */
4243 
4244 	peer->rx_opt_proc = dp_rx_discard;
4245 
4246 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
4247 		FL("peer %pK (%pM)"),  peer, peer->mac_addr.raw);
4248 
4249 #ifndef CONFIG_WIN
4250 	dp_local_peer_id_free(peer->vdev->pdev, peer);
4251 #endif
4252 	qdf_spinlock_destroy(&peer->peer_info_lock);
4253 
4254 	/*
4255 	 * Remove the reference added during peer_attach.
4256 	 * The peer will still be left allocated until the
4257 	 * PEER_UNMAP message arrives to remove the other
4258 	 * reference, added by the PEER_MAP message.
4259 	 */
4260 	dp_peer_unref_delete(peer_handle);
4261 }
4262 
4263 /*
4264  * dp_get_vdev_mac_addr_wifi3() - get vdev MAC address
4265  * @pvdev:		Datapath VDEV handle
4266  *
4267  */
4268 static uint8 *dp_get_vdev_mac_addr_wifi3(struct cdp_vdev *pvdev)
4269 {
4270 	struct dp_vdev *vdev = (struct dp_vdev *)pvdev;
4271 	return vdev->mac_addr.raw;
4272 }
4273 
4274 /*
4275  * dp_vdev_set_wds() - Enable/Disable WDS on the vdev
4276  * @vdev_handle: DP VDEV handle
4277  * @val: WDS capability value to set
4278  *
4279  * Return: 0 on success
4280  */
4281 static int dp_vdev_set_wds(void *vdev_handle, uint32_t val)
4282 {
4283 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
4284 
4285 	vdev->wds_enabled = val;
4286 	return 0;
4287 }
4288 
4289 /*
4290  * dp_get_vdev_from_vdev_id_wifi3() - get vdev from vdev_id
4291  * @dev:		Datapath PDEV handle
4292  * @vdev_id:		vdev id
4293  */
4294 static struct cdp_vdev *dp_get_vdev_from_vdev_id_wifi3(struct cdp_pdev *dev,
4295 						uint8_t vdev_id)
4296 {
4297 	struct dp_pdev *pdev = (struct dp_pdev *)dev;
4298 	struct dp_vdev *vdev = NULL;
4299 
4300 	if (qdf_unlikely(!pdev))
4301 		return NULL;
4302 
4303 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
4304 	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
4305 		if (vdev->vdev_id == vdev_id)
4306 			break;
4307 	}
4308 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
4309 
4310 	return (struct cdp_vdev *)vdev;
4311 }
4312 
4313 static int dp_get_opmode(struct cdp_vdev *vdev_handle)
4314 {
4315 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
4316 
4317 	return vdev->opmode;
4318 }
4319 
4320 static struct cdp_cfg *dp_get_ctrl_pdev_from_vdev_wifi3(struct cdp_vdev *pvdev)
4321 {
4322 	struct dp_vdev *vdev = (struct dp_vdev *)pvdev;
4323 	struct dp_pdev *pdev = vdev->pdev;
4324 
4325 	return (struct cdp_cfg *)pdev->wlan_cfg_ctx;
4326 }
4327 
4328 /**
4329  * dp_reset_monitor_mode() - Disable monitor mode
4330  * @pdev_handle: Datapath PDEV handle
4331  *
4332  * Return: 0 on success, not 0 on failure
4333  */
4334 static int dp_reset_monitor_mode(struct cdp_pdev *pdev_handle)
4335 {
4336 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
4337 	struct htt_rx_ring_tlv_filter htt_tlv_filter;
4338 	struct dp_soc *soc = pdev->soc;
4339 	uint8_t pdev_id;
4340 	int mac_id;
4341 
4342 	pdev_id = pdev->pdev_id;
4343 	soc = pdev->soc;
4344 
4345 	qdf_spin_lock_bh(&pdev->mon_lock);
4346 
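	/*
	 * Push an all-zero TLV filter to both monitor rings so that monitor
	 * TLV delivery for this pdev is effectively disabled.
	 */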
4347 	qdf_mem_set(&(htt_tlv_filter), sizeof(htt_tlv_filter), 0x0);
4348 
4349 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
4350 		int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, pdev_id);
4351 
4352 		htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
4353 			pdev->rxdma_mon_buf_ring[mac_id].hal_srng,
4354 			RXDMA_MONITOR_BUF, RX_BUFFER_SIZE, &htt_tlv_filter);
4355 
4356 		htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
4357 			pdev->rxdma_mon_status_ring[mac_id].hal_srng,
4358 			RXDMA_MONITOR_STATUS, RX_BUFFER_SIZE, &htt_tlv_filter);
4359 	}
4360 
4361 	pdev->monitor_vdev = NULL;
4362 
4363 	qdf_spin_unlock_bh(&pdev->mon_lock);
4364 
4365 	return 0;
4366 }
4367 
4368 /**
4369  * dp_set_nac() - set peer_nac
4370  * @peer_handle: Datapath PEER handle
4371  *
4372  * Return: void
4373  */
4374 static void dp_set_nac(struct cdp_peer *peer_handle)
4375 {
4376 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
4377 
4378 	peer->nac = 1;
4379 }
4380 
4381 /**
4382  * dp_get_tx_pending() - read pending tx
4383  * @pdev_handle: Datapath PDEV handle
4384  *
4385  * Return: outstanding tx
4386  */
4387 static int dp_get_tx_pending(struct cdp_pdev *pdev_handle)
4388 {
4389 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
4390 
4391 	return qdf_atomic_read(&pdev->num_tx_outstanding);
4392 }
4393 
4394 /**
4395  * dp_get_peer_mac_from_peer_id() - get peer mac
4396  * @pdev_handle: Datapath PDEV handle
4397  * @peer_id: Peer ID
4398  * @peer_mac: MAC addr of PEER
4399  *
4400  * Return: void
4401  */
4402 static void dp_get_peer_mac_from_peer_id(struct cdp_pdev *pdev_handle,
4403 	uint32_t peer_id, uint8_t *peer_mac)
4404 {
4405 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
4406 	struct dp_peer *peer;
4407 
4408 	if (pdev && peer_mac) {
4409 		peer = dp_peer_find_by_id(pdev->soc, (uint16_t)peer_id);
4410 		if (peer && peer->mac_addr.raw) {
4411 			qdf_mem_copy(peer_mac, peer->mac_addr.raw,
4412 					DP_MAC_ADDR_LEN);
4413 		}
4414 	}
4415 }
4416 
4417 /**
4418  * dp_vdev_set_monitor_mode() - Set DP VDEV to monitor mode
4419  * @vdev_handle: Datapath VDEV handle
4420  * @smart_monitor: Flag to denote if its smart monitor mode
4421  *
4422  * Return: 0 on success, not 0 on failure
4423  */
4424 static int dp_vdev_set_monitor_mode(struct cdp_vdev *vdev_handle,
4425 		uint8_t smart_monitor)
4426 {
4427 	/* Many monitor VAPs can exist in a system but only one can be up at
4428 	 * any time
4429 	 */
4430 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
4431 	struct dp_pdev *pdev;
4432 	struct htt_rx_ring_tlv_filter htt_tlv_filter;
4433 	struct dp_soc *soc;
4434 	uint8_t pdev_id;
4435 	int mac_id;
4436 
4437 	qdf_assert(vdev);
4438 
4439 	pdev = vdev->pdev;
4440 	pdev_id = pdev->pdev_id;
4441 	soc = pdev->soc;
4442 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_WARN,
4443 		"pdev=%pK, pdev_id=%d, soc=%pK vdev=%pK\n",
4444 		pdev, pdev_id, soc, vdev);
4445 
4446 	/*Check if current pdev's monitor_vdev exists */
4447 	if (pdev->monitor_vdev) {
4448 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
4449 			"vdev=%pK\n", vdev);
4450 		qdf_assert(vdev);
4451 	}
4452 
4453 	pdev->monitor_vdev = vdev;
4454 
4455 	/* If smart monitor mode, do not configure monitor ring */
4456 	if (smart_monitor)
4457 		return QDF_STATUS_SUCCESS;
4458 
4459 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
4460 		"MODE[%x] FP[%02x|%02x|%02x] MO[%02x|%02x|%02x]\n",
4461 		pdev->mon_filter_mode, pdev->fp_mgmt_filter,
4462 		pdev->fp_ctrl_filter, pdev->fp_data_filter,
4463 		pdev->mo_mgmt_filter, pdev->mo_ctrl_filter,
4464 		pdev->mo_data_filter);
4465 
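	/*
	 * Program the monitor destination (buffer) ring filter: full
	 * per-MSDU TLVs and packet payload, using the configured FP/MO
	 * filter settings.
	 */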
4466 	qdf_mem_set(&(htt_tlv_filter), sizeof(htt_tlv_filter), 0x0);
4467 
4468 	htt_tlv_filter.mpdu_start = 1;
4469 	htt_tlv_filter.msdu_start = 1;
4470 	htt_tlv_filter.packet = 1;
4471 	htt_tlv_filter.msdu_end = 1;
4472 	htt_tlv_filter.mpdu_end = 1;
4473 	htt_tlv_filter.packet_header = 1;
4474 	htt_tlv_filter.attention = 1;
4475 	htt_tlv_filter.ppdu_start = 0;
4476 	htt_tlv_filter.ppdu_end = 0;
4477 	htt_tlv_filter.ppdu_end_user_stats = 0;
4478 	htt_tlv_filter.ppdu_end_user_stats_ext = 0;
4479 	htt_tlv_filter.ppdu_end_status_done = 0;
4480 	htt_tlv_filter.header_per_msdu = 1;
4481 	htt_tlv_filter.enable_fp =
4482 		(pdev->mon_filter_mode & MON_FILTER_PASS) ? 1 : 0;
4483 	htt_tlv_filter.enable_md = 0;
4484 	htt_tlv_filter.enable_mo =
4485 		(pdev->mon_filter_mode & MON_FILTER_OTHER) ? 1 : 0;
4486 	htt_tlv_filter.fp_mgmt_filter = pdev->fp_mgmt_filter;
4487 	htt_tlv_filter.fp_ctrl_filter = pdev->fp_ctrl_filter;
4488 	htt_tlv_filter.fp_data_filter = pdev->fp_data_filter;
4489 	htt_tlv_filter.mo_mgmt_filter = pdev->mo_mgmt_filter;
4490 	htt_tlv_filter.mo_ctrl_filter = pdev->mo_ctrl_filter;
4491 	htt_tlv_filter.mo_data_filter = pdev->mo_data_filter;
4492 
4493 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
4494 		int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, pdev_id);
4495 
4496 		htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
4497 			pdev->rxdma_mon_buf_ring[mac_id].hal_srng,
4498 			RXDMA_MONITOR_BUF, RX_BUFFER_SIZE, &htt_tlv_filter);
4499 	}
4500 
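	/*
	 * Program the monitor status ring filter: PPDU start/end and user
	 * stats TLVs only (plus packet header when mcopy mode is enabled).
	 */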
4501 	qdf_mem_set(&(htt_tlv_filter), sizeof(htt_tlv_filter), 0x0);
4502 
4503 	htt_tlv_filter.mpdu_start = 1;
4504 	htt_tlv_filter.msdu_start = 0;
4505 	htt_tlv_filter.packet = 0;
4506 	htt_tlv_filter.msdu_end = 0;
4507 	htt_tlv_filter.mpdu_end = 0;
4508 	htt_tlv_filter.attention = 0;
4509 	htt_tlv_filter.ppdu_start = 1;
4510 	htt_tlv_filter.ppdu_end = 1;
4511 	htt_tlv_filter.ppdu_end_user_stats = 1;
4512 	htt_tlv_filter.ppdu_end_user_stats_ext = 1;
4513 	htt_tlv_filter.ppdu_end_status_done = 1;
4514 	htt_tlv_filter.enable_fp = 1;
4515 	htt_tlv_filter.enable_md = 0;
4516 	htt_tlv_filter.enable_mo = 1;
4517 	if (pdev->mcopy_mode) {
4518 		htt_tlv_filter.packet_header = 1;
4519 	}
4520 	htt_tlv_filter.fp_mgmt_filter = FILTER_MGMT_ALL;
4521 	htt_tlv_filter.fp_ctrl_filter = FILTER_CTRL_ALL;
4522 	htt_tlv_filter.fp_data_filter = FILTER_DATA_ALL;
4523 	htt_tlv_filter.mo_mgmt_filter = FILTER_MGMT_ALL;
4524 	htt_tlv_filter.mo_ctrl_filter = FILTER_CTRL_ALL;
4525 	htt_tlv_filter.mo_data_filter = FILTER_DATA_ALL;
4526 
4527 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
4528 		int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id,
4529 						pdev->pdev_id);
4530 
4531 		htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
4532 			pdev->rxdma_mon_status_ring[mac_id].hal_srng,
4533 			RXDMA_MONITOR_STATUS, RX_BUFFER_SIZE, &htt_tlv_filter);
4534 	}
4535 
4536 	return QDF_STATUS_SUCCESS;
4537 }
4538 
4539 /**
4540  * dp_pdev_set_advance_monitor_filter() - Set DP PDEV monitor filter
4541  * @pdev_handle: Datapath PDEV handle
4542  * @filter_val: Monitor mode filter configuration to apply
4543  * Return: 0 on success, not 0 on failure
4544  */
4545 static int dp_pdev_set_advance_monitor_filter(struct cdp_pdev *pdev_handle,
4546 	struct cdp_monitor_filter *filter_val)
4547 {
4548 	/* Many monitor VAPs can exist in a system but only one can be up at
4549 	 * any time
4550 	 */
4551 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
4552 	struct dp_vdev *vdev = pdev->monitor_vdev;
4553 	struct htt_rx_ring_tlv_filter htt_tlv_filter;
4554 	struct dp_soc *soc;
4555 	uint8_t pdev_id;
4556 	int mac_id;
4557 
4558 	pdev_id = pdev->pdev_id;
4559 	soc = pdev->soc;
4560 
4561 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_WARN,
4562 		"pdev=%pK, pdev_id=%d, soc=%pK vdev=%pK\n",
4563 		pdev, pdev_id, soc, vdev);
4564 
4565 	/*Check if current pdev's monitor_vdev exists */
4566 	if (!pdev->monitor_vdev) {
4567 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
4568 			"vdev=%pK\n", vdev);
4569 		qdf_assert(vdev);
4570 	}
4571 
4572 	/* update filter mode, type in pdev structure */
4573 	pdev->mon_filter_mode = filter_val->mode;
4574 	pdev->fp_mgmt_filter = filter_val->fp_mgmt;
4575 	pdev->fp_ctrl_filter = filter_val->fp_ctrl;
4576 	pdev->fp_data_filter = filter_val->fp_data;
4577 	pdev->mo_mgmt_filter = filter_val->mo_mgmt;
4578 	pdev->mo_ctrl_filter = filter_val->mo_ctrl;
4579 	pdev->mo_data_filter = filter_val->mo_data;
4580 
4581 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
4582 		"MODE[%x] FP[%02x|%02x|%02x] MO[%02x|%02x|%02x]\n",
4583 		pdev->mon_filter_mode, pdev->fp_mgmt_filter,
4584 		pdev->fp_ctrl_filter, pdev->fp_data_filter,
4585 		pdev->mo_mgmt_filter, pdev->mo_ctrl_filter,
4586 		pdev->mo_data_filter);
4587 
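	/*
	 * First push a zeroed filter to both monitor rings to clear the
	 * previously programmed configuration.
	 */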
4588 	qdf_mem_set(&(htt_tlv_filter), sizeof(htt_tlv_filter), 0x0);
4589 
4590 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
4591 		int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, pdev_id);
4592 
4593 		htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
4594 			pdev->rxdma_mon_buf_ring[mac_id].hal_srng,
4595 			RXDMA_MONITOR_BUF, RX_BUFFER_SIZE, &htt_tlv_filter);
4596 
4597 		htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
4598 			pdev->rxdma_mon_status_ring[mac_id].hal_srng,
4599 			RXDMA_MONITOR_STATUS, RX_BUFFER_SIZE, &htt_tlv_filter);
4600 	}
4601 
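	/*
	 * Re-program the monitor destination (buffer) ring filter with the
	 * updated FP/MO filter settings.
	 */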
4602 	htt_tlv_filter.mpdu_start = 1;
4603 	htt_tlv_filter.msdu_start = 1;
4604 	htt_tlv_filter.packet = 1;
4605 	htt_tlv_filter.msdu_end = 1;
4606 	htt_tlv_filter.mpdu_end = 1;
4607 	htt_tlv_filter.packet_header = 1;
4608 	htt_tlv_filter.attention = 1;
4609 	htt_tlv_filter.ppdu_start = 0;
4610 	htt_tlv_filter.ppdu_end = 0;
4611 	htt_tlv_filter.ppdu_end_user_stats = 0;
4612 	htt_tlv_filter.ppdu_end_user_stats_ext = 0;
4613 	htt_tlv_filter.ppdu_end_status_done = 0;
4614 	htt_tlv_filter.header_per_msdu = 1;
4615 	htt_tlv_filter.enable_fp =
4616 		(pdev->mon_filter_mode & MON_FILTER_PASS) ? 1 : 0;
4617 	htt_tlv_filter.enable_md = 0;
4618 	htt_tlv_filter.enable_mo =
4619 		(pdev->mon_filter_mode & MON_FILTER_OTHER) ? 1 : 0;
4620 	htt_tlv_filter.fp_mgmt_filter = pdev->fp_mgmt_filter;
4621 	htt_tlv_filter.fp_ctrl_filter = pdev->fp_ctrl_filter;
4622 	htt_tlv_filter.fp_data_filter = pdev->fp_data_filter;
4623 	htt_tlv_filter.mo_mgmt_filter = pdev->mo_mgmt_filter;
4624 	htt_tlv_filter.mo_ctrl_filter = pdev->mo_ctrl_filter;
4625 	htt_tlv_filter.mo_data_filter = pdev->mo_data_filter;
4626 
4627 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
4628 		int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, pdev_id);
4629 
4630 		htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
4631 			pdev->rxdma_mon_buf_ring[mac_id].hal_srng,
4632 			RXDMA_MONITOR_BUF, RX_BUFFER_SIZE, &htt_tlv_filter);
4633 	}
4634 
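	/*
	 * Re-program the monitor status ring filter: PPDU start/end and
	 * user stats TLVs only (plus packet header in mcopy mode).
	 */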
4635 	qdf_mem_set(&(htt_tlv_filter), sizeof(htt_tlv_filter), 0x0);
4636 
4637 	htt_tlv_filter.mpdu_start = 1;
4638 	htt_tlv_filter.msdu_start = 0;
4639 	htt_tlv_filter.packet = 0;
4640 	htt_tlv_filter.msdu_end = 0;
4641 	htt_tlv_filter.mpdu_end = 0;
4642 	htt_tlv_filter.attention = 0;
4643 	htt_tlv_filter.ppdu_start = 1;
4644 	htt_tlv_filter.ppdu_end = 1;
4645 	htt_tlv_filter.ppdu_end_user_stats = 1;
4646 	htt_tlv_filter.ppdu_end_user_stats_ext = 1;
4647 	htt_tlv_filter.ppdu_end_status_done = 1;
4648 	htt_tlv_filter.enable_fp = 1;
4649 	htt_tlv_filter.enable_md = 0;
4650 	htt_tlv_filter.enable_mo = 1;
4651 	if (pdev->mcopy_mode) {
4652 		htt_tlv_filter.packet_header = 1;
4653 	}
4654 	htt_tlv_filter.fp_mgmt_filter = FILTER_MGMT_ALL;
4655 	htt_tlv_filter.fp_ctrl_filter = FILTER_CTRL_ALL;
4656 	htt_tlv_filter.fp_data_filter = FILTER_DATA_ALL;
4657 	htt_tlv_filter.mo_mgmt_filter = FILTER_MGMT_ALL;
4658 	htt_tlv_filter.mo_ctrl_filter = FILTER_CTRL_ALL;
4659 	htt_tlv_filter.mo_data_filter = FILTER_DATA_ALL;
4660 
4661 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
4662 		int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id,
4663 						pdev->pdev_id);
4664 
4665 		htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
4666 			pdev->rxdma_mon_status_ring[mac_id].hal_srng,
4667 			RXDMA_MONITOR_STATUS, RX_BUFFER_SIZE, &htt_tlv_filter);
4668 	}
4669 
4670 	return QDF_STATUS_SUCCESS;
4671 }
4672 
4673 /**
4674  * dp_get_pdev_id_frm_pdev() - get pdev_id
4675  * @pdev_handle: Datapath PDEV handle
4676  *
4677  * Return: pdev_id
4678  */
4679 static
4680 uint8_t dp_get_pdev_id_frm_pdev(struct cdp_pdev *pdev_handle)
4681 {
4682 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
4683 
4684 	return pdev->pdev_id;
4685 }
4686 
4687 /**
4688  * dp_vdev_get_filter_ucast_data() - get DP VDEV monitor ucast filter
4689  * @vdev_handle: Datapath VDEV handle
4690  * Return: true on ucast filter flag set
4691  */
4692 static bool dp_vdev_get_filter_ucast_data(struct cdp_vdev *vdev_handle)
4693 {
4694 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
4695 	struct dp_pdev *pdev;
4696 
4697 	pdev = vdev->pdev;
4698 
4699 	if ((pdev->fp_data_filter & FILTER_DATA_UCAST) ||
4700 	    (pdev->mo_data_filter & FILTER_DATA_UCAST))
4701 		return true;
4702 
4703 	return false;
4704 }
4705 
4706 /**
4707  * dp_vdev_get_filter_mcast_data() - get DP VDEV monitor mcast filter
4708  * @vdev_handle: Datapath VDEV handle
4709  * Return: true on mcast filter flag set
4710  */
4711 static bool dp_vdev_get_filter_mcast_data(struct cdp_vdev *vdev_handle)
4712 {
4713 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
4714 	struct dp_pdev *pdev;
4715 
4716 	pdev = vdev->pdev;
4717 
4718 	if ((pdev->fp_data_filter & FILTER_DATA_MCAST) ||
4719 	    (pdev->mo_data_filter & FILTER_DATA_MCAST))
4720 		return true;
4721 
4722 	return false;
4723 }
4724 
4725 /**
4726  * dp_vdev_get_filter_non_data() - get DP VDEV monitor non_data filter
4727  * @vdev_handle: Datapath VDEV handle
4728  * Return: true on non data filter flag set
4729  */
4730 static bool dp_vdev_get_filter_non_data(struct cdp_vdev *vdev_handle)
4731 {
4732 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
4733 	struct dp_pdev *pdev;
4734 
4735 	pdev = vdev->pdev;
4736 
4737 	if ((pdev->fp_mgmt_filter & FILTER_MGMT_ALL) ||
4738 	    (pdev->mo_mgmt_filter & FILTER_MGMT_ALL)) {
4739 		if ((pdev->fp_ctrl_filter & FILTER_CTRL_ALL) ||
4740 		    (pdev->mo_ctrl_filter & FILTER_CTRL_ALL)) {
4741 			return true;
4742 		}
4743 	}
4744 
4745 	return false;
4746 }
4747 
4748 #ifdef MESH_MODE_SUPPORT
4749 void dp_peer_set_mesh_mode(struct cdp_vdev *vdev_hdl, uint32_t val)
4750 {
4751 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_hdl;
4752 
4753 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
4754 		FL("val %d"), val);
4755 	vdev->mesh_vdev = val;
4756 }
4757 
4758 /*
4759  * dp_peer_set_mesh_rx_filter() - set the mesh rx filter
4760  * @vdev_hdl: virtual device object
4761  * @val: value to be set
4762  *
4763  * Return: void
4764  */
4765 void dp_peer_set_mesh_rx_filter(struct cdp_vdev *vdev_hdl, uint32_t val)
4766 {
4767 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_hdl;
4768 
4769 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
4770 		FL("val %d"), val);
4771 	vdev->mesh_rx_filter = val;
4772 }
4773 #endif
4774 
4775 /*
4776  * dp_aggregate_pdev_ctrl_frames_stats() - function to aggregate peer stats
4777  * Current scope is bar received count
4778  *
4779  * @pdev: DP_PDEV handle
4780  *
4781  * Return: void
4782  */
4783 #define STATS_PROC_TIMEOUT        (HZ/1000)
4784 
4785 static void
4786 dp_aggregate_pdev_ctrl_frames_stats(struct dp_pdev *pdev)
4787 {
4788 	struct dp_vdev *vdev;
4789 	struct dp_peer *peer;
4790 	uint32_t waitcnt;
4791 
4792 	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
4793 		TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
4794 			if (!peer) {
4795 				QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
4796 					FL("DP Invalid Peer refernce"));
4797 				return;
4798 			}
4799 
4800 			if (peer->delete_in_progress) {
4801 				QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
4802 					FL("DP Peer deletion in progress"));
4803 				continue;
4804 			}
4805 
4806 			qdf_atomic_inc(&peer->ref_cnt);
4807 			waitcnt = 0;
4808 			dp_peer_rxtid_stats(peer, dp_rx_bar_stats_cb, pdev);
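			/*
			 * Wait (up to 10 * STATS_PROC_TIMEOUT) for
			 * dp_rx_bar_stats_cb to signal completion via
			 * pdev->stats_cmd_complete.
			 */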
4809 			while (!(qdf_atomic_read(&(pdev->stats_cmd_complete)))
4810 				&& waitcnt < 10) {
4811 				schedule_timeout_interruptible(
4812 						STATS_PROC_TIMEOUT);
4813 				waitcnt++;
4814 			}
4815 			qdf_atomic_set(&(pdev->stats_cmd_complete), 0);
4816 			dp_peer_unref_delete(peer);
4817 		}
4818 	}
4819 }
4820 
4821 /**
4822  * dp_rx_bar_stats_cb(): BAR received stats callback
4823  * @soc: SOC handle
4824  * @cb_ctxt: Call back context
4825  * @reo_status: Reo status
4826  *
4827  * return: void
4828  */
4829 void dp_rx_bar_stats_cb(struct dp_soc *soc, void *cb_ctxt,
4830 	union hal_reo_status *reo_status)
4831 {
4832 	struct dp_pdev *pdev = (struct dp_pdev *)cb_ctxt;
4833 	struct hal_reo_queue_status *queue_status = &(reo_status->queue_status);
4834 
4835 	if (queue_status->header.status != HAL_REO_CMD_SUCCESS) {
4836 		DP_TRACE_STATS(FATAL, "REO stats failure %d \n",
4837 			queue_status->header.status);
4838 		qdf_atomic_set(&(pdev->stats_cmd_complete), 1);
4839 		return;
4840 	}
4841 
4842 	pdev->stats.rx.bar_recv_cnt += queue_status->bar_rcvd_cnt;
4843 	qdf_atomic_set(&(pdev->stats_cmd_complete), 1);
4844 
4845 }
4846 
4847 /**
4848  * dp_aggregate_vdev_stats(): Consolidate stats at VDEV level
4849  * @vdev: DP VDEV handle
4850  *
4851  * return: void
4852  */
4853 void dp_aggregate_vdev_stats(struct dp_vdev *vdev)
4854 {
4855 	struct dp_peer *peer = NULL;
4856 	struct dp_soc *soc = vdev->pdev->soc;
4857 
4858 	qdf_mem_set(&(vdev->stats.tx), sizeof(vdev->stats.tx), 0x0);
4859 	qdf_mem_set(&(vdev->stats.rx), sizeof(vdev->stats.rx), 0x0);
4860 
4861 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem)
4862 		DP_UPDATE_STATS(vdev, peer);
4863 
4864 	if (soc->cdp_soc.ol_ops->update_dp_stats)
4865 		soc->cdp_soc.ol_ops->update_dp_stats(vdev->pdev->osif_pdev,
4866 			&vdev->stats, (uint16_t) vdev->vdev_id,
4867 			UPDATE_VDEV_STATS);
4868 
4869 }
4870 
4871 /**
4872  * dp_aggregate_pdev_stats(): Consolidate stats at PDEV level
4873  * @pdev: DP PDEV handle
4874  *
4875  * return: void
4876  */
4877 static inline void dp_aggregate_pdev_stats(struct dp_pdev *pdev)
4878 {
4879 	struct dp_vdev *vdev = NULL;
4880 	struct dp_soc *soc = pdev->soc;
4881 
4882 	qdf_mem_set(&(pdev->stats.tx), sizeof(pdev->stats.tx), 0x0);
4883 	qdf_mem_set(&(pdev->stats.rx), sizeof(pdev->stats.rx), 0x0);
4884 	qdf_mem_set(&(pdev->stats.tx_i), sizeof(pdev->stats.tx_i), 0x0);
4885 
4886 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
4887 	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
4888 
4889 		dp_aggregate_vdev_stats(vdev);
4890 		DP_UPDATE_STATS(pdev, vdev);
4891 
4892 		DP_STATS_AGGR_PKT(pdev, vdev, tx_i.nawds_mcast);
4893 
4894 		DP_STATS_AGGR_PKT(pdev, vdev, tx_i.rcvd);
4895 		DP_STATS_AGGR_PKT(pdev, vdev, tx_i.processed);
4896 		DP_STATS_AGGR_PKT(pdev, vdev, tx_i.reinject_pkts);
4897 		DP_STATS_AGGR_PKT(pdev, vdev, tx_i.inspect_pkts);
4898 		DP_STATS_AGGR_PKT(pdev, vdev, tx_i.raw.raw_pkt);
4899 		DP_STATS_AGGR(pdev, vdev, tx_i.raw.dma_map_error);
4900 		DP_STATS_AGGR_PKT(pdev, vdev, tx_i.tso.tso_pkt);
4901 		DP_STATS_AGGR(pdev, vdev, tx_i.tso.dropped_host);
4902 		DP_STATS_AGGR(pdev, vdev, tx_i.tso.dropped_target);
4903 		DP_STATS_AGGR(pdev, vdev, tx_i.sg.dropped_host);
4904 		DP_STATS_AGGR(pdev, vdev, tx_i.sg.dropped_target);
4905 		DP_STATS_AGGR_PKT(pdev, vdev, tx_i.sg.sg_pkt);
4906 		DP_STATS_AGGR_PKT(pdev, vdev, tx_i.mcast_en.mcast_pkt);
4907 		DP_STATS_AGGR(pdev, vdev,
4908 				tx_i.mcast_en.dropped_map_error);
4909 		DP_STATS_AGGR(pdev, vdev,
4910 				tx_i.mcast_en.dropped_self_mac);
4911 		DP_STATS_AGGR(pdev, vdev,
4912 				tx_i.mcast_en.dropped_send_fail);
4913 		DP_STATS_AGGR(pdev, vdev, tx_i.mcast_en.ucast);
4914 		DP_STATS_AGGR(pdev, vdev, tx_i.dropped.dma_error);
4915 		DP_STATS_AGGR(pdev, vdev, tx_i.dropped.ring_full);
4916 		DP_STATS_AGGR(pdev, vdev, tx_i.dropped.enqueue_fail);
4917 		DP_STATS_AGGR(pdev, vdev, tx_i.dropped.desc_na);
4918 		DP_STATS_AGGR(pdev, vdev, tx_i.dropped.res_full);
4919 		DP_STATS_AGGR(pdev, vdev, tx_i.cce_classified);
4920 		DP_STATS_AGGR(pdev, vdev, tx_i.cce_classified_raw);
4921 		DP_STATS_AGGR(pdev, vdev, tx_i.mesh.exception_fw);
4922 		DP_STATS_AGGR(pdev, vdev, tx_i.mesh.completion_fw);
4923 
4924 		pdev->stats.tx_i.dropped.dropped_pkt.num =
4925 			pdev->stats.tx_i.dropped.dma_error +
4926 			pdev->stats.tx_i.dropped.ring_full +
4927 			pdev->stats.tx_i.dropped.enqueue_fail +
4928 			pdev->stats.tx_i.dropped.desc_na +
4929 			pdev->stats.tx_i.dropped.res_full;
4930 
4931 		pdev->stats.tx.last_ack_rssi =
4932 			vdev->stats.tx.last_ack_rssi;
4933 		pdev->stats.tx_i.tso.num_seg =
4934 			vdev->stats.tx_i.tso.num_seg;
4935 	}
4936 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
4937 	if (soc->cdp_soc.ol_ops->update_dp_stats)
4938 		soc->cdp_soc.ol_ops->update_dp_stats(pdev->osif_pdev,
4939 				&pdev->stats, pdev->pdev_id, UPDATE_PDEV_STATS);
4940 
4941 }
4942 
4943 /**
4944  * dp_vdev_getstats() - get vdev packet level stats
4945  * @vdev_handle: Datapath VDEV handle
4946  * @stats: cdp network device stats structure
4947  *
4948  * Return: void
4949  */
4950 static void dp_vdev_getstats(void *vdev_handle,
4951 		struct cdp_dev_stats *stats)
4952 {
4953 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
4954 
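	/*
	 * Note: only vdev->stats is refreshed here; the cdp_dev_stats
	 * argument is not populated in this path.
	 */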
4955 	dp_aggregate_vdev_stats(vdev);
4956 }
4957 
4958 
4959 /**
4960  * dp_pdev_getstats() - get pdev packet level stats
4961  * @pdev_handle: Datapath PDEV handle
4962  * @stats: cdp network device stats structure
4963  *
4964  * Return: void
4965  */
4966 static void dp_pdev_getstats(void *pdev_handle,
4967 		struct cdp_dev_stats *stats)
4968 {
4969 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
4970 
4971 	dp_aggregate_pdev_stats(pdev);
4972 
4973 	stats->tx_packets = pdev->stats.tx_i.rcvd.num;
4974 	stats->tx_bytes = pdev->stats.tx_i.rcvd.bytes;
4975 
4976 	stats->tx_errors = pdev->stats.tx.tx_failed +
4977 		pdev->stats.tx_i.dropped.dropped_pkt.num;
4978 	stats->tx_dropped = stats->tx_errors;
4979 
4980 	stats->rx_packets = pdev->stats.rx.unicast.num +
4981 		pdev->stats.rx.multicast.num +
4982 		pdev->stats.rx.bcast.num;
4983 	stats->rx_bytes = pdev->stats.rx.unicast.bytes +
4984 		pdev->stats.rx.multicast.bytes +
4985 		pdev->stats.rx.bcast.bytes;
4986 }
4987 
4988 /**
4989  * dp_get_device_stats() - get interface level packet stats
4990  * @handle: device handle
4991  * @stats: cdp network device stats structure
4992  * @type: device type pdev/vdev
4993  *
4994  * Return: void
4995  */
4996 static void dp_get_device_stats(void *handle,
4997 		struct cdp_dev_stats *stats, uint8_t type)
4998 {
4999 	switch (type) {
5000 	case UPDATE_VDEV_STATS:
5001 		dp_vdev_getstats(handle, stats);
5002 		break;
5003 	case UPDATE_PDEV_STATS:
5004 		dp_pdev_getstats(handle, stats);
5005 		break;
5006 	default:
5007 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
5008 			"apstats cannot be updated for this input "
5009 			"type %d\n", type);
5010 		break;
5011 	}
5012 
5013 }
5014 
5015 
5016 /**
5017  * dp_print_pdev_tx_stats(): Print Pdev level TX stats
5018  * @pdev: DP_PDEV Handle
5019  *
5020  * Return:void
5021  */
5022 static inline void
5023 dp_print_pdev_tx_stats(struct dp_pdev *pdev)
5024 {
5025 	uint8_t index = 0;
5026 	DP_PRINT_STATS("PDEV Tx Stats:\n");
5027 	DP_PRINT_STATS("Received From Stack:");
5028 	DP_PRINT_STATS("	Packets = %d",
5029 			pdev->stats.tx_i.rcvd.num);
5030 	DP_PRINT_STATS("	Bytes = %llu",
5031 			pdev->stats.tx_i.rcvd.bytes);
5032 	DP_PRINT_STATS("Processed:");
5033 	DP_PRINT_STATS("	Packets = %d",
5034 			pdev->stats.tx_i.processed.num);
5035 	DP_PRINT_STATS("	Bytes = %llu",
5036 			pdev->stats.tx_i.processed.bytes);
5037 	DP_PRINT_STATS("Total Completions:");
5038 	DP_PRINT_STATS("	Packets = %u",
5039 			pdev->stats.tx.comp_pkt.num);
5040 	DP_PRINT_STATS("	Bytes = %llu",
5041 			pdev->stats.tx.comp_pkt.bytes);
5042 	DP_PRINT_STATS("Successful Completions:");
5043 	DP_PRINT_STATS("	Packets = %u",
5044 			pdev->stats.tx.tx_success.num);
5045 	DP_PRINT_STATS("	Bytes = %llu",
5046 			pdev->stats.tx.tx_success.bytes);
5047 	DP_PRINT_STATS("Dropped:");
5048 	DP_PRINT_STATS("	Total = %d",
5049 			pdev->stats.tx_i.dropped.dropped_pkt.num);
5050 	DP_PRINT_STATS("	Dma_map_error = %d",
5051 			pdev->stats.tx_i.dropped.dma_error);
5052 	DP_PRINT_STATS("	Ring Full = %d",
5053 			pdev->stats.tx_i.dropped.ring_full);
5054 	DP_PRINT_STATS("	Descriptor Not available = %d",
5055 			pdev->stats.tx_i.dropped.desc_na);
5056 	DP_PRINT_STATS("	HW enqueue failed= %d",
5057 			pdev->stats.tx_i.dropped.enqueue_fail);
5058 	DP_PRINT_STATS("	Resources Full = %d",
5059 			pdev->stats.tx_i.dropped.res_full);
5060 	DP_PRINT_STATS("	FW removed = %d",
5061 			pdev->stats.tx.dropped.fw_rem);
5062 	DP_PRINT_STATS("	FW removed transmitted = %d",
5063 			pdev->stats.tx.dropped.fw_rem_tx);
5064 	DP_PRINT_STATS("	FW removed untransmitted = %d",
5065 			pdev->stats.tx.dropped.fw_rem_notx);
5066 	DP_PRINT_STATS("	FW removed untransmitted fw_reason1 = %d",
5067 			pdev->stats.tx.dropped.fw_reason1);
5068 	DP_PRINT_STATS("	FW removed untransmitted fw_reason2 = %d",
5069 			pdev->stats.tx.dropped.fw_reason2);
5070 	DP_PRINT_STATS("	FW removed untransmitted fw_reason3 = %d",
5071 			pdev->stats.tx.dropped.fw_reason3);
5072 	DP_PRINT_STATS("	Aged Out from msdu/mpdu queues = %d",
5073 			pdev->stats.tx.dropped.age_out);
5074 	DP_PRINT_STATS("Scatter Gather:");
5075 	DP_PRINT_STATS("	Packets = %d",
5076 			pdev->stats.tx_i.sg.sg_pkt.num);
5077 	DP_PRINT_STATS("	Bytes = %llu",
5078 			pdev->stats.tx_i.sg.sg_pkt.bytes);
5079 	DP_PRINT_STATS("	Dropped By Host = %d",
5080 			pdev->stats.tx_i.sg.dropped_host);
5081 	DP_PRINT_STATS("	Dropped By Target = %d",
5082 			pdev->stats.tx_i.sg.dropped_target);
5083 	DP_PRINT_STATS("TSO:");
5084 	DP_PRINT_STATS("	Number of Segments = %d",
5085 			pdev->stats.tx_i.tso.num_seg);
5086 	DP_PRINT_STATS("	Packets = %d",
5087 			pdev->stats.tx_i.tso.tso_pkt.num);
5088 	DP_PRINT_STATS("	Bytes = %llu",
5089 			pdev->stats.tx_i.tso.tso_pkt.bytes);
5090 	DP_PRINT_STATS("	Dropped By Host = %d",
5091 			pdev->stats.tx_i.tso.dropped_host);
5092 	DP_PRINT_STATS("Mcast Enhancement:");
5093 	DP_PRINT_STATS("	Packets = %d",
5094 			pdev->stats.tx_i.mcast_en.mcast_pkt.num);
5095 	DP_PRINT_STATS("	Bytes = %llu",
5096 			pdev->stats.tx_i.mcast_en.mcast_pkt.bytes);
5097 	DP_PRINT_STATS("	Dropped: Map Errors = %d",
5098 			pdev->stats.tx_i.mcast_en.dropped_map_error);
5099 	DP_PRINT_STATS("	Dropped: Self Mac = %d",
5100 			pdev->stats.tx_i.mcast_en.dropped_self_mac);
5101 	DP_PRINT_STATS("	Dropped: Send Fail = %d",
5102 			pdev->stats.tx_i.mcast_en.dropped_send_fail);
5103 	DP_PRINT_STATS("	Unicast sent = %d",
5104 			pdev->stats.tx_i.mcast_en.ucast);
5105 	DP_PRINT_STATS("Raw:");
5106 	DP_PRINT_STATS("	Packets = %d",
5107 			pdev->stats.tx_i.raw.raw_pkt.num);
5108 	DP_PRINT_STATS("	Bytes = %llu",
5109 			pdev->stats.tx_i.raw.raw_pkt.bytes);
5110 	DP_PRINT_STATS("	DMA map error = %d",
5111 			pdev->stats.tx_i.raw.dma_map_error);
5112 	DP_PRINT_STATS("Reinjected:");
5113 	DP_PRINT_STATS("	Packets = %d",
5114 			pdev->stats.tx_i.reinject_pkts.num);
5115 	DP_PRINT_STATS("	Bytes = %llu\n",
5116 			pdev->stats.tx_i.reinject_pkts.bytes);
5117 	DP_PRINT_STATS("Inspected:");
5118 	DP_PRINT_STATS("	Packets = %d",
5119 			pdev->stats.tx_i.inspect_pkts.num);
5120 	DP_PRINT_STATS("	Bytes = %llu",
5121 			pdev->stats.tx_i.inspect_pkts.bytes);
5122 	DP_PRINT_STATS("Nawds Multicast:");
5123 	DP_PRINT_STATS("	Packets = %d",
5124 			pdev->stats.tx_i.nawds_mcast.num);
5125 	DP_PRINT_STATS("	Bytes = %llu",
5126 			pdev->stats.tx_i.nawds_mcast.bytes);
5127 	DP_PRINT_STATS("CCE Classified:");
5128 	DP_PRINT_STATS("	CCE Classified Packets: %u",
5129 			pdev->stats.tx_i.cce_classified);
5130 	DP_PRINT_STATS("	RAW CCE Classified Packets: %u",
5131 			pdev->stats.tx_i.cce_classified_raw);
5132 	DP_PRINT_STATS("Mesh stats:");
5133 	DP_PRINT_STATS("	frames to firmware: %u",
5134 			pdev->stats.tx_i.mesh.exception_fw);
5135 	DP_PRINT_STATS("	completions from fw: %u",
5136 			pdev->stats.tx_i.mesh.completion_fw);
5137 	DP_PRINT_STATS("PPDU stats counter");
5138 	for (index = 0; index < CDP_PPDU_STATS_MAX_TAG; index++) {
5139 		DP_PRINT_STATS("	Tag[%d] = %llu", index,
5140 				pdev->stats.ppdu_stats_counter[index]);
5141 	}
5142 }
5143 
5144 /**
5145  * dp_print_pdev_rx_stats(): Print Pdev level RX stats
5146  * @pdev: DP_PDEV Handle
5147  *
5148  * Return: void
5149  */
5150 static inline void
5151 dp_print_pdev_rx_stats(struct dp_pdev *pdev)
5152 {
5153 	DP_PRINT_STATS("PDEV Rx Stats:\n");
5154 	DP_PRINT_STATS("Received From HW (Per Rx Ring):");
5155 	DP_PRINT_STATS("	Packets = %d %d %d %d",
5156 			pdev->stats.rx.rcvd_reo[0].num,
5157 			pdev->stats.rx.rcvd_reo[1].num,
5158 			pdev->stats.rx.rcvd_reo[2].num,
5159 			pdev->stats.rx.rcvd_reo[3].num);
5160 	DP_PRINT_STATS("	Bytes = %llu %llu %llu %llu",
5161 			pdev->stats.rx.rcvd_reo[0].bytes,
5162 			pdev->stats.rx.rcvd_reo[1].bytes,
5163 			pdev->stats.rx.rcvd_reo[2].bytes,
5164 			pdev->stats.rx.rcvd_reo[3].bytes);
5165 	DP_PRINT_STATS("Replenished:");
5166 	DP_PRINT_STATS("	Packets = %d",
5167 			pdev->stats.replenish.pkts.num);
5168 	DP_PRINT_STATS("	Bytes = %llu",
5169 			pdev->stats.replenish.pkts.bytes);
5170 	DP_PRINT_STATS("	Buffers Added To Freelist = %d",
5171 			pdev->stats.buf_freelist);
5172 	DP_PRINT_STATS("	Low threshold intr = %d",
5173 			pdev->stats.replenish.low_thresh_intrs);
5174 	DP_PRINT_STATS("Dropped:");
5175 	DP_PRINT_STATS("	msdu_not_done = %d",
5176 			pdev->stats.dropped.msdu_not_done);
5177 	DP_PRINT_STATS("        mon_rx_drop = %d",
5178 			pdev->stats.dropped.mon_rx_drop);
5179 	DP_PRINT_STATS("Sent To Stack:");
5180 	DP_PRINT_STATS("	Packets = %d",
5181 			pdev->stats.rx.to_stack.num);
5182 	DP_PRINT_STATS("	Bytes = %llu",
5183 			pdev->stats.rx.to_stack.bytes);
5184 	DP_PRINT_STATS("Multicast/Broadcast:");
5185 	DP_PRINT_STATS("	Packets = %d",
5186 			(pdev->stats.rx.multicast.num +
5187 			pdev->stats.rx.bcast.num));
5188 	DP_PRINT_STATS("	Bytes = %llu",
5189 			(pdev->stats.rx.multicast.bytes +
5190 			pdev->stats.rx.bcast.bytes));
5191 	DP_PRINT_STATS("Errors:");
5192 	DP_PRINT_STATS("	Rxdma Ring Un-inititalized = %d",
5193 			pdev->stats.replenish.rxdma_err);
5194 	DP_PRINT_STATS("	Desc Alloc Failed: = %d",
5195 			pdev->stats.err.desc_alloc_fail);
5196 	DP_PRINT_STATS("	IP checksum error = %d",
5197 		       pdev->stats.err.ip_csum_err);
5198 	DP_PRINT_STATS("	TCP/UDP checksum error = %d",
5199 		       pdev->stats.err.tcp_udp_csum_err);
5200 
5201 	/* Get bar_recv_cnt */
5202 	dp_aggregate_pdev_ctrl_frames_stats(pdev);
5203 	DP_PRINT_STATS("BAR Received Count: = %d",
5204 			pdev->stats.rx.bar_recv_cnt);
5205 
5206 }
5207 
5208 /**
5209  * dp_print_pdev_rx_mon_stats(): Print Pdev level RX monitor stats
5210  * @pdev: DP_PDEV Handle
5211  *
5212  * Return: void
5213  */
5214 static inline void
5215 dp_print_pdev_rx_mon_stats(struct dp_pdev *pdev)
5216 {
5217 	struct cdp_pdev_mon_stats *rx_mon_stats;
5218 
5219 	rx_mon_stats = &pdev->rx_mon_stats;
5220 
5221 	DP_PRINT_STATS("PDEV Rx Monitor Stats:\n");
5222 
5223 	dp_rx_mon_print_dbg_ppdu_stats(rx_mon_stats);
5224 
5225 	DP_PRINT_STATS("status_ppdu_done_cnt = %d",
5226 		       rx_mon_stats->status_ppdu_done);
5227 	DP_PRINT_STATS("dest_ppdu_done_cnt = %d",
5228 		       rx_mon_stats->dest_ppdu_done);
5229 	DP_PRINT_STATS("dest_mpdu_done_cnt = %d",
5230 		       rx_mon_stats->dest_mpdu_done);
5231 	DP_PRINT_STATS("dest_mpdu_drop_cnt = %d",
5232 		       rx_mon_stats->dest_mpdu_drop);
5233 }
5234 
5235 /**
5236  * dp_print_soc_tx_stats(): Print SOC level TX stats
5237  * @soc: DP_SOC Handle
5238  *
5239  * Return: void
5240  */
5241 static inline void
5242 dp_print_soc_tx_stats(struct dp_soc *soc)
5243 {
5244 	uint8_t desc_pool_id;
5245 	soc->stats.tx.desc_in_use = 0;
5246 
5247 	DP_PRINT_STATS("SOC Tx Stats:\n");
5248 
5249 	for (desc_pool_id = 0;
5250 	     desc_pool_id < wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
5251 	     desc_pool_id++)
5252 		soc->stats.tx.desc_in_use +=
5253 			soc->tx_desc[desc_pool_id].num_allocated;
5254 
5255 	DP_PRINT_STATS("Tx Descriptors In Use = %d",
5256 			soc->stats.tx.desc_in_use);
5257 	DP_PRINT_STATS("Invalid peer:");
5258 	DP_PRINT_STATS("	Packets = %d",
5259 			soc->stats.tx.tx_invalid_peer.num);
5260 	DP_PRINT_STATS("	Bytes = %llu",
5261 			soc->stats.tx.tx_invalid_peer.bytes);
5262 	DP_PRINT_STATS("Packets dropped due to TCL ring full = %d %d %d",
5263 			soc->stats.tx.tcl_ring_full[0],
5264 			soc->stats.tx.tcl_ring_full[1],
5265 			soc->stats.tx.tcl_ring_full[2]);
5266 
5267 }
5268 /**
5269  * dp_print_soc_rx_stats: Print SOC level Rx stats
5270  * @soc: DP_SOC Handle
5271  *
5272  * Return:void
5273  */
5274 static inline void
5275 dp_print_soc_rx_stats(struct dp_soc *soc)
5276 {
5277 	uint32_t i;
5278 	char reo_error[DP_REO_ERR_LENGTH];
5279 	char rxdma_error[DP_RXDMA_ERR_LENGTH];
5280 	uint8_t index = 0;
5281 
5282 	DP_PRINT_STATS("SOC Rx Stats:\n");
5283 	DP_PRINT_STATS("Errors:\n");
5284 	DP_PRINT_STATS("Rx Decrypt Errors = %d",
5285 			(soc->stats.rx.err.rxdma_error[HAL_RXDMA_ERR_DECRYPT] +
5286 			soc->stats.rx.err.rxdma_error[HAL_RXDMA_ERR_TKIP_MIC]));
5287 	DP_PRINT_STATS("Invalid RBM = %d",
5288 			soc->stats.rx.err.invalid_rbm);
5289 	DP_PRINT_STATS("Invalid Vdev = %d",
5290 			soc->stats.rx.err.invalid_vdev);
5291 	DP_PRINT_STATS("Invalid Pdev = %d",
5292 			soc->stats.rx.err.invalid_pdev);
5293 	DP_PRINT_STATS("Invalid Peer = %d",
5294 			soc->stats.rx.err.rx_invalid_peer.num);
5295 	DP_PRINT_STATS("HAL Ring Access Fail = %d",
5296 			soc->stats.rx.err.hal_ring_access_fail);
5297 
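	/* Build a space separated list of per-error-code RXDMA drop counts */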
5298 	for (i = 0; i < HAL_RXDMA_ERR_MAX; i++) {
5299 		index += qdf_snprint(&rxdma_error[index],
5300 				DP_RXDMA_ERR_LENGTH - index,
5301 				" %d", soc->stats.rx.err.rxdma_error[i]);
5302 	}
5303 	DP_PRINT_STATS("RXDMA Error (0-31):%s",
5304 			rxdma_error);
5305 
5306 	index = 0;
5307 	for (i = 0; i < HAL_REO_ERR_MAX; i++) {
5308 		index += qdf_snprint(&reo_error[index],
5309 				DP_REO_ERR_LENGTH - index,
5310 				" %d", soc->stats.rx.err.reo_error[i]);
5311 	}
5312 	DP_PRINT_STATS("REO Error(0-14):%s",
5313 			reo_error);
5314 }
5315 
5316 
5317 /**
5318  * dp_print_ring_stat_from_hal(): Print hal level ring stats
5319  * @soc: DP_SOC handle
5320  * @srng: DP_SRNG handle
5321  * @ring_name: SRNG name
5322  *
5323  * Return: void
5324  */
5325 static inline void
5326 dp_print_ring_stat_from_hal(struct dp_soc *soc,  struct dp_srng *srng,
5327 	char *ring_name)
5328 {
5329 	uint32_t tailp;
5330 	uint32_t headp;
5331 
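	/* Only query HAL for rings that have actually been initialized */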
5332 	if (srng->hal_srng != NULL) {
5333 		hal_api_get_tphp(soc->hal_soc, srng->hal_srng, &tailp, &headp);
5334 		DP_PRINT_STATS("%s : Head Pointer = %d Tail Pointer = %d\n",
5335 				ring_name, headp, tailp);
5336 	}
5337 }
5338 
5339 /**
5340  * dp_print_ring_stats(): Print tail and head pointer
5341  * @pdev: DP_PDEV handle
5342  *
5343  * Return:void
5344  */
5345 static inline void
5346 dp_print_ring_stats(struct dp_pdev *pdev)
5347 {
5348 	uint32_t i;
5349 	char ring_name[STR_MAXLEN + 1];
5350 	int mac_id;
5351 
5352 	dp_print_ring_stat_from_hal(pdev->soc,
5353 			&pdev->soc->reo_exception_ring,
5354 			"Reo Exception Ring");
5355 	dp_print_ring_stat_from_hal(pdev->soc,
5356 			&pdev->soc->reo_reinject_ring,
5357 			"Reo Reinject Ring");
5358 	dp_print_ring_stat_from_hal(pdev->soc,
5359 			&pdev->soc->reo_cmd_ring,
5360 			"Reo Command Ring");
5361 	dp_print_ring_stat_from_hal(pdev->soc,
5362 			&pdev->soc->reo_status_ring,
5363 			"Reo Status Ring");
5364 	dp_print_ring_stat_from_hal(pdev->soc,
5365 			&pdev->soc->rx_rel_ring,
5366 			"Rx Release ring");
5367 	dp_print_ring_stat_from_hal(pdev->soc,
5368 			&pdev->soc->tcl_cmd_ring,
5369 			"Tcl command Ring");
5370 	dp_print_ring_stat_from_hal(pdev->soc,
5371 			&pdev->soc->tcl_status_ring,
5372 			"Tcl Status Ring");
5373 	dp_print_ring_stat_from_hal(pdev->soc,
5374 			&pdev->soc->wbm_desc_rel_ring,
5375 			"Wbm Desc Rel Ring");
5376 	for (i = 0; i < MAX_REO_DEST_RINGS; i++) {
5377 		snprintf(ring_name, STR_MAXLEN, "Reo Dest Ring %d", i);
5378 		dp_print_ring_stat_from_hal(pdev->soc,
5379 				&pdev->soc->reo_dest_ring[i],
5380 				ring_name);
5381 	}
5382 	for (i = 0; i < pdev->soc->num_tcl_data_rings; i++) {
5383 		snprintf(ring_name, STR_MAXLEN, "Tcl Data Ring %d", i);
5384 		dp_print_ring_stat_from_hal(pdev->soc,
5385 				&pdev->soc->tcl_data_ring[i],
5386 				ring_name);
5387 	}
5388 	for (i = 0; i < MAX_TCL_DATA_RINGS; i++) {
5389 		snprintf(ring_name, STR_MAXLEN, "Tx Comp Ring %d", i);
5390 		dp_print_ring_stat_from_hal(pdev->soc,
5391 				&pdev->soc->tx_comp_ring[i],
5392 				ring_name);
5393 	}
5394 	dp_print_ring_stat_from_hal(pdev->soc,
5395 			&pdev->rx_refill_buf_ring,
5396 			"Rx Refill Buf Ring");
5397 
5398 	dp_print_ring_stat_from_hal(pdev->soc,
5399 			&pdev->rx_refill_buf_ring2,
5400 			"Second Rx Refill Buf Ring");
5401 
5402 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
5403 		dp_print_ring_stat_from_hal(pdev->soc,
5404 				&pdev->rxdma_mon_buf_ring[mac_id],
5405 				"Rxdma Mon Buf Ring");
5406 		dp_print_ring_stat_from_hal(pdev->soc,
5407 				&pdev->rxdma_mon_dst_ring[mac_id],
5408 				"Rxdma Mon Dst Ring");
5409 		dp_print_ring_stat_from_hal(pdev->soc,
5410 				&pdev->rxdma_mon_status_ring[mac_id],
5411 				"Rxdma Mon Status Ring");
5412 		dp_print_ring_stat_from_hal(pdev->soc,
5413 				&pdev->rxdma_mon_desc_ring[mac_id],
5414 				"Rxdma Mon Desc Ring");
5415 	}
5416 
5417 	for (i = 0; i < MAX_RX_MAC_RINGS; i++) {
5418 		snprintf(ring_name, STR_MAXLEN, "Rxdma err dst ring %d", i);
5419 		dp_print_ring_stat_from_hal(pdev->soc,
5420 			&pdev->rxdma_err_dst_ring[i],
5421 			ring_name);
5422 	}
5423 
5424 	for (i = 0; i < MAX_RX_MAC_RINGS; i++) {
5425 		snprintf(ring_name, STR_MAXLEN, "Rx mac buf ring %d", i);
5426 		dp_print_ring_stat_from_hal(pdev->soc,
5427 				&pdev->rx_mac_buf_ring[i],
5428 				ring_name);
5429 	}
5430 }
5431 
5432 /**
5433  * dp_txrx_host_stats_clr(): Reinitialize the txrx stats
5434  * @vdev: DP_VDEV handle
5435  *
5436  * Return:void
5437  */
5438 static inline void
5439 dp_txrx_host_stats_clr(struct dp_vdev *vdev)
5440 {
5441 	struct dp_peer *peer = NULL;
5442 	struct dp_soc *soc = (struct dp_soc *)vdev->pdev->soc;
5443 
5444 	DP_STATS_CLR(vdev->pdev);
5445 	DP_STATS_CLR(vdev->pdev->soc);
5446 	DP_STATS_CLR(vdev);
5447 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
5448 		if (!peer)
5449 			return;
5450 		DP_STATS_CLR(peer);
5451 
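		/* Also push the cleared stats through the OL update callback */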
5452 		if (soc->cdp_soc.ol_ops->update_dp_stats) {
5453 			soc->cdp_soc.ol_ops->update_dp_stats(
5454 					vdev->pdev->osif_pdev,
5455 					&peer->stats,
5456 					peer->peer_ids[0],
5457 					UPDATE_PEER_STATS);
5458 		}
5459 
5460 	}
5461 
5462 	if (soc->cdp_soc.ol_ops->update_dp_stats)
5463 		soc->cdp_soc.ol_ops->update_dp_stats(vdev->pdev->osif_pdev,
5464 				&vdev->stats, (uint16_t)vdev->vdev_id,
5465 				UPDATE_VDEV_STATS);
5466 }
5467 
5468 /**
5469  * dp_print_rx_rates(): Print Rx rate stats
5470  * @vdev: DP_VDEV handle
5471  *
5472  * Return:void
5473  */
5474 static inline void
5475 dp_print_rx_rates(struct dp_vdev *vdev)
5476 {
5477 	struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;
5478 	uint8_t i, mcs, pkt_type;
5479 	uint8_t index = 0;
5480 	char nss[DP_NSS_LENGTH];
5481 
5482 	DP_PRINT_STATS("Rx Rate Info:\n");
5483 
5484 	for (pkt_type = 0; pkt_type < DOT11_MAX; pkt_type++) {
5485 		index = 0;
5486 		for (mcs = 0; mcs < MAX_MCS; mcs++) {
5487 			if (!dp_rate_string[pkt_type][mcs].valid)
5488 				continue;
5489 
5490 			DP_PRINT_STATS("	%s = %d",
5491 					dp_rate_string[pkt_type][mcs].mcs_type,
5492 					pdev->stats.rx.pkt_type[pkt_type].
5493 					mcs_count[mcs]);
5494 		}
5495 
5496 		DP_PRINT_STATS("\n");
5497 	}
5498 
5499 	index = 0;
5500 	for (i = 0; i < SS_COUNT; i++) {
5501 		index += qdf_snprint(&nss[index], DP_NSS_LENGTH - index,
5502 				" %d", pdev->stats.rx.nss[i]);
5503 	}
5504 	DP_PRINT_STATS("NSS(1-8) = %s",
5505 			nss);
5506 
5507 	DP_PRINT_STATS("SGI ="
5508 			" 0.8us %d,"
5509 			" 0.4us %d,"
5510 			" 1.6us %d,"
5511 			" 3.2us %d,",
5512 			pdev->stats.rx.sgi_count[0],
5513 			pdev->stats.rx.sgi_count[1],
5514 			pdev->stats.rx.sgi_count[2],
5515 			pdev->stats.rx.sgi_count[3]);
5516 	DP_PRINT_STATS("BW Counts = 20MHZ %d, 40MHZ %d, 80MHZ %d, 160MHZ %d",
5517 			pdev->stats.rx.bw[0], pdev->stats.rx.bw[1],
5518 			pdev->stats.rx.bw[2], pdev->stats.rx.bw[3]);
5519 	DP_PRINT_STATS("Reception Type ="
5520 			" SU: %d,"
5521 			" MU_MIMO:%d,"
5522 			" MU_OFDMA:%d,"
5523 			" MU_OFDMA_MIMO:%d\n",
5524 			pdev->stats.rx.reception_type[0],
5525 			pdev->stats.rx.reception_type[1],
5526 			pdev->stats.rx.reception_type[2],
5527 			pdev->stats.rx.reception_type[3]);
5528 	DP_PRINT_STATS("Aggregation:\n");
5529 	DP_PRINT_STATS("Number of Msdu's Part of Ampdus = %d",
5530 			pdev->stats.rx.ampdu_cnt);
5531 	DP_PRINT_STATS("Number of Msdu's With No Mpdu Level Aggregation : %d",
5532 			pdev->stats.rx.non_ampdu_cnt);
5533 	DP_PRINT_STATS("Number of Msdu's Part of Amsdu: %d",
5534 			pdev->stats.rx.amsdu_cnt);
5535 	DP_PRINT_STATS("Number of Msdu's With No Msdu Level Aggregation: %d",
5536 			pdev->stats.rx.non_amsdu_cnt);
5537 }
5538 
5539 /**
5540  * dp_print_tx_rates(): Print tx rates
5541  * @vdev: DP_VDEV handle
5542  *
5543  * Return:void
5544  */
5545 static inline void
5546 dp_print_tx_rates(struct dp_vdev *vdev)
5547 {
5548 	struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;
5549 	uint8_t mcs, pkt_type;
5550 	uint32_t index;
5551 
5552 	DP_PRINT_STATS("Tx Rate Info:\n");
5553 
5554 	for (pkt_type = 0; pkt_type < DOT11_MAX; pkt_type++) {
5555 		index = 0;
5556 		for (mcs = 0; mcs < MAX_MCS; mcs++) {
5557 			if (!dp_rate_string[pkt_type][mcs].valid)
5558 				continue;
5559 
5560 			DP_PRINT_STATS("	%s = %d",
5561 					dp_rate_string[pkt_type][mcs].mcs_type,
5562 					pdev->stats.tx.pkt_type[pkt_type].
5563 					mcs_count[mcs]);
5564 		}
5565 
5566 		DP_PRINT_STATS("\n");
5567 	}
5568 
5569 	DP_PRINT_STATS("SGI ="
5570 			" 0.8us %d"
5571 			" 0.4us %d"
5572 			" 1.6us %d"
5573 			" 3.2us %d",
5574 			pdev->stats.tx.sgi_count[0],
5575 			pdev->stats.tx.sgi_count[1],
5576 			pdev->stats.tx.sgi_count[2],
5577 			pdev->stats.tx.sgi_count[3]);
5578 
5579 	DP_PRINT_STATS("BW Counts = 20MHZ %d, 40MHZ %d, 80MHZ %d, 160MHZ %d",
5580 			pdev->stats.tx.bw[0], pdev->stats.tx.bw[1],
5581 			pdev->stats.tx.bw[2], pdev->stats.tx.bw[3]);
5582 
5583 	DP_PRINT_STATS("OFDMA = %d", pdev->stats.tx.ofdma);
5584 	DP_PRINT_STATS("STBC = %d", pdev->stats.tx.stbc);
5585 	DP_PRINT_STATS("LDPC = %d", pdev->stats.tx.ldpc);
5586 	DP_PRINT_STATS("Retries = %d", pdev->stats.tx.retries);
5587 	DP_PRINT_STATS("Last ack rssi = %d\n", pdev->stats.tx.last_ack_rssi);
5588 
5589 	DP_PRINT_STATS("Aggregation:\n");
5590 	DP_PRINT_STATS("Number of Msdu's Part of Amsdu = %d",
5591 			pdev->stats.tx.amsdu_cnt);
5592 	DP_PRINT_STATS("Number of Msdu's With No Msdu Level Aggregation = %d",
5593 			pdev->stats.tx.non_amsdu_cnt);
5594 }
5595 
5596 /**
5597  * dp_print_peer_stats(): Print peer stats
5598  * @peer: DP_PEER handle
5599  *
5600  * Return: void
5601  */
5602 static inline void dp_print_peer_stats(struct dp_peer *peer)
5603 {
5604 	uint8_t i, mcs, pkt_type;
5605 	uint32_t index;
5606 	char nss[DP_NSS_LENGTH];
5607 	DP_PRINT_STATS("Node Tx Stats:\n");
5608 	DP_PRINT_STATS("Total Packet Completions = %d",
5609 			peer->stats.tx.comp_pkt.num);
5610 	DP_PRINT_STATS("Total Bytes Completions = %llu",
5611 			peer->stats.tx.comp_pkt.bytes);
5612 	DP_PRINT_STATS("Success Packets = %d",
5613 			peer->stats.tx.tx_success.num);
5614 	DP_PRINT_STATS("Success Bytes = %llu",
5615 			peer->stats.tx.tx_success.bytes);
5616 	DP_PRINT_STATS("Unicast Success Packets = %d",
5617 			peer->stats.tx.ucast.num);
5618 	DP_PRINT_STATS("Unicast Success Bytes = %llu",
5619 			peer->stats.tx.ucast.bytes);
5620 	DP_PRINT_STATS("Multicast Success Packets = %d",
5621 			peer->stats.tx.mcast.num);
5622 	DP_PRINT_STATS("Multicast Success Bytes = %llu",
5623 			peer->stats.tx.mcast.bytes);
5624 	DP_PRINT_STATS("Broadcast Success Packets = %d",
5625 			peer->stats.tx.bcast.num);
5626 	DP_PRINT_STATS("Broadcast Success Bytes = %llu",
5627 			peer->stats.tx.bcast.bytes);
5628 	DP_PRINT_STATS("Packets Failed = %d",
5629 			peer->stats.tx.tx_failed);
5630 	DP_PRINT_STATS("Packets In OFDMA = %d",
5631 			peer->stats.tx.ofdma);
5632 	DP_PRINT_STATS("Packets In STBC = %d",
5633 			peer->stats.tx.stbc);
5634 	DP_PRINT_STATS("Packets In LDPC = %d",
5635 			peer->stats.tx.ldpc);
5636 	DP_PRINT_STATS("Packet Retries = %d",
5637 			peer->stats.tx.retries);
5638 	DP_PRINT_STATS("MSDU's Part of AMSDU = %d",
5639 			peer->stats.tx.amsdu_cnt);
5640 	DP_PRINT_STATS("Last Packet RSSI = %d",
5641 			peer->stats.tx.last_ack_rssi);
5642 	DP_PRINT_STATS("Dropped At FW: Removed = %d",
5643 			peer->stats.tx.dropped.fw_rem);
5644 	DP_PRINT_STATS("Dropped At FW: Removed transmitted = %d",
5645 			peer->stats.tx.dropped.fw_rem_tx);
5646 	DP_PRINT_STATS("Dropped At FW: Removed Untransmitted = %d",
5647 			peer->stats.tx.dropped.fw_rem_notx);
5648 	DP_PRINT_STATS("Dropped: Age Out = %d",
5649 			peer->stats.tx.dropped.age_out);
5650 	DP_PRINT_STATS("NAWDS : ");
5651 	DP_PRINT_STATS("	Nawds multicast Drop Tx Packet = %d",
5652 			peer->stats.tx.nawds_mcast_drop);
5653 	DP_PRINT_STATS("	Nawds multicast  Tx Packet Count = %d",
5654 			peer->stats.tx.nawds_mcast.num);
5655 	DP_PRINT_STATS("	Nawds multicast  Tx Packet Bytes = %llu",
5656 			peer->stats.tx.nawds_mcast.bytes);
5657 
5658 	DP_PRINT_STATS("Rate Info:");
5659 
5660 	for (pkt_type = 0; pkt_type < DOT11_MAX; pkt_type++) {
5661 		index = 0;
5662 		for (mcs = 0; mcs < MAX_MCS; mcs++) {
5663 			if (!dp_rate_string[pkt_type][mcs].valid)
5664 				continue;
5665 
5666 			DP_PRINT_STATS("	%s = %d",
5667 					dp_rate_string[pkt_type][mcs].mcs_type,
5668 					peer->stats.tx.pkt_type[pkt_type].
5669 					mcs_count[mcs]);
5670 		}
5671 
5672 		DP_PRINT_STATS("\n");
5673 	}
5674 
5675 	DP_PRINT_STATS("SGI = "
5676 			" 0.8us %d"
5677 			" 0.4us %d"
5678 			" 1.6us %d"
5679 			" 3.2us %d",
5680 			peer->stats.tx.sgi_count[0],
5681 			peer->stats.tx.sgi_count[1],
5682 			peer->stats.tx.sgi_count[2],
5683 			peer->stats.tx.sgi_count[3]);
5684 	DP_PRINT_STATS("Excess Retries per AC ");
5685 	DP_PRINT_STATS("	 Best effort = %d",
5686 			peer->stats.tx.excess_retries_per_ac[0]);
5687 	DP_PRINT_STATS("	 Background = %d",
5688 			peer->stats.tx.excess_retries_per_ac[1]);
5689 	DP_PRINT_STATS("	 Video = %d",
5690 			peer->stats.tx.excess_retries_per_ac[2]);
5691 	DP_PRINT_STATS("	 Voice = %d",
5692 			peer->stats.tx.excess_retries_per_ac[3]);
5693 	DP_PRINT_STATS("BW Counts = 20MHZ %d 40MHZ %d 80MHZ %d 160MHZ %d\n",
5694 			peer->stats.tx.bw[2], peer->stats.tx.bw[3],
5695 			peer->stats.tx.bw[4], peer->stats.tx.bw[5]);
5696 
5697 	index = 0;
5698 	for (i = 0; i < SS_COUNT; i++) {
5699 		index += qdf_snprint(&nss[index], DP_NSS_LENGTH - index,
5700 				" %d", peer->stats.tx.nss[i]);
5701 	}
5702 	DP_PRINT_STATS("NSS(1-8) = %s",
5703 			nss);
5704 
5705 	DP_PRINT_STATS("Aggregation:");
5706 	DP_PRINT_STATS("	Number of Msdu's Part of Amsdu = %d",
5707 			peer->stats.tx.amsdu_cnt);
5708 	DP_PRINT_STATS("	Number of Msdu's With No Msdu Level Aggregation = %d\n",
5709 			peer->stats.tx.non_amsdu_cnt);
5710 
5711 	DP_PRINT_STATS("Node Rx Stats:");
5712 	DP_PRINT_STATS("Packets Sent To Stack = %d",
5713 			peer->stats.rx.to_stack.num);
5714 	DP_PRINT_STATS("Bytes Sent To Stack = %llu",
5715 			peer->stats.rx.to_stack.bytes);
5716 	for (i = 0; i <  CDP_MAX_RX_RINGS; i++) {
5717 		DP_PRINT_STATS("Ring Id = %d", i);
5718 		DP_PRINT_STATS("	Packets Received = %d",
5719 				peer->stats.rx.rcvd_reo[i].num);
5720 		DP_PRINT_STATS("	Bytes Received = %llu",
5721 				peer->stats.rx.rcvd_reo[i].bytes);
5722 	}
5723 	DP_PRINT_STATS("Multicast Packets Received = %d",
5724 			peer->stats.rx.multicast.num);
5725 	DP_PRINT_STATS("Multicast Bytes Received = %llu",
5726 			peer->stats.rx.multicast.bytes);
5727 	DP_PRINT_STATS("Broadcast Packets Received = %d",
5728 			peer->stats.rx.bcast.num);
5729 	DP_PRINT_STATS("Broadcast Bytes Received = %llu",
5730 			peer->stats.rx.bcast.bytes);
5731 	DP_PRINT_STATS("Intra BSS Packets Received = %d",
5732 			peer->stats.rx.intra_bss.pkts.num);
5733 	DP_PRINT_STATS("Intra BSS Bytes Received = %llu",
5734 			peer->stats.rx.intra_bss.pkts.bytes);
5735 	DP_PRINT_STATS("Raw Packets Received = %d",
5736 			peer->stats.rx.raw.num);
5737 	DP_PRINT_STATS("Raw Bytes Received = %llu",
5738 			peer->stats.rx.raw.bytes);
5739 	DP_PRINT_STATS("Errors: MIC Errors = %d",
5740 			peer->stats.rx.err.mic_err);
5741 	DP_PRINT_STATS("Errors: Decryption Errors = %d",
5742 			peer->stats.rx.err.decrypt_err);
5743 	DP_PRINT_STATS("Msdu's Received Not Part of Ampdu = %d",
5744 			peer->stats.rx.non_ampdu_cnt);
5745 	DP_PRINT_STATS("Msdu's Received As Part of Ampdu = %d",
5746 			peer->stats.rx.ampdu_cnt);
5747 	DP_PRINT_STATS("Msdu's Received Not Part of Amsdu's = %d",
5748 			peer->stats.rx.non_amsdu_cnt);
5749 	DP_PRINT_STATS("MSDUs Received As Part of Amsdu = %d",
5750 			peer->stats.rx.amsdu_cnt);
5751 	DP_PRINT_STATS("NAWDS : ");
5752 	DP_PRINT_STATS("	Nawds multicast Drop Rx Packet = %d",
5753 			peer->stats.rx.nawds_mcast_drop);
5754 	DP_PRINT_STATS("SGI ="
5755 			" 0.8us %d"
5756 			" 0.4us %d"
5757 			" 1.6us %d"
5758 			" 3.2us %d",
5759 			peer->stats.rx.sgi_count[0],
5760 			peer->stats.rx.sgi_count[1],
5761 			peer->stats.rx.sgi_count[2],
5762 			peer->stats.rx.sgi_count[3]);
5763 	DP_PRINT_STATS("BW Counts = 20MHZ %d 40MHZ %d 80MHZ %d 160MHZ %d",
5764 			peer->stats.rx.bw[0], peer->stats.rx.bw[1],
5765 			peer->stats.rx.bw[2], peer->stats.rx.bw[3]);
5766 	DP_PRINT_STATS("Reception Type ="
5767 			" SU %d,"
5768 			" MU_MIMO %d,"
5769 			" MU_OFDMA %d,"
5770 			" MU_OFDMA_MIMO %d",
5771 			peer->stats.rx.reception_type[0],
5772 			peer->stats.rx.reception_type[1],
5773 			peer->stats.rx.reception_type[2],
5774 			peer->stats.rx.reception_type[3]);
5775 
5776 
5777 	for (pkt_type = 0; pkt_type < DOT11_MAX; pkt_type++) {
5778 		index = 0;
5779 		for (mcs = 0; mcs < MAX_MCS; mcs++) {
5780 			if (!dp_rate_string[pkt_type][mcs].valid)
5781 				continue;
5782 
5783 			DP_PRINT_STATS("	%s = %d",
5784 					dp_rate_string[pkt_type][mcs].mcs_type,
5785 					peer->stats.rx.pkt_type[pkt_type].
5786 					mcs_count[mcs]);
5787 		}
5788 
5789 		DP_PRINT_STATS("\n");
5790 	}
5791 
5792 	index = 0;
5793 	for (i = 0; i < SS_COUNT; i++) {
5794 		index += qdf_snprint(&nss[index], DP_NSS_LENGTH - index,
5795 				" %d", peer->stats.rx.nss[i]);
5796 	}
5797 	DP_PRINT_STATS("NSS(1-8) = %s",
5798 			nss);
5799 
5800 	DP_PRINT_STATS("Aggregation:");
5801 	DP_PRINT_STATS("	Msdu's Part of Ampdu = %d",
5802 			peer->stats.rx.ampdu_cnt);
5803 	DP_PRINT_STATS("	Msdu's With No Mpdu Level Aggregation = %d",
5804 			peer->stats.rx.non_ampdu_cnt);
5805 	DP_PRINT_STATS("	Msdu's Part of Amsdu = %d",
5806 			peer->stats.rx.amsdu_cnt);
5807 	DP_PRINT_STATS("	Msdu's With No Msdu Level Aggregation = %d",
5808 			peer->stats.rx.non_amsdu_cnt);
5809 }
5810 
5811 /**
5812  * dp_print_host_stats()- Function to print the stats aggregated at host
5813  * @vdev_handle: DP_VDEV handle
5814  * @type: host stats type
5815  *
5816  * Available Stat types
5817  * TXRX_CLEAR_STATS  : Clear the stats
5818  * TXRX_RX_RATE_STATS: Print Rx Rate Info
5819  * TXRX_TX_RATE_STATS: Print Tx Rate Info
5820  * TXRX_TX_HOST_STATS: Print Tx Stats
5821  * TXRX_RX_HOST_STATS: Print Rx Stats
5822  * TXRX_AST_STATS: Print AST Stats
5823  * TXRX_SRNG_PTR_STATS: Print SRNG ring pointer stats
5824  *
5825  * Return: 0 on success, print error message in case of failure
5826  */
5827 static int
5828 dp_print_host_stats(struct cdp_vdev *vdev_handle, enum cdp_host_txrx_stats type)
5829 {
5830 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
5831 	struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;
5832 
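	/* Refresh pdev-level aggregates before servicing the requested option */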
5833 	dp_aggregate_pdev_stats(pdev);
5834 
5835 	switch (type) {
5836 	case TXRX_CLEAR_STATS:
5837 		dp_txrx_host_stats_clr(vdev);
5838 		break;
5839 	case TXRX_RX_RATE_STATS:
5840 		dp_print_rx_rates(vdev);
5841 		break;
5842 	case TXRX_TX_RATE_STATS:
5843 		dp_print_tx_rates(vdev);
5844 		break;
5845 	case TXRX_TX_HOST_STATS:
5846 		dp_print_pdev_tx_stats(pdev);
5847 		dp_print_soc_tx_stats(pdev->soc);
5848 		break;
5849 	case TXRX_RX_HOST_STATS:
5850 		dp_print_pdev_rx_stats(pdev);
5851 		dp_print_soc_rx_stats(pdev->soc);
5852 		break;
5853 	case TXRX_AST_STATS:
5854 		dp_print_ast_stats(pdev->soc);
5855 		dp_print_peer_table(vdev);
5856 		break;
5857 	case TXRX_SRNG_PTR_STATS:
5858 		dp_print_ring_stats(pdev);
5859 		break;
5860 	case TXRX_RX_MON_STATS:
5861 		dp_print_pdev_rx_mon_stats(pdev);
5862 		break;
5863 	default:
5864 		DP_TRACE(FATAL, "Wrong Input For TxRx Host Stats");
5865 		break;
5866 	}
5867 	return 0;
5868 }
5869 
5870 /*
5871  * dp_get_host_peer_stats()- function to print peer stats
5872  * @pdev_handle: DP_PDEV handle
5873  * @mac_addr: mac address of the peer
5874  *
5875  * Return: void
5876  */
5877 static void
5878 dp_get_host_peer_stats(struct cdp_pdev *pdev_handle, char *mac_addr)
5879 {
5880 	struct dp_peer *peer;
5881 	uint8_t local_id;
5882 	peer = (struct dp_peer *)dp_find_peer_by_addr(pdev_handle, mac_addr,
5883 			&local_id);
5884 
5885 	if (!peer) {
5886 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
5887 			"%s: Invalid peer\n", __func__);
5888 		return;
5889 	}
5890 
5891 	dp_print_peer_stats(peer);
5892 	dp_peer_rxtid_stats(peer, dp_rx_tid_stats_cb, NULL);
5893 	return;
5894 }
5895 
5896 /*
5897  * dp_ppdu_ring_reset()- Reset PPDU Stats ring
5898  * @pdev: DP_PDEV handle
5899  *
5900  * Return: void
5901  */
5902 static void
5903 dp_ppdu_ring_reset(struct dp_pdev *pdev)
5904 {
5905 	struct htt_rx_ring_tlv_filter htt_tlv_filter;
5906 	int mac_id;
5907 
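	/* A zeroed TLV filter disables all status-ring TLVs on every MAC */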
5908 	qdf_mem_set(&(htt_tlv_filter), sizeof(htt_tlv_filter), 0x0);
5909 
5910 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
5911 		int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id,
5912 							pdev->pdev_id);
5913 
5914 		htt_h2t_rx_ring_cfg(pdev->soc->htt_handle, mac_for_pdev,
5915 			pdev->rxdma_mon_status_ring[mac_id].hal_srng,
5916 			RXDMA_MONITOR_STATUS, RX_BUFFER_SIZE, &htt_tlv_filter);
5917 	}
5918 }
5919 
5920 /*
5921  * dp_ppdu_ring_cfg()- Configure PPDU Stats ring
5922  * @pdev: DP_PDEV handle
5923  *
5924  * Return: void
5925  */
5926 static void
5927 dp_ppdu_ring_cfg(struct dp_pdev *pdev)
5928 {
5929 	struct htt_rx_ring_tlv_filter htt_tlv_filter = {0};
5930 	int mac_id;
5931 
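	/*
	 * Enable only the PPDU-level TLVs (plus mpdu_start) so the monitor
	 * status ring carries PPDU stats without full packet payloads;
	 * M-copy mode additionally captures packet headers below.
	 */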
5932 	htt_tlv_filter.mpdu_start = 1;
5933 	htt_tlv_filter.msdu_start = 0;
5934 	htt_tlv_filter.packet = 0;
5935 	htt_tlv_filter.msdu_end = 0;
5936 	htt_tlv_filter.mpdu_end = 0;
5937 	htt_tlv_filter.attention = 0;
5938 	htt_tlv_filter.ppdu_start = 1;
5939 	htt_tlv_filter.ppdu_end = 1;
5940 	htt_tlv_filter.ppdu_end_user_stats = 1;
5941 	htt_tlv_filter.ppdu_end_user_stats_ext = 1;
5942 	htt_tlv_filter.ppdu_end_status_done = 1;
5943 	htt_tlv_filter.enable_fp = 1;
5944 	htt_tlv_filter.enable_md = 0;
5945 	if (pdev->mcopy_mode) {
5946 		htt_tlv_filter.packet_header = 1;
5947 		htt_tlv_filter.enable_mo = 1;
5948 	}
5949 	htt_tlv_filter.fp_mgmt_filter = FILTER_MGMT_ALL;
5950 	htt_tlv_filter.fp_ctrl_filter = FILTER_CTRL_ALL;
5951 	htt_tlv_filter.fp_data_filter = FILTER_DATA_ALL;
5952 	htt_tlv_filter.mo_mgmt_filter = FILTER_MGMT_ALL;
5953 	htt_tlv_filter.mo_ctrl_filter = FILTER_CTRL_ALL;
5954 	htt_tlv_filter.mo_data_filter = FILTER_DATA_ALL;
5955 
5956 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
5957 		int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id,
5958 						pdev->pdev_id);
5959 
5960 		htt_h2t_rx_ring_cfg(pdev->soc->htt_handle, mac_for_pdev,
5961 			pdev->rxdma_mon_status_ring[mac_id].hal_srng,
5962 			RXDMA_MONITOR_STATUS, RX_BUFFER_SIZE, &htt_tlv_filter);
5963 	}
5964 }
5965 
5966 /*
5967  * dp_config_debug_sniffer()- API to enable/disable debug sniffer
5968  * @pdev_handle: DP_PDEV handle
5969  * @val: user provided value (0: disable, 1: tx sniffer, 2: M-copy mode)
5970  *
5971  * Return: void
5972  */
5973 static void
5974 dp_config_debug_sniffer(struct cdp_pdev *pdev_handle, int val)
5975 {
5976 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
5977 
5978 	switch (val) {
5979 	case 0:
5980 		pdev->tx_sniffer_enable = 0;
5981 		pdev->mcopy_mode = 0;
5982 
5983 		if (!pdev->pktlog_ppdu_stats && !pdev->enhanced_stats_en) {
5984 			dp_h2t_cfg_stats_msg_send(pdev, 0, pdev->pdev_id);
5985 			dp_ppdu_ring_reset(pdev);
5986 		} else if (pdev->enhanced_stats_en) {
5987 			dp_h2t_cfg_stats_msg_send(pdev,
5988 				DP_PPDU_STATS_CFG_ENH_STATS, pdev->pdev_id);
5989 		}
5990 		break;
5991 
5992 	case 1:
5993 		pdev->tx_sniffer_enable = 1;
5994 		pdev->mcopy_mode = 0;
5995 
5996 		if (!pdev->pktlog_ppdu_stats)
5997 			dp_h2t_cfg_stats_msg_send(pdev,
5998 				DP_PPDU_STATS_CFG_SNIFFER, pdev->pdev_id);
5999 		break;
6000 	case 2:
6001 		pdev->mcopy_mode = 1;
6002 		pdev->tx_sniffer_enable = 0;
6003 		if (!pdev->enhanced_stats_en)
6004 			dp_ppdu_ring_cfg(pdev);
6005 
6006 		if (!pdev->pktlog_ppdu_stats)
6007 			dp_h2t_cfg_stats_msg_send(pdev,
6008 				DP_PPDU_STATS_CFG_SNIFFER, pdev->pdev_id);
6009 		break;
6010 	default:
6011 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
6012 			"Invalid value\n");
6013 		break;
6014 	}
6015 }
6016 
6017 /*
6018  * dp_enable_enhanced_stats()- API to enable enhanced statistics
6019  * @pdev_handle: DP_PDEV handle
6020  *
6021  * Return: void
6022  */
6023 static void
6024 dp_enable_enhanced_stats(struct cdp_pdev *pdev_handle)
6025 {
6026 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
6027 	pdev->enhanced_stats_en = 1;
6028 
6029 	if (!pdev->mcopy_mode)
6030 		dp_ppdu_ring_cfg(pdev);
6031 
6032 	if (!pdev->pktlog_ppdu_stats && !pdev->tx_sniffer_enable && !pdev->mcopy_mode)
6033 		dp_h2t_cfg_stats_msg_send(pdev, DP_PPDU_STATS_CFG_ENH_STATS, pdev->pdev_id);
6034 }
6035 
6036 /*
6037  * dp_disable_enhanced_stats()- API to disable enhanced statistics
6038  * @pdev_handle: DP_PDEV handle
6039  *
6040  * Return: void
6041  */
6042 static void
6043 dp_disable_enhanced_stats(struct cdp_pdev *pdev_handle)
6044 {
6045 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
6046 
6047 	pdev->enhanced_stats_en = 0;
6048 
6049 	if (!pdev->pktlog_ppdu_stats && !pdev->tx_sniffer_enable && !pdev->mcopy_mode)
6050 		dp_h2t_cfg_stats_msg_send(pdev, 0, pdev->pdev_id);
6051 
6052 	if (!pdev->mcopy_mode)
6053 		dp_ppdu_ring_reset(pdev);
6054 }
6055 
6056 /*
6057  * dp_get_fw_peer_stats()- function to request peer stats from the FW
6058  * @pdev_handle: DP_PDEV handle
6059  * @mac_addr: mac address of the peer
6060  * @cap: Type of htt stats requested
6061  *
6062  * Currently supporting only MAC ID based requests
6063  *	1: HTT_PEER_STATS_REQ_MODE_NO_QUERY
6064  *	2: HTT_PEER_STATS_REQ_MODE_QUERY_TQM
6065  *	3: HTT_PEER_STATS_REQ_MODE_FLUSH_TQM
6066  *
6067  * Return: void
6068  */
6069 static void
6070 dp_get_fw_peer_stats(struct cdp_pdev *pdev_handle, uint8_t *mac_addr,
6071 		uint32_t cap)
6072 {
6073 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
6074 	int i;
6075 	uint32_t config_param0 = 0;
6076 	uint32_t config_param1 = 0;
6077 	uint32_t config_param2 = 0;
6078 	uint32_t config_param3 = 0;
6079 
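	/* Mark the request as MAC-address based and select the request mode (cap) */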
6080 	HTT_DBG_EXT_STATS_PEER_INFO_IS_MAC_ADDR_SET(config_param0, 1);
6081 	config_param0 |= (1 << (cap + 1));
6082 
6083 	for (i = 0; i < HTT_PEER_STATS_MAX_TLV; i++) {
6084 		config_param1 |= (1 << i);
6085 	}
6086 
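	/* Pack MAC bytes 0-3 into config_param2 and bytes 4-5 into config_param3 */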
6087 	config_param2 |= (mac_addr[0] & 0x000000ff);
6088 	config_param2 |= ((mac_addr[1] << 8) & 0x0000ff00);
6089 	config_param2 |= ((mac_addr[2] << 16) & 0x00ff0000);
6090 	config_param2 |= ((mac_addr[3] << 24) & 0xff000000);
6091 
6092 	config_param3 |= (mac_addr[4] & 0x000000ff);
6093 	config_param3 |= ((mac_addr[5] << 8) & 0x0000ff00);
6094 
6095 	dp_h2t_ext_stats_msg_send(pdev, HTT_DBG_EXT_STATS_PEER_INFO,
6096 			config_param0, config_param1, config_param2,
6097 			config_param3, 0, 0, 0);
6098 
6099 }
6100 
6101 /* This struct definition will be removed from here
6102  * once it gets added in FW headers */
6103 struct httstats_cmd_req {
6104 	uint32_t	config_param0;
6105 	uint32_t	config_param1;
6106 	uint32_t	config_param2;
6107 	uint32_t	config_param3;
6108 	int		cookie;
6109 	u_int8_t	stats_id;
6110 };
6111 
6112 /*
6113  * dp_get_htt_stats: function to process the httstats request
6114  * @pdev_handle: DP pdev handle
6115  * @data: pointer to request data
6116  * @data_len: length for request data
6117  *
6118  * return: void
6119  */
6120 static void
6121 dp_get_htt_stats(struct cdp_pdev *pdev_handle, void *data, uint32_t data_len)
6122 {
6123 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
6124 	struct httstats_cmd_req *req = (struct httstats_cmd_req *)data;
6125 
6126 	QDF_ASSERT(data_len == sizeof(struct httstats_cmd_req));
6127 	dp_h2t_ext_stats_msg_send(pdev, req->stats_id,
6128 				req->config_param0, req->config_param1,
6129 				req->config_param2, req->config_param3,
6130 				req->cookie, 0, 0);
6131 }
6132 /*
6133  * dp_set_pdev_param: function to set parameters in pdev
6134  * @pdev_handle: DP pdev handle
6135  * @param: parameter type to be set
6136  * @val: value of parameter to be set
6137  *
6138  * return: void
6139  */
6140 static void dp_set_pdev_param(struct cdp_pdev *pdev_handle,
6141 		enum cdp_pdev_param_type param, uint8_t val)
6142 {
6143 	switch (param) {
6144 	case CDP_CONFIG_DEBUG_SNIFFER:
6145 		dp_config_debug_sniffer(pdev_handle, val);
6146 		break;
6147 	default:
6148 		break;
6149 	}
6150 }
6151 
6152 /*
6153  * dp_set_vdev_param: function to set parameters in vdev
6154  * @param: parameter type to be set
6155  * @val: value of parameter to be set
6156  *
6157  * return: void
6158  */
6159 static void dp_set_vdev_param(struct cdp_vdev *vdev_handle,
6160 		enum cdp_vdev_param_type param, uint32_t val)
6161 {
6162 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
6163 	switch (param) {
6164 	case CDP_ENABLE_WDS:
6165 		vdev->wds_enabled = val;
6166 		break;
6167 	case CDP_ENABLE_NAWDS:
6168 		vdev->nawds_enabled = val;
6169 		break;
6170 	case CDP_ENABLE_MCAST_EN:
6171 		vdev->mcast_enhancement_en = val;
6172 		break;
6173 	case CDP_ENABLE_PROXYSTA:
6174 		vdev->proxysta_vdev = val;
6175 		break;
6176 	case CDP_UPDATE_TDLS_FLAGS:
6177 		vdev->tdls_link_connected = val;
6178 		break;
6179 	case CDP_CFG_WDS_AGING_TIMER:
6180 		if (val == 0)
6181 			qdf_timer_stop(&vdev->pdev->soc->wds_aging_timer);
6182 		else if (val != vdev->wds_aging_timer_val)
6183 			qdf_timer_mod(&vdev->pdev->soc->wds_aging_timer, val);
6184 
6185 		vdev->wds_aging_timer_val = val;
6186 		break;
6187 	case CDP_ENABLE_AP_BRIDGE:
6188 		if (wlan_op_mode_sta != vdev->opmode)
6189 			vdev->ap_bridge_enabled = val;
6190 		else
6191 			vdev->ap_bridge_enabled = false;
6192 		break;
6193 	case CDP_ENABLE_CIPHER:
6194 		vdev->sec_type = val;
6195 		break;
6196 	case CDP_ENABLE_QWRAP_ISOLATION:
6197 		vdev->isolation_vdev = val;
6198 		break;
6199 	default:
6200 		break;
6201 	}
6202 
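	/* Recompute the vdev Tx search flags, which depend on settings such as WDS */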
6203 	dp_tx_vdev_update_search_flags(vdev);
6204 }
6205 
6206 /**
6207  * dp_peer_set_nawds: set nawds bit in peer
6208  * @peer_handle: pointer to peer
6209  * @value: enable/disable nawds
6210  *
6211  * return: void
6212  */
6213 static void dp_peer_set_nawds(struct cdp_peer *peer_handle, uint8_t value)
6214 {
6215 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
6216 	peer->nawds_enabled = value;
6217 }
6218 
6219 /*
6220  * dp_set_vdev_dscp_tid_map_wifi3(): Update Map ID selected for particular vdev
6221  * @vdev_handle: DP_VDEV handle
6222  * @map_id:ID of map that needs to be updated
6223  *
6224  * Return: void
6225  */
6226 static void dp_set_vdev_dscp_tid_map_wifi3(struct cdp_vdev *vdev_handle,
6227 		uint8_t map_id)
6228 {
6229 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
6230 	vdev->dscp_tid_map_id = map_id;
6231 	return;
6232 }
6233 
6234 /*
6235  * dp_txrx_stats_publish(): publish pdev stats into a buffer
6236  * @pdev_handle: DP_PDEV handle
6237  * @buf: to hold pdev_stats
6238  *
6239  * Return: int
6240  */
6241 static int
6242 dp_txrx_stats_publish(struct cdp_pdev *pdev_handle, void *buf)
6243 {
6244 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
6245 	struct cdp_pdev_stats *buffer = (struct cdp_pdev_stats *) buf;
6246 	struct cdp_txrx_stats_req req = {0,};
6247 
6248 	dp_aggregate_pdev_stats(pdev);
6249 	req.stats = HTT_DBG_EXT_STATS_PDEV_TX;
6250 	req.cookie_val = 1;
6251 	dp_h2t_ext_stats_msg_send(pdev, req.stats, req.param0,
6252 				req.param1, req.param2, req.param3, 0,
6253 				req.cookie_val, 0);
6254 
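	/* Allow time for the HTT stats response to update pdev->stats */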
6255 	msleep(DP_MAX_SLEEP_TIME);
6256 
6257 	req.stats = HTT_DBG_EXT_STATS_PDEV_RX;
6258 	req.cookie_val = 1;
6259 	dp_h2t_ext_stats_msg_send(pdev, req.stats, req.param0,
6260 				req.param1, req.param2, req.param3, 0,
6261 				req.cookie_val, 0);
6262 
6263 	msleep(DP_MAX_SLEEP_TIME);
6264 	qdf_mem_copy(buffer, &pdev->stats, sizeof(pdev->stats));
6265 
6266 	return TXRX_STATS_LEVEL;
6267 }
6268 
6269 /**
6270  * dp_set_pdev_dscp_tid_map_wifi3(): update dscp tid map in pdev
6271  * @pdev_handle: DP_PDEV handle
6272  * @map_id: ID of map that needs to be updated
6273  * @tos: index value in map
6274  * @tid: tid value passed by the user
6275  *
6276  * Return: void
6277  */
6278 static void dp_set_pdev_dscp_tid_map_wifi3(struct cdp_pdev *pdev_handle,
6279 		uint8_t map_id, uint8_t tos, uint8_t tid)
6280 {
6281 	uint8_t dscp;
6282 	struct dp_pdev *pdev = (struct dp_pdev *) pdev_handle;
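	/* Extract the DSCP bits from the IP TOS value supplied by the caller */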
6283 	dscp = (tos >> DP_IP_DSCP_SHIFT) & DP_IP_DSCP_MASK;
6284 	pdev->dscp_tid_map[map_id][dscp] = tid;
6285 	if (map_id < HAL_MAX_HW_DSCP_TID_MAPS)
6286 		hal_tx_update_dscp_tid(pdev->soc->hal_soc, tid,
6287 			map_id, dscp);
6288 	return;
6289 }
6290 
6291 /**
6292  * dp_fw_stats_process(): Process TxRx FW stats request
6293  * @vdev_handle: DP VDEV handle
6294  * @req: stats request
6295  *
6296  * return: int
6297  */
6298 static int dp_fw_stats_process(struct cdp_vdev *vdev_handle,
6299 		struct cdp_txrx_stats_req *req)
6300 {
6301 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
6302 	struct dp_pdev *pdev = NULL;
6303 	uint32_t stats = req->stats;
6304 	uint8_t mac_id = req->mac_id;
6305 
6306 	if (!vdev) {
6307 		DP_TRACE(NONE, "VDEV not found");
6308 		return 1;
6309 	}
6310 	pdev = vdev->pdev;
6311 
6312 	/*
6313 	 * For the HTT_DBG_EXT_STATS_RESET command, the FW needs
6314 	 * config_param0 to config_param3 set according to the rule below:
6315 	 *
6316 	 * PARAM:
6317 	 *   - config_param0 : start_offset (stats type)
6318 	 *   - config_param1 : stats bmask from start offset
6319 	 *   - config_param2 : stats bmask from start offset + 32
6320 	 *   - config_param3 : stats bmask from start offset + 64
6321 	 */
6322 	if (req->stats == CDP_TXRX_STATS_0) {
6323 		req->param0 = HTT_DBG_EXT_STATS_PDEV_TX;
6324 		req->param1 = 0xFFFFFFFF;
6325 		req->param2 = 0xFFFFFFFF;
6326 		req->param3 = 0xFFFFFFFF;
6327 	}
6328 
6329 	return dp_h2t_ext_stats_msg_send(pdev, stats, req->param0,
6330 				req->param1, req->param2, req->param3,
6331 				0, 0, mac_id);
6332 }
6333 
6334 /**
6335  * dp_txrx_stats_request - function to map to firmware and host stats
6336  * @vdev: virtual handle
6337  * @req: stats request
6338  *
6339  * Return: integer
6340  */
6341 static int dp_txrx_stats_request(struct cdp_vdev *vdev,
6342 		struct cdp_txrx_stats_req *req)
6343 {
6344 	int host_stats;
6345 	int fw_stats;
6346 	enum cdp_stats stats;
6347 
6348 	if (!vdev || !req) {
6349 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
6350 				"Invalid vdev/req instance");
6351 		return 0;
6352 	}
6353 
6354 	stats = req->stats;
6355 	if (stats >= CDP_TXRX_MAX_STATS)
6356 		return 0;
6357 
6358 	/*
6359 	 * DP_CURR_FW_STATS_AVAIL: number of FW stats currently available;
6360 	 * it has to be updated whenever new FW HTT stats are added.
6361 	 */
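	/*
	 * Host-only stats IDs beyond CDP_TXRX_STATS_HTT_MAX are remapped to
	 * account for only DP_CURR_FW_STATS_AVAIL FW stats entries being
	 * present in dp_stats_mapping_table.
	 */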
6362 	if (stats > CDP_TXRX_STATS_HTT_MAX)
6363 		stats = stats + DP_CURR_FW_STATS_AVAIL - DP_HTT_DBG_EXT_STATS_MAX;
6364 	fw_stats = dp_stats_mapping_table[stats][STATS_FW];
6365 	host_stats = dp_stats_mapping_table[stats][STATS_HOST];
6366 
6367 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
6368 		 "stats: %u fw_stats_type: %d host_stats_type: %d",
6369 		  stats, fw_stats, host_stats);
6370 
6371 	if (fw_stats != TXRX_FW_STATS_INVALID) {
6372 		/* update request with FW stats type */
6373 		req->stats = fw_stats;
6374 		return dp_fw_stats_process(vdev, req);
6375 	}
6376 
6377 	if ((host_stats != TXRX_HOST_STATS_INVALID) &&
6378 			(host_stats <= TXRX_HOST_STATS_MAX))
6379 		return dp_print_host_stats(vdev, host_stats);
6380 	else
6381 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
6382 				"Wrong Input for TxRx Stats");
6383 
6384 	return 0;
6385 }
6386 
6387 /*
6388  * dp_print_napi_stats(): NAPI stats
6389  * @soc - soc handle
6390  */
6391 static void dp_print_napi_stats(struct dp_soc *soc)
6392 {
6393 	hif_print_napi_stats(soc->hif_handle);
6394 }
6395 
6396 /*
6397  * dp_print_per_ring_stats(): Packet count per ring
6398  * @soc - soc handle
6399  */
6400 static void dp_print_per_ring_stats(struct dp_soc *soc)
6401 {
6402 	uint8_t ring;
6403 	uint16_t core;
6404 	uint64_t total_packets;
6405 
6406 	DP_TRACE(FATAL, "Reo packets per ring:");
6407 	for (ring = 0; ring < MAX_REO_DEST_RINGS; ring++) {
6408 		total_packets = 0;
6409 		DP_TRACE(FATAL, "Packets on ring %u:", ring);
6410 		for (core = 0; core < NR_CPUS; core++) {
6411 			DP_TRACE(FATAL, "Packets arriving on core %u: %llu",
6412 				core, soc->stats.rx.ring_packets[core][ring]);
6413 			total_packets += soc->stats.rx.ring_packets[core][ring];
6414 		}
6415 		DP_TRACE(FATAL, "Total packets on ring %u: %llu",
6416 			ring, total_packets);
6417 	}
6418 }
6419 
6420 /*
6421  * dp_txrx_path_stats() - Function to display dump stats
6422  * @soc - soc handle
6423  *
6424  * return: none
6425  */
6426 static void dp_txrx_path_stats(struct dp_soc *soc)
6427 {
6428 	uint8_t error_code;
6429 	uint8_t loop_pdev;
6430 	struct dp_pdev *pdev;
6431 	uint8_t i;
6432 
6433 	for (loop_pdev = 0; loop_pdev < soc->pdev_count; loop_pdev++) {
6434 
6435 		pdev = soc->pdev_list[loop_pdev];
6436 		dp_aggregate_pdev_stats(pdev);
6437 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
6438 			"Tx path Statistics:");
6439 
6440 		DP_TRACE(FATAL, "from stack: %u msdus (%llu bytes)",
6441 			pdev->stats.tx_i.rcvd.num,
6442 			pdev->stats.tx_i.rcvd.bytes);
6443 		DP_TRACE(FATAL, "processed from host: %u msdus (%llu bytes)",
6444 			pdev->stats.tx_i.processed.num,
6445 			pdev->stats.tx_i.processed.bytes);
6446 		DP_TRACE(FATAL, "successfully transmitted: %u msdus (%llu bytes)",
6447 			pdev->stats.tx.tx_success.num,
6448 			pdev->stats.tx.tx_success.bytes);
6449 
6450 		DP_TRACE(FATAL, "Dropped in host:");
6451 		DP_TRACE(FATAL, "Total packets dropped: %u,",
6452 			pdev->stats.tx_i.dropped.dropped_pkt.num);
6453 		DP_TRACE(FATAL, "Descriptor not available: %u",
6454 			pdev->stats.tx_i.dropped.desc_na);
6455 		DP_TRACE(FATAL, "Ring full: %u",
6456 			pdev->stats.tx_i.dropped.ring_full);
6457 		DP_TRACE(FATAL, "Enqueue fail: %u",
6458 			pdev->stats.tx_i.dropped.enqueue_fail);
6459 		DP_TRACE(FATAL, "DMA Error: %u",
6460 			pdev->stats.tx_i.dropped.dma_error);
6461 
6462 		DP_TRACE(FATAL, "Dropped in hardware:");
6463 		DP_TRACE(FATAL, "total packets dropped: %u",
6464 			pdev->stats.tx.tx_failed);
6465 		DP_TRACE(FATAL, "mpdu age out: %u",
6466 			pdev->stats.tx.dropped.age_out);
6467 		DP_TRACE(FATAL, "firmware removed: %u",
6468 			pdev->stats.tx.dropped.fw_rem);
6469 		DP_TRACE(FATAL, "firmware removed tx: %u",
6470 			pdev->stats.tx.dropped.fw_rem_tx);
6471 		DP_TRACE(FATAL, "firmware removed notx: %u",
6472 			pdev->stats.tx.dropped.fw_rem_notx);
6473 		DP_TRACE(FATAL, "peer_invalid: %u",
6474 			pdev->soc->stats.tx.tx_invalid_peer.num);
6475 
6476 
6477 		DP_TRACE(FATAL, "Tx packets sent per interrupt:");
6478 		DP_TRACE(FATAL, "Single Packet: %u",
6479 			pdev->stats.tx_comp_histogram.pkts_1);
6480 		DP_TRACE(FATAL, "2-20 Packets:  %u",
6481 			pdev->stats.tx_comp_histogram.pkts_2_20);
6482 		DP_TRACE(FATAL, "21-40 Packets: %u",
6483 			pdev->stats.tx_comp_histogram.pkts_21_40);
6484 		DP_TRACE(FATAL, "41-60 Packets: %u",
6485 			pdev->stats.tx_comp_histogram.pkts_41_60);
6486 		DP_TRACE(FATAL, "61-80 Packets: %u",
6487 			pdev->stats.tx_comp_histogram.pkts_61_80);
6488 		DP_TRACE(FATAL, "81-100 Packets: %u",
6489 			pdev->stats.tx_comp_histogram.pkts_81_100);
6490 		DP_TRACE(FATAL, "101-200 Packets: %u",
6491 			pdev->stats.tx_comp_histogram.pkts_101_200);
6492 		DP_TRACE(FATAL, "   201+ Packets: %u",
6493 			pdev->stats.tx_comp_histogram.pkts_201_plus);
6494 
6495 		DP_TRACE(FATAL, "Rx path statistics");
6496 
6497 		DP_TRACE(FATAL, "delivered %u msdus ( %llu bytes),",
6498 			pdev->stats.rx.to_stack.num,
6499 			pdev->stats.rx.to_stack.bytes);
6500 		for (i = 0; i <  CDP_MAX_RX_RINGS; i++)
6501 			DP_TRACE(FATAL, "received on reo[%d] %u msdus ( %llu bytes),",
6502 					i, pdev->stats.rx.rcvd_reo[i].num,
6503 					pdev->stats.rx.rcvd_reo[i].bytes);
6504 		DP_TRACE(FATAL, "intra-bss packets %u msdus ( %llu bytes),",
6505 			pdev->stats.rx.intra_bss.pkts.num,
6506 			pdev->stats.rx.intra_bss.pkts.bytes);
6507 		DP_TRACE(FATAL, "intra-bss fails %u msdus ( %llu bytes),",
6508 			pdev->stats.rx.intra_bss.fail.num,
6509 			pdev->stats.rx.intra_bss.fail.bytes);
6510 		DP_TRACE(FATAL, "raw packets %u msdus ( %llu bytes),",
6511 			pdev->stats.rx.raw.num,
6512 			pdev->stats.rx.raw.bytes);
6513 		DP_TRACE(FATAL, "dropped: mic errors %u msdus",
6514 			pdev->stats.rx.err.mic_err);
6515 		DP_TRACE(FATAL, "peer invalid %u",
6516 			pdev->soc->stats.rx.err.rx_invalid_peer.num);
6517 
6518 		DP_TRACE(FATAL, "Reo Statistics");
6519 		DP_TRACE(FATAL, "rbm error: %u msdus",
6520 			pdev->soc->stats.rx.err.invalid_rbm);
6521 		DP_TRACE(FATAL, "hal ring access fail: %u msdus",
6522 			pdev->soc->stats.rx.err.hal_ring_access_fail);
6523 
6524 		DP_TRACE(FATAL, "Reo errors");
6525 
6526 		for (error_code = 0; error_code < HAL_REO_ERR_MAX;
6527 				error_code++) {
6528 			DP_TRACE(FATAL, "Reo error number (%u): %u msdus",
6529 				error_code,
6530 				pdev->soc->stats.rx.err.reo_error[error_code]);
6531 		}
6532 
6533 		for (error_code = 0; error_code < HAL_RXDMA_ERR_MAX;
6534 				error_code++) {
6535 			DP_TRACE(FATAL, "Rxdma error number (%u): %u msdus",
6536 				error_code,
6537 				pdev->soc->stats.rx.err
6538 				.rxdma_error[error_code]);
6539 		}
6540 
6541 		DP_TRACE(FATAL, "Rx packets reaped per interrupt:");
6542 		DP_TRACE(FATAL, "Single Packet: %u",
6543 			 pdev->stats.rx_ind_histogram.pkts_1);
6544 		DP_TRACE(FATAL, "2-20 Packets:  %u",
6545 			 pdev->stats.rx_ind_histogram.pkts_2_20);
6546 		DP_TRACE(FATAL, "21-40 Packets: %u",
6547 			 pdev->stats.rx_ind_histogram.pkts_21_40);
6548 		DP_TRACE(FATAL, "41-60 Packets: %u",
6549 			 pdev->stats.rx_ind_histogram.pkts_41_60);
6550 		DP_TRACE(FATAL, "61-80 Packets: %u",
6551 			 pdev->stats.rx_ind_histogram.pkts_61_80);
6552 		DP_TRACE(FATAL, "81-100 Packets: %u",
6553 			 pdev->stats.rx_ind_histogram.pkts_81_100);
6554 		DP_TRACE(FATAL, "101-200 Packets: %u",
6555 			 pdev->stats.rx_ind_histogram.pkts_101_200);
6556 		DP_TRACE(FATAL, "   201+ Packets: %u",
6557 			 pdev->stats.rx_ind_histogram.pkts_201_plus);
6558 
6559 		DP_TRACE_STATS(ERROR, "%s: tso_enable: %u lro_enable: %u rx_hash: %u napi_enable: %u",
6560 			__func__,
6561 			pdev->soc->wlan_cfg_ctx->tso_enabled,
6562 			pdev->soc->wlan_cfg_ctx->lro_enabled,
6563 			pdev->soc->wlan_cfg_ctx->rx_hash,
6564 			pdev->soc->wlan_cfg_ctx->napi_enabled);
6565 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
6566 		DP_TRACE_STATS(ERROR, "%s: Tx flow stop queue: %u tx flow start queue offset: %u",
6567 			__func__,
6568 			pdev->soc->wlan_cfg_ctx->tx_flow_stop_queue_threshold,
6569 			pdev->soc->wlan_cfg_ctx->tx_flow_start_queue_offset);
6570 #endif
6571 	}
6572 }
6573 
6574 /*
6575  * dp_txrx_dump_stats() - Dump statistics
6576  * @value - Statistics option
6577  */
6578 static QDF_STATUS dp_txrx_dump_stats(void *psoc, uint16_t value,
6579 				     enum qdf_stats_verbosity_level level)
6580 {
6581 	struct dp_soc *soc =
6582 		(struct dp_soc *)psoc;
6583 	QDF_STATUS status = QDF_STATUS_SUCCESS;
6584 
6585 	if (!soc) {
6586 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
6587 			"%s: soc is NULL", __func__);
6588 		return QDF_STATUS_E_INVAL;
6589 	}
6590 
6591 	switch (value) {
6592 	case CDP_TXRX_PATH_STATS:
6593 		dp_txrx_path_stats(soc);
6594 		break;
6595 
6596 	case CDP_RX_RING_STATS:
6597 		dp_print_per_ring_stats(soc);
6598 		break;
6599 
6600 	case CDP_TXRX_TSO_STATS:
6601 		/* TODO: NOT IMPLEMENTED */
6602 		break;
6603 
6604 	case CDP_DUMP_TX_FLOW_POOL_INFO:
6605 		cdp_dump_flow_pool_info((struct cdp_soc_t *)soc);
6606 		break;
6607 
6608 	case CDP_DP_NAPI_STATS:
6609 		dp_print_napi_stats(soc);
6610 		break;
6611 
6612 	case CDP_TXRX_DESC_STATS:
6613 		/* TODO: NOT IMPLEMENTED */
6614 		break;
6615 
6616 	default:
6617 		status = QDF_STATUS_E_INVAL;
6618 		break;
6619 	}
6620 
6621 	return status;
6622 
6623 }
6624 
6625 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
6626 /**
6627  * dp_update_flow_control_parameters() - API to store datapath
6628  *                            flow control config parameters
6629  * @soc: soc handle
6630  * @params: datapath config parameters
6631  *
6632  * Return: void
6633  */
6634 static inline
6635 void dp_update_flow_control_parameters(struct dp_soc *soc,
6636 				struct cdp_config_params *params)
6637 {
6638 	soc->wlan_cfg_ctx->tx_flow_stop_queue_threshold =
6639 					params->tx_flow_stop_queue_threshold;
6640 	soc->wlan_cfg_ctx->tx_flow_start_queue_offset =
6641 					params->tx_flow_start_queue_offset;
6642 }
6643 #else
6644 static inline
6645 void dp_update_flow_control_parameters(struct dp_soc *soc,
6646 				struct cdp_config_params *params)
6647 {
6648 }
6649 #endif
6650 
6651 /**
6652  * dp_update_config_parameters() - API to store datapath
6653  *                            config parameters
6654  * @soc: soc handle
6655  * @params: datapath config parameters
6656  *
6657  * Return: status
6658  */
6659 static
6660 QDF_STATUS dp_update_config_parameters(struct cdp_soc *psoc,
6661 				struct cdp_config_params *params)
6662 {
6663 	struct dp_soc *soc = (struct dp_soc *)psoc;
6664 
6665 	if (!(soc)) {
6666 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
6667 				"%s: Invalid handle", __func__);
6668 		return QDF_STATUS_E_INVAL;
6669 	}
6670 
6671 	soc->wlan_cfg_ctx->tso_enabled = params->tso_enable;
6672 	soc->wlan_cfg_ctx->lro_enabled = params->lro_enable;
6673 	soc->wlan_cfg_ctx->rx_hash = params->flow_steering_enable;
6674 	soc->wlan_cfg_ctx->tcp_udp_checksumoffload =
6675 				params->tcp_udp_checksumoffload;
6676 	soc->wlan_cfg_ctx->napi_enabled = params->napi_enable;
6677 
6678 	dp_update_flow_control_parameters(soc, params);
6679 
6680 	return QDF_STATUS_SUCCESS;
6681 }
6682 
6683 /**
6684  * dp_txrx_set_wds_rx_policy() - API to set the WDS rx policy
6685  *                            for a vdev
6686  * @vdev_handle - datapath vdev handle
6687  * @val: WDS rx policy value
6688  *
6689  * Return: void
6690  */
6691 #ifdef WDS_VENDOR_EXTENSION
6692 void
6693 dp_txrx_set_wds_rx_policy(
6694 		struct cdp_vdev *vdev_handle,
6695 		u_int32_t val)
6696 {
6697 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
6698 	struct dp_peer *peer;
6699 	if (vdev->opmode == wlan_op_mode_ap) {
6700 		/* for ap, set it on bss_peer */
6701 		TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
6702 			if (peer->bss_peer) {
6703 				peer->wds_ecm.wds_rx_filter = 1;
6704 				peer->wds_ecm.wds_rx_ucast_4addr = (val & WDS_POLICY_RX_UCAST_4ADDR) ? 1:0;
6705 				peer->wds_ecm.wds_rx_mcast_4addr = (val & WDS_POLICY_RX_MCAST_4ADDR) ? 1:0;
6706 				break;
6707 			}
6708 		}
6709 	} else if (vdev->opmode == wlan_op_mode_sta) {
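		/* For STA, apply the policy on the first peer, expected to be the BSS (AP) peer */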
6710 		peer = TAILQ_FIRST(&vdev->peer_list);
6711 		peer->wds_ecm.wds_rx_filter = 1;
6712 		peer->wds_ecm.wds_rx_ucast_4addr = (val & WDS_POLICY_RX_UCAST_4ADDR) ? 1:0;
6713 		peer->wds_ecm.wds_rx_mcast_4addr = (val & WDS_POLICY_RX_MCAST_4ADDR) ? 1:0;
6714 	}
6715 }
6716 
6717 /**
6718  * dp_txrx_peer_wds_tx_policy_update() - API to set tx wds policy
6719  *
6720  * @peer_handle - datapath peer handle
6721  * @wds_tx_ucast: policy for unicast transmission
6722  * @wds_tx_mcast: policy for multicast transmission
6723  *
6724  * Return: void
6725  */
6726 void
6727 dp_txrx_peer_wds_tx_policy_update(struct cdp_peer *peer_handle,
6728 		int wds_tx_ucast, int wds_tx_mcast)
6729 {
6730 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
6731 	if (wds_tx_ucast || wds_tx_mcast) {
6732 		peer->wds_enabled = 1;
6733 		peer->wds_ecm.wds_tx_ucast_4addr = wds_tx_ucast;
6734 		peer->wds_ecm.wds_tx_mcast_4addr = wds_tx_mcast;
6735 	} else {
6736 		peer->wds_enabled = 0;
6737 		peer->wds_ecm.wds_tx_ucast_4addr = 0;
6738 		peer->wds_ecm.wds_tx_mcast_4addr = 0;
6739 	}
6740 
6741 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
6742 			FL("Policy Update set to: "
6743 			   "peer->wds_enabled %d "
6744 			   "peer->wds_ecm.wds_tx_ucast_4addr %d "
6745 			   "peer->wds_ecm.wds_tx_mcast_4addr %d\n"),
6746 			peer->wds_enabled, peer->wds_ecm.wds_tx_ucast_4addr,
6747 			peer->wds_ecm.wds_tx_mcast_4addr);
6748 	return;
6749 }
6750 #endif
6751 
6752 static struct cdp_wds_ops dp_ops_wds = {
6753 	.vdev_set_wds = dp_vdev_set_wds,
6754 #ifdef WDS_VENDOR_EXTENSION
6755 	.txrx_set_wds_rx_policy = dp_txrx_set_wds_rx_policy,
6756 	.txrx_wds_peer_tx_policy_update = dp_txrx_peer_wds_tx_policy_update,
6757 #endif
6758 };
6759 
6760 /*
6761  * dp_peer_delete_ast_entries(): Delete all AST entries for a peer
6762  * @soc - datapath soc handle
6763  * @peer - datapath peer handle
6764  *
6765  * Delete the AST entries belonging to a peer
6766  */
6767 #ifdef FEATURE_AST
6768 static inline void dp_peer_delete_ast_entries(struct dp_soc *soc,
6769 		struct dp_peer *peer)
6770 {
6771 	struct dp_ast_entry *ast_entry, *temp_ast_entry;
6772 
6773 	qdf_spin_lock_bh(&soc->ast_lock);
6774 	DP_PEER_ITERATE_ASE_LIST(peer, ast_entry, temp_ast_entry)
6775 		dp_peer_del_ast(soc, ast_entry);
6776 
6777 	qdf_spin_unlock_bh(&soc->ast_lock);
6778 }
6779 #else
6780 static inline void dp_peer_delete_ast_entries(struct dp_soc *soc,
6781 		struct dp_peer *peer)
6782 {
6783 }
6784 #endif
6785 
6786 /*
6787  * dp_txrx_data_tx_cb_set(): set the callback for non-standard tx
6788  * @vdev_handle - datapath vdev handle
6789  * @callback - callback function
6790  * @ctxt: callback context
6791  *
6792  */
6793 static void
6794 dp_txrx_data_tx_cb_set(struct cdp_vdev *vdev_handle,
6795 		       ol_txrx_data_tx_cb callback, void *ctxt)
6796 {
6797 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
6798 
6799 	vdev->tx_non_std_data_callback.func = callback;
6800 	vdev->tx_non_std_data_callback.ctxt = ctxt;
6801 }
6802 
6803 /**
6804  * dp_pdev_get_dp_txrx_handle() - get dp handle from pdev
6805  * @pdev_hdl: datapath pdev handle
6806  *
6807  * Return: opaque pointer to dp txrx handle
6808  */
6809 static void *dp_pdev_get_dp_txrx_handle(struct cdp_pdev *pdev_hdl)
6810 {
6811 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_hdl;
6812 
6813 	return pdev->dp_txrx_handle;
6814 }
6815 
6816 /**
6817  * dp_pdev_set_dp_txrx_handle() - set dp handle in pdev
6818  * @pdev_hdl: datapath pdev handle
6819  * @dp_txrx_hdl: opaque pointer for dp_txrx_handle
6820  *
6821  * Return: void
6822  */
6823 static void
6824 dp_pdev_set_dp_txrx_handle(struct cdp_pdev *pdev_hdl, void *dp_txrx_hdl)
6825 {
6826 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_hdl;
6827 
6828 	pdev->dp_txrx_handle = dp_txrx_hdl;
6829 }
6830 
6831 /**
6832  * dp_soc_get_dp_txrx_handle() - get context for external-dp from dp soc
6833  * @soc_handle: datapath soc handle
6834  *
6835  * Return: opaque pointer to external dp (non-core DP)
6836  */
6837 static void *dp_soc_get_dp_txrx_handle(struct cdp_soc *soc_handle)
6838 {
6839 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
6840 
6841 	return soc->external_txrx_handle;
6842 }
6843 
6844 /**
6845  * dp_soc_set_dp_txrx_handle() - set external dp handle in soc
6846  * @soc_handle: datapath soc handle
6847  * @txrx_handle: opaque pointer to external dp (non-core DP)
6848  *
6849  * Return: void
6850  */
6851 static void
6852 dp_soc_set_dp_txrx_handle(struct cdp_soc *soc_handle, void *txrx_handle)
6853 {
6854 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
6855 
6856 	soc->external_txrx_handle = txrx_handle;
6857 }
6858 
6859 #ifdef FEATURE_AST
6860 static void dp_peer_teardown_wifi3(struct cdp_vdev *vdev_hdl, void *peer_hdl)
6861 {
6862 	struct dp_vdev *vdev = (struct dp_vdev *) vdev_hdl;
6863 	struct dp_peer *peer = (struct dp_peer *) peer_hdl;
6864 	struct dp_soc *soc = (struct dp_soc *) vdev->pdev->soc;
6865 
6866 	/*
6867 	 * For the BSS peer, a new peer is not created on alloc_node if a
6868 	 * peer with the same address already exists; instead, the refcnt of
6869 	 * the existing peer is increased. Correspondingly, in the delete
6870 	 * path, only the refcnt is decreased, and the peer is deleted only
6871 	 * when all references are dropped. So delete_in_progress should not
6872 	 * be set for the bss_peer unless only 2 references remain (the peer
6873 	 * map reference and the peer hash table reference).
6874 	 */
6875 	if (peer->bss_peer && (qdf_atomic_read(&peer->ref_cnt) > 2)) {
6876 		return;
6877 	}
6878 
6879 	peer->delete_in_progress = true;
6880 	dp_peer_delete_ast_entries(soc, peer);
6881 }
6882 #endif
6883 
6884 #ifdef ATH_SUPPORT_NAC_RSSI
6885 static QDF_STATUS dp_config_for_nac_rssi(struct cdp_vdev *vdev_handle,
6886 		enum cdp_nac_param_cmd cmd, char *bssid, char *client_macaddr,
6887 		uint8_t chan_num)
6888 {
6889 
6890 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
6891 	struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;
6892 	struct dp_soc *soc = (struct dp_soc *) vdev->pdev->soc;
6893 
6894 	pdev->nac_rssi_filtering = 1;
6895 	/* Store address of NAC (neighbour peer) which will be checked
6896 	 * against TA of received packets.
6897 	 */
6898 
6899 	if (cmd == CDP_NAC_PARAM_ADD) {
6900 		qdf_mem_copy(vdev->cdp_nac_rssi.client_mac,
6901 				client_macaddr, DP_MAC_ADDR_LEN);
6902 		vdev->cdp_nac_rssi_enabled = 1;
6903 	} else if (cmd == CDP_NAC_PARAM_DEL) {
6904 		if (!qdf_mem_cmp(vdev->cdp_nac_rssi.client_mac,
6905 			client_macaddr, DP_MAC_ADDR_LEN)) {
6906 				/* delete this peer from the list */
6907 			qdf_mem_zero(vdev->cdp_nac_rssi.client_mac,
6908 				DP_MAC_ADDR_LEN);
6909 		}
6910 		vdev->cdp_nac_rssi_enabled = 0;
6911 	}
6912 
6913 	if (soc->cdp_soc.ol_ops->config_bssid_in_fw_for_nac_rssi)
6914 		soc->cdp_soc.ol_ops->config_bssid_in_fw_for_nac_rssi
6915 			(vdev->pdev->osif_pdev, vdev->vdev_id, cmd, bssid);
6916 
6917 	return QDF_STATUS_SUCCESS;
6918 }
6919 #endif
6920 
6921 static QDF_STATUS dp_peer_map_attach_wifi3(struct cdp_soc_t  *soc_hdl,
6922 		uint32_t max_peers)
6923 {
6924 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
6925 
6926 	soc->max_peers = max_peers;
6927 
6928 	qdf_print("%s max_peers %u\n", __func__, max_peers);
6929 
6930 	if (dp_peer_find_attach(soc))
6931 		return QDF_STATUS_E_FAILURE;
6932 
6933 	return QDF_STATUS_SUCCESS;
6934 }
6935 
6936 static struct cdp_cmn_ops dp_ops_cmn = {
6937 	.txrx_soc_attach_target = dp_soc_attach_target_wifi3,
6938 	.txrx_vdev_attach = dp_vdev_attach_wifi3,
6939 	.txrx_vdev_detach = dp_vdev_detach_wifi3,
6940 	.txrx_pdev_attach = dp_pdev_attach_wifi3,
6941 	.txrx_pdev_detach = dp_pdev_detach_wifi3,
6942 	.txrx_peer_create = dp_peer_create_wifi3,
6943 	.txrx_peer_setup = dp_peer_setup_wifi3,
6944 #ifdef FEATURE_AST
6945 	.txrx_peer_teardown = dp_peer_teardown_wifi3,
6946 #else
6947 	.txrx_peer_teardown = NULL,
6948 #endif
6949 	.txrx_peer_add_ast = dp_peer_add_ast_wifi3,
6950 	.txrx_peer_del_ast = dp_peer_del_ast_wifi3,
6951 	.txrx_peer_update_ast = dp_peer_update_ast_wifi3,
6952 	.txrx_peer_ast_hash_find = dp_peer_ast_hash_find_wifi3,
6953 	.txrx_peer_ast_get_pdev_id = dp_peer_ast_get_pdev_id_wifi3,
6954 	.txrx_peer_ast_get_next_hop = dp_peer_ast_get_next_hop_wifi3,
6955 	.txrx_peer_ast_set_type = dp_peer_ast_set_type_wifi3,
6956 	.txrx_peer_delete = dp_peer_delete_wifi3,
6957 	.txrx_vdev_register = dp_vdev_register_wifi3,
6958 	.txrx_soc_detach = dp_soc_detach_wifi3,
6959 	.txrx_get_vdev_mac_addr = dp_get_vdev_mac_addr_wifi3,
6960 	.txrx_get_vdev_from_vdev_id = dp_get_vdev_from_vdev_id_wifi3,
6961 	.txrx_get_ctrl_pdev_from_vdev = dp_get_ctrl_pdev_from_vdev_wifi3,
6962 	.txrx_ath_getstats = dp_get_device_stats,
6963 	.addba_requestprocess = dp_addba_requestprocess_wifi3,
6964 	.addba_responsesetup = dp_addba_responsesetup_wifi3,
6965 	.delba_process = dp_delba_process_wifi3,
6966 	.set_addba_response = dp_set_addba_response,
6967 	.get_peer_mac_addr_frm_id = dp_get_peer_mac_addr_frm_id,
6968 	.flush_cache_rx_queue = NULL,
6969 	/* TODO: get API's for dscp-tid need to be added*/
6970 	.set_vdev_dscp_tid_map = dp_set_vdev_dscp_tid_map_wifi3,
6971 	.set_pdev_dscp_tid_map = dp_set_pdev_dscp_tid_map_wifi3,
6972 	.txrx_stats_request = dp_txrx_stats_request,
6973 	.txrx_set_monitor_mode = dp_vdev_set_monitor_mode,
6974 	.txrx_get_pdev_id_frm_pdev = dp_get_pdev_id_frm_pdev,
6975 	.txrx_set_nac = dp_set_nac,
6976 	.txrx_get_tx_pending = dp_get_tx_pending,
6977 	.txrx_set_pdev_tx_capture = dp_config_debug_sniffer,
6978 	.txrx_get_peer_mac_from_peer_id = dp_get_peer_mac_from_peer_id,
6979 	.display_stats = dp_txrx_dump_stats,
6980 	.txrx_soc_set_nss_cfg = dp_soc_set_nss_cfg_wifi3,
6981 	.txrx_soc_get_nss_cfg = dp_soc_get_nss_cfg_wifi3,
6982 #ifdef DP_INTR_POLL_BASED
6983 	.txrx_intr_attach = dp_soc_interrupt_attach_wrapper,
6984 #else
6985 	.txrx_intr_attach = dp_soc_interrupt_attach,
6986 #endif
6987 	.txrx_intr_detach = dp_soc_interrupt_detach,
6988 	.set_pn_check = dp_set_pn_check_wifi3,
6989 	.update_config_parameters = dp_update_config_parameters,
6990 	/* TODO: Add other functions */
6991 	.txrx_data_tx_cb_set = dp_txrx_data_tx_cb_set,
6992 	.get_dp_txrx_handle = dp_pdev_get_dp_txrx_handle,
6993 	.set_dp_txrx_handle = dp_pdev_set_dp_txrx_handle,
6994 	.get_soc_dp_txrx_handle = dp_soc_get_dp_txrx_handle,
6995 	.set_soc_dp_txrx_handle = dp_soc_set_dp_txrx_handle,
6996 	.tx_send = dp_tx_send,
6997 	.txrx_peer_reset_ast = dp_wds_reset_ast_wifi3,
6998 	.txrx_peer_reset_ast_table = dp_wds_reset_ast_table_wifi3,
6999 	.txrx_peer_flush_ast_table = dp_wds_flush_ast_table_wifi3,
7000 	.txrx_peer_map_attach = dp_peer_map_attach_wifi3,
7001 };
7002 
7003 static struct cdp_ctrl_ops dp_ops_ctrl = {
7004 	.txrx_peer_authorize = dp_peer_authorize,
7005 #ifdef QCA_SUPPORT_SON
7006 	.txrx_set_inact_params = dp_set_inact_params,
7007 	.txrx_start_inact_timer = dp_start_inact_timer,
7008 	.txrx_set_overload = dp_set_overload,
7009 	.txrx_peer_is_inact = dp_peer_is_inact,
7010 	.txrx_mark_peer_inact = dp_mark_peer_inact,
7011 #endif
7012 	.txrx_set_vdev_rx_decap_type = dp_set_vdev_rx_decap_type,
7013 	.txrx_set_tx_encap_type = dp_set_vdev_tx_encap_type,
7014 #ifdef MESH_MODE_SUPPORT
7015 	.txrx_set_mesh_mode  = dp_peer_set_mesh_mode,
7016 	.txrx_set_mesh_rx_filter = dp_peer_set_mesh_rx_filter,
7017 #endif
7018 	.txrx_set_vdev_param = dp_set_vdev_param,
7019 	.txrx_peer_set_nawds = dp_peer_set_nawds,
7020 	.txrx_set_pdev_reo_dest = dp_set_pdev_reo_dest,
7021 	.txrx_get_pdev_reo_dest = dp_get_pdev_reo_dest,
7022 	.txrx_set_filter_neighbour_peers = dp_set_filter_neighbour_peers,
7023 	.txrx_update_filter_neighbour_peers =
7024 		dp_update_filter_neighbour_peers,
7025 	.txrx_get_sec_type = dp_get_sec_type,
7026 	/* TODO: Add other functions */
7027 	.txrx_wdi_event_sub = dp_wdi_event_sub,
7028 	.txrx_wdi_event_unsub = dp_wdi_event_unsub,
7029 #ifdef WDI_EVENT_ENABLE
7030 	.txrx_get_pldev = dp_get_pldev,
7031 #endif
7032 	.txrx_set_pdev_param = dp_set_pdev_param,
7033 #ifdef ATH_SUPPORT_NAC_RSSI
7034 	.txrx_vdev_config_for_nac_rssi = dp_config_for_nac_rssi,
7035 #endif
7036 };
7037 
7038 static struct cdp_me_ops dp_ops_me = {
7039 #ifdef ATH_SUPPORT_IQUE
7040 	.tx_me_alloc_descriptor = dp_tx_me_alloc_descriptor,
7041 	.tx_me_free_descriptor = dp_tx_me_free_descriptor,
7042 	.tx_me_convert_ucast = dp_tx_me_send_convert_ucast,
7043 #endif
7044 };
7045 
7046 static struct cdp_mon_ops dp_ops_mon = {
7047 	.txrx_monitor_set_filter_ucast_data = NULL,
7048 	.txrx_monitor_set_filter_mcast_data = NULL,
7049 	.txrx_monitor_set_filter_non_data = NULL,
7050 	.txrx_monitor_get_filter_ucast_data = dp_vdev_get_filter_ucast_data,
7051 	.txrx_monitor_get_filter_mcast_data = dp_vdev_get_filter_mcast_data,
7052 	.txrx_monitor_get_filter_non_data = dp_vdev_get_filter_non_data,
7053 	.txrx_reset_monitor_mode = dp_reset_monitor_mode,
7054 	/* Added support for HK advance filter */
7055 	.txrx_set_advance_monitor_filter = dp_pdev_set_advance_monitor_filter,
7056 };
7057 
7058 static struct cdp_host_stats_ops dp_ops_host_stats = {
7059 	.txrx_per_peer_stats = dp_get_host_peer_stats,
7060 	.get_fw_peer_stats = dp_get_fw_peer_stats,
7061 	.get_htt_stats = dp_get_htt_stats,
7062 	.txrx_enable_enhanced_stats = dp_enable_enhanced_stats,
7063 	.txrx_disable_enhanced_stats = dp_disable_enhanced_stats,
7064 	.txrx_stats_publish = dp_txrx_stats_publish,
7065 	/* TODO */
7066 };
7067 
7068 static struct cdp_raw_ops dp_ops_raw = {
7069 	/* TODO */
7070 };
7071 
7072 #ifdef CONFIG_WIN
7073 static struct cdp_pflow_ops dp_ops_pflow = {
7074 	/* TODO */
7075 };
7076 #endif /* CONFIG_WIN */
7077 
7078 #ifdef FEATURE_RUNTIME_PM
7079 /**
7080  * dp_runtime_suspend() - ensure DP is ready to runtime suspend
7081  * @opaque_pdev: DP pdev context
7082  *
7083  * DP is ready to runtime suspend if there are no pending TX packets.
7084  *
7085  * Return: QDF_STATUS
7086  */
7087 static QDF_STATUS dp_runtime_suspend(struct cdp_pdev *opaque_pdev)
7088 {
7089 	struct dp_pdev *pdev = (struct dp_pdev *)opaque_pdev;
7090 	struct dp_soc *soc = pdev->soc;
7091 
7092 	/* Call the DP TX flow control API to check whether there
7093 	 * are any pending packets */
7094 
7095 	if (soc->intr_mode == DP_INTR_POLL)
7096 		qdf_timer_stop(&soc->int_timer);
7097 
7098 	return QDF_STATUS_SUCCESS;
7099 }
7100 
7101 /**
7102  * dp_runtime_resume() - ensure DP is ready to runtime resume
7103  * @opaque_pdev: DP pdev context
7104  *
7105  * Resume DP for runtime PM.
7106  *
7107  * Return: QDF_STATUS
7108  */
7109 static QDF_STATUS dp_runtime_resume(struct cdp_pdev *opaque_pdev)
7110 {
7111 	struct dp_pdev *pdev = (struct dp_pdev *)opaque_pdev;
7112 	struct dp_soc *soc = pdev->soc;
7113 	void *hal_srng;
7114 	int i;
7115 
7116 	if (soc->intr_mode == DP_INTR_POLL)
7117 		qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
7118 
7119 	for (i = 0; i < MAX_TCL_DATA_RINGS; i++) {
7120 		hal_srng = soc->tcl_data_ring[i].hal_srng;
7121 		if (hal_srng) {
7122 			/* We actually only need to acquire the lock */
7123 			hal_srng_access_start(soc->hal_soc, hal_srng);
7124 			/* Update SRC ring head pointer for HW to send
7125 			 * all pending packets */
7126 			hal_srng_access_end(soc->hal_soc, hal_srng);
7127 		}
7128 	}
7129 
7130 	return QDF_STATUS_SUCCESS;
7131 }
7132 #endif /* FEATURE_RUNTIME_PM */
7133 
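/**
 * dp_bus_suspend() - prepare DP for bus suspend
 * @opaque_pdev: DP pdev context
 *
 * Stop the interrupt timer when interrupt polling mode is in use.
 *
 * Return: QDF_STATUS
 */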
7134 static QDF_STATUS dp_bus_suspend(struct cdp_pdev *opaque_pdev)
7135 {
7136 	struct dp_pdev *pdev = (struct dp_pdev *)opaque_pdev;
7137 	struct dp_soc *soc = pdev->soc;
7138 
7139 	if (soc->intr_mode == DP_INTR_POLL)
7140 		qdf_timer_stop(&soc->int_timer);
7141 
7142 	return QDF_STATUS_SUCCESS;
7143 }
7144 
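/**
 * dp_bus_resume() - resume DP after bus resume
 * @opaque_pdev: DP pdev context
 *
 * Restart the interrupt timer when interrupt polling mode is in use.
 *
 * Return: QDF_STATUS
 */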
7145 static QDF_STATUS dp_bus_resume(struct cdp_pdev *opaque_pdev)
7146 {
7147 	struct dp_pdev *pdev = (struct dp_pdev *)opaque_pdev;
7148 	struct dp_soc *soc = pdev->soc;
7149 
7150 	if (soc->intr_mode == DP_INTR_POLL)
7151 		qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
7152 
7153 	return QDF_STATUS_SUCCESS;
7154 }
7155 
7156 #ifndef CONFIG_WIN
7157 static struct cdp_misc_ops dp_ops_misc = {
7158 	.tx_non_std = dp_tx_non_std,
7159 	.get_opmode = dp_get_opmode,
7160 #ifdef FEATURE_RUNTIME_PM
7161 	.runtime_suspend = dp_runtime_suspend,
7162 	.runtime_resume = dp_runtime_resume,
7163 #endif /* FEATURE_RUNTIME_PM */
7164 	.pkt_log_init = dp_pkt_log_init,
7165 	.pkt_log_con_service = dp_pkt_log_con_service,
7166 };
7167 
7168 static struct cdp_flowctl_ops dp_ops_flowctl = {
7169 	/* WIFI 3.0 DP - implement as required. */
7170 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
7171 	.flow_pool_map_handler = dp_tx_flow_pool_map,
7172 	.flow_pool_unmap_handler = dp_tx_flow_pool_unmap,
7173 	.register_pause_cb = dp_txrx_register_pause_cb,
7174 	.dump_flow_pool_info = dp_tx_dump_flow_pool_info,
7175 #endif /* QCA_LL_TX_FLOW_CONTROL_V2 */
7176 };
7177 
7178 static struct cdp_lflowctl_ops dp_ops_l_flowctl = {
7179 	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
7180 };
7181 
7182 #ifdef IPA_OFFLOAD
7183 static struct cdp_ipa_ops dp_ops_ipa = {
7184 	.ipa_get_resource = dp_ipa_get_resource,
7185 	.ipa_set_doorbell_paddr = dp_ipa_set_doorbell_paddr,
7186 	.ipa_op_response = dp_ipa_op_response,
7187 	.ipa_register_op_cb = dp_ipa_register_op_cb,
7188 	.ipa_get_stat = dp_ipa_get_stat,
7189 	.ipa_tx_data_frame = dp_tx_send_ipa_data_frame,
7190 	.ipa_enable_autonomy = dp_ipa_enable_autonomy,
7191 	.ipa_disable_autonomy = dp_ipa_disable_autonomy,
7192 	.ipa_setup = dp_ipa_setup,
7193 	.ipa_cleanup = dp_ipa_cleanup,
7194 	.ipa_setup_iface = dp_ipa_setup_iface,
7195 	.ipa_cleanup_iface = dp_ipa_cleanup_iface,
7196 	.ipa_enable_pipes = dp_ipa_enable_pipes,
7197 	.ipa_disable_pipes = dp_ipa_disable_pipes,
7198 	.ipa_set_perf_level = dp_ipa_set_perf_level
7199 };
7200 #endif
7201 
7202 static struct cdp_bus_ops dp_ops_bus = {
7203 	.bus_suspend = dp_bus_suspend,
7204 	.bus_resume = dp_bus_resume
7205 };
7206 
7207 static struct cdp_ocb_ops dp_ops_ocb = {
7208 	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
7209 };
7210 
7211 
7212 static struct cdp_throttle_ops dp_ops_throttle = {
7213 	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
7214 };
7215 
7216 static struct cdp_mob_stats_ops dp_ops_mob_stats = {
7217 	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
7218 };
7219 
7220 static struct cdp_cfg_ops dp_ops_cfg = {
7221 	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
7222 };
7223 
7224 /*
7225  * dp_wrapper_peer_get_ref_by_addr() - wrapper function to get a peer reference by address
7226  * @dev: physical device instance
7227  * @peer_mac_addr: peer mac address
7228  * @local_id: local id for the peer
7229  * @debug_id: to track enum peer access
7230  *
7231  * Return: peer instance pointer
7232  */
7233 static inline void *
7234 dp_wrapper_peer_get_ref_by_addr(struct cdp_pdev *dev, u8 *peer_mac_addr,
7235 				u8 *local_id,
7236 				enum peer_debug_id_type debug_id)
7237 {
7238 	/*
7239 	 * Currently this function does not implement the "get ref"
7240 	 * functionality and is mapped to dp_find_peer_by_addr which does not
7241 	 * increment the peer ref count. So the peer state is uncertain after
7242 	 * calling this API. The functionality needs to be implemented.
7243 	 * Accordingly the corresponding release_ref function is NULL.
7244 	 */
7245 	return dp_find_peer_by_addr(dev, peer_mac_addr, local_id);
7246 }
7247 
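/* Peer ops used only by the MCL build (compiled under !CONFIG_WIN) */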
7248 static struct cdp_peer_ops dp_ops_peer = {
7249 	.register_peer = dp_register_peer,
7250 	.clear_peer = dp_clear_peer,
7251 	.find_peer_by_addr = dp_find_peer_by_addr,
7252 	.find_peer_by_addr_and_vdev = dp_find_peer_by_addr_and_vdev,
7253 	.peer_get_ref_by_addr = dp_wrapper_peer_get_ref_by_addr,
7254 	.peer_release_ref = NULL,
7255 	.local_peer_id = dp_local_peer_id,
7256 	.peer_find_by_local_id = dp_peer_find_by_local_id,
7257 	.peer_state_update = dp_peer_state_update,
7258 	.get_vdevid = dp_get_vdevid,
7259 	.get_vdev_by_sta_id = dp_get_vdev_by_sta_id,
7260 	.peer_get_peer_mac_addr = dp_peer_get_peer_mac_addr,
7261 	.get_vdev_for_peer = dp_get_vdev_for_peer,
7262 	.get_peer_state = dp_get_peer_state,
7263 	.last_assoc_received = dp_get_last_assoc_received,
7264 	.last_disassoc_received = dp_get_last_disassoc_received,
7265 	.last_deauth_received = dp_get_last_deauth_received,
7266 };
7267 #endif
7268 
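/* Top-level CDP ops table; hooked up to the SOC in dp_soc_attach_wifi3() */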
7269 static struct cdp_ops dp_txrx_ops = {
7270 	.cmn_drv_ops = &dp_ops_cmn,
7271 	.ctrl_ops = &dp_ops_ctrl,
7272 	.me_ops = &dp_ops_me,
7273 	.mon_ops = &dp_ops_mon,
7274 	.host_stats_ops = &dp_ops_host_stats,
7275 	.wds_ops = &dp_ops_wds,
7276 	.raw_ops = &dp_ops_raw,
7277 #ifdef CONFIG_WIN
7278 	.pflow_ops = &dp_ops_pflow,
7279 #endif /* CONFIG_WIN */
7280 #ifndef CONFIG_WIN
7281 	.misc_ops = &dp_ops_misc,
7282 	.cfg_ops = &dp_ops_cfg,
7283 	.flowctl_ops = &dp_ops_flowctl,
7284 	.l_flowctl_ops = &dp_ops_l_flowctl,
7285 #ifdef IPA_OFFLOAD
7286 	.ipa_ops = &dp_ops_ipa,
7287 #endif
7288 	.bus_ops = &dp_ops_bus,
7289 	.ocb_ops = &dp_ops_ocb,
7290 	.peer_ops = &dp_ops_peer,
7291 	.throttle_ops = &dp_ops_throttle,
7292 	.mob_stats_ops = &dp_ops_mob_stats,
7293 #endif
7294 };
7295 
7296 /*
7297  * dp_soc_set_txrx_ring_map() - set the default TX CPU ring map
7298  * @soc: DP SoC handle
7299  *
7300  * Return: Void
7301  */
7302 static void dp_soc_set_txrx_ring_map(struct dp_soc *soc)
7303 {
7304 	uint32_t i;
7305 	for (i = 0; i < WLAN_CFG_INT_NUM_CONTEXTS; i++) {
7306 		soc->tx_ring_map[i] = dp_cpu_ring_map[DP_DEFAULT_MAP][i];
7307 	}
7308 }
7309 
7310 /*
7311  * dp_soc_attach_wifi3() - Attach txrx SOC
7312  * @hif_handle:	Opaque HIF handle
7313  * @htc_handle:	Opaque HTC handle
7314  * @qdf_osdev:	QDF device
 * @ol_ops:	Offload (OL) callback table from the control plane
7315  * @qdf_osdev:	QDF device
7316  *
7317  * Return: DP SOC handle on success, NULL on failure
7318  */
7319 /*
7320  * Local prototype added to temporarily address warning caused by
7321  * -Wmissing-prototypes. A more correct solution, namely to expose
7322  * a prototype in an appropriate header file, will come later.
7323  */
7324 void *dp_soc_attach_wifi3(void *ctrl_psoc, void *hif_handle,
7325 	HTC_HANDLE htc_handle, qdf_device_t qdf_osdev,
7326 	struct ol_if_ops *ol_ops);
7327 void *dp_soc_attach_wifi3(void *ctrl_psoc, void *hif_handle,
7328 	HTC_HANDLE htc_handle, qdf_device_t qdf_osdev,
7329 	struct ol_if_ops *ol_ops)
7330 {
7331 	struct dp_soc *soc = qdf_mem_malloc(sizeof(*soc));
7332 
7333 	if (!soc) {
7334 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
7335 			FL("DP SOC memory allocation failed"));
7336 		goto fail0;
7337 	}
7338 
7339 	soc->cdp_soc.ops = &dp_txrx_ops;
7340 	soc->cdp_soc.ol_ops = ol_ops;
7341 	soc->ctrl_psoc = ctrl_psoc;
7342 	soc->osdev = qdf_osdev;
7343 	soc->hif_handle = hif_handle;
7344 
7345 	soc->hal_soc = hif_get_hal_handle(hif_handle);
7346 	soc->htt_handle = htt_soc_attach(soc, ctrl_psoc, htc_handle,
7347 		soc->hal_soc, qdf_osdev);
7348 	if (!soc->htt_handle) {
7349 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
7350 			FL("HTT attach failed"));
7351 		goto fail1;
7352 	}
7353 
7354 	soc->wlan_cfg_ctx = wlan_cfg_soc_attach();
7355 	if (!soc->wlan_cfg_ctx) {
7356 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
7357 				FL("wlan_cfg_soc_attach failed"));
7358 		goto fail2;
7359 	}
7360 
7361 	wlan_cfg_set_rx_hash(soc->wlan_cfg_ctx, rx_hash);
7362 	soc->cce_disable = false;
7363 
7364 	if (soc->cdp_soc.ol_ops->get_dp_cfg_param) {
7365 		int ret = soc->cdp_soc.ol_ops->get_dp_cfg_param(soc->ctrl_psoc,
7366 				CDP_CFG_MAX_PEER_ID);
7367 
7368 		if (ret != -EINVAL) {
7369 			wlan_cfg_set_max_peer_id(soc->wlan_cfg_ctx, ret);
7370 		}
7371 
7372 		ret = soc->cdp_soc.ol_ops->get_dp_cfg_param(soc->ctrl_psoc,
7373 				CDP_CFG_CCE_DISABLE);
7374 		if (ret == 1)
7375 			soc->cce_disable = true;
7376 	}
7377 
7378 	qdf_spinlock_create(&soc->peer_ref_mutex);
7379 
7380 	qdf_spinlock_create(&soc->reo_desc_freelist_lock);
7381 	qdf_list_create(&soc->reo_desc_freelist, REO_DESC_FREELIST_SIZE);
7382 
7383 	/* fill the tx/rx cpu ring map*/
7384 	dp_soc_set_txrx_ring_map(soc);
7385 
7386 	qdf_spinlock_create(&soc->htt_stats.lock);
7387 	/* initialize work queue for stats processing */
7388 	qdf_create_work(0, &soc->htt_stats.work, htt_t2h_stats_handler, soc);
7389 
7390 	/*Initialize inactivity timer for wifison */
7391 	dp_init_inact_timer(soc);
7392 
7393 	return (void *)soc;
7394 
7395 fail2:
7396 	htt_soc_detach(soc->htt_handle);
7397 fail1:
7398 	qdf_mem_free(soc);
7399 fail0:
7400 	return NULL;
7401 }
7402 
7403 /*
7404  * dp_get_pdev_for_mac_id() -  Return pdev for mac_id
7405  * dp_get_pdev_for_mac_id() - Return pdev for mac_id
7406  *
7407  * @soc: handle to DP soc
7408  * @mac_id: MAC id
7409  *
7410  * Return: pdev corresponding to the MAC id
7411 void *dp_get_pdev_for_mac_id(struct dp_soc *soc, uint32_t mac_id)
7412 {
7413 	if (wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx))
7414 		return soc->pdev_list[mac_id];
7415 
7416 	/* Typically for MCL, as there is only 1 PDEV */
7417 	return soc->pdev_list[0];
7418 }
7419 
7420 /*
7421  * dp_is_hw_dbs_enable() - Check whether HW DBS (2x2) is enabled
7422  * @soc:		DP SoC context
7423  * @max_mac_rings:	Number of MAC rings; reduced to 1 when DBS is disabled
7424  *
7425  * Return: None
7426  */
7427 static
7428 void dp_is_hw_dbs_enable(struct dp_soc *soc,
7429 				int *max_mac_rings)
7430 {
7431 	bool dbs_enable = false;
7432 	if (soc->cdp_soc.ol_ops->is_hw_dbs_2x2_capable)
7433 		dbs_enable = soc->cdp_soc.ol_ops->
7434 		is_hw_dbs_2x2_capable(soc->ctrl_psoc);
7435 
7436 	*max_mac_rings = dbs_enable ? (*max_mac_rings) : 1;
7437 }
7438 
7439 /*
7440  * dp_set_pktlog_wifi3() - enable/disable packet log for a WDI event
7441  * @pdev: Datapath PDEV handle
7442  * @event: which event's notifications are being subscribed to
7443  * @enable: WDI event subscribe or not (true or false)
7444  *
7445  * Return: 0 on success
7446  */
7447 #ifdef WDI_EVENT_ENABLE
7448 int dp_set_pktlog_wifi3(struct dp_pdev *pdev, uint32_t event,
7449 	bool enable)
7450 {
7451 	struct dp_soc *soc = pdev->soc;
7452 	struct htt_rx_ring_tlv_filter htt_tlv_filter = {0};
7453 	int max_mac_rings = wlan_cfg_get_num_mac_rings
7454 					(pdev->wlan_cfg_ctx);
7455 	uint8_t mac_id = 0;
7456 
7457 	dp_is_hw_dbs_enable(soc, &max_mac_rings);
7458 
7459 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
7460 			FL("Max_mac_rings %d\n"),
7461 			max_mac_rings);
7462 
7463 	if (enable) {
7464 		switch (event) {
7465 		case WDI_EVENT_RX_DESC:
7466 			if (pdev->monitor_vdev) {
7467 				/* Nothing needs to be done if monitor mode is
7468 				 * enabled
7469 				 */
7470 				return 0;
7471 			}
7472 			if (pdev->rx_pktlog_mode != DP_RX_PKTLOG_FULL) {
7473 				pdev->rx_pktlog_mode = DP_RX_PKTLOG_FULL;
7474 				htt_tlv_filter.mpdu_start = 1;
7475 				htt_tlv_filter.msdu_start = 1;
7476 				htt_tlv_filter.msdu_end = 1;
7477 				htt_tlv_filter.mpdu_end = 1;
7478 				htt_tlv_filter.packet_header = 1;
7479 				htt_tlv_filter.attention = 1;
7480 				htt_tlv_filter.ppdu_start = 1;
7481 				htt_tlv_filter.ppdu_end = 1;
7482 				htt_tlv_filter.ppdu_end_user_stats = 1;
7483 				htt_tlv_filter.ppdu_end_user_stats_ext = 1;
7484 				htt_tlv_filter.ppdu_end_status_done = 1;
7485 				htt_tlv_filter.enable_fp = 1;
7486 				htt_tlv_filter.fp_mgmt_filter = FILTER_MGMT_ALL;
7487 				htt_tlv_filter.fp_ctrl_filter = FILTER_CTRL_ALL;
7488 				htt_tlv_filter.fp_data_filter = FILTER_DATA_ALL;
7489 				htt_tlv_filter.mo_mgmt_filter = FILTER_MGMT_ALL;
7490 				htt_tlv_filter.mo_ctrl_filter = FILTER_CTRL_ALL;
7491 				htt_tlv_filter.mo_data_filter = FILTER_DATA_ALL;
7492 
7493 				for (mac_id = 0; mac_id < max_mac_rings;
7494 								mac_id++) {
7495 					int mac_for_pdev =
7496 						dp_get_mac_id_for_pdev(mac_id,
7497 								pdev->pdev_id);
7498 
7499 					htt_h2t_rx_ring_cfg(soc->htt_handle,
7500 					 mac_for_pdev,
7501 					 pdev->rxdma_mon_status_ring[mac_id]
7502 					 .hal_srng,
7503 					 RXDMA_MONITOR_STATUS,
7504 					 RX_BUFFER_SIZE,
7505 					 &htt_tlv_filter);
7506 
7507 				}
7508 
7509 				if (soc->reap_timer_init)
7510 					qdf_timer_mod(&soc->mon_reap_timer,
7511 					DP_INTR_POLL_TIMER_MS);
7512 			}
7513 			break;
7514 
7515 		case WDI_EVENT_LITE_RX:
7516 			if (pdev->monitor_vdev) {
7517 				/* Nothing needs to be done if monitor mode is
7518 				 * enabled
7519 				 */
7520 				return 0;
7521 			}
7522 
7523 			if (pdev->rx_pktlog_mode != DP_RX_PKTLOG_LITE) {
7524 				pdev->rx_pktlog_mode = DP_RX_PKTLOG_LITE;
7525 
7526 				htt_tlv_filter.ppdu_start = 1;
7527 				htt_tlv_filter.ppdu_end = 1;
7528 				htt_tlv_filter.ppdu_end_user_stats = 1;
7529 				htt_tlv_filter.ppdu_end_user_stats_ext = 1;
7530 				htt_tlv_filter.ppdu_end_status_done = 1;
7531 				htt_tlv_filter.mpdu_start = 1;
7532 				htt_tlv_filter.enable_fp = 1;
7533 				htt_tlv_filter.fp_mgmt_filter = FILTER_MGMT_ALL;
7534 				htt_tlv_filter.fp_ctrl_filter = FILTER_CTRL_ALL;
7535 				htt_tlv_filter.fp_data_filter = FILTER_DATA_ALL;
7536 				htt_tlv_filter.mo_mgmt_filter = FILTER_MGMT_ALL;
7537 				htt_tlv_filter.mo_ctrl_filter = FILTER_CTRL_ALL;
7538 				htt_tlv_filter.mo_data_filter = FILTER_DATA_ALL;
7539 
7540 				for (mac_id = 0; mac_id < max_mac_rings;
7541 								mac_id++) {
7542 					int mac_for_pdev =
7543 						dp_get_mac_id_for_pdev(mac_id,
7544 								pdev->pdev_id);
7545 
7546 					htt_h2t_rx_ring_cfg(soc->htt_handle,
7547 					mac_for_pdev,
7548 					pdev->rxdma_mon_status_ring[mac_id]
7549 					.hal_srng,
7550 					RXDMA_MONITOR_STATUS,
7551 					RX_BUFFER_SIZE_PKTLOG_LITE,
7552 					&htt_tlv_filter);
7553 				}
7554 
7555 				if (soc->reap_timer_init)
7556 					qdf_timer_mod(&soc->mon_reap_timer,
7557 					DP_INTR_POLL_TIMER_MS);
7558 			}
7559 			break;
7560 
7561 		case WDI_EVENT_LITE_T2H:
7562 			if (pdev->monitor_vdev) {
7563 				/* Nothing needs to be done if monitor mode is
7564 				 * enabled
7565 				 */
7566 				return 0;
7567 			}
7568 
7569 			for (mac_id = 0; mac_id < max_mac_rings; mac_id++) {
7570 				int mac_for_pdev = dp_get_mac_id_for_pdev(
7571 							mac_id,	pdev->pdev_id);
7572 
7573 				pdev->pktlog_ppdu_stats = true;
7574 				dp_h2t_cfg_stats_msg_send(pdev,
7575 					DP_PPDU_TXLITE_STATS_BITMASK_CFG,
7576 					mac_for_pdev);
7577 			}
7578 			break;
7579 
7580 		default:
7581 			/* Nothing needs to be done for other pktlog types */
7582 			break;
7583 		}
7584 	} else {
7585 		switch (event) {
7586 		case WDI_EVENT_RX_DESC:
7587 		case WDI_EVENT_LITE_RX:
7588 			if (pdev->monitor_vdev) {
7589 				/* Nothing needs to be done if monitor mode is
7590 				 * enabled
7591 				 */
7592 				return 0;
7593 			}
7594 			if (pdev->rx_pktlog_mode != DP_RX_PKTLOG_DISABLED) {
7595 				pdev->rx_pktlog_mode = DP_RX_PKTLOG_DISABLED;
7596 
7597 				for (mac_id = 0; mac_id < max_mac_rings;
7598 								mac_id++) {
7599 					int mac_for_pdev =
7600 						dp_get_mac_id_for_pdev(mac_id,
7601 								pdev->pdev_id);
7602 
7603 					htt_h2t_rx_ring_cfg(soc->htt_handle,
7604 					  mac_for_pdev,
7605 					  pdev->rxdma_mon_status_ring[mac_id]
7606 					  .hal_srng,
7607 					  RXDMA_MONITOR_STATUS,
7608 					  RX_BUFFER_SIZE,
7609 					  &htt_tlv_filter);
7610 				}
7611 
7612 				if (soc->reap_timer_init)
7613 					qdf_timer_stop(&soc->mon_reap_timer);
7614 			}
7615 			break;
7616 		case WDI_EVENT_LITE_T2H:
7617 			if (pdev->monitor_vdev) {
7618 				/* Nothing needs to be done if monitor mode is
7619 				 * enabled
7620 				 */
7621 				return 0;
7622 			}
7623 			/* To disable HTT_H2T_MSG_TYPE_PPDU_STATS_CFG in FW,
7624 			 * pass the value 0. Once these macros are defined in
7625 			 * the htt header file, use the proper macros instead.
7626 			 */
7627 			for (mac_id = 0; mac_id < max_mac_rings; mac_id++) {
7628 				int mac_for_pdev =
7629 						dp_get_mac_id_for_pdev(mac_id,
7630 								pdev->pdev_id);
7631 
7632 				pdev->pktlog_ppdu_stats = false;
7633 				if (!pdev->enhanced_stats_en && !pdev->tx_sniffer_enable && !pdev->mcopy_mode) {
7634 					dp_h2t_cfg_stats_msg_send(pdev, 0,
7635 								mac_for_pdev);
7636 				} else if (pdev->tx_sniffer_enable || pdev->mcopy_mode) {
7637 					dp_h2t_cfg_stats_msg_send(pdev, DP_PPDU_STATS_CFG_SNIFFER,
7638 								mac_for_pdev);
7639 				} else if (pdev->enhanced_stats_en) {
7640 					dp_h2t_cfg_stats_msg_send(pdev, DP_PPDU_STATS_CFG_ENH_STATS,
7641 								mac_for_pdev);
7642 				}
7643 			}
7644 
7645 			break;
7646 		default:
7647 			/* Nothing needs to be done for other pktlog types */
7648 			break;
7649 		}
7650 	}
7651 	return 0;
7652 }
7653 #endif
7654 
7655 #ifdef CONFIG_MCL
7656 /*
7657  * dp_service_mon_rings() - timer handler to reap the monitor rings;
7658  * required as we are not getting ppdu end interrupts
7659  * @arg: SoC Handle
7660  *
7661  * Return: none
7663  */
7664 static void dp_service_mon_rings(void *arg)
7665 {
7666 	struct dp_soc *soc = (struct dp_soc *) arg;
7667 	int ring = 0, work_done, mac_id;
7668 	struct dp_pdev *pdev = NULL;
7669 
7670 	for  (ring = 0 ; ring < MAX_PDEV_CNT; ring++) {
7671 		pdev = soc->pdev_list[ring];
7672 		if (pdev == NULL)
7673 			continue;
7674 		for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
7675 			int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id,
7676 								pdev->pdev_id);
7677 			work_done = dp_mon_process(soc, mac_for_pdev,
7678 							QCA_NAPI_BUDGET);
7679 
7680 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
7681 				FL("Reaped %d descs from Monitor rings"),
7682 				work_done);
7683 		}
7684 	}
7685 
7686 	qdf_timer_mod(&soc->mon_reap_timer, DP_INTR_POLL_TIMER_MS);
7687 }
7688 
7689 #ifndef REMOVE_PKT_LOG
7690 /**
7691  * dp_pkt_log_init() - API to initialize packet log
7692  * @ppdev: physical device handle
7693  * @scn: HIF context
7694  *
7695  * Return: none
7696  */
7697 void dp_pkt_log_init(struct cdp_pdev *ppdev, void *scn)
7698 {
7699 	struct dp_pdev *handle = (struct dp_pdev *)ppdev;
7700 
7701 	if (handle->pkt_log_init) {
7702 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
7703 			 "%s: Packet log already initialized", __func__);
7704 		return;
7705 	}
7706 
7707 	pktlog_sethandle(&handle->pl_dev, scn);
7708 	pktlog_set_callback_regtype(PKTLOG_LITE_CALLBACK_REGISTRATION);
7709 
7710 	if (pktlogmod_init(scn)) {
7711 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
7712 			 "%s: pktlogmod_init failed", __func__);
7713 		handle->pkt_log_init = false;
7714 	} else {
7715 		handle->pkt_log_init = true;
7716 	}
7717 }
7718 
7719 /**
7720  * dp_pkt_log_con_service() - connect packet log service
7721  * @ppdev: physical device handle
7722  * @scn: device context
7723  *
7724  * Return: none
7725  */
7726 static void dp_pkt_log_con_service(struct cdp_pdev *ppdev, void *scn)
7727 {
7728 	struct dp_pdev *pdev = (struct dp_pdev *)ppdev;
7729 
7730 	dp_pkt_log_init((struct cdp_pdev *)pdev, scn);
7731 	pktlog_htc_attach();
7732 }
7733 
7734 /**
7735  * dp_pktlogmod_exit() - API to cleanup pktlog info
7736  * @handle: Pdev handle
7737  *
7738  * Return: none
7739  */
7740 static void dp_pktlogmod_exit(struct dp_pdev *handle)
7741 {
7742 	void *scn = (void *)handle->soc->hif_handle;
7743 
7744 	if (!scn) {
7745 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
7746 			 "%s: Invalid hif(scn) handle", __func__);
7747 		return;
7748 	}
7749 
7750 	pktlogmod_exit(scn);
7751 	handle->pkt_log_init = false;
7752 }
7753 #endif
7754 #else
7755 static void dp_pktlogmod_exit(struct dp_pdev *handle) { }
7756 #endif
7757 
7758