xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/dp_main.c (revision 3149adf58a329e17232a4c0e58d460d025edd55a)
1 /*
2  * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include <qdf_types.h>
20 #include <qdf_lock.h>
21 #include <qdf_net_types.h>
22 #include <qdf_lro.h>
23 #include <qdf_module.h>
24 #include <hal_api.h>
25 #include <hif.h>
26 #include <htt.h>
27 #include <wdi_event.h>
28 #include <queue.h>
29 #include "dp_htt.h"
30 #include "dp_types.h"
31 #include "dp_internal.h"
32 #include "dp_tx.h"
33 #include "dp_tx_desc.h"
34 #include "dp_rx.h"
35 #include <cdp_txrx_handle.h>
36 #include <wlan_cfg.h>
37 #include "cdp_txrx_cmn_struct.h"
38 #include "cdp_txrx_stats_struct.h"
39 #include <qdf_util.h>
40 #include "dp_peer.h"
41 #include "dp_rx_mon.h"
42 #include "htt_stats.h"
43 #include "qdf_mem.h"   /* qdf_mem_malloc,free */
44 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
45 #include "cdp_txrx_flow_ctrl_v2.h"
46 #else
47 static inline void
48 cdp_dump_flow_pool_info(struct cdp_soc_t *soc)
49 {
50 	/* no-op stub when flow control V2 is not compiled in */
51 }
52 #endif
53 #include "dp_ipa.h"
54 
55 #ifdef CONFIG_MCL
56 static void dp_service_mon_rings(void *arg);
57 #ifndef REMOVE_PKT_LOG
58 #include <pktlog_ac_api.h>
59 #include <pktlog_ac.h>
60 static void dp_pkt_log_con_service(struct cdp_pdev *ppdev, void *scn);
61 #endif
62 #endif
63 static void dp_pktlogmod_exit(struct dp_pdev *handle);
64 static void *dp_peer_create_wifi3(struct cdp_vdev *vdev_handle,
65 					uint8_t *peer_mac_addr);
66 static void dp_peer_delete_wifi3(void *peer_handle, uint32_t bitmap);
67 
68 #define DP_INTR_POLL_TIMER_MS	10
69 #define DP_WDS_AGING_TIMER_DEFAULT_MS	120000
70 #define DP_MCS_LENGTH (6*MAX_MCS)
71 #define DP_NSS_LENGTH (6*SS_COUNT)
72 #define DP_RXDMA_ERR_LENGTH (6*HAL_RXDMA_ERR_MAX)
73 #define DP_REO_ERR_LENGTH (6*HAL_REO_ERR_MAX)
74 #define DP_MAX_MCS_STRING_LEN 30
75 #define DP_CURR_FW_STATS_AVAIL 19
76 #define DP_HTT_DBG_EXT_STATS_MAX 256
77 #define DP_MAX_SLEEP_TIME 100
78 
79 #ifdef IPA_OFFLOAD
80 /* Exclude IPA-serviced rings (Tx comp ring 2, REO dest ring 3) from the interrupt context */
81 #define TX_RING_MASK_VAL	0xb
82 #define RX_RING_MASK_VAL	0x7
83 #else
84 #define TX_RING_MASK_VAL	0xF
85 #define RX_RING_MASK_VAL	0xF
86 #endif
87 
88 bool rx_hash = 1;
89 qdf_declare_param(rx_hash, bool);
90 
91 #define STR_MAXLEN	64
92 
93 #define DP_PPDU_STATS_CFG_ALL 0xFFFF
94 
95 /* PPDU stats mask sent to FW to enable enhanced stats */
96 #define DP_PPDU_STATS_CFG_ENH_STATS 0xE67
97 /* PPDU stats mask sent to FW to support debug sniffer feature */
98 #define DP_PPDU_STATS_CFG_SNIFFER 0x2FFF
99 /**
100  * default_dscp_tid_map - Default DSCP-TID mapping
101  *
102  * DSCP        TID
103  * 000000      0
104  * 001000      1
105  * 010000      2
106  * 011000      3
107  * 100000      4
108  * 101000      5
109  * 110000      6
110  * 111000      7
111  */
112 static uint8_t default_dscp_tid_map[DSCP_TID_MAP_MAX] = {
113 	0, 0, 0, 0, 0, 0, 0, 0,
114 	1, 1, 1, 1, 1, 1, 1, 1,
115 	2, 2, 2, 2, 2, 2, 2, 2,
116 	3, 3, 3, 3, 3, 3, 3, 3,
117 	4, 4, 4, 4, 4, 4, 4, 4,
118 	5, 5, 5, 5, 5, 5, 5, 5,
119 	6, 6, 6, 6, 6, 6, 6, 6,
120 	7, 7, 7, 7, 7, 7, 7, 7,
121 };
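
/*
 * Illustrative sketch only (the helper below is hypothetical, not part of
 * the datapath): the map is indexed by the 6-bit DSCP value, e.g. taken
 * from the upper six bits of the IPv4 TOS byte, so the default table
 * above yields TID = DSCP >> 3.
 */
static inline uint8_t dp_example_dscp_to_tid(uint8_t tos)
{
	uint8_t dscp = (tos >> 2) & 0x3f;	/* DSCP: upper 6 bits of TOS */

	return default_dscp_tid_map[dscp];
}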
122 
123 /*
124  * struct dp_rate_debug
125  *
126  * @mcs_type: print string for a given mcs
127  * @valid: valid mcs rate?
128  */
129 struct dp_rate_debug {
130 	char mcs_type[DP_MAX_MCS_STRING_LEN];
131 	uint8_t valid;
132 };
133 
134 #define MCS_VALID 1
135 #define MCS_INVALID 0
136 
137 static const struct dp_rate_debug dp_rate_string[DOT11_MAX][MAX_MCS] = {
138 
139 	{
140 		{"OFDM 48 Mbps", MCS_VALID},
141 		{"OFDM 24 Mbps", MCS_VALID},
142 		{"OFDM 12 Mbps", MCS_VALID},
143 		{"OFDM 6 Mbps ", MCS_VALID},
144 		{"OFDM 54 Mbps", MCS_VALID},
145 		{"OFDM 36 Mbps", MCS_VALID},
146 		{"OFDM 18 Mbps", MCS_VALID},
147 		{"OFDM 9 Mbps ", MCS_VALID},
148 		{"INVALID ", MCS_INVALID},
149 		{"INVALID ", MCS_INVALID},
150 		{"INVALID ", MCS_INVALID},
151 		{"INVALID ", MCS_INVALID},
152 		{"INVALID ", MCS_VALID},
153 	},
154 	{
155 		{"CCK 11 Mbps Long  ", MCS_VALID},
156 		{"CCK 5.5 Mbps Long ", MCS_VALID},
157 		{"CCK 2 Mbps Long   ", MCS_VALID},
158 		{"CCK 1 Mbps Long   ", MCS_VALID},
159 		{"CCK 11 Mbps Short ", MCS_VALID},
160 		{"CCK 5.5 Mbps Short", MCS_VALID},
161 		{"CCK 2 Mbps Short  ", MCS_VALID},
162 		{"INVALID ", MCS_INVALID},
163 		{"INVALID ", MCS_INVALID},
164 		{"INVALID ", MCS_INVALID},
165 		{"INVALID ", MCS_INVALID},
166 		{"INVALID ", MCS_INVALID},
167 		{"INVALID ", MCS_VALID},
168 	},
169 	{
170 		{"HT MCS 0 (BPSK 1/2)  ", MCS_VALID},
171 		{"HT MCS 1 (QPSK 1/2)  ", MCS_VALID},
172 		{"HT MCS 2 (QPSK 3/4)  ", MCS_VALID},
173 		{"HT MCS 3 (16-QAM 1/2)", MCS_VALID},
174 		{"HT MCS 4 (16-QAM 3/4)", MCS_VALID},
175 		{"HT MCS 5 (64-QAM 2/3)", MCS_VALID},
176 		{"HT MCS 6 (64-QAM 3/4)", MCS_VALID},
177 		{"HT MCS 7 (64-QAM 5/6)", MCS_VALID},
178 		{"INVALID ", MCS_INVALID},
179 		{"INVALID ", MCS_INVALID},
180 		{"INVALID ", MCS_INVALID},
181 		{"INVALID ", MCS_INVALID},
182 		{"INVALID ", MCS_VALID},
183 	},
184 	{
185 		{"VHT MCS 0 (BPSK 1/2)     ", MCS_VALID},
186 		{"VHT MCS 1 (QPSK 1/2)     ", MCS_VALID},
187 		{"VHT MCS 2 (QPSK 3/4)     ", MCS_VALID},
188 		{"VHT MCS 3 (16-QAM 1/2)   ", MCS_VALID},
189 		{"VHT MCS 4 (16-QAM 3/4)   ", MCS_VALID},
190 		{"VHT MCS 5 (64-QAM 2/3)   ", MCS_VALID},
191 		{"VHT MCS 6 (64-QAM 3/4)   ", MCS_VALID},
192 		{"VHT MCS 7 (64-QAM 5/6)   ", MCS_VALID},
193 		{"VHT MCS 8 (256-QAM 3/4)  ", MCS_VALID},
194 		{"VHT MCS 9 (256-QAM 5/6)  ", MCS_VALID},
195 		{"VHT MCS 10 (1024-QAM 3/4)", MCS_VALID},
196 		{"VHT MCS 11 (1024-QAM 5/6)", MCS_VALID},
197 		{"INVALID ", MCS_VALID},
198 	},
199 	{
200 		{"HE MCS 0 (BPSK 1/2)     ", MCS_VALID},
201 		{"HE MCS 1 (QPSK 1/2)     ", MCS_VALID},
202 		{"HE MCS 2 (QPSK 3/4)     ", MCS_VALID},
203 		{"HE MCS 3 (16-QAM 1/2)   ", MCS_VALID},
204 		{"HE MCS 4 (16-QAM 3/4)   ", MCS_VALID},
205 		{"HE MCS 5 (64-QAM 2/3)   ", MCS_VALID},
206 		{"HE MCS 6 (64-QAM 3/4)   ", MCS_VALID},
207 		{"HE MCS 7 (64-QAM 5/6)   ", MCS_VALID},
208 		{"HE MCS 8 (256-QAM 3/4)  ", MCS_VALID},
209 		{"HE MCS 9 (256-QAM 5/6)  ", MCS_VALID},
210 		{"HE MCS 10 (1024-QAM 3/4)", MCS_VALID},
211 		{"HE MCS 11 (1024-QAM 5/6)", MCS_VALID},
212 		{"INVALID ", MCS_VALID},
213 	}
214 };
215 
216 /**
217  * @brief CPU ring map types
218  */
219 enum dp_cpu_ring_map_types {
220 	DP_DEFAULT_MAP,
221 	DP_NSS_FIRST_RADIO_OFFLOADED_MAP,
222 	DP_NSS_SECOND_RADIO_OFFLOADED_MAP,
223 	DP_NSS_ALL_RADIO_OFFLOADED_MAP,
224 	DP_CPU_RING_MAP_MAX
225 };
226 
227 /**
228  * @brief CPU to Tx ring map
229  */
230 static uint8_t dp_cpu_ring_map[DP_CPU_RING_MAP_MAX][WLAN_CFG_INT_NUM_CONTEXTS] = {
231 	{0x0, 0x1, 0x2, 0x0},
232 	{0x1, 0x2, 0x1, 0x2},
233 	{0x0, 0x2, 0x0, 0x2},
234 	{0x2, 0x2, 0x2, 0x2}
235 };
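
/*
 * Usage sketch (hypothetical helper, illustration only): a row of
 * dp_cpu_ring_map is chosen from the NSS offload configuration and the
 * interrupt context index selects the TCL Tx ring within that row; see
 * dp_soc_reset_cpu_ring_map() further below for the real selection logic.
 */
static inline uint8_t
dp_example_tx_ring_for_ctx(enum dp_cpu_ring_map_types map_type, int intr_ctx)
{
	return dp_cpu_ring_map[map_type][intr_ctx % WLAN_CFG_INT_NUM_CONTEXTS];
}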
236 
237 /**
238  * @brief Select the type of statistics
239  */
240 enum dp_stats_type {
241 	STATS_FW = 0,
242 	STATS_HOST = 1,
243 	STATS_TYPE_MAX = 2,
244 };
245 
246 /**
247  * @brief General Firmware statistics options
248  *
249  */
250 enum dp_fw_stats {
251 	TXRX_FW_STATS_INVALID	= -1,
252 };
253 
254 /**
255  * dp_stats_mapping_table - Firmware and Host statistics
256  * currently supported
257  */
258 const int dp_stats_mapping_table[][STATS_TYPE_MAX] = {
259 	{HTT_DBG_EXT_STATS_RESET, TXRX_HOST_STATS_INVALID},
260 	{HTT_DBG_EXT_STATS_PDEV_TX, TXRX_HOST_STATS_INVALID},
261 	{HTT_DBG_EXT_STATS_PDEV_RX, TXRX_HOST_STATS_INVALID},
262 	{HTT_DBG_EXT_STATS_PDEV_TX_HWQ, TXRX_HOST_STATS_INVALID},
263 	{HTT_DBG_EXT_STATS_PDEV_TX_SCHED, TXRX_HOST_STATS_INVALID},
264 	{HTT_DBG_EXT_STATS_PDEV_ERROR, TXRX_HOST_STATS_INVALID},
265 	{HTT_DBG_EXT_STATS_PDEV_TQM, TXRX_HOST_STATS_INVALID},
266 	{HTT_DBG_EXT_STATS_TQM_CMDQ, TXRX_HOST_STATS_INVALID},
267 	{HTT_DBG_EXT_STATS_TX_DE_INFO, TXRX_HOST_STATS_INVALID},
268 	{HTT_DBG_EXT_STATS_PDEV_TX_RATE, TXRX_HOST_STATS_INVALID},
269 	{HTT_DBG_EXT_STATS_PDEV_RX_RATE, TXRX_HOST_STATS_INVALID},
270 	{TXRX_FW_STATS_INVALID, TXRX_HOST_STATS_INVALID},
271 	{HTT_DBG_EXT_STATS_TX_SELFGEN_INFO, TXRX_HOST_STATS_INVALID},
272 	{HTT_DBG_EXT_STATS_TX_MU_HWQ, TXRX_HOST_STATS_INVALID},
273 	{HTT_DBG_EXT_STATS_RING_IF_INFO, TXRX_HOST_STATS_INVALID},
274 	{HTT_DBG_EXT_STATS_SRNG_INFO, TXRX_HOST_STATS_INVALID},
275 	{HTT_DBG_EXT_STATS_SFM_INFO, TXRX_HOST_STATS_INVALID},
276 	{HTT_DBG_EXT_STATS_PDEV_TX_MU, TXRX_HOST_STATS_INVALID},
277 	{HTT_DBG_EXT_STATS_ACTIVE_PEERS_LIST, TXRX_HOST_STATS_INVALID},
278 	/* Last ENUM for HTT FW STATS */
279 	{DP_HTT_DBG_EXT_STATS_MAX, TXRX_HOST_STATS_INVALID},
280 	{TXRX_FW_STATS_INVALID, TXRX_CLEAR_STATS},
281 	{TXRX_FW_STATS_INVALID, TXRX_RX_RATE_STATS},
282 	{TXRX_FW_STATS_INVALID, TXRX_TX_RATE_STATS},
283 	{TXRX_FW_STATS_INVALID, TXRX_TX_HOST_STATS},
284 	{TXRX_FW_STATS_INVALID, TXRX_RX_HOST_STATS},
285 	{TXRX_FW_STATS_INVALID, TXRX_AST_STATS},
286 	{TXRX_FW_STATS_INVALID, TXRX_SRNG_PTR_STATS},
287 };
288 
289 static int dp_peer_add_ast_wifi3(struct cdp_soc_t *soc_hdl,
290 					struct cdp_peer *peer_hdl,
291 					uint8_t *mac_addr,
292 					enum cdp_txrx_ast_entry_type type,
293 					uint32_t flags)
294 {
295 
296 	return dp_peer_add_ast((struct dp_soc *)soc_hdl,
297 				(struct dp_peer *)peer_hdl,
298 				mac_addr,
299 				type,
300 				flags);
301 }
302 
303 static void dp_peer_del_ast_wifi3(struct cdp_soc_t *soc_hdl,
304 					 void *ast_entry_hdl)
305 {
306 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
307 	qdf_spin_lock_bh(&soc->ast_lock);
308 	dp_peer_del_ast((struct dp_soc *)soc_hdl,
309 			(struct dp_ast_entry *)ast_entry_hdl);
310 	qdf_spin_unlock_bh(&soc->ast_lock);
311 }
312 
313 static int dp_peer_update_ast_wifi3(struct cdp_soc_t *soc_hdl,
314 						struct cdp_peer *peer_hdl,
315 						void *ast_entry_hdl,
316 						uint32_t flags)
317 {
318 	int status;
319 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
320 	qdf_spin_lock_bh(&soc->ast_lock);
321 	status = dp_peer_update_ast(soc,
322 					(struct dp_peer *)peer_hdl,
323 					(struct dp_ast_entry *)ast_entry_hdl,
324 					flags);
325 	qdf_spin_unlock_bh(&soc->ast_lock);
326 	return status;
327 }
328 
329 static void *dp_peer_ast_hash_find_wifi3(struct cdp_soc_t *soc_hdl,
330 						uint8_t *ast_mac_addr)
331 {
332 	struct dp_ast_entry *ast_entry;
333 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
334 	qdf_spin_lock_bh(&soc->ast_lock);
335 	ast_entry = dp_peer_ast_hash_find(soc, ast_mac_addr);
336 	qdf_spin_unlock_bh(&soc->ast_lock);
337 	return (void *)ast_entry;
338 }
339 
340 static uint8_t dp_peer_ast_get_pdev_id_wifi3(struct cdp_soc_t *soc_hdl,
341 							void *ast_entry_hdl)
342 {
343 	return dp_peer_ast_get_pdev_id((struct dp_soc *)soc_hdl,
344 					(struct dp_ast_entry *)ast_entry_hdl);
345 }
346 
347 static uint8_t dp_peer_ast_get_next_hop_wifi3(struct cdp_soc_t *soc_hdl,
348 							void *ast_entry_hdl)
349 {
350 	return dp_peer_ast_get_next_hop((struct dp_soc *)soc_hdl,
351 					(struct dp_ast_entry *)ast_entry_hdl);
352 }
353 
354 static void dp_peer_ast_set_type_wifi3(
355 					struct cdp_soc_t *soc_hdl,
356 					void *ast_entry_hdl,
357 					enum cdp_txrx_ast_entry_type type)
358 {
359 	dp_peer_ast_set_type((struct dp_soc *)soc_hdl,
360 				(struct dp_ast_entry *)ast_entry_hdl,
361 				type);
362 }
363 
364 
365 
366 /**
367  * dp_srng_find_ring_in_mask() - find which ext_group a ring belongs
368  * @ring_num: ring num of the ring being queried
369  * @grp_mask: the grp_mask array for the ring type in question.
370  *
371  * The grp_mask array is indexed by group number and the bit fields correspond
372  * to ring numbers.  We are finding which interrupt group a ring belongs to.
373  *
374  * Return: the index in the grp_mask array with the ring number.
375  * -QDF_STATUS_E_NOENT if no entry is found
376  */
377 static int dp_srng_find_ring_in_mask(int ring_num, int *grp_mask)
378 {
379 	int ext_group_num;
380 	int mask = 1 << ring_num;
381 
382 	for (ext_group_num = 0; ext_group_num < WLAN_CFG_INT_NUM_CONTEXTS;
383 	     ext_group_num++) {
384 		if (mask & grp_mask[ext_group_num])
385 			return ext_group_num;
386 	}
387 
388 	return -QDF_STATUS_E_NOENT;
389 }
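
/*
 * Worked example (illustrative values): with grp_mask = {0x3, 0xc, 0x0, 0x0},
 * a query for ring_num = 2 computes mask = 1 << 2 = 0x4, which is set only
 * in grp_mask[1], so the ring is serviced by ext interrupt group 1.
 */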
390 
391 static int dp_srng_calculate_msi_group(struct dp_soc *soc,
392 				       enum hal_ring_type ring_type,
393 				       int ring_num)
394 {
395 	int *grp_mask;
396 
397 	switch (ring_type) {
398 	case WBM2SW_RELEASE:
399 		/* dp_tx_comp_handler - soc->tx_comp_ring */
400 		if (ring_num < 3)
401 			grp_mask = &soc->wlan_cfg_ctx->int_tx_ring_mask[0];
402 
403 		/* dp_rx_wbm_err_process - soc->rx_rel_ring */
404 		else if (ring_num == 3) {
405 			/* sw treats this as a separate ring type */
406 			grp_mask = &soc->wlan_cfg_ctx->
407 				int_rx_wbm_rel_ring_mask[0];
408 			ring_num = 0;
409 		} else {
410 			qdf_assert(0);
411 			return -QDF_STATUS_E_NOENT;
412 		}
413 	break;
414 
415 	case REO_EXCEPTION:
416 		/* dp_rx_err_process - &soc->reo_exception_ring */
417 		grp_mask = &soc->wlan_cfg_ctx->int_rx_err_ring_mask[0];
418 	break;
419 
420 	case REO_DST:
421 		/* dp_rx_process - soc->reo_dest_ring */
422 		grp_mask = &soc->wlan_cfg_ctx->int_rx_ring_mask[0];
423 	break;
424 
425 	case REO_STATUS:
426 		/* dp_reo_status_ring_handler - soc->reo_status_ring */
427 		grp_mask = &soc->wlan_cfg_ctx->int_reo_status_ring_mask[0];
428 	break;
429 
430 	/* dp_rx_mon_status_srng_process - pdev->rxdma_mon_status_ring*/
431 	case RXDMA_MONITOR_STATUS:
432 	/* dp_rx_mon_dest_process - pdev->rxdma_mon_dst_ring */
433 	case RXDMA_MONITOR_DST:
434 		/* dp_mon_process */
435 		grp_mask = &soc->wlan_cfg_ctx->int_rx_mon_ring_mask[0];
436 	break;
437 	case RXDMA_DST:
438 		/* dp_rxdma_err_process */
439 		grp_mask = &soc->wlan_cfg_ctx->int_rxdma2host_ring_mask[0];
440 	break;
441 
442 	case RXDMA_BUF:
443 		grp_mask = &soc->wlan_cfg_ctx->int_host2rxdma_ring_mask[0];
444 	break;
445 
446 	case RXDMA_MONITOR_BUF:
447 		/* TODO: support low_thresh interrupt */
448 		return -QDF_STATUS_E_NOENT;
449 	break;
450 
451 	case TCL_DATA:
452 	case TCL_CMD:
453 	case REO_CMD:
454 	case SW2WBM_RELEASE:
455 	case WBM_IDLE_LINK:
456 		/* normally empty SW_TO_HW rings */
457 		return -QDF_STATUS_E_NOENT;
458 	break;
459 
460 	case TCL_STATUS:
461 	case REO_REINJECT:
462 		/* misc unused rings */
463 		return -QDF_STATUS_E_NOENT;
464 	break;
465 
466 	case CE_SRC:
467 	case CE_DST:
468 	case CE_DST_STATUS:
469 		/* CE_rings - currently handled by hif */
470 	default:
471 		return -QDF_STATUS_E_NOENT;
472 	break;
473 	}
474 
475 	return dp_srng_find_ring_in_mask(ring_num, grp_mask);
476 }
477 
478 static void dp_srng_msi_setup(struct dp_soc *soc, struct hal_srng_params
479 			      *ring_params, int ring_type, int ring_num)
480 {
481 	int msi_group_number;
482 	int msi_data_count;
483 	int ret;
484 	uint32_t msi_data_start, msi_irq_start, addr_low, addr_high;
485 
486 	ret = pld_get_user_msi_assignment(soc->osdev->dev, "DP",
487 					    &msi_data_count, &msi_data_start,
488 					    &msi_irq_start);
489 
490 	if (ret)
491 		return;
492 
493 	msi_group_number = dp_srng_calculate_msi_group(soc, ring_type,
494 						       ring_num);
495 	if (msi_group_number < 0) {
496 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW,
497 			FL("ring not part of an ext_group; ring_type: %d, ring_num %d"),
498 			ring_type, ring_num);
499 		ring_params->msi_addr = 0;
500 		ring_params->msi_data = 0;
501 		return;
502 	}
503 
504 	if (msi_group_number >= msi_data_count) {
505 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_WARN,
506 			FL("2 msi_groups will share an msi; msi_group_num %d"),
507 			msi_group_number);
508 
509 		QDF_ASSERT(0);
510 	}
511 
512 	pld_get_msi_address(soc->osdev->dev, &addr_low, &addr_high);
513 
514 	ring_params->msi_addr = addr_low;
515 	ring_params->msi_addr |= (qdf_dma_addr_t)(((uint64_t)addr_high) << 32);
516 	ring_params->msi_data = (msi_group_number % msi_data_count)
517 		+ msi_data_start;
518 	ring_params->flags |= HAL_SRNG_MSI_INTR;
519 }
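
/*
 * Worked example (illustrative values): assume pld_get_user_msi_assignment()
 * reports msi_data_count = 2 and msi_data_start = 1. A ring that falls in
 * ext group 3 then gets msi_data = (3 % 2) + 1 = 2, i.e. interrupt groups
 * are folded round-robin onto the available MSI data values.
 */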
520 
521 /**
522  * dp_print_ast_stats() - Dump AST table contents
523  * @soc: Datapath soc handle
524  *
525  * Return: void
526  */
527 #ifdef FEATURE_WDS
528 static void dp_print_ast_stats(struct dp_soc *soc)
529 {
530 	uint8_t i;
531 	uint8_t num_entries = 0;
532 	struct dp_vdev *vdev;
533 	struct dp_pdev *pdev;
534 	struct dp_peer *peer;
535 	struct dp_ast_entry *ase, *tmp_ase;
536 
537 	DP_PRINT_STATS("AST Stats:");
538 	DP_PRINT_STATS("	Entries Added   = %d", soc->stats.ast.added);
539 	DP_PRINT_STATS("	Entries Deleted = %d", soc->stats.ast.deleted);
540 	DP_PRINT_STATS("	Entries Agedout = %d", soc->stats.ast.aged_out);
541 	DP_PRINT_STATS("AST Table:");
542 	for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) {
543 		pdev = soc->pdev_list[i];
544 		DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
545 			DP_VDEV_ITERATE_PEER_LIST(vdev, peer) {
546 				DP_PEER_ITERATE_ASE_LIST(peer, ase, tmp_ase) {
547 					DP_PRINT_STATS("%6d mac_addr = %pM"
548 							" peer_mac_addr = %pM"
549 							" type = %d"
550 							" next_hop = %d"
551 							" is_active = %d"
552 							" is_bss = %d"
553 							" ast_idx = %d"
554 							" pdev_id = %d"
555 							" vdev_id = %d",
556 							++num_entries,
557 							ase->mac_addr.raw,
558 							ase->peer->mac_addr.raw,
559 							ase->type,
560 							ase->next_hop,
561 							ase->is_active,
562 							ase->is_bss,
563 							ase->ast_idx,
564 							ase->pdev_id,
565 							ase->vdev_id);
566 				}
567 			}
568 		}
569 	}
570 }
571 #else
572 static void dp_print_ast_stats(struct dp_soc *soc)
573 {
574 	DP_PRINT_STATS("AST Stats not available. Enable FEATURE_WDS");
575 	return;
576 }
577 #endif
578 
579 /*
580  * dp_srng_setup() - Internal function to set up SRNG rings used by the data path
581  */
582 static int dp_srng_setup(struct dp_soc *soc, struct dp_srng *srng,
583 	int ring_type, int ring_num, int mac_id, uint32_t num_entries)
584 {
585 	void *hal_soc = soc->hal_soc;
586 	uint32_t entry_size = hal_srng_get_entrysize(hal_soc, ring_type);
587 	/* TODO: See if we should get align size from hal */
588 	uint32_t ring_base_align = 8;
589 	struct hal_srng_params ring_params;
590 	uint32_t max_entries = hal_srng_max_entries(hal_soc, ring_type);
591 
592 	/* TODO: Currently hal layer takes care of endianness related settings.
593 	 * See if these settings need to passed from DP layer
594 	 */
595 	ring_params.flags = 0;
596 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW,
597 		  FL("Ring type: %d, num:%d"), ring_type, ring_num);
598 
599 	num_entries = (num_entries > max_entries) ? max_entries : num_entries;
600 	srng->hal_srng = NULL;
601 	srng->alloc_size = (num_entries * entry_size) + ring_base_align - 1;
602 	srng->num_entries = num_entries;
603 	srng->base_vaddr_unaligned = qdf_mem_alloc_consistent(
604 		soc->osdev, soc->osdev->dev, srng->alloc_size,
605 		&(srng->base_paddr_unaligned));
606 
607 	if (!srng->base_vaddr_unaligned) {
608 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
609 			FL("alloc failed - ring_type: %d, ring_num %d"),
610 			ring_type, ring_num);
611 		return QDF_STATUS_E_NOMEM;
612 	}
613 	/* round the ring base address up to ring_base_align */
614 	ring_params.ring_base_vaddr = srng->base_vaddr_unaligned +
615 		(-(unsigned long)srng->base_vaddr_unaligned % ring_base_align);
616 	ring_params.ring_base_paddr = srng->base_paddr_unaligned +
617 		((unsigned long)(ring_params.ring_base_vaddr) -
618 		(unsigned long)srng->base_vaddr_unaligned);
619 	ring_params.num_entries = num_entries;
620 
621 	if (soc->intr_mode == DP_INTR_MSI) {
622 		dp_srng_msi_setup(soc, &ring_params, ring_type, ring_num);
623 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
624 			FL("Using MSI for ring_type: %d, ring_num %d"),
625 			ring_type, ring_num);
626 
627 	} else {
628 		ring_params.msi_data = 0;
629 		ring_params.msi_addr = 0;
630 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
631 			FL("Skipping MSI for ring_type: %d, ring_num %d"),
632 			ring_type, ring_num);
633 	}
634 
635 	/*
636 	 * Setup interrupt timer and batch counter thresholds for
637 	 * interrupt mitigation based on ring type
638 	 */
639 	if (ring_type == REO_DST) {
640 		ring_params.intr_timer_thres_us =
641 			wlan_cfg_get_int_timer_threshold_rx(soc->wlan_cfg_ctx);
642 		ring_params.intr_batch_cntr_thres_entries =
643 			wlan_cfg_get_int_batch_threshold_rx(soc->wlan_cfg_ctx);
644 	} else if (ring_type == WBM2SW_RELEASE && (ring_num < 3)) {
645 		ring_params.intr_timer_thres_us =
646 			wlan_cfg_get_int_timer_threshold_tx(soc->wlan_cfg_ctx);
647 		ring_params.intr_batch_cntr_thres_entries =
648 			wlan_cfg_get_int_batch_threshold_tx(soc->wlan_cfg_ctx);
649 	} else {
650 		ring_params.intr_timer_thres_us =
651 			wlan_cfg_get_int_timer_threshold_other(soc->wlan_cfg_ctx);
652 		ring_params.intr_batch_cntr_thres_entries =
653 			wlan_cfg_get_int_batch_threshold_other(soc->wlan_cfg_ctx);
654 	}
655 
656 	/* Enable low threshold interrupts for rx buffer rings (regular and
657 	 * monitor buffer rings).
658 	 * TODO: See if this is required for any other ring
659 	 */
660 	if ((ring_type == RXDMA_BUF) || (ring_type == RXDMA_MONITOR_BUF) ||
661 		(ring_type == RXDMA_MONITOR_STATUS)) {
662 		/* TODO: Setting low threshold to 1/8th of ring size
663 		 * see if this needs to be configurable
664 		 */
665 		ring_params.low_threshold = num_entries >> 3;
666 		ring_params.flags |= HAL_SRNG_LOW_THRES_INTR_ENABLE;
667 		ring_params.intr_timer_thres_us = 0x1000;
668 	}
669 
670 	srng->hal_srng = hal_srng_setup(hal_soc, ring_type, ring_num,
671 		mac_id, &ring_params);
672 
673 	if (!srng->hal_srng) {
674 		qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
675 				srng->alloc_size, srng->base_vaddr_unaligned,
676 				srng->base_paddr_unaligned, 0);
677 		return QDF_STATUS_E_FAILURE;
678 	}
679 
680 	return 0;
681 }
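
/*
 * Alignment sketch (illustrative values): with ring_base_align = 8 and an
 * unaligned base address of 0x1006, the round-up offset is
 * (8 - (0x1006 % 8)) % 8 = 2, giving an aligned base of 0x1008; alloc_size
 * reserves ring_base_align - 1 extra bytes so this offset always fits.
 */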
682 
683 /**
684  * dp_srng_cleanup - Internal function to cleanup SRNG rings used by data path
685  * Any buffers allocated and attached to ring entries are expected to be freed
686  * before calling this function.
687  */
688 static void dp_srng_cleanup(struct dp_soc *soc, struct dp_srng *srng,
689 	int ring_type, int ring_num)
690 {
691 	if (!srng->hal_srng) {
692 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
693 			FL("Ring type: %d, num:%d not setup"),
694 			ring_type, ring_num);
695 		return;
696 	}
697 
698 	hal_srng_cleanup(soc->hal_soc, srng->hal_srng);
699 
700 	qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
701 				srng->alloc_size,
702 				srng->base_vaddr_unaligned,
703 				srng->base_paddr_unaligned, 0);
704 	srng->hal_srng = NULL;
705 }
706 
707 /* TODO: Need this interface from HIF */
708 void *hif_get_hal_handle(void *hif_handle);
709 
710 /*
711  * dp_service_srngs() - Top level interrupt handler for DP Ring interrupts
712  * @dp_ctx: DP SOC handle
713  * @dp_budget: Number of frames/descriptors that can be processed in one shot
714  *
715  * Return: total work done, i.e. dp_budget minus the unconsumed budget
716  */
717 static uint32_t dp_service_srngs(void *dp_ctx, uint32_t dp_budget)
718 {
719 	struct dp_intr *int_ctx = (struct dp_intr *)dp_ctx;
720 	struct dp_soc *soc = int_ctx->soc;
721 	int ring = 0;
722 	uint32_t work_done  = 0;
723 	int budget = dp_budget;
724 	uint8_t tx_mask = int_ctx->tx_ring_mask;
725 	uint8_t rx_mask = int_ctx->rx_ring_mask;
726 	uint8_t rx_err_mask = int_ctx->rx_err_ring_mask;
727 	uint8_t rx_wbm_rel_mask = int_ctx->rx_wbm_rel_ring_mask;
728 	uint8_t reo_status_mask = int_ctx->reo_status_ring_mask;
729 	uint32_t remaining_quota = dp_budget;
730 	struct dp_pdev *pdev = NULL;
731 
732 	/* Process Tx completion interrupts first to return back buffers */
733 	while (tx_mask) {
734 		if (tx_mask & 0x1) {
735 			work_done = dp_tx_comp_handler(soc,
736 					soc->tx_comp_ring[ring].hal_srng,
737 					remaining_quota);
738 
739 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
740 				"tx mask 0x%x ring %d, budget %d, work_done %d",
741 				tx_mask, ring, budget, work_done);
742 
743 			budget -= work_done;
744 			if (budget <= 0)
745 				goto budget_done;
746 
747 			remaining_quota = budget;
748 		}
749 		tx_mask = tx_mask >> 1;
750 		ring++;
751 	}
752 
753 
754 	/* Process REO Exception ring interrupt */
755 	if (rx_err_mask) {
756 		work_done = dp_rx_err_process(soc,
757 				soc->reo_exception_ring.hal_srng,
758 				remaining_quota);
759 
760 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
761 			"REO Exception Ring: work_done %d budget %d",
762 			work_done, budget);
763 
764 		budget -=  work_done;
765 		if (budget <= 0) {
766 			goto budget_done;
767 		}
768 		remaining_quota = budget;
769 	}
770 
771 	/* Process Rx WBM release ring interrupt */
772 	if (rx_wbm_rel_mask) {
773 		work_done = dp_rx_wbm_err_process(soc,
774 				soc->rx_rel_ring.hal_srng, remaining_quota);
775 
776 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
777 			"WBM Release Ring: work_done %d budget %d",
778 			work_done, budget);
779 
780 		budget -=  work_done;
781 		if (budget <= 0) {
782 			goto budget_done;
783 		}
784 		remaining_quota = budget;
785 	}
786 
787 	/* Process Rx interrupts */
788 	if (rx_mask) {
789 		for (ring = 0; ring < soc->num_reo_dest_rings; ring++) {
790 			if (rx_mask & (1 << ring)) {
791 				work_done = dp_rx_process(int_ctx,
792 					    soc->reo_dest_ring[ring].hal_srng,
793 					    remaining_quota);
794 
795 				QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
796 					"rx mask 0x%x ring %d, work_done %d budget %d",
797 					rx_mask, ring, work_done, budget);
798 
799 				budget -=  work_done;
800 				if (budget <= 0)
801 					goto budget_done;
802 				remaining_quota = budget;
803 			}
804 		}
805 		for (ring = 0; ring < MAX_RX_MAC_RINGS; ring++) {
806 			/* TODO: check why this extra pass over all Rx MAC rings is required */
807 			work_done = dp_rxdma_err_process(soc, ring,
808 						remaining_quota);
809 			budget -= work_done;
810 		}
811 	}
812 
813 	if (reo_status_mask)
814 		dp_reo_status_ring_handler(soc);
815 
816 	/* Process LMAC interrupts */
817 	for  (ring = 0 ; ring < MAX_PDEV_CNT; ring++) {
818 		pdev = soc->pdev_list[ring];
819 		if (pdev == NULL)
820 			continue;
821 		if (int_ctx->rx_mon_ring_mask & (1 << ring)) {
822 			work_done = dp_mon_process(soc, ring, remaining_quota);
823 			budget -= work_done;
824 			if (budget <= 0)
825 				goto budget_done;
826 			remaining_quota = budget;
827 		}
828 
829 		if (int_ctx->rxdma2host_ring_mask & (1 << ring)) {
830 			work_done = dp_rxdma_err_process(soc, ring,
831 						remaining_quota);
832 			budget -=  work_done;
833 			if (budget <= 0)
834 				goto budget_done;
835 			remaining_quota = budget;
836 		}
837 
838 		if (int_ctx->host2rxdma_ring_mask & (1 << ring)) {
839 			union dp_rx_desc_list_elem_t *desc_list = NULL;
840 			union dp_rx_desc_list_elem_t *tail = NULL;
841 			struct dp_srng *rx_refill_buf_ring =
842 				&pdev->rx_refill_buf_ring;
843 
844 			DP_STATS_INC(pdev, replenish.low_thresh_intrs, 1);
845 			dp_rx_buffers_replenish(soc, ring,
846 				rx_refill_buf_ring,
847 				&soc->rx_desc_buf[ring], 0,
848 				&desc_list, &tail, HAL_RX_BUF_RBM_SW3_BM);
849 		}
850 	}
851 
852 	qdf_lro_flush(int_ctx->lro_ctx);
853 
854 budget_done:
855 	return dp_budget - budget;
856 }
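
/*
 * Budget accounting example (illustrative numbers): if dp_service_srngs()
 * is entered with dp_budget = 64, and Tx completion consumes 20 entries and
 * Rx processing 30, budget drops to 14 and the function returns
 * 64 - 14 = 50, the total work done, which the hif/NAPI layer can compare
 * against its own quota.
 */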
857 
858 #ifdef DP_INTR_POLL_BASED
859 /* dp_interrupt_timer() - timer-based poll for DP ring interrupts
860  *
861  * @arg: SoC Handle
862  *
863  * Return: void
864  *
865  */
866 static void dp_interrupt_timer(void *arg)
867 {
868 	struct dp_soc *soc = (struct dp_soc *) arg;
869 	int i;
870 
871 	if (qdf_atomic_read(&soc->cmn_init_done)) {
872 		for (i = 0;
873 			i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++)
874 			dp_service_srngs(&soc->intr_ctx[i], 0xffff);
875 
876 		qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
877 	}
878 }
879 
880 /*
881  * dp_soc_interrupt_attach_poll() - Register handlers for DP interrupts
882  * @txrx_soc: DP SOC handle
883  *
884  * Host driver will register for "DP_NUM_INTERRUPT_CONTEXTS" number of NAPI
885  * contexts. Each NAPI context will have a tx_ring_mask, rx_ring_mask, and
886  * rx_monitor_ring mask to indicate the rings that are processed by the handler.
887  *
888  * Return: 0 for success. nonzero for failure.
889  */
890 static QDF_STATUS dp_soc_interrupt_attach_poll(void *txrx_soc)
891 {
892 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
893 	int i;
894 
895 	soc->intr_mode = DP_INTR_POLL;
896 
897 	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
898 		soc->intr_ctx[i].dp_intr_id = i;
899 		soc->intr_ctx[i].tx_ring_mask =
900 			wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, i);
901 		soc->intr_ctx[i].rx_ring_mask =
902 			wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, i);
903 		soc->intr_ctx[i].rx_mon_ring_mask =
904 			wlan_cfg_get_rx_mon_ring_mask(soc->wlan_cfg_ctx, i);
905 		soc->intr_ctx[i].rx_err_ring_mask =
906 			wlan_cfg_get_rx_err_ring_mask(soc->wlan_cfg_ctx, i);
907 		soc->intr_ctx[i].rx_wbm_rel_ring_mask =
908 			wlan_cfg_get_rx_wbm_rel_ring_mask(soc->wlan_cfg_ctx, i);
909 		soc->intr_ctx[i].reo_status_ring_mask =
910 			wlan_cfg_get_reo_status_ring_mask(soc->wlan_cfg_ctx, i);
911 		soc->intr_ctx[i].rxdma2host_ring_mask =
912 			wlan_cfg_get_rxdma2host_ring_mask(soc->wlan_cfg_ctx, i);
913 		soc->intr_ctx[i].soc = soc;
914 		soc->intr_ctx[i].lro_ctx = qdf_lro_init();
915 	}
916 
917 	qdf_timer_init(soc->osdev, &soc->int_timer,
918 			dp_interrupt_timer, (void *)soc,
919 			QDF_TIMER_TYPE_WAKE_APPS);
920 
921 	return QDF_STATUS_SUCCESS;
922 }
923 
924 #if defined(CONFIG_MCL)
925 extern int con_mode_monitor;
926 static QDF_STATUS dp_soc_interrupt_attach(void *txrx_soc);
927 /*
928  * dp_soc_interrupt_attach_wrapper() - Register handlers for DP interrupts
929  * @txrx_soc: DP SOC handle
930  *
931  * Call the appropriate attach function based on the mode of operation.
932  * This is a WAR for enabling monitor mode.
933  *
934  * Return: 0 for success. nonzero for failure.
935  */
936 static QDF_STATUS dp_soc_interrupt_attach_wrapper(void *txrx_soc)
937 {
938 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
939 
940 	if (!(soc->wlan_cfg_ctx->napi_enabled) ||
941 	     con_mode_monitor == QDF_GLOBAL_MONITOR_MODE) {
942 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
943 				  "%s: Poll mode", __func__);
944 		return dp_soc_interrupt_attach_poll(txrx_soc);
945 	} else {
946 
947 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
948 				  "%s: Interrupt mode", __func__);
949 		return dp_soc_interrupt_attach(txrx_soc);
950 	}
951 }
952 #else
953 static QDF_STATUS dp_soc_interrupt_attach_wrapper(void *txrx_soc)
954 {
955 	return dp_soc_interrupt_attach_poll(txrx_soc);
956 }
957 #endif
958 #endif
959 
960 static void dp_soc_interrupt_map_calculate_integrated(struct dp_soc *soc,
961 		int intr_ctx_num, int *irq_id_map, int *num_irq_r)
962 {
963 	int j;
964 	int num_irq = 0;
965 
966 	int tx_mask =
967 		wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
968 	int rx_mask =
969 		wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
970 	int rx_mon_mask =
971 		wlan_cfg_get_rx_mon_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
972 	int rx_err_ring_mask = wlan_cfg_get_rx_err_ring_mask(
973 					soc->wlan_cfg_ctx, intr_ctx_num);
974 	int rx_wbm_rel_ring_mask = wlan_cfg_get_rx_wbm_rel_ring_mask(
975 					soc->wlan_cfg_ctx, intr_ctx_num);
976 	int reo_status_ring_mask = wlan_cfg_get_reo_status_ring_mask(
977 					soc->wlan_cfg_ctx, intr_ctx_num);
978 	int rxdma2host_ring_mask = wlan_cfg_get_rxdma2host_ring_mask(
979 					soc->wlan_cfg_ctx, intr_ctx_num);
980 	int host2rxdma_ring_mask = wlan_cfg_get_host2rxdma_ring_mask(
981 					soc->wlan_cfg_ctx, intr_ctx_num);
982 
983 	for (j = 0; j < HIF_MAX_GRP_IRQ; j++) {
984 
985 		if (tx_mask & (1 << j)) {
986 			irq_id_map[num_irq++] =
987 				(wbm2host_tx_completions_ring1 - j);
988 		}
989 
990 		if (rx_mask & (1 << j)) {
991 			irq_id_map[num_irq++] =
992 				(reo2host_destination_ring1 - j);
993 		}
994 
995 		if (rxdma2host_ring_mask & (1 << j)) {
996 			irq_id_map[num_irq++] =
997 				rxdma2host_destination_ring_mac1 -
998 				wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, j);
999 		}
1000 
1001 		if (host2rxdma_ring_mask & (1 << j)) {
1002 			irq_id_map[num_irq++] =
1003 				host2rxdma_host_buf_ring_mac1 -
1004 				wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, j);
1005 		}
1006 
1007 		if (rx_mon_mask & (1 << j)) {
1008 			irq_id_map[num_irq++] =
1009 				ppdu_end_interrupts_mac1 -
1010 				wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, j);
1011 			irq_id_map[num_irq++] =
1012 				rxdma2host_monitor_status_ring_mac1 -
1013 				wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, j);
1014 		}
1015 
1016 		if (rx_wbm_rel_ring_mask & (1 << j))
1017 			irq_id_map[num_irq++] = wbm2host_rx_release;
1018 
1019 		if (rx_err_ring_mask & (1 << j))
1020 			irq_id_map[num_irq++] = reo2host_exception;
1021 
1022 		if (reo_status_ring_mask & (1 << j))
1023 			irq_id_map[num_irq++] = reo2host_status;
1024 
1025 	}
1026 	*num_irq_r = num_irq;
1027 }
1028 
1029 static void dp_soc_interrupt_map_calculate_msi(struct dp_soc *soc,
1030 		int intr_ctx_num, int *irq_id_map, int *num_irq_r,
1031 		int msi_vector_count, int msi_vector_start)
1032 {
1033 	int tx_mask = wlan_cfg_get_tx_ring_mask(
1034 					soc->wlan_cfg_ctx, intr_ctx_num);
1035 	int rx_mask = wlan_cfg_get_rx_ring_mask(
1036 					soc->wlan_cfg_ctx, intr_ctx_num);
1037 	int rx_mon_mask = wlan_cfg_get_rx_mon_ring_mask(
1038 					soc->wlan_cfg_ctx, intr_ctx_num);
1039 	int rx_err_ring_mask = wlan_cfg_get_rx_err_ring_mask(
1040 					soc->wlan_cfg_ctx, intr_ctx_num);
1041 	int rx_wbm_rel_ring_mask = wlan_cfg_get_rx_wbm_rel_ring_mask(
1042 					soc->wlan_cfg_ctx, intr_ctx_num);
1043 	int reo_status_ring_mask = wlan_cfg_get_reo_status_ring_mask(
1044 					soc->wlan_cfg_ctx, intr_ctx_num);
1045 	int rxdma2host_ring_mask = wlan_cfg_get_rxdma2host_ring_mask(
1046 					soc->wlan_cfg_ctx, intr_ctx_num);
1047 
1048 	unsigned int vector =
1049 		(intr_ctx_num % msi_vector_count) + msi_vector_start;
1050 	int num_irq = 0;
1051 
1052 	soc->intr_mode = DP_INTR_MSI;
1053 
1054 	if (tx_mask | rx_mask | rx_mon_mask | rx_err_ring_mask |
1055 	    rx_wbm_rel_ring_mask | reo_status_ring_mask | rxdma2host_ring_mask)
1056 		irq_id_map[num_irq++] =
1057 			pld_get_msi_irq(soc->osdev->dev, vector);
1058 
1059 	*num_irq_r = num_irq;
1060 }
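
/*
 * Vector folding example (illustrative values): with msi_vector_count = 4
 * and msi_vector_start = 1, interrupt contexts 0..6 map to MSI vectors
 * 1, 2, 3, 4, 1, 2, 3 respectively via
 * (intr_ctx_num % msi_vector_count) + msi_vector_start.
 */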
1061 
1062 static void dp_soc_interrupt_map_calculate(struct dp_soc *soc, int intr_ctx_num,
1063 				    int *irq_id_map, int *num_irq)
1064 {
1065 	int msi_vector_count, ret;
1066 	uint32_t msi_base_data, msi_vector_start;
1067 
1068 	ret = pld_get_user_msi_assignment(soc->osdev->dev, "DP",
1069 					    &msi_vector_count,
1070 					    &msi_base_data,
1071 					    &msi_vector_start);
1072 	if (ret)
1073 		return dp_soc_interrupt_map_calculate_integrated(soc,
1074 				intr_ctx_num, irq_id_map, num_irq);
1075 
1076 	else
1077 		dp_soc_interrupt_map_calculate_msi(soc,
1078 				intr_ctx_num, irq_id_map, num_irq,
1079 				msi_vector_count, msi_vector_start);
1080 }
1081 
1082 /*
1083  * dp_soc_interrupt_attach() - Register handlers for DP interrupts
1084  * @txrx_soc: DP SOC handle
1085  *
1086  * Host driver will register for "DP_NUM_INTERRUPT_CONTEXTS" number of NAPI
1087  * contexts. Each NAPI context will have a tx_ring_mask, rx_ring_mask, and
1088  * rx_monitor_ring mask to indicate the rings that are processed by the handler.
1089  *
1090  * Return: 0 for success. nonzero for failure.
1091  */
1092 static QDF_STATUS dp_soc_interrupt_attach(void *txrx_soc)
1093 {
1094 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
1095 
1096 	int i = 0;
1097 	int num_irq = 0;
1098 
1099 	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
1100 		int ret = 0;
1101 
1102 		/* Map of IRQ ids registered with one interrupt context */
1103 		int irq_id_map[HIF_MAX_GRP_IRQ];
1104 
1105 		int tx_mask =
1106 			wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, i);
1107 		int rx_mask =
1108 			wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, i);
1109 		int rx_mon_mask =
1110 			wlan_cfg_get_rx_mon_ring_mask(soc->wlan_cfg_ctx, i);
1111 		int rx_err_ring_mask =
1112 			wlan_cfg_get_rx_err_ring_mask(soc->wlan_cfg_ctx, i);
1113 		int rx_wbm_rel_ring_mask =
1114 			wlan_cfg_get_rx_wbm_rel_ring_mask(soc->wlan_cfg_ctx, i);
1115 		int reo_status_ring_mask =
1116 			wlan_cfg_get_reo_status_ring_mask(soc->wlan_cfg_ctx, i);
1117 		int rxdma2host_ring_mask =
1118 			wlan_cfg_get_rxdma2host_ring_mask(soc->wlan_cfg_ctx, i);
1119 		int host2rxdma_ring_mask =
1120 			wlan_cfg_get_host2rxdma_ring_mask(soc->wlan_cfg_ctx, i);
1121 
1122 
1123 		soc->intr_ctx[i].dp_intr_id = i;
1124 		soc->intr_ctx[i].tx_ring_mask = tx_mask;
1125 		soc->intr_ctx[i].rx_ring_mask = rx_mask;
1126 		soc->intr_ctx[i].rx_mon_ring_mask = rx_mon_mask;
1127 		soc->intr_ctx[i].rx_err_ring_mask = rx_err_ring_mask;
1128 		soc->intr_ctx[i].rxdma2host_ring_mask = rxdma2host_ring_mask;
1129 		soc->intr_ctx[i].host2rxdma_ring_mask = host2rxdma_ring_mask;
1130 		soc->intr_ctx[i].rx_wbm_rel_ring_mask = rx_wbm_rel_ring_mask;
1131 		soc->intr_ctx[i].reo_status_ring_mask = reo_status_ring_mask;
1132 
1133 		soc->intr_ctx[i].soc = soc;
1134 
1135 		num_irq = 0;
1136 
1137 		dp_soc_interrupt_map_calculate(soc, i, &irq_id_map[0],
1138 					       &num_irq);
1139 
1140 		ret = hif_register_ext_group(soc->hif_handle,
1141 				num_irq, irq_id_map, dp_service_srngs,
1142 				&soc->intr_ctx[i], "dp_intr",
1143 				HIF_EXEC_NAPI_TYPE, QCA_NAPI_DEF_SCALE_BIN_SHIFT);
1144 
1145 		if (ret) {
1146 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1147 			FL("failed, ret = %d"), ret);
1148 
1149 			return QDF_STATUS_E_FAILURE;
1150 		}
1151 		soc->intr_ctx[i].lro_ctx = qdf_lro_init();
1152 	}
1153 
1154 	hif_configure_ext_group_interrupts(soc->hif_handle);
1155 
1156 	return QDF_STATUS_SUCCESS;
1157 }
1158 
1159 /*
1160  * dp_soc_interrupt_detach() - Deregister handlers and free resources allocated for DP interrupts
1161  * @txrx_soc: DP SOC handle
1162  *
1163  * Return: void
1164  */
1165 static void dp_soc_interrupt_detach(void *txrx_soc)
1166 {
1167 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
1168 	int i;
1169 
1170 	if (soc->intr_mode == DP_INTR_POLL) {
1171 		qdf_timer_stop(&soc->int_timer);
1172 		qdf_timer_free(&soc->int_timer);
1173 	} else {
1174 		hif_deregister_exec_group(soc->hif_handle, "dp_intr");
1175 	}
1176 
1177 	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
1178 		soc->intr_ctx[i].tx_ring_mask = 0;
1179 		soc->intr_ctx[i].rx_ring_mask = 0;
1180 		soc->intr_ctx[i].rx_mon_ring_mask = 0;
1181 		soc->intr_ctx[i].rx_err_ring_mask = 0;
1182 		soc->intr_ctx[i].rx_wbm_rel_ring_mask = 0;
1183 		soc->intr_ctx[i].reo_status_ring_mask = 0;
1184 		soc->intr_ctx[i].rxdma2host_ring_mask = 0;
1185 		soc->intr_ctx[i].host2rxdma_ring_mask = 0;
1186 
1187 		qdf_lro_deinit(soc->intr_ctx[i].lro_ctx);
1188 	}
1189 }
1190 
1191 #define AVG_MAX_MPDUS_PER_TID 128
1192 #define AVG_TIDS_PER_CLIENT 2
1193 #define AVG_FLOWS_PER_TID 2
1194 #define AVG_MSDUS_PER_FLOW 128
1195 #define AVG_MSDUS_PER_MPDU 4
1196 
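/*
 * Sizing example (illustrative; the real divisors come from HAL and the
 * max_clients value from wlan_cfg): with max_clients = 64 and, say,
 * num_mpdus_per_link_desc = 6, the MPDU link descriptor estimate below is
 * (64 * AVG_TIDS_PER_CLIENT * AVG_MAX_MPDUS_PER_TID) / 6
 * = (64 * 2 * 128) / 6 = 2730 descriptors, before the power-of-two
 * round-up applied in dp_hw_link_desc_pool_setup().
 */
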
1197 /*
1198  * Allocate and set up the link descriptor pool that will be used by HW for
1199  * various link and queue descriptors and managed by WBM
1200  */
1201 static int dp_hw_link_desc_pool_setup(struct dp_soc *soc)
1202 {
1203 	int link_desc_size = hal_get_link_desc_size(soc->hal_soc);
1204 	int link_desc_align = hal_get_link_desc_align(soc->hal_soc);
1205 	uint32_t max_clients = wlan_cfg_get_max_clients(soc->wlan_cfg_ctx);
1206 	uint32_t num_mpdus_per_link_desc =
1207 		hal_num_mpdus_per_link_desc(soc->hal_soc);
1208 	uint32_t num_msdus_per_link_desc =
1209 		hal_num_msdus_per_link_desc(soc->hal_soc);
1210 	uint32_t num_mpdu_links_per_queue_desc =
1211 		hal_num_mpdu_links_per_queue_desc(soc->hal_soc);
1212 	uint32_t max_alloc_size = wlan_cfg_max_alloc_size(soc->wlan_cfg_ctx);
1213 	uint32_t total_link_descs, total_mem_size;
1214 	uint32_t num_mpdu_link_descs, num_mpdu_queue_descs;
1215 	uint32_t num_tx_msdu_link_descs, num_rx_msdu_link_descs;
1216 	uint32_t num_link_desc_banks;
1217 	uint32_t last_bank_size = 0;
1218 	uint32_t entry_size, num_entries;
1219 	int i;
1220 	uint32_t desc_id = 0;
1221 
1222 	/* Only Tx queue descriptors are allocated from the common link
1223 	 * descriptor pool. Rx queue descriptors (REO queue extension
1224 	 * descriptors) are not included here because they are expected to be
1225 	 * allocated contiguously with the REO queue descriptors.
1226 	 */
1227 	num_mpdu_link_descs = (max_clients * AVG_TIDS_PER_CLIENT *
1228 		AVG_MAX_MPDUS_PER_TID) / num_mpdus_per_link_desc;
1229 
1230 	num_mpdu_queue_descs = num_mpdu_link_descs /
1231 		num_mpdu_links_per_queue_desc;
1232 
1233 	num_tx_msdu_link_descs = (max_clients * AVG_TIDS_PER_CLIENT *
1234 		AVG_FLOWS_PER_TID * AVG_MSDUS_PER_FLOW) /
1235 		num_msdus_per_link_desc;
1236 
1237 	num_rx_msdu_link_descs = (max_clients * AVG_TIDS_PER_CLIENT *
1238 		AVG_MAX_MPDUS_PER_TID * AVG_MSDUS_PER_MPDU) / 6;
1239 
1240 	num_entries = num_mpdu_link_descs + num_mpdu_queue_descs +
1241 		num_tx_msdu_link_descs + num_rx_msdu_link_descs;
1242 
1243 	/* Round up to power of 2 */
1244 	total_link_descs = 1;
1245 	while (total_link_descs < num_entries)
1246 		total_link_descs <<= 1;
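	/* e.g. num_entries = 2730 rounds up to total_link_descs = 4096 */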
1247 
1248 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
1249 		FL("total_link_descs: %u, link_desc_size: %d"),
1250 		total_link_descs, link_desc_size);
1251 	total_mem_size =  total_link_descs * link_desc_size;
1252 
1253 	total_mem_size += link_desc_align;
1254 
1255 	if (total_mem_size <= max_alloc_size) {
1256 		num_link_desc_banks = 0;
1257 		last_bank_size = total_mem_size;
1258 	} else {
1259 		num_link_desc_banks = (total_mem_size) /
1260 			(max_alloc_size - link_desc_align);
1261 		last_bank_size = total_mem_size %
1262 			(max_alloc_size - link_desc_align);
1263 	}
1264 
1265 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
1266 		FL("total_mem_size: %d, num_link_desc_banks: %u"),
1267 		total_mem_size, num_link_desc_banks);
1268 
1269 	for (i = 0; i < num_link_desc_banks; i++) {
1270 		soc->link_desc_banks[i].base_vaddr_unaligned =
1271 			qdf_mem_alloc_consistent(soc->osdev, soc->osdev->dev,
1272 			max_alloc_size,
1273 			&(soc->link_desc_banks[i].base_paddr_unaligned));
1274 		if (!soc->link_desc_banks[i].base_vaddr_unaligned) {
1275 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1276 				FL("Link descriptor memory alloc failed"));
1277 			goto fail;
1278 		}
1279 		soc->link_desc_banks[i].size = max_alloc_size;
1280 
1281 		/* round the bank base virtual address up to link_desc_align */
1282 		soc->link_desc_banks[i].base_vaddr = (void *)((unsigned long)(
1283 			soc->link_desc_banks[i].base_vaddr_unaligned) +
1284 			((link_desc_align - ((unsigned long)(
1285 			soc->link_desc_banks[i].base_vaddr_unaligned) %
1286 			link_desc_align)) % link_desc_align));
1287 
1288 		soc->link_desc_banks[i].base_paddr = (unsigned long)(
1289 			soc->link_desc_banks[i].base_paddr_unaligned) +
1290 			((unsigned long)(soc->link_desc_banks[i].base_vaddr) -
1291 			(unsigned long)(
1292 			soc->link_desc_banks[i].base_vaddr_unaligned));
1293 	}
1294 
1295 	if (last_bank_size) {
1296 		/* Allocate last bank in case total memory required is not an
1297 		 * exact multiple of max_alloc_size */
1298 		soc->link_desc_banks[i].base_vaddr_unaligned =
1299 			qdf_mem_alloc_consistent(soc->osdev, soc->osdev->dev,
1300 			last_bank_size,
1301 			&(soc->link_desc_banks[i].base_paddr_unaligned));
1302 		if (!soc->link_desc_banks[i].base_vaddr_unaligned) {
1303 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1304 				FL("Link descriptor memory alloc failed"));
1305 			goto fail;
1306 		}
1307 		soc->link_desc_banks[i].size = last_bank_size;
1308 		soc->link_desc_banks[i].base_vaddr = (void *)((unsigned long)
1309 			(soc->link_desc_banks[i].base_vaddr_unaligned) +
1310 			((link_desc_align - ((unsigned long)(
1311 			soc->link_desc_banks[i].base_vaddr_unaligned) %
1312 			link_desc_align)) % link_desc_align));
1313 		soc->link_desc_banks[i].base_paddr = (unsigned long)(
1314 			soc->link_desc_banks[i].base_paddr_unaligned) +
1315 			((unsigned long)(soc->link_desc_banks[i].base_vaddr) -
1316 			(unsigned long)(soc->link_desc_banks[i].base_vaddr_unaligned));
1317 	}
1318 
1319 
1320 	/* Allocate and setup link descriptor idle list for HW internal use */
1321 	entry_size = hal_srng_get_entrysize(soc->hal_soc, WBM_IDLE_LINK);
1322 	total_mem_size = entry_size * total_link_descs;
1323 
1324 	if (total_mem_size <= max_alloc_size) {
1325 		void *desc;
1326 
1327 		if (dp_srng_setup(soc, &soc->wbm_idle_link_ring,
1328 			WBM_IDLE_LINK, 0, 0, total_link_descs)) {
1329 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1330 				FL("Link desc idle ring setup failed"));
1331 			goto fail;
1332 		}
1333 
1334 		hal_srng_access_start_unlocked(soc->hal_soc,
1335 			soc->wbm_idle_link_ring.hal_srng);
1336 
1337 		for (i = 0; i < MAX_LINK_DESC_BANKS &&
1338 			soc->link_desc_banks[i].base_paddr; i++) {
1339 			uint32_t num_entries = (soc->link_desc_banks[i].size -
1340 				((unsigned long)(
1341 				soc->link_desc_banks[i].base_vaddr) -
1342 				(unsigned long)(
1343 				soc->link_desc_banks[i].base_vaddr_unaligned)))
1344 				/ link_desc_size;
1345 			unsigned long paddr = (unsigned long)(
1346 				soc->link_desc_banks[i].base_paddr);
1347 
1348 			while (num_entries && (desc = hal_srng_src_get_next(
1349 				soc->hal_soc,
1350 				soc->wbm_idle_link_ring.hal_srng))) {
1351 				hal_set_link_desc_addr(desc,
1352 					LINK_DESC_COOKIE(desc_id, i), paddr);
1353 				num_entries--;
1354 				desc_id++;
1355 				paddr += link_desc_size;
1356 			}
1357 		}
1358 		hal_srng_access_end_unlocked(soc->hal_soc,
1359 			soc->wbm_idle_link_ring.hal_srng);
1360 	} else {
1361 		uint32_t num_scatter_bufs;
1362 		uint32_t num_entries_per_buf;
1363 		uint32_t rem_entries;
1364 		uint8_t *scatter_buf_ptr;
1365 		uint16_t scatter_buf_num;
1366 
1367 		soc->wbm_idle_scatter_buf_size =
1368 			hal_idle_list_scatter_buf_size(soc->hal_soc);
1369 		num_entries_per_buf = hal_idle_scatter_buf_num_entries(
1370 			soc->hal_soc, soc->wbm_idle_scatter_buf_size);
1371 		num_scatter_bufs = hal_idle_list_num_scatter_bufs(
1372 					soc->hal_soc, total_mem_size,
1373 					soc->wbm_idle_scatter_buf_size);
1374 
1375 		for (i = 0; i < num_scatter_bufs; i++) {
1376 			soc->wbm_idle_scatter_buf_base_vaddr[i] =
1377 				qdf_mem_alloc_consistent(soc->osdev, soc->osdev->dev,
1378 				soc->wbm_idle_scatter_buf_size,
1379 				&(soc->wbm_idle_scatter_buf_base_paddr[i]));
1380 			if (soc->wbm_idle_scatter_buf_base_vaddr[i] == NULL) {
1381 				QDF_TRACE(QDF_MODULE_ID_DP,
1382 					QDF_TRACE_LEVEL_ERROR,
1383 					FL("Scatter list memory alloc failed"));
1384 				goto fail;
1385 			}
1386 		}
1387 
1388 		/* Populate idle list scatter buffers with link descriptor
1389 		 * pointers
1390 		 */
1391 		scatter_buf_num = 0;
1392 		scatter_buf_ptr = (uint8_t *)(
1393 			soc->wbm_idle_scatter_buf_base_vaddr[scatter_buf_num]);
1394 		rem_entries = num_entries_per_buf;
1395 
1396 		for (i = 0; i < MAX_LINK_DESC_BANKS &&
1397 			soc->link_desc_banks[i].base_paddr; i++) {
1398 			uint32_t num_link_descs =
1399 				(soc->link_desc_banks[i].size -
1400 				((unsigned long)(
1401 				soc->link_desc_banks[i].base_vaddr) -
1402 				(unsigned long)(
1403 				soc->link_desc_banks[i].base_vaddr_unaligned)))
1404 				/ link_desc_size;
1405 			unsigned long paddr = (unsigned long)(
1406 				soc->link_desc_banks[i].base_paddr);
1407 
1408 			while (num_link_descs) {
1409 				hal_set_link_desc_addr((void *)scatter_buf_ptr,
1410 					LINK_DESC_COOKIE(desc_id, i), paddr);
1411 				num_link_descs--;
1412 				desc_id++;
1413 				paddr += link_desc_size;
1414 				rem_entries--;
1415 				if (rem_entries) {
1416 					scatter_buf_ptr += entry_size;
1417 				} else {
1418 					rem_entries = num_entries_per_buf;
1419 					scatter_buf_num++;
1420 
1421 					if (scatter_buf_num >= num_scatter_bufs)
1422 						break;
1423 
1424 					scatter_buf_ptr = (uint8_t *)(
1425 						soc->wbm_idle_scatter_buf_base_vaddr[
1426 						scatter_buf_num]);
1427 				}
1428 			}
1429 		}
1430 		/* Setup link descriptor idle list in HW */
1431 		hal_setup_link_idle_list(soc->hal_soc,
1432 			soc->wbm_idle_scatter_buf_base_paddr,
1433 			soc->wbm_idle_scatter_buf_base_vaddr,
1434 			num_scatter_bufs, soc->wbm_idle_scatter_buf_size,
1435 			(uint32_t)(scatter_buf_ptr -
1436 			(uint8_t *)(soc->wbm_idle_scatter_buf_base_vaddr[
1437 			scatter_buf_num-1])), total_link_descs);
1438 	}
1439 	return 0;
1440 
1441 fail:
1442 	if (soc->wbm_idle_link_ring.hal_srng) {
1443 		dp_srng_cleanup(soc, &soc->wbm_idle_link_ring,
1444 			WBM_IDLE_LINK, 0);
1445 	}
1446 
1447 	for (i = 0; i < MAX_IDLE_SCATTER_BUFS; i++) {
1448 		if (soc->wbm_idle_scatter_buf_base_vaddr[i]) {
1449 			qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
1450 				soc->wbm_idle_scatter_buf_size,
1451 				soc->wbm_idle_scatter_buf_base_vaddr[i],
1452 				soc->wbm_idle_scatter_buf_base_paddr[i], 0);
1453 			soc->wbm_idle_scatter_buf_base_vaddr[i] = NULL;
1454 		}
1455 	}
1456 
1457 	for (i = 0; i < MAX_LINK_DESC_BANKS; i++) {
1458 		if (soc->link_desc_banks[i].base_vaddr_unaligned) {
1459 			qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
1460 				soc->link_desc_banks[i].size,
1461 				soc->link_desc_banks[i].base_vaddr_unaligned,
1462 				soc->link_desc_banks[i].base_paddr_unaligned,
1463 				0);
1464 			soc->link_desc_banks[i].base_vaddr_unaligned = NULL;
1465 		}
1466 	}
1467 	return QDF_STATUS_E_FAILURE;
1468 }
1469 
1470 /*
1471  * Free the link descriptor pool that was set up for HW use
1472  */
1473 static void dp_hw_link_desc_pool_cleanup(struct dp_soc *soc)
1474 {
1475 	int i;
1476 
1477 	if (soc->wbm_idle_link_ring.hal_srng) {
1478 		dp_srng_cleanup(soc, &soc->wbm_idle_link_ring,
1479 			WBM_IDLE_LINK, 0);
1480 	}
1481 
1482 	for (i = 0; i < MAX_IDLE_SCATTER_BUFS; i++) {
1483 		if (soc->wbm_idle_scatter_buf_base_vaddr[i]) {
1484 			qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
1485 				soc->wbm_idle_scatter_buf_size,
1486 				soc->wbm_idle_scatter_buf_base_vaddr[i],
1487 				soc->wbm_idle_scatter_buf_base_paddr[i], 0);
1488 			soc->wbm_idle_scatter_buf_base_vaddr[i] = NULL;
1489 		}
1490 	}
1491 
1492 	for (i = 0; i < MAX_LINK_DESC_BANKS; i++) {
1493 		if (soc->link_desc_banks[i].base_vaddr_unaligned) {
1494 			qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
1495 				soc->link_desc_banks[i].size,
1496 				soc->link_desc_banks[i].base_vaddr_unaligned,
1497 				soc->link_desc_banks[i].base_paddr_unaligned,
1498 				0);
1499 			soc->link_desc_banks[i].base_vaddr_unaligned = NULL;
1500 		}
1501 	}
1502 }
1503 
1504 /* TODO: Following should be configurable */
1505 #define WBM_RELEASE_RING_SIZE 64
1506 #define TCL_CMD_RING_SIZE 32
1507 #define TCL_STATUS_RING_SIZE 32
1508 #if defined(QCA_WIFI_QCA6290)
1509 #define REO_DST_RING_SIZE 1024
1510 #else
1511 #define REO_DST_RING_SIZE 2048
1512 #endif
1513 #define REO_REINJECT_RING_SIZE 32
1514 #define RX_RELEASE_RING_SIZE 1024
1515 #define REO_EXCEPTION_RING_SIZE 128
1516 #define REO_CMD_RING_SIZE 64
1517 #define REO_STATUS_RING_SIZE 128
1518 #define RXDMA_BUF_RING_SIZE 1024
1519 #define RXDMA_REFILL_RING_SIZE 4096
1520 #define RXDMA_MONITOR_BUF_RING_SIZE 4096
1521 #define RXDMA_MONITOR_DST_RING_SIZE 2048
1522 #define RXDMA_MONITOR_STATUS_RING_SIZE 1024
1523 #define RXDMA_MONITOR_DESC_RING_SIZE 4096
1524 #define RXDMA_ERR_DST_RING_SIZE 1024
1525 
1526 /*
1527  * dp_wds_aging_timer_fn() - Timer callback function for WDS aging
1528  * @soc: Datapath SOC handle
1529  *
1530  * This is a timer function used to age out stale WDS nodes from
1531  * the AST table
1532  */
1533 #ifdef FEATURE_WDS
1534 static void dp_wds_aging_timer_fn(void *soc_hdl)
1535 {
1536 	struct dp_soc *soc = (struct dp_soc *) soc_hdl;
1537 	struct dp_pdev *pdev;
1538 	struct dp_vdev *vdev;
1539 	struct dp_peer *peer;
1540 	struct dp_ast_entry *ase, *temp_ase;
1541 	int i;
1542 
1543 	qdf_spin_lock_bh(&soc->ast_lock);
1544 
1545 	for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) {
1546 		pdev = soc->pdev_list[i];
1547 		DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
1548 			DP_VDEV_ITERATE_PEER_LIST(vdev, peer) {
1549 				DP_PEER_ITERATE_ASE_LIST(peer, ase, temp_ase) {
1550 					/*
1551 					 * Do not expire static ast entries
1552 					 * and HM WDS entries
1553 					 */
1554 					if (ase->type ==
1555 						CDP_TXRX_AST_TYPE_STATIC ||
1556 						ase->type ==
1557 						CDP_TXRX_AST_TYPE_WDS_HM)
1558 						continue;
1559 
1560 					if (ase->is_active) {
1561 						ase->is_active = FALSE;
1562 						continue;
1563 					}
1564 
1565 					DP_STATS_INC(soc, ast.aged_out, 1);
1566 					dp_peer_del_ast(soc, ase);
1567 				}
1568 			}
1569 		}
1570 
1571 	}
1572 
1573 	qdf_spin_unlock_bh(&soc->ast_lock);
1574 
1575 	if (qdf_atomic_read(&soc->cmn_init_done))
1576 		qdf_timer_mod(&soc->wds_aging_timer, DP_WDS_AGING_TIMER_DEFAULT_MS);
1577 }
1578 
1579 /*
1580  * dp_soc_wds_attach() - Setup WDS timer and AST table
1581  * @soc:		Datapath SOC handle
1582  *
1583  * Return: None
1584  */
1585 static void dp_soc_wds_attach(struct dp_soc *soc)
1586 {
1587 	qdf_timer_init(soc->osdev, &soc->wds_aging_timer,
1588 			dp_wds_aging_timer_fn, (void *)soc,
1589 			QDF_TIMER_TYPE_WAKE_APPS);
1590 
1591 	qdf_timer_mod(&soc->wds_aging_timer, DP_WDS_AGING_TIMER_DEFAULT_MS);
1592 }
1593 
1594 /*
1595  * dp_soc_wds_detach() - Detach WDS data structures and timers
1596  * @soc: Datapath SOC handle
1597  *
1598  * Return: None
1599  */
1600 static void dp_soc_wds_detach(struct dp_soc *soc)
1601 {
1602 	qdf_timer_stop(&soc->wds_aging_timer);
1603 	qdf_timer_free(&soc->wds_aging_timer);
1604 }
1605 #else
1606 static void dp_soc_wds_attach(struct dp_soc *soc)
1607 {
1608 }
1609 
1610 static void dp_soc_wds_detach(struct dp_soc *soc)
1611 {
1612 }
1613 #endif
1614 
1615 /*
1616  * dp_soc_reset_cpu_ring_map() - Reset the CPU ring map
1617  * @soc: Datapath soc handle
1618  *
1619  * This API resets the default CPU ring map
1620  */
1621 
1622 static void dp_soc_reset_cpu_ring_map(struct dp_soc *soc)
1623 {
1624 	uint8_t i;
1625 	int nss_config = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
1626 
1627 	for (i = 0; i < WLAN_CFG_INT_NUM_CONTEXTS; i++) {
1628 		if (nss_config == 1) {
1629 			/*
1630 			 * Setting Tx ring map for one nss offloaded radio
1631 			 */
1632 			soc->tx_ring_map[i] = dp_cpu_ring_map[DP_NSS_FIRST_RADIO_OFFLOADED_MAP][i];
1633 		} else if (nss_config == 2) {
1634 			/*
1635 			 * Setting Tx ring for two nss offloaded radios
1636 			 */
1637 			soc->tx_ring_map[i] = dp_cpu_ring_map[DP_NSS_SECOND_RADIO_OFFLOADED_MAP][i];
1638 		} else {
1639 			/*
1640 			 * Setting Tx ring map for all nss offloaded radios
1641 			 */
1642 			soc->tx_ring_map[i] = dp_cpu_ring_map[DP_NSS_ALL_RADIO_OFFLOADED_MAP][i];
1643 		}
1644 	}
1645 }
1646 
1647 /*
1648  * dp_soc_ring_if_nss_offloaded() - find if ring is offloaded to NSS
1649  * @soc - DP soc handle
1650  * @ring_type - ring type
1651  * @ring_num - ring number
1652  *
1653  * Return: 1 if the ring is offloaded to NSS, 0 otherwise
1654  */
1655 static uint8_t dp_soc_ring_if_nss_offloaded(struct dp_soc *soc, enum hal_ring_type ring_type, int ring_num)
1656 {
1657 	uint8_t nss_config = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
1658 	uint8_t status = 0;
1659 
1660 	switch (ring_type) {
1661 	case WBM2SW_RELEASE:
1662 	case REO_DST:
1663 	case RXDMA_BUF:
1664 		status = ((nss_config) & (1 << ring_num));
1665 		break;
1666 	default:
1667 		break;
1668 	}
1669 
1670 	return status;
1671 }
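
/*
 * Example (illustrative): nss_config is a per-radio offload bitmap, so with
 * nss_config = 0x2 (second radio offloaded) only ring_num 1 of the
 * WBM2SW_RELEASE, REO_DST and RXDMA_BUF ring types reports non-zero here.
 */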
1672 
1673 /*
1674  * dp_soc_reset_intr_mask() - reset interrupt mask
1675  * @soc - DP Soc handle
1676  *
1677  * Return: void
1678  */
1679 static void dp_soc_reset_intr_mask(struct dp_soc *soc)
1680 {
1681 	uint8_t j;
1682 	int *grp_mask = NULL;
1683 	int group_number, mask, num_ring;
1684 
1685 	/* number of tx ring */
1686 	num_ring = wlan_cfg_num_tcl_data_rings(soc->wlan_cfg_ctx);
1687 
1688 	/*
1689 	 * group mask for tx completion ring.
1690 	 */
1691 	grp_mask =  &soc->wlan_cfg_ctx->int_tx_ring_mask[0];
1692 
1693 	/* loop and reset the mask for only offloaded ring */
1694 	for (j = 0; j < num_ring; j++) {
1695 		if (!dp_soc_ring_if_nss_offloaded(soc, WBM2SW_RELEASE, j)) {
1696 			continue;
1697 		}
1698 
1699 		/*
1700 		 * Group number corresponding to tx offloaded ring.
1701 		 */
1702 		group_number = dp_srng_find_ring_in_mask(j, grp_mask);
1703 		if (group_number < 0) {
1704 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
1705 					FL("ring not part of any group; ring_type: %d, ring_num %d"),
1706 					WBM2SW_RELEASE, j);
1707 			return;
1708 		}
1709 
1710 		/* reset the tx mask for offloaded ring */
1711 		mask = wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, group_number);
1712 		mask &= (~(1 << j));
1713 
1714 		/*
1715 		 * reset the interrupt mask for offloaded ring.
1716 		 */
1717 		wlan_cfg_set_tx_ring_mask(soc->wlan_cfg_ctx, group_number, mask);
1718 	}
1719 
1720 	/* number of rx rings */
1721 	num_ring = wlan_cfg_num_reo_dest_rings(soc->wlan_cfg_ctx);
1722 
1723 	/*
1724 	 * group mask for reo destination ring.
1725 	 */
1726 	grp_mask = &soc->wlan_cfg_ctx->int_rx_ring_mask[0];
1727 
	/* loop and reset the mask only for offloaded rings */
1729 	for (j = 0; j < num_ring; j++) {
1730 		if (!dp_soc_ring_if_nss_offloaded(soc, REO_DST, j)) {
1731 			continue;
1732 		}
1733 
1734 		/*
1735 		 * Group number corresponding to rx offloaded ring.
1736 		 */
1737 		group_number = dp_srng_find_ring_in_mask(j, grp_mask);
1738 		if (group_number < 0) {
1739 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
					FL("ring not part of any group; ring_type: %d, ring_num: %d"),
1741 					REO_DST, j);
1742 			return;
1743 		}
1744 
		/* reset the rx mask for offloaded ring */
1746 		mask =  wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, group_number);
1747 		mask &= (~(1 << j));
1748 
1749 		/*
		 * clear the interrupt mask bit for the offloaded rx ring.
1751 		 */
1752 		wlan_cfg_set_rx_ring_mask(soc->wlan_cfg_ctx, group_number, mask);
1753 	}
1754 
1755 	/*
1756 	 * group mask for Rx buffer refill ring
1757 	 */
1758 	grp_mask = &soc->wlan_cfg_ctx->int_host2rxdma_ring_mask[0];
1759 
	/* loop and reset the mask only for offloaded rings */
1761 	for (j = 0; j < MAX_PDEV_CNT; j++) {
1762 		if (!dp_soc_ring_if_nss_offloaded(soc, RXDMA_BUF, j)) {
1763 			continue;
1764 		}
1765 
1766 		/*
1767 		 * Group number corresponding to rx offloaded ring.
1768 		 */
1769 		group_number = dp_srng_find_ring_in_mask(j, grp_mask);
1770 		if (group_number < 0) {
1771 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
					FL("ring not part of any group; ring_type: %d, ring_num: %d"),
					RXDMA_BUF, j);
1774 			return;
1775 		}
1776 
		/* reset the host2rxdma mask for offloaded ring */
1778 		mask =  wlan_cfg_get_host2rxdma_ring_mask(soc->wlan_cfg_ctx,
1779 				group_number);
1780 		mask &= (~(1 << j));
1781 
1782 		/*
		 * clear the interrupt mask bit for the offloaded refill ring.
1784 		 */
1785 		wlan_cfg_set_host2rxdma_ring_mask(soc->wlan_cfg_ctx,
1786 			group_number, mask);
1787 	}
1788 }
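
/*
 * Worked example (illustrative values): if offloaded tx ring 1 belongs
 * to interrupt group 2 whose tx mask is 0x7, the update above stores
 * 0x7 & ~(1 << 1) == 0x5, leaving only the host-owned rings to raise
 * interrupts in that group.
 */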
1789 
1790 #ifdef IPA_OFFLOAD
/**
 * dp_reo_remap_config() - configure reo remap register value based
 *                         on nss configuration.
 *		based on offload_radio value below remap configuration
 *		gets applied.
 *		0 - both Radios handled by host (remap rings 1, 2, 3 & 4)
 *		1 - 1st Radio handled by NSS (remap rings 2, 3 & 4)
 *		2 - 2nd Radio handled by NSS (remap rings 1, 3 & 4)
 *		3 - both Radios handled by NSS (remap not required)
 *		4 - IPA OFFLOAD enabled (remap rings 1, 2 & 3)
 *
 * @soc: DP SOC handle
 * @remap1: output parameter indicates reo remap 1 register value
 * @remap2: output parameter indicates reo remap 2 register value
 * Return: bool type, true if remap is configured else false.
 */
1806 static bool dp_reo_remap_config(struct dp_soc *soc,
1807 				uint32_t *remap1,
1808 				uint32_t *remap2)
{
1811 	*remap1 = ((0x1 << 0) | (0x2 << 3) | (0x3 << 6) | (0x1 << 9) |
1812 		(0x2 << 12) | (0x3 << 15) | (0x1 << 18) | (0x2 << 21)) << 8;
1813 
1814 	*remap2 = ((0x3 << 0) | (0x1 << 3) | (0x2 << 6) | (0x3 << 9) |
1815 		(0x1 << 12) | (0x2 << 15) | (0x3 << 18) | (0x1 << 21)) << 8;
1816 
1817 	return true;
1818 }
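
/*
 * Layout note (assumed from the REO remap register format): each remap
 * register carries eight 3-bit destination ring fields starting at bit 8,
 * so the two writes above spread the SW1/SW2/SW3 sequence across all
 * sixteen hash slots while keeping ring 4 out of the rotation for IPA.
 */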
1819 #else
1820 static bool dp_reo_remap_config(struct dp_soc *soc,
1821 				uint32_t *remap1,
1822 				uint32_t *remap2)
1823 {
1824 	uint8_t offload_radio = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
1825 
1826 	switch (offload_radio) {
1827 	case 0:
1828 		*remap1 = ((0x1 << 0) | (0x2 << 3) | (0x3 << 6) |
1829 			(0x4 << 9) | (0x1 << 12) | (0x2 << 15) |
1830 			(0x3 << 18) | (0x4 << 21)) << 8;
1831 
1832 		*remap2 = ((0x1 << 0) | (0x2 << 3) | (0x3 << 6) |
1833 			(0x4 << 9) | (0x1 << 12) | (0x2 << 15) |
1834 			(0x3 << 18) | (0x4 << 21)) << 8;
1835 		break;
1836 
1837 	case 1:
1838 		*remap1 = ((0x2 << 0) | (0x3 << 3) | (0x4 << 6) |
1839 			(0x2 << 9) | (0x3 << 12) | (0x4 << 15) |
1840 			(0x2 << 18) | (0x3 << 21)) << 8;
1841 
1842 		*remap2 = ((0x4 << 0) | (0x2 << 3) | (0x3 << 6) |
1843 			(0x4 << 9) | (0x2 << 12) | (0x3 << 15) |
1844 			(0x4 << 18) | (0x2 << 21)) << 8;
1845 		break;
1846 
1847 	case 2:
1848 		*remap1 = ((0x1 << 0) | (0x3 << 3) | (0x4 << 6) |
1849 			(0x1 << 9) | (0x3 << 12) | (0x4 << 15) |
1850 			(0x1 << 18) | (0x3 << 21)) << 8;
1851 
1852 		*remap2 = ((0x4 << 0) | (0x1 << 3) | (0x3 << 6) |
1853 			(0x4 << 9) | (0x1 << 12) | (0x3 << 15) |
1854 			(0x4 << 18) | (0x1 << 21)) << 8;
1855 		break;
1856 
1857 	case 3:
1858 		/* return false if both radios are offloaded to NSS */
1859 		return false;
1860 	}
1861 	return true;
1862 }
1863 #endif
1864 
/*
 * dp_reo_frag_dst_set() - configure reo register to set the
 *                        fragment destination ring
 * @soc: Datapath soc
 * @frag_dst_ring: output parameter to set fragment destination ring
 *
 * Based on offload_radio below, the fragment destination ring is selected:
 * 0 - TCL
 * 1 - SW1
 * 2 - SW2
 * 3 - SW3
 * 4 - SW4
 * 5 - Release
 * 6 - FW
 * 7 - alternate select
 *
 * Return: void
 */
1884 {
1885 	uint8_t offload_radio = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
1886 
1887 	switch (offload_radio) {
1888 	case 0:
1889 		*frag_dst_ring = HAL_SRNG_REO_EXCEPTION;
1890 		break;
1891 	case 3:
1892 		*frag_dst_ring = HAL_SRNG_REO_ALTERNATE_SELECT;
1893 		break;
1894 	default:
1895 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1896 				FL("dp_reo_frag_dst_set invalid offload radio config"));
1897 		break;
1898 	}
1899 }
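
/*
 * Note: only the two expected configurations are handled here; a
 * host-only SOC (offload_radio 0) steers fragments to the REO exception
 * ring for host reassembly, while a fully NSS offloaded SOC
 * (offload_radio 3) uses the alternate-select destination (7 in the
 * table above). Anything else hits the error trace in the default case.
 */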
1900 
/*
 * dp_soc_cmn_setup() - Common SoC level initialization
 * @soc:		Datapath SOC handle
 *
 * This is an internal function used to setup common SOC data structures,
 * to be called from PDEV attach after receiving HW mode capabilities from FW
 *
 * Return: 0 on success, QDF_STATUS_E_FAILURE on failure
 */
1908 static int dp_soc_cmn_setup(struct dp_soc *soc)
1909 {
1910 	int i;
1911 	struct hal_reo_params reo_params;
1912 	int tx_ring_size;
1913 	int tx_comp_ring_size;
1914 
1915 	if (qdf_atomic_read(&soc->cmn_init_done))
1916 		return 0;
1917 
1918 	if (dp_peer_find_attach(soc))
1919 		goto fail0;
1920 
1921 	if (dp_hw_link_desc_pool_setup(soc))
1922 		goto fail1;
1923 
1924 	/* Setup SRNG rings */
1925 	/* Common rings */
1926 	if (dp_srng_setup(soc, &soc->wbm_desc_rel_ring, SW2WBM_RELEASE, 0, 0,
1927 		WBM_RELEASE_RING_SIZE)) {
1928 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1929 			FL("dp_srng_setup failed for wbm_desc_rel_ring"));
1930 		goto fail1;
1931 	}
1932 
1933 
1934 	soc->num_tcl_data_rings = 0;
1935 	/* Tx data rings */
1936 	if (!wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
1937 		soc->num_tcl_data_rings =
1938 			wlan_cfg_num_tcl_data_rings(soc->wlan_cfg_ctx);
1939 		tx_comp_ring_size =
1940 			wlan_cfg_tx_comp_ring_size(soc->wlan_cfg_ctx);
1941 		tx_ring_size =
1942 			wlan_cfg_tx_ring_size(soc->wlan_cfg_ctx);
1943 		for (i = 0; i < soc->num_tcl_data_rings; i++) {
1944 			if (dp_srng_setup(soc, &soc->tcl_data_ring[i],
1945 				TCL_DATA, i, 0, tx_ring_size)) {
1946 				QDF_TRACE(QDF_MODULE_ID_DP,
1947 					QDF_TRACE_LEVEL_ERROR,
1948 					FL("dp_srng_setup failed for tcl_data_ring[%d]"), i);
1949 				goto fail1;
1950 			}
1951 			/*
1952 			 * TBD: Set IPA WBM ring size with ini IPA UC tx buffer
1953 			 * count
1954 			 */
1955 			if (dp_srng_setup(soc, &soc->tx_comp_ring[i],
1956 				WBM2SW_RELEASE, i, 0, tx_comp_ring_size)) {
1957 				QDF_TRACE(QDF_MODULE_ID_DP,
1958 					QDF_TRACE_LEVEL_ERROR,
1959 					FL("dp_srng_setup failed for tx_comp_ring[%d]"), i);
1960 				goto fail1;
1961 			}
1962 		}
1963 	} else {
1964 		/* This will be incremented during per pdev ring setup */
1965 		soc->num_tcl_data_rings = 0;
1966 	}
1967 
1968 	if (dp_tx_soc_attach(soc)) {
1969 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1970 				FL("dp_tx_soc_attach failed"));
1971 		goto fail1;
1972 	}
1973 
1974 	/* TCL command and status rings */
1975 	if (dp_srng_setup(soc, &soc->tcl_cmd_ring, TCL_CMD, 0, 0,
1976 		TCL_CMD_RING_SIZE)) {
1977 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1978 			FL("dp_srng_setup failed for tcl_cmd_ring"));
1979 		goto fail1;
1980 	}
1981 
1982 	if (dp_srng_setup(soc, &soc->tcl_status_ring, TCL_STATUS, 0, 0,
1983 		TCL_STATUS_RING_SIZE)) {
1984 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1985 			FL("dp_srng_setup failed for tcl_status_ring"));
1986 		goto fail1;
1987 	}
1988 
1989 
1990 	/* TBD: call dp_tx_init to setup Tx SW descriptors and MSDU extension
1991 	 * descriptors
1992 	 */
1993 
1994 	/* Rx data rings */
1995 	if (!wlan_cfg_per_pdev_rx_ring(soc->wlan_cfg_ctx)) {
1996 		soc->num_reo_dest_rings =
1997 			wlan_cfg_num_reo_dest_rings(soc->wlan_cfg_ctx);
1998 		QDF_TRACE(QDF_MODULE_ID_DP,
1999 			QDF_TRACE_LEVEL_ERROR,
2000 			FL("num_reo_dest_rings %d\n"), soc->num_reo_dest_rings);
2001 		for (i = 0; i < soc->num_reo_dest_rings; i++) {
2002 			if (dp_srng_setup(soc, &soc->reo_dest_ring[i], REO_DST,
2003 				i, 0, REO_DST_RING_SIZE)) {
2004 				QDF_TRACE(QDF_MODULE_ID_DP,
2005 					QDF_TRACE_LEVEL_ERROR,
2006 					FL("dp_srng_setup failed for reo_dest_ring[%d]"), i);
2007 				goto fail1;
2008 			}
2009 		}
2010 	} else {
2011 		/* This will be incremented during per pdev ring setup */
2012 		soc->num_reo_dest_rings = 0;
2013 	}
2014 
2015 	/* LMAC RxDMA to SW Rings configuration */
2016 	if (!wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx)) {
2017 		/* Only valid for MCL */
2018 		struct dp_pdev *pdev = soc->pdev_list[0];
2019 
2020 		for (i = 0; i < MAX_RX_MAC_RINGS; i++) {
2021 			if (dp_srng_setup(soc, &pdev->rxdma_err_dst_ring[i],
2022 				RXDMA_DST, 0, i, RXDMA_ERR_DST_RING_SIZE)) {
2023 				QDF_TRACE(QDF_MODULE_ID_DP,
2024 					QDF_TRACE_LEVEL_ERROR,
2025 					FL("dp_srng_setup failed for rxdma_err_dst_ring"));
2026 				goto fail1;
2027 			}
2028 		}
2029 	}
2030 	/* TBD: call dp_rx_init to setup Rx SW descriptors */
2031 
2032 	/* REO reinjection ring */
2033 	if (dp_srng_setup(soc, &soc->reo_reinject_ring, REO_REINJECT, 0, 0,
2034 		REO_REINJECT_RING_SIZE)) {
2035 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2036 			FL("dp_srng_setup failed for reo_reinject_ring"));
2037 		goto fail1;
2038 	}
2039 
2040 
2041 	/* Rx release ring */
2042 	if (dp_srng_setup(soc, &soc->rx_rel_ring, WBM2SW_RELEASE, 3, 0,
2043 		RX_RELEASE_RING_SIZE)) {
2044 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2045 			FL("dp_srng_setup failed for rx_rel_ring"));
2046 		goto fail1;
2047 	}
2048 
2049 
2050 	/* Rx exception ring */
2051 	if (dp_srng_setup(soc, &soc->reo_exception_ring, REO_EXCEPTION, 0,
2052 		MAX_REO_DEST_RINGS, REO_EXCEPTION_RING_SIZE)) {
2053 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2054 			FL("dp_srng_setup failed for reo_exception_ring"));
2055 		goto fail1;
2056 	}
2057 
2058 
2059 	/* REO command and status rings */
2060 	if (dp_srng_setup(soc, &soc->reo_cmd_ring, REO_CMD, 0, 0,
2061 		REO_CMD_RING_SIZE)) {
2062 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2063 			FL("dp_srng_setup failed for reo_cmd_ring"));
2064 		goto fail1;
2065 	}
2066 
2067 	hal_reo_init_cmd_ring(soc->hal_soc, soc->reo_cmd_ring.hal_srng);
2068 	TAILQ_INIT(&soc->rx.reo_cmd_list);
2069 	qdf_spinlock_create(&soc->rx.reo_cmd_lock);
2070 
2071 	if (dp_srng_setup(soc, &soc->reo_status_ring, REO_STATUS, 0, 0,
2072 		REO_STATUS_RING_SIZE)) {
2073 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2074 			FL("dp_srng_setup failed for reo_status_ring"));
2075 		goto fail1;
2076 	}
2077 
2078 	qdf_spinlock_create(&soc->ast_lock);
2079 	dp_soc_wds_attach(soc);
2080 
2081 	/* Reset the cpu ring map if radio is NSS offloaded */
2082 	if (wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx)) {
2083 		dp_soc_reset_cpu_ring_map(soc);
2084 		dp_soc_reset_intr_mask(soc);
2085 	}
2086 
2087 	/* Setup HW REO */
2088 	qdf_mem_zero(&reo_params, sizeof(reo_params));
2089 
2090 	if (wlan_cfg_is_rx_hash_enabled(soc->wlan_cfg_ctx)) {
2091 
2092 		/*
2093 		 * Reo ring remap is not required if both radios
2094 		 * are offloaded to NSS
2095 		 */
2096 		if (!dp_reo_remap_config(soc,
2097 					&reo_params.remap1,
2098 					&reo_params.remap2))
2099 			goto out;
2100 
2101 		reo_params.rx_hash_enabled = true;
2102 	}
2103 
2104 	/* setup the global rx defrag waitlist */
2105 	TAILQ_INIT(&soc->rx.defrag.waitlist);
2106 	soc->rx.defrag.timeout_ms =
2107 		wlan_cfg_get_rx_defrag_min_timeout(soc->wlan_cfg_ctx);
2108 	soc->rx.flags.defrag_timeout_check =
2109 		wlan_cfg_get_defrag_timeout_check(soc->wlan_cfg_ctx);
2110 
2111 out:
2112 	/*
2113 	 * set the fragment destination ring
2114 	 */
2115 	dp_reo_frag_dst_set(soc, &reo_params.frag_dst_ring);
2116 
2117 	hal_reo_setup(soc->hal_soc, &reo_params);
2118 
2119 	qdf_atomic_set(&soc->cmn_init_done, 1);
2120 	qdf_nbuf_queue_init(&soc->htt_stats.msg);
2121 	return 0;
2122 fail1:
2123 	/*
2124 	 * Cleanup will be done as part of soc_detach, which will
2125 	 * be called on pdev attach failure
2126 	 */
2127 fail0:
2128 	return QDF_STATUS_E_FAILURE;
2129 }
2130 
2131 static void dp_pdev_detach_wifi3(struct cdp_pdev *txrx_pdev, int force);
2132 
2133 static void dp_lro_hash_setup(struct dp_soc *soc)
2134 {
2135 	struct cdp_lro_hash_config lro_hash;
2136 
2137 	if (!wlan_cfg_is_lro_enabled(soc->wlan_cfg_ctx) &&
2138 		!wlan_cfg_is_rx_hash_enabled(soc->wlan_cfg_ctx)) {
2139 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			 FL("LRO disabled, RX hash disabled"));
2141 		return;
2142 	}
2143 
2144 	qdf_mem_zero(&lro_hash, sizeof(lro_hash));
2145 
2146 	if (wlan_cfg_is_lro_enabled(soc->wlan_cfg_ctx)) {
2147 		lro_hash.lro_enable = 1;
2148 		lro_hash.tcp_flag = QDF_TCPHDR_ACK;
2149 		lro_hash.tcp_flag_mask = QDF_TCPHDR_FIN | QDF_TCPHDR_SYN |
2150 			 QDF_TCPHDR_RST | QDF_TCPHDR_ACK | QDF_TCPHDR_URG |
2151 			 QDF_TCPHDR_ECE | QDF_TCPHDR_CWR;
2152 	}
2153 
2154 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW, FL("enabled"));
2155 	qdf_get_random_bytes(lro_hash.toeplitz_hash_ipv4,
2156 		 (sizeof(lro_hash.toeplitz_hash_ipv4[0]) *
2157 		 LRO_IPV4_SEED_ARR_SZ));
2158 	qdf_get_random_bytes(lro_hash.toeplitz_hash_ipv6,
2159 		 (sizeof(lro_hash.toeplitz_hash_ipv6[0]) *
2160 		 LRO_IPV6_SEED_ARR_SZ));
2161 
2162 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW,
2163 		 "lro_hash: lro_enable: 0x%x tcp_flag 0x%x tcp_flag_mask 0x%x",
2164 		 lro_hash.lro_enable, lro_hash.tcp_flag,
2165 		 lro_hash.tcp_flag_mask);
2166 
2167 	qdf_trace_hex_dump(QDF_MODULE_ID_DP,
2168 		 QDF_TRACE_LEVEL_ERROR,
2169 		 (void *)lro_hash.toeplitz_hash_ipv4,
2170 		 (sizeof(lro_hash.toeplitz_hash_ipv4[0]) *
2171 		 LRO_IPV4_SEED_ARR_SZ));
2172 
2173 	qdf_trace_hex_dump(QDF_MODULE_ID_DP,
2174 		 QDF_TRACE_LEVEL_ERROR,
2175 		 (void *)lro_hash.toeplitz_hash_ipv6,
2176 		 (sizeof(lro_hash.toeplitz_hash_ipv6[0]) *
2177 		 LRO_IPV6_SEED_ARR_SZ));
2178 
2179 	qdf_assert(soc->cdp_soc.ol_ops->lro_hash_config);
2180 
2181 	if (soc->cdp_soc.ol_ops->lro_hash_config)
2182 		(void)soc->cdp_soc.ol_ops->lro_hash_config
2183 			(soc->ctrl_psoc, &lro_hash);
2184 }
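
/*
 * Note: the Toeplitz seeds above are re-randomized on every call, and
 * the resulting configuration is pushed to the target through the
 * lro_hash_config ol_ops callback, which the control path is expected to
 * have registered (hence the qdf_assert above).
 */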
2185 
2186 /*
2187 * dp_rxdma_ring_setup() - configure the RX DMA rings
2188 * @soc: data path SoC handle
2189 * @pdev: Physical device handle
2190 *
* Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_FAILURE on failure
2192 */
2193 #ifdef QCA_HOST2FW_RXBUF_RING
2194 static int dp_rxdma_ring_setup(struct dp_soc *soc,
2195 	 struct dp_pdev *pdev)
2196 {
2197 	int max_mac_rings =
2198 		 wlan_cfg_get_num_mac_rings
2199 			(pdev->wlan_cfg_ctx);
2200 	int i;
2201 
2202 	for (i = 0; i < max_mac_rings; i++) {
2203 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
2204 			 "%s: pdev_id %d mac_id %d\n",
2205 			 __func__, pdev->pdev_id, i);
2206 		if (dp_srng_setup(soc, &pdev->rx_mac_buf_ring[i],
2207 			 RXDMA_BUF, 1, i, RXDMA_BUF_RING_SIZE)) {
2208 			QDF_TRACE(QDF_MODULE_ID_DP,
2209 				 QDF_TRACE_LEVEL_ERROR,
2210 				 FL("failed rx mac ring setup"));
2211 			return QDF_STATUS_E_FAILURE;
2212 		}
2213 	}
2214 	return QDF_STATUS_SUCCESS;
2215 }
2216 #else
2217 static int dp_rxdma_ring_setup(struct dp_soc *soc,
2218 	 struct dp_pdev *pdev)
2219 {
2220 	return QDF_STATUS_SUCCESS;
2221 }
2222 #endif
2223 
2224 /**
2225  * dp_dscp_tid_map_setup(): Initialize the dscp-tid maps
2226  * @pdev - DP_PDEV handle
2227  *
2228  * Return: void
2229  */
2230 static inline void
2231 dp_dscp_tid_map_setup(struct dp_pdev *pdev)
2232 {
	uint8_t map_id;

2234 	for (map_id = 0; map_id < DP_MAX_TID_MAPS; map_id++) {
2235 		qdf_mem_copy(pdev->dscp_tid_map[map_id], default_dscp_tid_map,
2236 				sizeof(default_dscp_tid_map));
2237 	}
2238 	for (map_id = 0; map_id < HAL_MAX_HW_DSCP_TID_MAPS; map_id++) {
2239 		hal_tx_set_dscp_tid_map(pdev->soc->hal_soc,
2240 				pdev->dscp_tid_map[map_id],
2241 				map_id);
2242 	}
2243 }
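
/*
 * Example (values from default_dscp_tid_map at the top of this file):
 * DSCP 0b001000 (CS1, map index 8) resolves to TID 1, and the same host
 * table is mirrored into each of the HAL_MAX_HW_DSCP_TID_MAPS hardware
 * map registers through hal_tx_set_dscp_tid_map() above.
 */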
2244 
2245 #ifdef QCA_SUPPORT_SON
2246 /**
2247  * dp_mark_peer_inact(): Update peer inactivity status
 * @peer_handle - datapath peer handle
 * @inactive - inactivity status to be set
2249  *
2250  * Return: void
2251  */
2252 void dp_mark_peer_inact(void *peer_handle, bool inactive)
2253 {
2254 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
2255 	struct dp_pdev *pdev;
2256 	struct dp_soc *soc;
2257 	bool inactive_old;
2258 
2259 	if (!peer)
2260 		return;
2261 
2262 	pdev = peer->vdev->pdev;
2263 	soc = pdev->soc;
2264 
2265 	inactive_old = peer->peer_bs_inact_flag == 1;
2266 	if (!inactive)
2267 		peer->peer_bs_inact = soc->pdev_bs_inact_reload;
2268 	peer->peer_bs_inact_flag = inactive ? 1 : 0;
2269 
2270 	if (inactive_old != inactive) {
		/*
2272 		 * Note: a node lookup can happen in RX datapath context
2273 		 * when a node changes from inactive to active (at most once
2274 		 * per inactivity timeout threshold)
2275 		 */
2276 		if (soc->cdp_soc.ol_ops->record_act_change) {
2277 			soc->cdp_soc.ol_ops->record_act_change(pdev->osif_pdev,
2278 					peer->mac_addr.raw, !inactive);
2279 		}
2280 	}
2281 }
2282 
2283 /**
2284  * dp_txrx_peer_find_inact_timeout_handler(): Inactivity timeout function
2285  *
2286  * Periodically checks the inactivity status
2287  */
2288 static os_timer_func(dp_txrx_peer_find_inact_timeout_handler)
2289 {
2290 	struct dp_pdev *pdev;
2291 	struct dp_vdev *vdev;
2292 	struct dp_peer *peer;
2293 	struct dp_soc *soc;
2294 	int i;
2295 
2296 	OS_GET_TIMER_ARG(soc, struct dp_soc *);
2297 
2298 	qdf_spin_lock(&soc->peer_ref_mutex);
2299 
	for (i = 0; i < soc->pdev_count; i++) {
		pdev = soc->pdev_list[i];
		TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
			if (vdev->opmode != wlan_op_mode_ap)
				continue;

			TAILQ_FOREACH(peer, &vdev->peer_list,
					peer_list_elem) {
				if (!peer->authorize) {
					/*
					 * Inactivity check is only
					 * interested in connected nodes
					 */
					continue;
				}
				if (peer->peer_bs_inact >
						soc->pdev_bs_inact_reload) {
					/*
					 * This check ensures we do not
					 * wait extra long due to the
					 * potential race condition
					 */
					peer->peer_bs_inact =
						soc->pdev_bs_inact_reload;
				}
				if (peer->peer_bs_inact > 0) {
					/* Do not let it wrap around */
					peer->peer_bs_inact--;
				}
				if (peer->peer_bs_inact == 0)
					dp_mark_peer_inact(peer, true);
			}
		}
	}
2330 
2331 	qdf_spin_unlock(&soc->peer_ref_mutex);
2332 	qdf_timer_mod(&soc->pdev_bs_inact_timer,
2333 		      soc->pdev_bs_inact_interval * 1000);
2334 }
2335 
2336 
2337 /**
2338  * dp_free_inact_timer(): free inact timer
 * @soc - DP SOC handle
 *
 * Return: void
2342  */
2343 void dp_free_inact_timer(struct dp_soc *soc)
2344 {
2345 	qdf_timer_free(&soc->pdev_bs_inact_timer);
2346 }
2347 #else
2348 
2349 void dp_mark_peer_inact(void *peer, bool inactive)
2350 {
2351 	return;
2352 }
2353 
2354 void dp_free_inact_timer(struct dp_soc *soc)
2355 {
2356 	return;
2357 }
2358 
2359 #endif
2360 
2361 #ifdef IPA_OFFLOAD
2362 /**
2363  * dp_setup_ipa_rx_refill_buf_ring - Setup second Rx refill buffer ring
2364  * @soc: data path instance
2365  * @pdev: core txrx pdev context
2366  *
2367  * Return: QDF_STATUS_SUCCESS: success
 *         QDF_STATUS_E_FAILURE: Error return
2369  */
2370 static int dp_setup_ipa_rx_refill_buf_ring(struct dp_soc *soc,
2371 					   struct dp_pdev *pdev)
2372 {
2373 	/* Setup second Rx refill buffer ring */
2374 	if (dp_srng_setup(soc, &pdev->rx_refill_buf_ring2, RXDMA_BUF,
2375 			  IPA_RX_REFILL_BUF_RING_IDX,
2376 			  pdev->pdev_id, RXDMA_REFILL_RING_SIZE)) {
2377 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2378 			FL("dp_srng_setup failed second rx refill ring"));
2379 		return QDF_STATUS_E_FAILURE;
2380 	}
2381 	return QDF_STATUS_SUCCESS;
2382 }
2383 
2384 /**
2385  * dp_cleanup_ipa_rx_refill_buf_ring - Cleanup second Rx refill buffer ring
2386  * @soc: data path instance
2387  * @pdev: core txrx pdev context
2388  *
2389  * Return: void
2390  */
2391 static void dp_cleanup_ipa_rx_refill_buf_ring(struct dp_soc *soc,
2392 					      struct dp_pdev *pdev)
2393 {
2394 	dp_srng_cleanup(soc, &pdev->rx_refill_buf_ring2, RXDMA_BUF,
2395 			IPA_RX_REFILL_BUF_RING_IDX);
2396 }
2397 
2398 #else
2399 
2400 static int dp_setup_ipa_rx_refill_buf_ring(struct dp_soc *soc,
2401 					   struct dp_pdev *pdev)
2402 {
2403 	return QDF_STATUS_SUCCESS;
2404 }
2405 
2406 static void dp_cleanup_ipa_rx_refill_buf_ring(struct dp_soc *soc,
2407 					      struct dp_pdev *pdev)
2408 {
2409 }
2410 
2411 #endif
2412 
2413 /*
2414 * dp_pdev_attach_wifi3() - attach txrx pdev
* @txrx_soc: Datapath SOC handle
* @ctrl_pdev: Opaque PDEV object
2417 * @htc_handle: HTC handle for host-target interface
2418 * @qdf_osdev: QDF OS device
2419 * @pdev_id: PDEV ID
2420 *
2421 * Return: DP PDEV handle on success, NULL on failure
2422 */
2423 static struct cdp_pdev *dp_pdev_attach_wifi3(struct cdp_soc_t *txrx_soc,
2424 	struct cdp_cfg *ctrl_pdev,
2425 	HTC_HANDLE htc_handle, qdf_device_t qdf_osdev, uint8_t pdev_id)
2426 {
2427 	int tx_ring_size;
2428 	int tx_comp_ring_size;
2429 
2430 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
2431 	struct dp_pdev *pdev = qdf_mem_malloc(sizeof(*pdev));
2432 
2433 	if (!pdev) {
2434 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2435 			FL("DP PDEV memory allocation failed"));
2436 		goto fail0;
2437 	}
2438 
2439 	pdev->wlan_cfg_ctx = wlan_cfg_pdev_attach();
2440 
2441 	if (!pdev->wlan_cfg_ctx) {
2442 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2443 			FL("pdev cfg_attach failed"));
2444 
2445 		qdf_mem_free(pdev);
2446 		goto fail0;
2447 	}
2448 
2449 	/*
2450 	 * set nss pdev config based on soc config
2451 	 */
2452 	wlan_cfg_set_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx,
2453 			(wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx) & (1 << pdev_id)));
2454 
2455 	pdev->soc = soc;
2456 	pdev->osif_pdev = ctrl_pdev;
2457 	pdev->pdev_id = pdev_id;
2458 	soc->pdev_list[pdev_id] = pdev;
2459 	soc->pdev_count++;
2460 
2461 	TAILQ_INIT(&pdev->vdev_list);
2462 	pdev->vdev_count = 0;
2463 
2464 	qdf_spinlock_create(&pdev->tx_mutex);
2465 	qdf_spinlock_create(&pdev->neighbour_peer_mutex);
2466 	TAILQ_INIT(&pdev->neighbour_peers_list);
2467 
2468 	if (dp_soc_cmn_setup(soc)) {
2469 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2470 			FL("dp_soc_cmn_setup failed"));
2471 		goto fail1;
2472 	}
2473 
2474 	/* Setup per PDEV TCL rings if configured */
2475 	if (wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
2476 		tx_ring_size =
2477 			wlan_cfg_tx_ring_size(soc->wlan_cfg_ctx);
2478 		tx_comp_ring_size =
2479 			wlan_cfg_tx_comp_ring_size(soc->wlan_cfg_ctx);
2480 
2481 		if (dp_srng_setup(soc, &soc->tcl_data_ring[pdev_id], TCL_DATA,
2482 			pdev_id, pdev_id, tx_ring_size)) {
2483 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2484 				FL("dp_srng_setup failed for tcl_data_ring"));
2485 			goto fail1;
2486 		}
2487 		if (dp_srng_setup(soc, &soc->tx_comp_ring[pdev_id],
2488 			WBM2SW_RELEASE, pdev_id, pdev_id, tx_comp_ring_size)) {
2489 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2490 				FL("dp_srng_setup failed for tx_comp_ring"));
2491 			goto fail1;
2492 		}
2493 		soc->num_tcl_data_rings++;
2494 	}
2495 
2496 	/* Tx specific init */
2497 	if (dp_tx_pdev_attach(pdev)) {
2498 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2499 			FL("dp_tx_pdev_attach failed"));
2500 		goto fail1;
2501 	}
2502 
2503 	/* Setup per PDEV REO rings if configured */
2504 	if (wlan_cfg_per_pdev_rx_ring(soc->wlan_cfg_ctx)) {
2505 		if (dp_srng_setup(soc, &soc->reo_dest_ring[pdev_id], REO_DST,
2506 			pdev_id, pdev_id, REO_DST_RING_SIZE)) {
2507 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				FL("dp_srng_setup failed for reo_dest_ring"));
2509 			goto fail1;
2510 		}
2511 		soc->num_reo_dest_rings++;
2512 
2513 	}
2514 	if (dp_srng_setup(soc, &pdev->rx_refill_buf_ring, RXDMA_BUF, 0, pdev_id,
2515 		RXDMA_REFILL_RING_SIZE)) {
2516 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2517 			 FL("dp_srng_setup failed rx refill ring"));
2518 		goto fail1;
2519 	}
2520 
2521 	if (dp_rxdma_ring_setup(soc, pdev)) {
2522 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2523 			 FL("RXDMA ring config failed"));
2524 		goto fail1;
2525 	}
2526 
2527 	if (dp_srng_setup(soc, &pdev->rxdma_mon_buf_ring, RXDMA_MONITOR_BUF, 0,
2528 		pdev_id, RXDMA_MONITOR_BUF_RING_SIZE)) {
2529 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2530 			FL("dp_srng_setup failed for rxdma_mon_buf_ring"));
2531 		goto fail1;
2532 	}
2533 
2534 	if (dp_srng_setup(soc, &pdev->rxdma_mon_dst_ring, RXDMA_MONITOR_DST, 0,
2535 		pdev_id, RXDMA_MONITOR_DST_RING_SIZE)) {
2536 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2537 			FL("dp_srng_setup failed for rxdma_mon_dst_ring"));
2538 		goto fail1;
2539 	}
2540 
2541 
2542 	if (dp_srng_setup(soc, &pdev->rxdma_mon_status_ring,
2543 		RXDMA_MONITOR_STATUS, 0, pdev_id,
2544 		RXDMA_MONITOR_STATUS_RING_SIZE)) {
2545 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2546 			FL("dp_srng_setup failed for rxdma_mon_status_ring"));
2547 		goto fail1;
2548 	}
2549 
2550 	if (dp_srng_setup(soc, &pdev->rxdma_mon_desc_ring,
2551 		RXDMA_MONITOR_DESC, 0, pdev_id, RXDMA_MONITOR_DESC_RING_SIZE)) {
2552 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2553 			"dp_srng_setup failed for rxdma_mon_desc_ring\n");
2554 		goto fail1;
2555 	}
2556 
2557 	if (wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx)) {
2558 		if (dp_srng_setup(soc, &pdev->rxdma_err_dst_ring[0], RXDMA_DST,
2559 				  0, pdev_id, RXDMA_ERR_DST_RING_SIZE)) {
2560 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2561 				FL("dp_srng_setup failed for rxdma_err_dst_ring"));
2562 			goto fail1;
2563 		}
2564 	}
2565 
2566 	if (dp_setup_ipa_rx_refill_buf_ring(soc, pdev))
2567 		goto fail1;
2568 
2569 	if (dp_ipa_ring_resource_setup(soc, pdev))
2570 		goto fail1;
2571 
2572 	if (dp_ipa_uc_attach(soc, pdev) != QDF_STATUS_SUCCESS) {
2573 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2574 			FL("dp_ipa_uc_attach failed"));
2575 		goto fail1;
2576 	}
2577 
2578 	/* Rx specific init */
2579 	if (dp_rx_pdev_attach(pdev)) {
2580 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2581 			FL("dp_rx_pdev_attach failed"));
2582 		goto fail0;
2583 	}
2584 	DP_STATS_INIT(pdev);
2585 
2586 	/* Monitor filter init */
2587 	pdev->mon_filter_mode = MON_FILTER_ALL;
2588 	pdev->fp_mgmt_filter = FILTER_MGMT_ALL;
2589 	pdev->fp_ctrl_filter = FILTER_CTRL_ALL;
2590 	pdev->fp_data_filter = FILTER_DATA_ALL;
2591 	pdev->mo_mgmt_filter = FILTER_MGMT_ALL;
2592 	pdev->mo_ctrl_filter = FILTER_CTRL_ALL;
2593 	pdev->mo_data_filter = FILTER_DATA_ALL;
2594 
2595 #ifndef CONFIG_WIN
2596 	/* MCL */
2597 	dp_local_peer_id_pool_init(pdev);
2598 #endif
2599 	dp_dscp_tid_map_setup(pdev);
2600 
2601 	/* Rx monitor mode specific init */
2602 	if (dp_rx_pdev_mon_attach(pdev)) {
2603 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
				"dp_rx_pdev_mon_attach failed\n");
2605 		goto fail1;
2606 	}
2607 
2608 	if (dp_wdi_event_attach(pdev)) {
2609 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
				"dp_wdi_event_attach failed\n");
2611 		goto fail1;
2612 	}
2613 
2614 	/* set the reo destination during initialization */
2615 	pdev->reo_dest = pdev->pdev_id + 1;
2616 
2617 	return (struct cdp_pdev *)pdev;
2618 
2619 fail1:
2620 	dp_pdev_detach_wifi3((struct cdp_pdev *)pdev, 0);
2621 
2622 fail0:
2623 	return NULL;
2624 }
2625 
2626 /*
* dp_rxdma_ring_cleanup() - clean up the RX DMA rings
2628 * @soc: data path SoC handle
2629 * @pdev: Physical device handle
2630 *
2631 * Return: void
2632 */
2633 #ifdef QCA_HOST2FW_RXBUF_RING
2634 static void dp_rxdma_ring_cleanup(struct dp_soc *soc,
2635 	 struct dp_pdev *pdev)
2636 {
2637 	int max_mac_rings =
2638 		 wlan_cfg_get_num_mac_rings(pdev->wlan_cfg_ctx);
2639 	int i;
2640 
	/* clamp to the number of MAC rings actually supported */
	max_mac_rings = max_mac_rings < MAX_RX_MAC_RINGS ?
				max_mac_rings : MAX_RX_MAC_RINGS;
	for (i = 0; i < max_mac_rings; i++)
2644 		dp_srng_cleanup(soc, &pdev->rx_mac_buf_ring[i],
2645 			 RXDMA_BUF, 1);
2646 
2647 	qdf_timer_free(&soc->mon_reap_timer);
2648 }
2649 #else
2650 static void dp_rxdma_ring_cleanup(struct dp_soc *soc,
2651 	 struct dp_pdev *pdev)
2652 {
2653 }
2654 #endif
2655 
2656 /*
2657  * dp_neighbour_peers_detach() - Detach neighbour peers(nac clients)
2658  * @pdev: device object
2659  *
2660  * Return: void
2661  */
2662 static void dp_neighbour_peers_detach(struct dp_pdev *pdev)
2663 {
2664 	struct dp_neighbour_peer *peer = NULL;
2665 	struct dp_neighbour_peer *temp_peer = NULL;
2666 
2667 	TAILQ_FOREACH_SAFE(peer, &pdev->neighbour_peers_list,
2668 			neighbour_peer_list_elem, temp_peer) {
2669 		/* delete this peer from the list */
2670 		TAILQ_REMOVE(&pdev->neighbour_peers_list,
2671 				peer, neighbour_peer_list_elem);
2672 		qdf_mem_free(peer);
2673 	}
2674 
2675 	qdf_spinlock_destroy(&pdev->neighbour_peer_mutex);
2676 }
2677 
2678 /*
2679 * dp_pdev_detach_wifi3() - detach txrx pdev
2680 * @txrx_pdev: Datapath PDEV handle
2681 * @force: Force detach
*
* Return: void
*/
2684 static void dp_pdev_detach_wifi3(struct cdp_pdev *txrx_pdev, int force)
2685 {
2686 	struct dp_pdev *pdev = (struct dp_pdev *)txrx_pdev;
2687 	struct dp_soc *soc = pdev->soc;
2688 	qdf_nbuf_t curr_nbuf, next_nbuf;
2689 
2690 	dp_wdi_event_detach(pdev);
2691 
2692 	dp_tx_pdev_detach(pdev);
2693 
2694 	if (wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
2695 		dp_srng_cleanup(soc, &soc->tcl_data_ring[pdev->pdev_id],
2696 			TCL_DATA, pdev->pdev_id);
2697 		dp_srng_cleanup(soc, &soc->tx_comp_ring[pdev->pdev_id],
2698 			WBM2SW_RELEASE, pdev->pdev_id);
2699 	}
2700 
2701 	dp_pktlogmod_exit(pdev);
2702 
2703 	dp_rx_pdev_detach(pdev);
2704 
2705 	dp_rx_pdev_mon_detach(pdev);
2706 
2707 	dp_neighbour_peers_detach(pdev);
2708 	qdf_spinlock_destroy(&pdev->tx_mutex);
2709 
2710 	dp_ipa_uc_detach(soc, pdev);
2711 
2712 	dp_cleanup_ipa_rx_refill_buf_ring(soc, pdev);
2713 
2714 	/* Cleanup per PDEV REO rings if configured */
2715 	if (wlan_cfg_per_pdev_rx_ring(soc->wlan_cfg_ctx)) {
2716 		dp_srng_cleanup(soc, &soc->reo_dest_ring[pdev->pdev_id],
2717 			REO_DST, pdev->pdev_id);
2718 	}
2719 
2720 	dp_srng_cleanup(soc, &pdev->rx_refill_buf_ring, RXDMA_BUF, 0);
2721 
2722 	dp_rxdma_ring_cleanup(soc, pdev);
2723 
2724 	dp_srng_cleanup(soc, &pdev->rxdma_mon_buf_ring, RXDMA_MONITOR_BUF, 0);
2725 
2726 	dp_srng_cleanup(soc, &pdev->rxdma_mon_dst_ring, RXDMA_MONITOR_DST, 0);
2727 
2728 	dp_srng_cleanup(soc, &pdev->rxdma_mon_status_ring,
2729 		RXDMA_MONITOR_STATUS, 0);
2730 
2731 	dp_srng_cleanup(soc, &pdev->rxdma_mon_desc_ring,
2732 		RXDMA_MONITOR_DESC, 0);
2733 
2734 	if (wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx)) {
2735 		dp_srng_cleanup(soc, &pdev->rxdma_err_dst_ring[0], RXDMA_DST, 0);
2736 	} else {
2737 		int i;
2738 
2739 		for (i = 0; i < MAX_RX_MAC_RINGS; i++)
2740 			dp_srng_cleanup(soc, &pdev->rxdma_err_dst_ring[i],
2741 				RXDMA_DST, 0);
2742 	}
2743 
2744 	curr_nbuf = pdev->invalid_peer_head_msdu;
2745 	while (curr_nbuf) {
2746 		next_nbuf = qdf_nbuf_next(curr_nbuf);
2747 		qdf_nbuf_free(curr_nbuf);
2748 		curr_nbuf = next_nbuf;
2749 	}
2750 
2751 	soc->pdev_list[pdev->pdev_id] = NULL;
2752 	soc->pdev_count--;
2753 	wlan_cfg_pdev_detach(pdev->wlan_cfg_ctx);
2754 	qdf_mem_free(pdev->dp_txrx_handle);
2755 	qdf_mem_free(pdev);
2756 }
2757 
2758 /*
2759  * dp_reo_desc_freelist_destroy() - Flush REO descriptors from deferred freelist
2760  * @soc: DP SOC handle
2761  */
2762 static inline void dp_reo_desc_freelist_destroy(struct dp_soc *soc)
2763 {
2764 	struct reo_desc_list_node *desc;
2765 	struct dp_rx_tid *rx_tid;
2766 
2767 	qdf_spin_lock_bh(&soc->reo_desc_freelist_lock);
2768 	while (qdf_list_remove_front(&soc->reo_desc_freelist,
2769 		(qdf_list_node_t **)&desc) == QDF_STATUS_SUCCESS) {
2770 		rx_tid = &desc->rx_tid;
2771 		qdf_mem_unmap_nbytes_single(soc->osdev,
2772 			rx_tid->hw_qdesc_paddr,
2773 			QDF_DMA_BIDIRECTIONAL,
2774 			rx_tid->hw_qdesc_alloc_size);
2775 		qdf_mem_free(rx_tid->hw_qdesc_vaddr_unaligned);
2776 		qdf_mem_free(desc);
2777 	}
2778 	qdf_spin_unlock_bh(&soc->reo_desc_freelist_lock);
2779 	qdf_list_destroy(&soc->reo_desc_freelist);
2780 	qdf_spinlock_destroy(&soc->reo_desc_freelist_lock);
2781 }
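
/*
 * Note: each deferred REO queue descriptor is DMA-unmapped before its
 * unaligned backing allocation is freed, and the freelist lock is only
 * destroyed after the list has been fully drained and destroyed.
 */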
2782 
2783 /*
2784  * dp_soc_detach_wifi3() - Detach txrx SOC
2785  * @txrx_soc: DP SOC handle, struct cdp_soc_t is first element of struct dp_soc.
2786  */
2787 static void dp_soc_detach_wifi3(void *txrx_soc)
2788 {
2789 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
2790 	int i;
2791 
2792 	qdf_atomic_set(&soc->cmn_init_done, 0);
2793 
2794 	qdf_flush_work(&soc->htt_stats.work);
2795 	qdf_disable_work(&soc->htt_stats.work);
2796 
2797 	/* Free pending htt stats messages */
2798 	qdf_nbuf_queue_free(&soc->htt_stats.msg);
2799 
2800 	dp_free_inact_timer(soc);
2801 
2802 	for (i = 0; i < MAX_PDEV_CNT; i++) {
2803 		if (soc->pdev_list[i])
2804 			dp_pdev_detach_wifi3(
2805 				(struct cdp_pdev *)soc->pdev_list[i], 1);
2806 	}
2807 
2808 	dp_peer_find_detach(soc);
2809 
2810 	/* TBD: Call Tx and Rx cleanup functions to free buffers and
2811 	 * SW descriptors
2812 	 */
2813 
2814 	/* Free the ring memories */
2815 	/* Common rings */
2816 	dp_srng_cleanup(soc, &soc->wbm_desc_rel_ring, SW2WBM_RELEASE, 0);
2817 
2818 	dp_tx_soc_detach(soc);
2819 	/* Tx data rings */
2820 	if (!wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
2821 		for (i = 0; i < soc->num_tcl_data_rings; i++) {
2822 			dp_srng_cleanup(soc, &soc->tcl_data_ring[i],
2823 				TCL_DATA, i);
2824 			dp_srng_cleanup(soc, &soc->tx_comp_ring[i],
2825 				WBM2SW_RELEASE, i);
2826 		}
2827 	}
2828 
2829 	/* TCL command and status rings */
2830 	dp_srng_cleanup(soc, &soc->tcl_cmd_ring, TCL_CMD, 0);
2831 	dp_srng_cleanup(soc, &soc->tcl_status_ring, TCL_STATUS, 0);
2832 
2833 	/* Rx data rings */
2834 	if (!wlan_cfg_per_pdev_rx_ring(soc->wlan_cfg_ctx)) {
2835 		soc->num_reo_dest_rings =
2836 			wlan_cfg_num_reo_dest_rings(soc->wlan_cfg_ctx);
2837 		for (i = 0; i < soc->num_reo_dest_rings; i++) {
2838 			/* TODO: Get number of rings and ring sizes
2839 			 * from wlan_cfg
2840 			 */
2841 			dp_srng_cleanup(soc, &soc->reo_dest_ring[i],
2842 				REO_DST, i);
2843 		}
2844 	}
2845 	/* REO reinjection ring */
2846 	dp_srng_cleanup(soc, &soc->reo_reinject_ring, REO_REINJECT, 0);
2847 
2848 	/* Rx release ring */
2849 	dp_srng_cleanup(soc, &soc->rx_rel_ring, WBM2SW_RELEASE, 0);
2850 
2851 	/* Rx exception ring */
2852 	/* TODO: Better to store ring_type and ring_num in
2853 	 * dp_srng during setup
2854 	 */
2855 	dp_srng_cleanup(soc, &soc->reo_exception_ring, REO_EXCEPTION, 0);
2856 
2857 	/* REO command and status rings */
2858 	dp_srng_cleanup(soc, &soc->reo_cmd_ring, REO_CMD, 0);
2859 	dp_srng_cleanup(soc, &soc->reo_status_ring, REO_STATUS, 0);
2860 	dp_hw_link_desc_pool_cleanup(soc);
2861 
2862 	qdf_spinlock_destroy(&soc->peer_ref_mutex);
2863 	qdf_spinlock_destroy(&soc->htt_stats.lock);
2864 
2865 	htt_soc_detach(soc->htt_handle);
2866 
2867 	dp_reo_cmdlist_destroy(soc);
2868 	qdf_spinlock_destroy(&soc->rx.reo_cmd_lock);
2869 	dp_reo_desc_freelist_destroy(soc);
2870 
2871 	wlan_cfg_soc_detach(soc->wlan_cfg_ctx);
2872 
2873 	dp_soc_wds_detach(soc);
2874 	qdf_spinlock_destroy(&soc->ast_lock);
2875 
2876 	qdf_mem_free(soc);
2877 }
2878 
2879 /*
2880  * dp_rxdma_ring_config() - configure the RX DMA rings
2881  *
2882  * This function is used to configure the MAC rings.
 * On MCL, the host provides buffers in the Host2FW ring;
 * FW refills (copies) buffers to the ring and updates
 * ring_idx in the register
2886  *
2887  * @soc: data path SoC handle
2888  *
2889  * Return: void
2890  */
2891 #ifdef QCA_HOST2FW_RXBUF_RING
2892 static void dp_rxdma_ring_config(struct dp_soc *soc)
2893 {
2894 	int i;
2895 
2896 	for (i = 0; i < MAX_PDEV_CNT; i++) {
2897 		struct dp_pdev *pdev = soc->pdev_list[i];
2898 
2899 		if (pdev) {
2900 			int mac_id = 0;
2901 			int j;
2902 			bool dbs_enable = 0;
2903 			int max_mac_rings =
2904 				 wlan_cfg_get_num_mac_rings
2905 				(pdev->wlan_cfg_ctx);
2906 
2907 			htt_srng_setup(soc->htt_handle, 0,
2908 				 pdev->rx_refill_buf_ring.hal_srng,
2909 				 RXDMA_BUF);
2910 
2911 			if (pdev->rx_refill_buf_ring2.hal_srng)
2912 				htt_srng_setup(soc->htt_handle, 0,
2913 					pdev->rx_refill_buf_ring2.hal_srng,
2914 					RXDMA_BUF);
2915 
			if (soc->cdp_soc.ol_ops->is_hw_dbs_2x2_capable) {
				dbs_enable = soc->cdp_soc.ol_ops->
					is_hw_dbs_2x2_capable(soc->ctrl_psoc);
			}
2921 
2922 			if (dbs_enable) {
2923 				QDF_TRACE(QDF_MODULE_ID_TXRX,
2924 				QDF_TRACE_LEVEL_ERROR,
2925 				FL("DBS enabled max_mac_rings %d\n"),
2926 					 max_mac_rings);
2927 			} else {
2928 				max_mac_rings = 1;
2929 				QDF_TRACE(QDF_MODULE_ID_TXRX,
2930 					 QDF_TRACE_LEVEL_ERROR,
2931 					 FL("DBS disabled, max_mac_rings %d\n"),
2932 					 max_mac_rings);
2933 			}
2934 
2935 			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
2936 					 FL("pdev_id %d max_mac_rings %d\n"),
2937 					 pdev->pdev_id, max_mac_rings);
2938 
2939 			for (j = 0; j < max_mac_rings; j++) {
2940 				QDF_TRACE(QDF_MODULE_ID_TXRX,
2941 					 QDF_TRACE_LEVEL_ERROR,
2942 					 FL("mac_id %d\n"), mac_id);
2943 				htt_srng_setup(soc->htt_handle, mac_id,
2944 					 pdev->rx_mac_buf_ring[j]
2945 						.hal_srng,
2946 					 RXDMA_BUF);
2947 				htt_srng_setup(soc->htt_handle, mac_id,
2948 					pdev->rxdma_err_dst_ring[j]
2949 						.hal_srng,
2950 					RXDMA_DST);
2951 				mac_id++;
2952 			}
2953 
2954 			/* Configure monitor mode rings */
2955 			htt_srng_setup(soc->htt_handle, i,
2956 					pdev->rxdma_mon_buf_ring.hal_srng,
2957 					RXDMA_MONITOR_BUF);
2958 
2959 			htt_srng_setup(soc->htt_handle, i,
2960 					pdev->rxdma_mon_dst_ring.hal_srng,
2961 					RXDMA_MONITOR_DST);
2962 
2963 			htt_srng_setup(soc->htt_handle, i,
2964 				pdev->rxdma_mon_status_ring.hal_srng,
2965 				RXDMA_MONITOR_STATUS);
2966 
2967 			htt_srng_setup(soc->htt_handle, i,
2968 				pdev->rxdma_mon_desc_ring.hal_srng,
2969 				RXDMA_MONITOR_DESC);
2970 		}
2971 	}
2972 
2973 	/*
2974 	 * Timer to reap rxdma status rings.
2975 	 * Needed until we enable ppdu end interrupts
2976 	 */
2977 	qdf_timer_init(soc->osdev, &soc->mon_reap_timer,
2978 			dp_service_mon_rings, (void *)soc,
2979 			QDF_TIMER_TYPE_WAKE_APPS);
2980 	soc->reap_timer_init = 1;
2981 }
2982 #else
2983 static void dp_rxdma_ring_config(struct dp_soc *soc)
2984 {
2985 	int i;
2986 
2987 	for (i = 0; i < MAX_PDEV_CNT; i++) {
2988 		struct dp_pdev *pdev = soc->pdev_list[i];
2989 
2990 		if (pdev) {
2991 			int ring_idx = dp_get_ring_id_for_mac_id(soc, i);
2992 
2993 			htt_srng_setup(soc->htt_handle, i,
2994 				pdev->rx_refill_buf_ring.hal_srng, RXDMA_BUF);
2995 
2996 			htt_srng_setup(soc->htt_handle, i,
2997 					pdev->rxdma_mon_buf_ring.hal_srng,
2998 					RXDMA_MONITOR_BUF);
2999 			htt_srng_setup(soc->htt_handle, i,
3000 					pdev->rxdma_mon_dst_ring.hal_srng,
3001 					RXDMA_MONITOR_DST);
3002 			htt_srng_setup(soc->htt_handle, i,
3003 				pdev->rxdma_mon_status_ring.hal_srng,
3004 				RXDMA_MONITOR_STATUS);
3005 			htt_srng_setup(soc->htt_handle, i,
3006 				pdev->rxdma_mon_desc_ring.hal_srng,
3007 				RXDMA_MONITOR_DESC);
3008 			htt_srng_setup(soc->htt_handle, i,
3009 				pdev->rxdma_err_dst_ring[ring_idx].hal_srng,
3010 				RXDMA_DST);
3011 		}
3012 	}
3013 }
3014 #endif
3015 
3016 /*
3017  * dp_soc_attach_target_wifi3() - SOC initialization in the target
 * @cdp_soc: Datapath SOC handle
 *
 * Return: 0 on success
 */
3020 static int dp_soc_attach_target_wifi3(struct cdp_soc_t *cdp_soc)
3021 {
3022 	struct dp_soc *soc = (struct dp_soc *)cdp_soc;
3023 
3024 	htt_soc_attach_target(soc->htt_handle);
3025 
3026 	dp_rxdma_ring_config(soc);
3027 
3028 	DP_STATS_INIT(soc);
3029 
3030 	/* initialize work queue for stats processing */
3031 	qdf_create_work(0, &soc->htt_stats.work, htt_t2h_stats_handler, soc);
3032 
3033 	return 0;
3034 }
3035 
/*
 * dp_soc_get_nss_cfg_wifi3() - SOC get nss config
 * @cdp_soc: Datapath SOC handle
 *
 * Return: nss config
 */
3040 static int dp_soc_get_nss_cfg_wifi3(struct cdp_soc_t *cdp_soc)
3041 {
3042 	struct dp_soc *dsoc = (struct dp_soc *)cdp_soc;
3043 	return wlan_cfg_get_dp_soc_nss_cfg(dsoc->wlan_cfg_ctx);
3044 }

/*
 * dp_soc_set_nss_cfg_wifi3() - SOC set nss config
 * @cdp_soc: Datapath SOC handle
 * @config: nss config
 *
 * Return: void
 */
3050 static void dp_soc_set_nss_cfg_wifi3(struct cdp_soc_t *cdp_soc, int config)
3051 {
3052 	struct dp_soc *dsoc = (struct dp_soc *)cdp_soc;
3053 	struct wlan_cfg_dp_soc_ctxt *wlan_cfg_ctx = dsoc->wlan_cfg_ctx;
3054 
3055 	wlan_cfg_set_dp_soc_nss_cfg(wlan_cfg_ctx, config);
3056 
3057 	/*
3058 	 * TODO: masked out based on the per offloaded radio
3059 	 */
3060 	if (config == dp_nss_cfg_dbdc) {
3061 		wlan_cfg_set_num_tx_desc_pool(wlan_cfg_ctx, 0);
3062 		wlan_cfg_set_num_tx_ext_desc_pool(wlan_cfg_ctx, 0);
3063 		wlan_cfg_set_num_tx_desc(wlan_cfg_ctx, 0);
3064 		wlan_cfg_set_num_tx_ext_desc(wlan_cfg_ctx, 0);
3065 	}
3066 
3067 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3068 				FL("nss-wifi<0> nss config is enabled"));
3069 }
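
/*
 * Example (illustrative): a DBDC platform offloading both radios passes
 * config == dp_nss_cfg_dbdc, which zeroes the host Tx descriptor pools
 * above since NSS owns transmit; a single offloaded radio is instead
 * reflected per pdev via the (nss_cfg & (1 << pdev_id)) check in
 * dp_pdev_attach_wifi3().
 */
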
3070 /*
3071 * dp_vdev_attach_wifi3() - attach txrx vdev
3072 * @txrx_pdev: Datapath PDEV handle
3073 * @vdev_mac_addr: MAC address of the virtual interface
3074 * @vdev_id: VDEV Id
* @op_mode: VDEV operating mode
3076 *
3077 * Return: DP VDEV handle on success, NULL on failure
3078 */
3079 static struct cdp_vdev *dp_vdev_attach_wifi3(struct cdp_pdev *txrx_pdev,
3080 	uint8_t *vdev_mac_addr, uint8_t vdev_id, enum wlan_op_mode op_mode)
3081 {
3082 	struct dp_pdev *pdev = (struct dp_pdev *)txrx_pdev;
3083 	struct dp_soc *soc = pdev->soc;
3084 	struct dp_vdev *vdev = qdf_mem_malloc(sizeof(*vdev));
3085 	int tx_ring_size;
3086 
3087 	if (!vdev) {
3088 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3089 			FL("DP VDEV memory allocation failed"));
3090 		goto fail0;
3091 	}
3092 
3093 	vdev->pdev = pdev;
3094 	vdev->vdev_id = vdev_id;
3095 	vdev->opmode = op_mode;
3096 	vdev->osdev = soc->osdev;
3097 
3098 	vdev->osif_rx = NULL;
3099 	vdev->osif_rsim_rx_decap = NULL;
3100 	vdev->osif_get_key = NULL;
3101 	vdev->osif_rx_mon = NULL;
3102 	vdev->osif_tx_free_ext = NULL;
3103 	vdev->osif_vdev = NULL;
3104 
3105 	vdev->delete.pending = 0;
3106 	vdev->safemode = 0;
3107 	vdev->drop_unenc = 1;
3108 	vdev->sec_type = cdp_sec_type_none;
3109 #ifdef notyet
3110 	vdev->filters_num = 0;
3111 #endif
3112 
3113 	qdf_mem_copy(
3114 		&vdev->mac_addr.raw[0], vdev_mac_addr, OL_TXRX_MAC_ADDR_LEN);
3115 
3116 	vdev->tx_encap_type = wlan_cfg_pkt_type(soc->wlan_cfg_ctx);
3117 	vdev->rx_decap_type = wlan_cfg_pkt_type(soc->wlan_cfg_ctx);
3118 	vdev->dscp_tid_map_id = 0;
3119 	vdev->mcast_enhancement_en = 0;
3120 	tx_ring_size = wlan_cfg_tx_ring_size(soc->wlan_cfg_ctx);
3121 
3122 	/* TODO: Initialize default HTT meta data that will be used in
3123 	 * TCL descriptors for packets transmitted from this VDEV
3124 	 */
3125 
3126 	TAILQ_INIT(&vdev->peer_list);
3127 
3128 	/* add this vdev into the pdev's list */
3129 	TAILQ_INSERT_TAIL(&pdev->vdev_list, vdev, vdev_list_elem);
3130 	pdev->vdev_count++;
3131 
3132 	dp_tx_vdev_attach(vdev);
3133 
3134 	if (QDF_STATUS_SUCCESS != dp_tx_flow_pool_map_handler(pdev, vdev_id,
3135 					FLOW_TYPE_VDEV, vdev_id, tx_ring_size))
3136 		goto fail1;
3137 
3138 
3139 	if ((soc->intr_mode == DP_INTR_POLL) &&
3140 			wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx) != 0) {
3141 		if (pdev->vdev_count == 1)
3142 			qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
3143 	}
3144 
3145 	dp_lro_hash_setup(soc);
3146 
3147 	/* LRO */
3148 	if (wlan_cfg_is_lro_enabled(soc->wlan_cfg_ctx) &&
3149 		wlan_op_mode_sta == vdev->opmode)
3150 		vdev->lro_enable = true;
3151 
3152 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
3153 		 "LRO: vdev_id %d lro_enable %d", vdev_id, vdev->lro_enable);
3154 
3155 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3156 		"Created vdev %pK (%pM)", vdev, vdev->mac_addr.raw);
3157 	DP_STATS_INIT(vdev);
3158 
3159 	if (wlan_op_mode_sta == vdev->opmode)
3160 		dp_peer_create_wifi3((struct cdp_vdev *)vdev,
3161 							vdev->mac_addr.raw);
3162 
3163 	return (struct cdp_vdev *)vdev;
3164 
3165 fail1:
3166 	dp_tx_vdev_detach(vdev);
3167 	qdf_mem_free(vdev);
3168 fail0:
3169 	return NULL;
3170 }
3171 
/**
 * dp_vdev_register_wifi3() - Register VDEV operations from osif layer
 * @vdev_handle: Datapath VDEV handle
 * @osif_vdev: OSIF vdev handle
 * @txrx_ops: Tx and Rx operations
 *
 * Return: void
 */
3180 static void dp_vdev_register_wifi3(struct cdp_vdev *vdev_handle,
3181 	void *osif_vdev,
3182 	struct ol_txrx_ops *txrx_ops)
3183 {
3184 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
3185 	vdev->osif_vdev = osif_vdev;
3186 	vdev->osif_rx = txrx_ops->rx.rx;
3187 	vdev->osif_rsim_rx_decap = txrx_ops->rx.rsim_rx_decap;
3188 	vdev->osif_get_key = txrx_ops->get_key;
3189 	vdev->osif_rx_mon = txrx_ops->rx.mon;
3190 	vdev->osif_tx_free_ext = txrx_ops->tx.tx_free_ext;
3191 #ifdef notyet
3192 #if ATH_SUPPORT_WAPI
3193 	vdev->osif_check_wai = txrx_ops->rx.wai_check;
3194 #endif
3195 #endif
3196 #ifdef UMAC_SUPPORT_PROXY_ARP
3197 	vdev->osif_proxy_arp = txrx_ops->proxy_arp;
3198 #endif
3199 	vdev->me_convert = txrx_ops->me_convert;
3200 
3201 	/* TODO: Enable the following once Tx code is integrated */
3202 	if (vdev->mesh_vdev)
3203 		txrx_ops->tx.tx = dp_tx_send_mesh;
3204 	else
3205 		txrx_ops->tx.tx = dp_tx_send;
3206 
3207 	txrx_ops->tx.tx_exception = dp_tx_send_exception;
3208 
3209 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW,
3210 		"DP Vdev Register success");
3211 }
3212 
3213 /**
 * dp_vdev_flush_peers() - Forcibly flush peers of vdev
3215  * @vdev: Datapath VDEV handle
3216  *
3217  * Return: void
3218  */
3219 static void dp_vdev_flush_peers(struct dp_vdev *vdev)
3220 {
3221 	struct dp_pdev *pdev = vdev->pdev;
3222 	struct dp_soc *soc = pdev->soc;
3223 	struct dp_peer *peer;
3224 	uint16_t *peer_ids;
3225 	uint8_t i = 0, j = 0;
3226 
3227 	peer_ids = qdf_mem_malloc(soc->max_peers * sizeof(peer_ids[0]));
3228 	if (!peer_ids) {
3229 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3230 			"DP alloc failure - unable to flush peers");
3231 		return;
3232 	}
3233 
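	/*
	 * Collect the peer ids under peer_ref_mutex, then run the unmap
	 * handlers outside it, since dp_rx_peer_unmap_handler() can drop
	 * the last peer reference and is assumed to take the mutex itself.
	 */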
3234 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
3235 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
3236 		for (i = 0; i < MAX_NUM_PEER_ID_PER_PEER; i++)
3237 			if (peer->peer_ids[i] != HTT_INVALID_PEER)
3238 				if (j < soc->max_peers)
3239 					peer_ids[j++] = peer->peer_ids[i];
3240 	}
3241 	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
3242 
3243 	for (i = 0; i < j ; i++)
3244 		dp_rx_peer_unmap_handler(soc, peer_ids[i]);
3245 
3246 	qdf_mem_free(peer_ids);
3247 
3248 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
3249 		FL("Flushed peers for vdev object %pK "), vdev);
3250 }
3251 
3252 /*
3253  * dp_vdev_detach_wifi3() - Detach txrx vdev
3254  * @txrx_vdev:		Datapath VDEV handle
3255  * @callback:		Callback OL_IF on completion of detach
3256  * @cb_context:	Callback context
3257  *
3258  */
3259 static void dp_vdev_detach_wifi3(struct cdp_vdev *vdev_handle,
3260 	ol_txrx_vdev_delete_cb callback, void *cb_context)
3261 {
3262 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
3263 	struct dp_pdev *pdev = vdev->pdev;
3264 	struct dp_soc *soc = pdev->soc;
3265 
3266 	/* preconditions */
3267 	qdf_assert(vdev);
3268 
3269 	/* remove the vdev from its parent pdev's list */
3270 	TAILQ_REMOVE(&pdev->vdev_list, vdev, vdev_list_elem);
3271 
3272 	if (wlan_op_mode_sta == vdev->opmode)
3273 		dp_peer_delete_wifi3(vdev->vap_bss_peer, 0);
3274 
3275 	/*
3276 	 * If Target is hung, flush all peers before detaching vdev
3277 	 * this will free all references held due to missing
3278 	 * unmap commands from Target
3279 	 */
3280 	if (hif_get_target_status(soc->hif_handle) == TARGET_STATUS_RESET)
3281 		dp_vdev_flush_peers(vdev);
3282 
3283 	/*
3284 	 * Use peer_ref_mutex while accessing peer_list, in case
3285 	 * a peer is in the process of being removed from the list.
3286 	 */
3287 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
3288 	/* check that the vdev has no peers allocated */
3289 	if (!TAILQ_EMPTY(&vdev->peer_list)) {
3290 		/* debug print - will be removed later */
3291 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_WARN,
			FL("not deleting vdev object %pK (%pM) "
			"until deletion finishes for all its peers"),
3294 			vdev, vdev->mac_addr.raw);
3295 		/* indicate that the vdev needs to be deleted */
3296 		vdev->delete.pending = 1;
3297 		vdev->delete.callback = callback;
3298 		vdev->delete.context = cb_context;
3299 		qdf_spin_unlock_bh(&soc->peer_ref_mutex);
3300 		return;
3301 	}
3302 	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
3303 
3304 	dp_tx_flow_pool_unmap_handler(pdev, vdev->vdev_id, FLOW_TYPE_VDEV,
3305 		vdev->vdev_id);
3306 	dp_tx_vdev_detach(vdev);
3307 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
3308 		FL("deleting vdev object %pK (%pM)"), vdev, vdev->mac_addr.raw);
3309 
3310 	qdf_mem_free(vdev);
3311 
3312 	if (callback)
3313 		callback(cb_context);
3314 }
3315 
3316 /*
3317  * dp_peer_create_wifi3() - attach txrx peer
3318  * @txrx_vdev: Datapath VDEV handle
3319  * @peer_mac_addr: Peer MAC address
3320  *
 * Return: DP peer handle on success, NULL on failure
3322  */
3323 static void *dp_peer_create_wifi3(struct cdp_vdev *vdev_handle,
3324 		uint8_t *peer_mac_addr)
3325 {
3326 	struct dp_peer *peer;
3327 	int i;
3328 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
3329 	struct dp_pdev *pdev;
3330 	struct dp_soc *soc;
3331 
3332 	/* preconditions */
3333 	qdf_assert(vdev);
3334 	qdf_assert(peer_mac_addr);
3335 
3336 	pdev = vdev->pdev;
3337 	soc = pdev->soc;
3338 #ifdef notyet
3339 	peer = (struct dp_peer *)qdf_mempool_alloc(soc->osdev,
3340 		soc->mempool_ol_ath_peer);
3341 #else
3342 	peer = (struct dp_peer *)qdf_mem_malloc(sizeof(*peer));
3343 #endif
3344 
3345 	if (!peer)
3346 		return NULL; /* failure */
3347 
3348 	qdf_mem_zero(peer, sizeof(struct dp_peer));
3349 
3350 	TAILQ_INIT(&peer->ast_entry_list);
3351 
3352 	/* store provided params */
3353 	peer->vdev = vdev;
3354 
3355 	dp_peer_add_ast(soc, peer, peer_mac_addr, CDP_TXRX_AST_TYPE_STATIC, 0);
3356 
3357 	qdf_spinlock_create(&peer->peer_info_lock);
3358 
3359 	qdf_mem_copy(
3360 		&peer->mac_addr.raw[0], peer_mac_addr, OL_TXRX_MAC_ADDR_LEN);
3361 
	/* TODO: See if rx_opt_proc is really required */
3363 	peer->rx_opt_proc = soc->rx_opt_proc;
3364 
3365 	/* initialize the peer_id */
3366 	for (i = 0; i < MAX_NUM_PEER_ID_PER_PEER; i++)
3367 		peer->peer_ids[i] = HTT_INVALID_PEER;
3368 
3369 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
3370 
3371 	qdf_atomic_init(&peer->ref_cnt);
3372 
3373 	/* keep one reference for attach */
3374 	qdf_atomic_inc(&peer->ref_cnt);
3375 
3376 	/* add this peer into the vdev's list */
3377 	if (wlan_op_mode_sta == vdev->opmode)
3378 		TAILQ_INSERT_HEAD(&vdev->peer_list, peer, peer_list_elem);
3379 	else
3380 		TAILQ_INSERT_TAIL(&vdev->peer_list, peer, peer_list_elem);
3381 
3382 	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
3383 
3384 	/* TODO: See if hash based search is required */
3385 	dp_peer_find_hash_add(soc, peer);
3386 
3387 	/* Initialize the peer state */
3388 	peer->state = OL_TXRX_PEER_STATE_DISC;
3389 
3390 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
3391 		"vdev %pK created peer %pK (%pM) ref_cnt: %d",
3392 		vdev, peer, peer->mac_addr.raw,
3393 		qdf_atomic_read(&peer->ref_cnt));
3394 	/*
	 * For every peer map message, set bss_peer when the peer MAC
	 * matches the vdev MAC address
3396 	 */
	if (memcmp(peer->mac_addr.raw, vdev->mac_addr.raw,
			OL_TXRX_MAC_ADDR_LEN) == 0) {
3398 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
3399 			"vdev bss_peer!!!!");
3400 		peer->bss_peer = 1;
3401 		vdev->vap_bss_peer = peer;
3402 	}
3403 
3404 
3405 #ifndef CONFIG_WIN
3406 	dp_local_peer_id_alloc(pdev, peer);
3407 #endif
3408 	DP_STATS_INIT(peer);
3409 	return (void *)peer;
3410 }
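
/*
 * Note: the attach reference taken above keeps the peer alive until the
 * delete path drops it; for a STA vdev the peer is inserted at the head
 * of the vdev peer list, while other modes append at the tail.
 */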
3411 
3412 /*
3413  * dp_peer_setup_wifi3() - initialize the peer
3414  * @vdev_hdl: virtual device object
 * @peer_hdl: Peer object
3416  *
3417  * Return: void
3418  */
3419 static void dp_peer_setup_wifi3(struct cdp_vdev *vdev_hdl, void *peer_hdl)
3420 {
3421 	struct dp_peer *peer = (struct dp_peer *)peer_hdl;
3422 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_hdl;
3423 	struct dp_pdev *pdev;
3424 	struct dp_soc *soc;
3425 	bool hash_based = 0;
3426 	enum cdp_host_reo_dest_ring reo_dest;
3427 
3428 	/* preconditions */
3429 	qdf_assert(vdev);
3430 	qdf_assert(peer);
3431 
3432 	pdev = vdev->pdev;
3433 	soc = pdev->soc;
3434 
3435 	peer->last_assoc_rcvd = 0;
3436 	peer->last_disassoc_rcvd = 0;
3437 	peer->last_deauth_rcvd = 0;
3438 
3439 	/*
3440 	 * hash based steering is disabled for Radios which are offloaded
3441 	 * to NSS
3442 	 */
3443 	if (!wlan_cfg_get_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx))
3444 		hash_based = wlan_cfg_is_rx_hash_enabled(soc->wlan_cfg_ctx);
3445 
3446 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
3447 		FL("hash based steering for pdev: %d is %d\n"),
3448 		pdev->pdev_id, hash_based);
3449 
3450 	/*
	 * The check below ensures the proper reo_dest ring is chosen for
	 * cases where the toeplitz hash cannot be generated (ex: non TCP/UDP)
3453 	 */
3454 	reo_dest = pdev->reo_dest;
3455 
3456 	if (soc->cdp_soc.ol_ops->peer_set_default_routing) {
3457 		/* TODO: Check the destination ring number to be passed to FW */
3458 		soc->cdp_soc.ol_ops->peer_set_default_routing(
3459 			pdev->osif_pdev, peer->mac_addr.raw,
3460 			 peer->vdev->vdev_id, hash_based, reo_dest);
3461 	}
3462 
3463 	dp_peer_rx_init(pdev, peer);
3464 	return;
3465 }
3466 
3467 /*
3468  * dp_set_vdev_tx_encap_type() - set the encap type of the vdev
3469  * @vdev_handle: virtual device object
 * @val: encap type
3471  *
3472  * Return: void
3473  */
3474 static void dp_set_vdev_tx_encap_type(struct cdp_vdev *vdev_handle,
3475 	 enum htt_cmn_pkt_type val)
3476 {
3477 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
3478 	vdev->tx_encap_type = val;
3479 }
3480 
3481 /*
3482  * dp_set_vdev_rx_decap_type() - set the decap type of the vdev
3483  * @vdev_handle: virtual device object
 * @val: decap type
3485  *
3486  * Return: void
3487  */
3488 static void dp_set_vdev_rx_decap_type(struct cdp_vdev *vdev_handle,
3489 	 enum htt_cmn_pkt_type val)
3490 {
3491 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
3492 	vdev->rx_decap_type = val;
3493 }
3494 
3495 /*
3496  * dp_set_pdev_reo_dest() - set the reo destination ring for this pdev
3497  * @pdev_handle: physical device object
3498  * @val: reo destination ring index (1 - 4)
3499  *
3500  * Return: void
3501  */
3502 static void dp_set_pdev_reo_dest(struct cdp_pdev *pdev_handle,
3503 	 enum cdp_host_reo_dest_ring val)
3504 {
3505 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
3506 
3507 	if (pdev)
3508 		pdev->reo_dest = val;
3509 }
3510 
3511 /*
3512  * dp_get_pdev_reo_dest() - get the reo destination for this pdev
3513  * @pdev_handle: physical device object
3514  *
3515  * Return: reo destination ring index
3516  */
3517 static enum cdp_host_reo_dest_ring
3518 dp_get_pdev_reo_dest(struct cdp_pdev *pdev_handle)
3519 {
3520 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
3521 
3522 	if (pdev)
3523 		return pdev->reo_dest;
3524 	else
3525 		return cdp_host_reo_dest_ring_unknown;
3526 }
3527 
3528 #ifdef QCA_SUPPORT_SON
3529 static void dp_son_peer_authorize(struct dp_peer *peer)
3530 {
3531 	struct dp_soc *soc;
3532 	soc = peer->vdev->pdev->soc;
3533 	peer->peer_bs_inact_flag = 0;
3534 	peer->peer_bs_inact = soc->pdev_bs_inact_reload;
3535 	return;
3536 }
3537 #else
3538 static void dp_son_peer_authorize(struct dp_peer *peer)
3539 {
3540 	return;
3541 }
3542 #endif
3543 /*
3544  * dp_set_filter_neighbour_peers() - set filter neighbour peers for smart mesh
3545  * @pdev_handle: device object
3546  * @val: value to be set
3547  *
3548  * Return: void
 * Return: 0 on success
3550 static int dp_set_filter_neighbour_peers(struct cdp_pdev *pdev_handle,
3551 	 uint32_t val)
3552 {
3553 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
3554 
3555 	/* Enable/Disable smart mesh filtering. This flag will be checked
3556 	 * during rx processing to check if packets are from NAC clients.
3557 	 */
3558 	pdev->filter_neighbour_peers = val;
3559 	return 0;
3560 }
3561 
3562 /*
3563  * dp_update_filter_neighbour_peers() - set neighbour peers(nac clients)
 * dp_update_filter_neighbour_peers() - set neighbour peer (NAC client)
 * address for smart mesh filtering
 * @pdev_handle: device object
 * @cmd: Add/Del command
 * @macaddr: NAC client mac address
 *
 * Return: 1 on success, 0 on failure
3570  */
3571 static int dp_update_filter_neighbour_peers(struct cdp_pdev *pdev_handle,
3572 	 uint32_t cmd, uint8_t *macaddr)
3573 {
3574 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
3575 	struct dp_neighbour_peer *peer = NULL;
3576 
3577 	if (!macaddr)
3578 		goto fail0;
3579 
3580 	/* Store address of NAC (neighbour peer) which will be checked
3581 	 * against TA of received packets.
3582 	 */
3583 	if (cmd == DP_NAC_PARAM_ADD) {
3584 		peer = (struct dp_neighbour_peer *) qdf_mem_malloc(
3585 				sizeof(*peer));
3586 
3587 		if (!peer) {
3588 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3589 				FL("DP neighbour peer node memory allocation failed"));
3590 			goto fail0;
3591 		}
3592 
3593 		qdf_mem_copy(&peer->neighbour_peers_macaddr.raw[0],
3594 			macaddr, DP_MAC_ADDR_LEN);
3595 
3596 
3597 		qdf_spin_lock_bh(&pdev->neighbour_peer_mutex);
3598 		/* add this neighbour peer into the list */
3599 		TAILQ_INSERT_TAIL(&pdev->neighbour_peers_list, peer,
3600 				neighbour_peer_list_elem);
3601 		qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex);
3602 
3603 		return 1;
3604 
3605 	} else if (cmd == DP_NAC_PARAM_DEL) {
3606 		qdf_spin_lock_bh(&pdev->neighbour_peer_mutex);
3607 		TAILQ_FOREACH(peer, &pdev->neighbour_peers_list,
3608 				neighbour_peer_list_elem) {
3609 			if (!qdf_mem_cmp(&peer->neighbour_peers_macaddr.raw[0],
3610 				macaddr, DP_MAC_ADDR_LEN)) {
3611 				/* delete this peer from the list */
3612 				TAILQ_REMOVE(&pdev->neighbour_peers_list,
3613 					peer, neighbour_peer_list_elem);
3614 				qdf_mem_free(peer);
3615 				break;
3616 			}
3617 		}
3618 		qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex);
3619 
3620 		return 1;
3621 
3622 	}
3623 
3624 fail0:
3625 	return 0;
3626 }
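
/*
 * Example (a sketch only; "ctrl_pdev" and "nac_mac" are hypothetical):
 * enable smart-mesh filtering, register a NAC client, then remove it
 * when the client is no longer tracked.
 *
 *	uint8_t nac_mac[DP_MAC_ADDR_LEN] = { ... };
 *
 *	dp_set_filter_neighbour_peers(ctrl_pdev, 1);
 *	if (!dp_update_filter_neighbour_peers(ctrl_pdev, DP_NAC_PARAM_ADD,
 *					      nac_mac))
 *		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
 *			FL("NAC client add failed"));
 *	...
 *	dp_update_filter_neighbour_peers(ctrl_pdev, DP_NAC_PARAM_DEL, nac_mac);
 */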
3627 
3628 /*
3629  * dp_get_sec_type() - Get the security type
3630  * @peer:		Datapath peer handle
3631  * @sec_idx:    Security id (mcast, ucast)
3632  *
3633  * return sec_type: Security type
 * Return: Security type
3635 static int dp_get_sec_type(struct cdp_peer *peer, uint8_t sec_idx)
3636 {
3637 	struct dp_peer *dpeer = (struct dp_peer *)peer;
3638 
3639 	return dpeer->security[sec_idx].sec_type;
3640 }
3641 
3642 /*
3643  * dp_peer_authorize() - authorize txrx peer
3644  * @peer_handle:		Datapath peer handle
3645  * @authorize
 * @authorize: flag indicating whether to authorize the peer
 *
 * Return: void
3648 static void dp_peer_authorize(struct cdp_peer *peer_handle, uint32_t authorize)
3649 {
3650 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
3651 	struct dp_soc *soc;
3652 
3653 	if (peer != NULL) {
3654 		soc = peer->vdev->pdev->soc;
3655 		qdf_spin_lock_bh(&soc->peer_ref_mutex);
3656 		dp_son_peer_authorize(peer);
3657 		peer->authorize = authorize ? 1 : 0;
3658 		qdf_spin_unlock_bh(&soc->peer_ref_mutex);
3659 	}
3660 }
3661 
3662 #ifdef QCA_SUPPORT_SON
3663 /*
3664  * dp_txrx_update_inact_threshold() - Update inact timer threshold
3665  * @pdev_handle: Device handle
 * @new_threshold: updated threshold value
 *
 * Return: void
3668  */
3669 static void
3670 dp_txrx_update_inact_threshold(struct cdp_pdev *pdev_handle,
3671 			       u_int16_t new_threshold)
3672 {
3673 	struct dp_vdev *vdev;
3674 	struct dp_peer *peer;
3675 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
3676 	struct dp_soc *soc = pdev->soc;
3677 	u_int16_t old_threshold = soc->pdev_bs_inact_reload;
3678 
3679 	if (old_threshold == new_threshold)
3680 		return;
3681 
3682 	soc->pdev_bs_inact_reload = new_threshold;
3683 
3684 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
3685 	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
3686 		if (vdev->opmode != wlan_op_mode_ap)
3687 			continue;
3688 
3689 		TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
3690 			if (!peer->authorize)
3691 				continue;
3692 
3693 			if (old_threshold - peer->peer_bs_inact >=
3694 					new_threshold) {
3695 				dp_mark_peer_inact((void *)peer, true);
3696 				peer->peer_bs_inact = 0;
3697 			} else {
3698 				peer->peer_bs_inact = new_threshold -
3699 					(old_threshold - peer->peer_bs_inact);
3700 			}
3701 		}
3702 	}
3703 	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
3704 }
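
/*
 * Worked example for the rescaling above: with old_threshold = 10,
 * new_threshold = 4 and peer->peer_bs_inact = 7, the peer has been idle
 * for 10 - 7 = 3 check intervals; 3 < 4, so it stays active and its
 * counter is reloaded to 4 - 3 = 1. A peer with peer_bs_inact = 5 has
 * already been idle 10 - 5 = 5 >= 4 intervals and is marked inactive.
 */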
3705 
3706 /**
3707  * dp_txrx_reset_inact_count(): Reset inact count
3708  * @pdev_handle - device handle
3709  *
3710  * Return: void
3711  */
3712 static void
3713 dp_txrx_reset_inact_count(struct cdp_pdev *pdev_handle)
3714 {
3715 	struct dp_vdev *vdev = NULL;
3716 	struct dp_peer *peer = NULL;
3717 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
3718 	struct dp_soc *soc = pdev->soc;
3719 
3720 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
3721 	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
3722 		if (vdev->opmode != wlan_op_mode_ap)
3723 			continue;
3724 
3725 		TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
3726 			if (!peer->authorize)
3727 				continue;
3728 
3729 			peer->peer_bs_inact = soc->pdev_bs_inact_reload;
3730 		}
3731 	}
3732 	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
3733 }
3734 
3735 /**
3736  * dp_set_inact_params(): set inactivity params
3737  * @pdev_handle - device handle
3738  * @inact_check_interval - inactivity interval
3739  * @inact_normal - Inactivity normal
3740  * @inact_overload - Inactivity overload
3741  *
3742  * Return: bool
3743  */
3744 bool dp_set_inact_params(struct cdp_pdev *pdev_handle,
3745 			 u_int16_t inact_check_interval,
3746 			 u_int16_t inact_normal, u_int16_t inact_overload)
3747 {
3748 	struct dp_soc *soc;
3749 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
3750 
3751 	if (!pdev)
3752 		return false;
3753 
3754 	soc = pdev->soc;
3755 	if (!soc)
3756 		return false;
3757 
3758 	soc->pdev_bs_inact_interval = inact_check_interval;
3759 	soc->pdev_bs_inact_normal = inact_normal;
3760 	soc->pdev_bs_inact_overload = inact_overload;
3761 
3762 	dp_txrx_update_inact_threshold((struct cdp_pdev *)pdev,
3763 					soc->pdev_bs_inact_normal);
3764 
3765 	return true;
3766 }
3767 
3768 /**
3769  * dp_start_inact_timer(): Inactivity timer start
3770  * @pdev_handle - device handle
3771  * @enable - Inactivity timer start/stop
3772  *
3773  * Return: bool
3774  */
3775 bool dp_start_inact_timer(struct cdp_pdev *pdev_handle, bool enable)
3776 {
3777 	struct dp_soc *soc;
3778 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
3779 
3780 	if (!pdev)
3781 		return false;
3782 
3783 	soc = pdev->soc;
3784 	if (!soc)
3785 		return false;
3786 
3787 	if (enable) {
3788 		dp_txrx_reset_inact_count((struct cdp_pdev *)pdev);
3789 		qdf_timer_mod(&soc->pdev_bs_inact_timer,
3790 			      soc->pdev_bs_inact_interval * 1000);
3791 	} else {
3792 		qdf_timer_stop(&soc->pdev_bs_inact_timer);
3793 	}
3794 
3795 	return true;
3796 }
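
/*
 * Example (a sketch only; "ctrl_pdev" is a hypothetical handle): poll
 * every 15 seconds, marking an authorized peer inactive after 10 idle
 * intervals (6 under overload). Note that pdev_bs_inact_interval is kept
 * in seconds and converted to milliseconds for qdf_timer_mod() above.
 *
 *	if (dp_set_inact_params(ctrl_pdev, 15, 10, 6))
 *		dp_start_inact_timer(ctrl_pdev, true);
 */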
3797 
3798 /**
3799  * dp_set_overload(): Set inactivity overload
3800  * @pdev_handle - device handle
3801  * @overload - overload status
3802  *
3803  * Return: void
3804  */
3805 void dp_set_overload(struct cdp_pdev *pdev_handle, bool overload)
3806 {
3807 	struct dp_soc *soc;
3808 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
3809 
3810 	if (!pdev)
3811 		return;
3812 
3813 	soc = pdev->soc;
3814 	if (!soc)
3815 		return;
3816 
3817 	dp_txrx_update_inact_threshold((struct cdp_pdev *)pdev,
3818 			overload ? soc->pdev_bs_inact_overload :
3819 			soc->pdev_bs_inact_normal);
3820 }
3821 
3822 /**
3823  * dp_peer_is_inact(): check whether peer is inactive
3824  * @peer_handle - datapath peer handle
3825  *
3826  * Return: bool
3827  */
3828 bool dp_peer_is_inact(void *peer_handle)
3829 {
3830 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
3831 
3832 	if (!peer)
3833 		return false;
3834 
3835 	return peer->peer_bs_inact_flag == 1;
3836 }
3837 
3838 /**
3839  * dp_init_inact_timer: initialize the inact timer
3840  * @soc - SOC handle
3841  *
3842  * Return: void
3843  */
3844 void dp_init_inact_timer(struct dp_soc *soc)
3845 {
3846 	qdf_timer_init(soc->osdev, &soc->pdev_bs_inact_timer,
3847 		dp_txrx_peer_find_inact_timeout_handler,
3848 		(void *)soc, QDF_TIMER_TYPE_WAKE_APPS);
3849 }
3850 
3851 #else
3852 
3853 bool dp_set_inact_params(struct cdp_pdev *pdev, u_int16_t inact_check_interval,
3854 			 u_int16_t inact_normal, u_int16_t inact_overload)
3855 {
3856 	return false;
3857 }
3858 
3859 bool dp_start_inact_timer(struct cdp_pdev *pdev, bool enable)
3860 {
3861 	return false;
3862 }
3863 
3864 void dp_set_overload(struct cdp_pdev *pdev, bool overload)
3865 {
3866 	return;
3867 }
3868 
3869 void dp_init_inact_timer(struct dp_soc *soc)
3870 {
3871 	return;
3872 }
3873 
3874 bool dp_peer_is_inact(void *peer)
3875 {
3876 	return false;
3877 }
3878 #endif
3879 
3880 /*
3881  * dp_peer_unref_delete() - unref and delete peer
3882  * @peer_handle:		Datapath peer handle
 *
 * Return: void
3884  */
3885 void dp_peer_unref_delete(void *peer_handle)
3886 {
3887 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
3888 	struct dp_peer *bss_peer = NULL;
3889 	struct dp_vdev *vdev = peer->vdev;
3890 	struct dp_pdev *pdev = vdev->pdev;
3891 	struct dp_soc *soc = pdev->soc;
3892 	struct dp_peer *tmppeer;
3893 	int found = 0;
3894 	uint16_t peer_id;
3895 	uint16_t vdev_id;
3896 
3897 	/*
3898 	 * Hold the lock all the way from checking if the peer ref count
3899 	 * is zero until the peer references are removed from the hash
3900 	 * table and vdev list (if the peer ref count is zero).
3901 	 * This protects against a new HL tx operation starting to use the
3902 	 * peer object just after this function concludes it's done being used.
3903 	 * Furthermore, the lock needs to be held while checking whether the
3904 	 * vdev's list of peers is empty, to make sure that list is not modified
3905 	 * concurrently with the empty check.
3906 	 */
3907 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
3908 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
3909 		  "%s: peer %pK ref_cnt(before decrement): %d\n", __func__,
3910 		  peer, qdf_atomic_read(&peer->ref_cnt));
3911 	if (qdf_atomic_dec_and_test(&peer->ref_cnt)) {
3912 		peer_id = peer->peer_ids[0];
3913 		vdev_id = vdev->vdev_id;
3914 
3915 		/*
3916 		 * Make sure that the reference to the peer in
3917 		 * peer object map is removed
3918 		 */
3919 		if (peer_id != HTT_INVALID_PEER)
3920 			soc->peer_id_to_obj_map[peer_id] = NULL;
3921 
3922 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
3923 			"Deleting peer %pK (%pM)", peer, peer->mac_addr.raw);
3924 
3925 		/* remove the reference to the peer from the hash table */
3926 		dp_peer_find_hash_remove(soc, peer);
3927 
3928 		TAILQ_FOREACH(tmppeer, &peer->vdev->peer_list, peer_list_elem) {
3929 			if (tmppeer == peer) {
3930 				found = 1;
3931 				break;
3932 			}
3933 		}
3934 		if (found) {
3935 			TAILQ_REMOVE(&peer->vdev->peer_list, peer,
3936 				peer_list_elem);
3937 		} else {
			/* Ignoring the remove operation as peer not found */
3939 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_WARN,
3940 				"peer %pK not found in vdev (%pK)->peer_list:%pK",
3941 				peer, vdev, &peer->vdev->peer_list);
3942 		}
3943 
3944 		/* cleanup the peer data */
3945 		dp_peer_cleanup(vdev, peer);
3946 
3947 		/* check whether the parent vdev has no peers left */
3948 		if (TAILQ_EMPTY(&vdev->peer_list)) {
3949 			/*
3950 			 * Now that there are no references to the peer, we can
3951 			 * release the peer reference lock.
3952 			 */
3953 			qdf_spin_unlock_bh(&soc->peer_ref_mutex);
3954 			/*
3955 			 * Check if the parent vdev was waiting for its peers
3956 			 * to be deleted, in order for it to be deleted too.
3957 			 */
3958 			if (vdev->delete.pending) {
3959 				ol_txrx_vdev_delete_cb vdev_delete_cb =
3960 					vdev->delete.callback;
3961 				void *vdev_delete_context =
3962 					vdev->delete.context;
3963 
3964 				QDF_TRACE(QDF_MODULE_ID_DP,
3965 					QDF_TRACE_LEVEL_INFO_HIGH,
3966 					FL("deleting vdev object %pK (%pM)"
3967 					" - its last peer is done"),
3968 					vdev, vdev->mac_addr.raw);
3969 				/* all peers are gone, go ahead and delete it */
3970 				dp_tx_flow_pool_unmap_handler(pdev, vdev_id,
3971 								FLOW_TYPE_VDEV,
3972 								vdev_id);
3973 				dp_tx_vdev_detach(vdev);
3974 				QDF_TRACE(QDF_MODULE_ID_DP,
3975 					QDF_TRACE_LEVEL_INFO_HIGH,
3976 					FL("deleting vdev object %pK (%pM)"),
3977 					vdev, vdev->mac_addr.raw);
3978 
3979 				qdf_mem_free(vdev);
3980 				vdev = NULL;
3981 				if (vdev_delete_cb)
3982 					vdev_delete_cb(vdev_delete_context);
3983 			}
3984 		} else {
3985 			qdf_spin_unlock_bh(&soc->peer_ref_mutex);
3986 		}
3987 
3988 		if (vdev) {
3989 			if (vdev->vap_bss_peer == peer) {
3990 				vdev->vap_bss_peer = NULL;
3991 			}
3992 		}
3993 
3994 		if (soc->cdp_soc.ol_ops->peer_unref_delete) {
3995 			soc->cdp_soc.ol_ops->peer_unref_delete(pdev->osif_pdev,
3996 					vdev_id, peer->mac_addr.raw);
3997 		}
3998 
3999 		if (!vdev || !vdev->vap_bss_peer) {
4000 			goto free_peer;
4001 		}
4002 
4003 #ifdef notyet
4004 		qdf_mempool_free(soc->osdev, soc->mempool_ol_ath_peer, peer);
4005 #else
4006 		bss_peer = vdev->vap_bss_peer;
4007 		DP_UPDATE_STATS(bss_peer, peer);
4008 
4009 free_peer:
4010 		qdf_mem_free(peer);
4011 
4012 #endif
4013 	} else {
4014 		qdf_spin_unlock_bh(&soc->peer_ref_mutex);
4015 	}
4016 }
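
/*
 * Typical caller pattern (a sketch, mirroring
 * dp_aggregate_pdev_ctrl_frames_stats() below): take a reference before
 * using a peer outside soc->peer_ref_mutex and balance it with exactly
 * one dp_peer_unref_delete(); the final unref tears the peer down.
 *
 *	qdf_atomic_inc(&peer->ref_cnt);
 *	... use peer ...
 *	dp_peer_unref_delete(peer);
 */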
4017 
4018 /*
 * dp_peer_delete_wifi3() - Delete txrx peer
4020  * @peer_handle: Datapath peer handle
4021  * @bitmap: bitmap indicating special handling of request.
4022  *
4023  */
4024 static void dp_peer_delete_wifi3(void *peer_handle, uint32_t bitmap)
4025 {
4026 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
4027 
4028 	/* redirect the peer's rx delivery function to point to a
4029 	 * discard func
4030 	 */
4031 	peer->rx_opt_proc = dp_rx_discard;
4032 
4033 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
4034 		FL("peer %pK (%pM)"),  peer, peer->mac_addr.raw);
4035 
4036 #ifndef CONFIG_WIN
4037 	dp_local_peer_id_free(peer->vdev->pdev, peer);
4038 #endif
4039 	qdf_spinlock_destroy(&peer->peer_info_lock);
4040 
4041 	/*
4042 	 * Remove the reference added during peer_attach.
4043 	 * The peer will still be left allocated until the
4044 	 * PEER_UNMAP message arrives to remove the other
4045 	 * reference, added by the PEER_MAP message.
4046 	 */
4047 	dp_peer_unref_delete(peer_handle);
4048 }
4049 
4050 /*
 * dp_get_vdev_mac_addr_wifi3() - Get vdev MAC address
 * @pvdev: Datapath vdev handle
 *
 * Return: pointer to the vdev MAC address
4054  */
4055 static uint8 *dp_get_vdev_mac_addr_wifi3(struct cdp_vdev *pvdev)
4056 {
4057 	struct dp_vdev *vdev = (struct dp_vdev *)pvdev;
4058 	return vdev->mac_addr.raw;
4059 }
4060 
4061 /*
 * dp_vdev_set_wds() - Enable/Disable WDS on the vdev
 * @vdev_handle: DP VDEV handle
 * @val: WDS enable value
 *
 * Return: 0 on success
4067  */
4068 static int dp_vdev_set_wds(void *vdev_handle, uint32_t val)
4069 {
4070 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
4071 
4072 	vdev->wds_enabled = val;
4073 	return 0;
4074 }
4075 
4076 /*
 * dp_get_vdev_from_vdev_id_wifi3() - Get vdev handle from vdev_id
 * @dev: Datapath pdev handle
 * @vdev_id: vdev id
 *
 * Return: vdev handle on success, NULL if no match is found
4080  */
4081 static struct cdp_vdev *dp_get_vdev_from_vdev_id_wifi3(struct cdp_pdev *dev,
4082 						uint8_t vdev_id)
4083 {
4084 	struct dp_pdev *pdev = (struct dp_pdev *)dev;
4085 	struct dp_vdev *vdev = NULL;
4086 
4087 	if (qdf_unlikely(!pdev))
4088 		return NULL;
4089 
4090 	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
4091 		if (vdev->vdev_id == vdev_id)
4092 			break;
4093 	}
4094 
4095 	return (struct cdp_vdev *)vdev;
4096 }
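
/*
 * Example (a sketch only; "ctrl_pdev" and "id" are hypothetical): resolve
 * a vdev by id before issuing a per-vdev operation.
 *
 *	struct cdp_vdev *vdev = dp_get_vdev_from_vdev_id_wifi3(ctrl_pdev, id);
 *
 *	if (vdev)
 *		dp_vdev_set_wds(vdev, 1);
 */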
4097 
4098 static int dp_get_opmode(struct cdp_vdev *vdev_handle)
4099 {
4100 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
4101 
4102 	return vdev->opmode;
4103 }
4104 
4105 static struct cdp_cfg *dp_get_ctrl_pdev_from_vdev_wifi3(struct cdp_vdev *pvdev)
4106 {
4107 	struct dp_vdev *vdev = (struct dp_vdev *)pvdev;
4108 	struct dp_pdev *pdev = vdev->pdev;
4109 
4110 	return (struct cdp_cfg *)pdev->wlan_cfg_ctx;
4111 }
4112 
4113 /**
4114  * dp_reset_monitor_mode() - Disable monitor mode
4115  * @pdev_handle: Datapath PDEV handle
4116  *
4117  * Return: 0 on success, not 0 on failure
4118  */
4119 static int dp_reset_monitor_mode(struct cdp_pdev *pdev_handle)
4120 {
4121 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
4122 	struct htt_rx_ring_tlv_filter htt_tlv_filter;
4123 	struct dp_soc *soc;
4124 	uint8_t pdev_id;
4125 
4126 	pdev_id = pdev->pdev_id;
4127 	soc = pdev->soc;
4128 
4129 	pdev->monitor_vdev = NULL;
4130 	qdf_mem_set(&(htt_tlv_filter), sizeof(htt_tlv_filter), 0x0);
4131 
4132 	htt_h2t_rx_ring_cfg(soc->htt_handle, pdev_id,
4133 		pdev->rxdma_mon_buf_ring.hal_srng,
4134 		RXDMA_MONITOR_BUF, RX_BUFFER_SIZE, &htt_tlv_filter);
4135 
4136 	htt_h2t_rx_ring_cfg(soc->htt_handle, pdev_id,
4137 		pdev->rxdma_mon_status_ring.hal_srng, RXDMA_MONITOR_STATUS,
4138 		RX_BUFFER_SIZE, &htt_tlv_filter);
4139 
4140 	return 0;
4141 }
4142 
4143 /**
4144  * dp_set_nac() - set peer_nac
4145  * @peer_handle: Datapath PEER handle
4146  *
4147  * Return: void
4148  */
4149 static void dp_set_nac(struct cdp_peer *peer_handle)
4150 {
4151 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
4152 
4153 	peer->nac = 1;
4154 }
4155 
4156 /**
4157  * dp_get_tx_pending() - read pending tx
4158  * @pdev_handle: Datapath PDEV handle
4159  *
4160  * Return: outstanding tx
4161  */
4162 static int dp_get_tx_pending(struct cdp_pdev *pdev_handle)
4163 {
4164 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
4165 
4166 	return qdf_atomic_read(&pdev->num_tx_outstanding);
4167 }
4168 
4169 /**
4170  * dp_get_peer_mac_from_peer_id() - get peer mac
4171  * @pdev_handle: Datapath PDEV handle
4172  * @peer_id: Peer ID
4173  * @peer_mac: MAC addr of PEER
4174  *
4175  * Return: void
4176  */
4177 static void dp_get_peer_mac_from_peer_id(struct cdp_pdev *pdev_handle,
4178 	uint32_t peer_id, uint8_t *peer_mac)
4179 {
4180 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
4181 	struct dp_peer *peer;
4182 
4183 	if (pdev && peer_mac) {
4184 		peer = dp_peer_find_by_id(pdev->soc, (uint16_t)peer_id);
4185 		if (peer && peer->mac_addr.raw) {
4186 			qdf_mem_copy(peer_mac, peer->mac_addr.raw,
4187 					DP_MAC_ADDR_LEN);
4188 		}
4189 	}
4190 }
4191 
4192 /**
4193  * dp_vdev_set_monitor_mode() - Set DP VDEV to monitor mode
4194  * @vdev_handle: Datapath VDEV handle
4195  * @smart_monitor: Flag to denote if its smart monitor mode
4196  *
4197  * Return: 0 on success, not 0 on failure
4198  */
4199 static int dp_vdev_set_monitor_mode(struct cdp_vdev *vdev_handle,
4200 		uint8_t smart_monitor)
4201 {
	/* Many monitor VAPs can exist in a system, but only one can be up at
	 * any time
4204 	 */
4205 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
4206 	struct dp_pdev *pdev;
4207 	struct htt_rx_ring_tlv_filter htt_tlv_filter;
4208 	struct dp_soc *soc;
4209 	uint8_t pdev_id;
4210 
4211 	qdf_assert(vdev);
4212 
4213 	pdev = vdev->pdev;
4214 	pdev_id = pdev->pdev_id;
4215 	soc = pdev->soc;
4216 
4217 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_WARN,
4218 		"pdev=%pK, pdev_id=%d, soc=%pK vdev=%pK\n",
4219 		pdev, pdev_id, soc, vdev);
4220 
	/* Check if this pdev already has a monitor_vdev */
	if (pdev->monitor_vdev) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			"monitor vdev already exists, vdev=%pK\n", vdev);
4225 		qdf_assert(vdev);
4226 	}
4227 
4228 	pdev->monitor_vdev = vdev;
4229 
4230 	/* If smart monitor mode, do not configure monitor ring */
4231 	if (smart_monitor)
4232 		return QDF_STATUS_SUCCESS;
4233 
4234 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
4235 		"MODE[%x] FP[%02x|%02x|%02x] MO[%02x|%02x|%02x]\n",
4236 		pdev->mon_filter_mode, pdev->fp_mgmt_filter,
4237 		pdev->fp_ctrl_filter, pdev->fp_data_filter,
4238 		pdev->mo_mgmt_filter, pdev->mo_ctrl_filter,
4239 		pdev->mo_data_filter);
4240 
	/* Defensive init (an assumption, mirroring dp_reset_monitor_mode()
	 * above): zero the filter so any field not set below stays disabled.
	 */
	qdf_mem_set(&(htt_tlv_filter), sizeof(htt_tlv_filter), 0x0);

	htt_tlv_filter.mpdu_start = 1;
4242 	htt_tlv_filter.msdu_start = 1;
4243 	htt_tlv_filter.packet = 1;
4244 	htt_tlv_filter.msdu_end = 1;
4245 	htt_tlv_filter.mpdu_end = 1;
4246 	htt_tlv_filter.packet_header = 1;
4247 	htt_tlv_filter.attention = 1;
4248 	htt_tlv_filter.ppdu_start = 0;
4249 	htt_tlv_filter.ppdu_end = 0;
4250 	htt_tlv_filter.ppdu_end_user_stats = 0;
4251 	htt_tlv_filter.ppdu_end_user_stats_ext = 0;
4252 	htt_tlv_filter.ppdu_end_status_done = 0;
4253 	htt_tlv_filter.header_per_msdu = 1;
4254 	htt_tlv_filter.enable_fp =
4255 		(pdev->mon_filter_mode & MON_FILTER_PASS) ? 1 : 0;
4256 	htt_tlv_filter.enable_md = 0;
4257 	htt_tlv_filter.enable_mo =
4258 		(pdev->mon_filter_mode & MON_FILTER_OTHER) ? 1 : 0;
4259 	htt_tlv_filter.fp_mgmt_filter = pdev->fp_mgmt_filter;
4260 	htt_tlv_filter.fp_ctrl_filter = pdev->fp_ctrl_filter;
4261 	htt_tlv_filter.fp_data_filter = pdev->fp_data_filter;
4262 	htt_tlv_filter.mo_mgmt_filter = pdev->mo_mgmt_filter;
4263 	htt_tlv_filter.mo_ctrl_filter = pdev->mo_ctrl_filter;
4264 	htt_tlv_filter.mo_data_filter = pdev->mo_data_filter;
4265 
4266 	htt_h2t_rx_ring_cfg(soc->htt_handle, pdev_id,
4267 		pdev->rxdma_mon_buf_ring.hal_srng,
4268 		RXDMA_MONITOR_BUF, RX_BUFFER_SIZE, &htt_tlv_filter);
4269 
4270 	htt_tlv_filter.mpdu_start = 1;
4271 	htt_tlv_filter.msdu_start = 1;
4272 	htt_tlv_filter.packet = 0;
4273 	htt_tlv_filter.msdu_end = 1;
4274 	htt_tlv_filter.mpdu_end = 1;
4275 	htt_tlv_filter.packet_header = 1;
4276 	htt_tlv_filter.attention = 1;
4277 	htt_tlv_filter.ppdu_start = 1;
4278 	htt_tlv_filter.ppdu_end = 1;
4279 	htt_tlv_filter.ppdu_end_user_stats = 1;
4280 	htt_tlv_filter.ppdu_end_user_stats_ext = 1;
4281 	htt_tlv_filter.ppdu_end_status_done = 1;
4282 	htt_tlv_filter.header_per_msdu = 0;
4283 	htt_tlv_filter.enable_fp =
4284 		(pdev->mon_filter_mode & MON_FILTER_PASS) ? 1 : 0;
4285 	htt_tlv_filter.enable_md = 0;
4286 	htt_tlv_filter.enable_mo =
4287 		(pdev->mon_filter_mode & MON_FILTER_OTHER) ? 1 : 0;
4288 	htt_tlv_filter.fp_mgmt_filter = pdev->fp_mgmt_filter;
4289 	htt_tlv_filter.fp_ctrl_filter = pdev->fp_ctrl_filter;
4290 	htt_tlv_filter.fp_data_filter = pdev->fp_data_filter;
4291 	htt_tlv_filter.mo_mgmt_filter = pdev->mo_mgmt_filter;
4292 	htt_tlv_filter.mo_ctrl_filter = pdev->mo_ctrl_filter;
4293 	htt_tlv_filter.mo_data_filter = pdev->mo_data_filter;
4294 
4295 	htt_h2t_rx_ring_cfg(soc->htt_handle, pdev_id,
4296 		pdev->rxdma_mon_status_ring.hal_srng, RXDMA_MONITOR_STATUS,
4297 		RX_BUFFER_SIZE, &htt_tlv_filter);
4298 
4299 	return QDF_STATUS_SUCCESS;
4300 }
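
/*
 * Note on the two configurations above: the monitor buffer ring is
 * programmed for per-MSDU TLVs plus packet payload (packet = 1,
 * header_per_msdu = 1, PPDU TLVs off), while the monitor status ring
 * carries only PPDU-level TLVs (packet = 0, ppdu_* = 1) from which the
 * rx monitor path (dp_rx_mon) derives per-PPDU status.
 */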
4301 
4302 /**
4303  * dp_pdev_set_advance_monitor_filter() - Set DP PDEV monitor filter
4304  * @pdev_handle: Datapath PDEV handle
4305  * @filter_val: Flag to select Filter for monitor mode
 *
 * Return: 0 on success, not 0 on failure
4307  */
4308 static int dp_pdev_set_advance_monitor_filter(struct cdp_pdev *pdev_handle,
4309 	struct cdp_monitor_filter *filter_val)
4310 {
	/* Many monitor VAPs can exist in a system, but only one can be up at
	 * any time
4313 	 */
4314 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
4315 	struct dp_vdev *vdev = pdev->monitor_vdev;
4316 	struct htt_rx_ring_tlv_filter htt_tlv_filter;
4317 	struct dp_soc *soc;
4318 	uint8_t pdev_id;
4319 
4320 	pdev_id = pdev->pdev_id;
4321 	soc = pdev->soc;
4322 
4323 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_WARN,
4324 		"pdev=%pK, pdev_id=%d, soc=%pK vdev=%pK\n",
4325 		pdev, pdev_id, soc, vdev);
4326 
	/* Check that this pdev's monitor_vdev exists */
	if (!pdev->monitor_vdev) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			"monitor vdev does not exist, vdev=%pK\n", vdev);
4331 		qdf_assert(vdev);
4332 	}
4333 
4334 	/* update filter mode, type in pdev structure */
4335 	pdev->mon_filter_mode = filter_val->mode;
4336 	pdev->fp_mgmt_filter = filter_val->fp_mgmt;
4337 	pdev->fp_ctrl_filter = filter_val->fp_ctrl;
4338 	pdev->fp_data_filter = filter_val->fp_data;
4339 	pdev->mo_mgmt_filter = filter_val->mo_mgmt;
4340 	pdev->mo_ctrl_filter = filter_val->mo_ctrl;
4341 	pdev->mo_data_filter = filter_val->mo_data;
4342 
4343 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
4344 		"MODE[%x] FP[%02x|%02x|%02x] MO[%02x|%02x|%02x]\n",
4345 		pdev->mon_filter_mode, pdev->fp_mgmt_filter,
4346 		pdev->fp_ctrl_filter, pdev->fp_data_filter,
4347 		pdev->mo_mgmt_filter, pdev->mo_ctrl_filter,
4348 		pdev->mo_data_filter);
4349 
4350 	qdf_mem_set(&(htt_tlv_filter), sizeof(htt_tlv_filter), 0x0);
4351 
4352 	htt_h2t_rx_ring_cfg(soc->htt_handle, pdev_id,
4353 		pdev->rxdma_mon_buf_ring.hal_srng,
4354 		RXDMA_MONITOR_BUF, RX_BUFFER_SIZE, &htt_tlv_filter);
4355 
4356 	htt_h2t_rx_ring_cfg(soc->htt_handle, pdev_id,
4357 		pdev->rxdma_mon_status_ring.hal_srng, RXDMA_MONITOR_STATUS,
4358 		RX_BUFFER_SIZE, &htt_tlv_filter);
4359 
4360 	htt_tlv_filter.mpdu_start = 1;
4361 	htt_tlv_filter.msdu_start = 1;
4362 	htt_tlv_filter.packet = 1;
4363 	htt_tlv_filter.msdu_end = 1;
4364 	htt_tlv_filter.mpdu_end = 1;
4365 	htt_tlv_filter.packet_header = 1;
4366 	htt_tlv_filter.attention = 1;
4367 	htt_tlv_filter.ppdu_start = 0;
4368 	htt_tlv_filter.ppdu_end = 0;
4369 	htt_tlv_filter.ppdu_end_user_stats = 0;
4370 	htt_tlv_filter.ppdu_end_user_stats_ext = 0;
4371 	htt_tlv_filter.ppdu_end_status_done = 0;
4372 	htt_tlv_filter.header_per_msdu = 1;
4373 	htt_tlv_filter.enable_fp =
4374 		(pdev->mon_filter_mode & MON_FILTER_PASS) ? 1 : 0;
4375 	htt_tlv_filter.enable_md = 0;
4376 	htt_tlv_filter.enable_mo =
4377 		(pdev->mon_filter_mode & MON_FILTER_OTHER) ? 1 : 0;
4378 	htt_tlv_filter.fp_mgmt_filter = pdev->fp_mgmt_filter;
4379 	htt_tlv_filter.fp_ctrl_filter = pdev->fp_ctrl_filter;
4380 	htt_tlv_filter.fp_data_filter = pdev->fp_data_filter;
4381 	htt_tlv_filter.mo_mgmt_filter = pdev->mo_mgmt_filter;
4382 	htt_tlv_filter.mo_ctrl_filter = pdev->mo_ctrl_filter;
4383 	htt_tlv_filter.mo_data_filter = pdev->mo_data_filter;
4384 
4385 	htt_h2t_rx_ring_cfg(soc->htt_handle, pdev_id,
4386 		pdev->rxdma_mon_buf_ring.hal_srng, RXDMA_MONITOR_BUF,
4387 		RX_BUFFER_SIZE, &htt_tlv_filter);
4388 
4389 	htt_tlv_filter.mpdu_start = 1;
4390 	htt_tlv_filter.msdu_start = 1;
4391 	htt_tlv_filter.packet = 0;
4392 	htt_tlv_filter.msdu_end = 1;
4393 	htt_tlv_filter.mpdu_end = 1;
4394 	htt_tlv_filter.packet_header = 1;
4395 	htt_tlv_filter.attention = 1;
4396 	htt_tlv_filter.ppdu_start = 1;
4397 	htt_tlv_filter.ppdu_end = 1;
4398 	htt_tlv_filter.ppdu_end_user_stats = 1;
4399 	htt_tlv_filter.ppdu_end_user_stats_ext = 1;
4400 	htt_tlv_filter.ppdu_end_status_done = 1;
4401 	htt_tlv_filter.header_per_msdu = 0;
4402 	htt_tlv_filter.enable_fp =
4403 		(pdev->mon_filter_mode & MON_FILTER_PASS) ? 1 : 0;
4404 	htt_tlv_filter.enable_md = 0;
4405 	htt_tlv_filter.enable_mo =
4406 		(pdev->mon_filter_mode & MON_FILTER_OTHER) ? 1 : 0;
4407 	htt_tlv_filter.fp_mgmt_filter = pdev->fp_mgmt_filter;
4408 	htt_tlv_filter.fp_ctrl_filter = pdev->fp_ctrl_filter;
4409 	htt_tlv_filter.fp_data_filter = pdev->fp_data_filter;
4410 	htt_tlv_filter.mo_mgmt_filter = pdev->mo_mgmt_filter;
4411 	htt_tlv_filter.mo_ctrl_filter = pdev->mo_ctrl_filter;
4412 	htt_tlv_filter.mo_data_filter = pdev->mo_data_filter;
4413 
4414 	htt_h2t_rx_ring_cfg(soc->htt_handle, pdev_id,
4415 		pdev->rxdma_mon_status_ring.hal_srng, RXDMA_MONITOR_STATUS,
4416 		RX_BUFFER_SIZE, &htt_tlv_filter);
4417 
4418 	return QDF_STATUS_SUCCESS;
4419 }
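
/*
 * Note: the qdf_mem_set() followed by the first pair of
 * htt_h2t_rx_ring_cfg() calls above clears all filters on both monitor
 * rings before the new filter set is programmed, so a filter change
 * never leaves a stale mode active in hardware.
 */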
4420 
4421 /**
4422  * dp_get_pdev_id_frm_pdev() - get pdev_id
4423  * @pdev_handle: Datapath PDEV handle
4424  *
4425  * Return: pdev_id
4426  */
4427 static
4428 uint8_t dp_get_pdev_id_frm_pdev(struct cdp_pdev *pdev_handle)
4429 {
4430 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
4431 
4432 	return pdev->pdev_id;
4433 }
4434 
4435 /**
4436  * dp_vdev_get_filter_ucast_data() - get DP VDEV monitor ucast filter
4437  * @vdev_handle: Datapath VDEV handle
4438  * Return: true on ucast filter flag set
4439  */
4440 static bool dp_vdev_get_filter_ucast_data(struct cdp_vdev *vdev_handle)
4441 {
4442 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
4443 	struct dp_pdev *pdev;
4444 
4445 	pdev = vdev->pdev;
4446 
4447 	if ((pdev->fp_data_filter & FILTER_DATA_UCAST) ||
4448 	    (pdev->mo_data_filter & FILTER_DATA_UCAST))
4449 		return true;
4450 
4451 	return false;
4452 }
4453 
4454 /**
4455  * dp_vdev_get_filter_mcast_data() - get DP VDEV monitor mcast filter
4456  * @vdev_handle: Datapath VDEV handle
4457  * Return: true on mcast filter flag set
4458  */
4459 static bool dp_vdev_get_filter_mcast_data(struct cdp_vdev *vdev_handle)
4460 {
4461 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
4462 	struct dp_pdev *pdev;
4463 
4464 	pdev = vdev->pdev;
4465 
4466 	if ((pdev->fp_data_filter & FILTER_DATA_MCAST) ||
4467 	    (pdev->mo_data_filter & FILTER_DATA_MCAST))
4468 		return true;
4469 
4470 	return false;
4471 }
4472 
4473 /**
4474  * dp_vdev_get_filter_non_data() - get DP VDEV monitor non_data filter
4475  * @vdev_handle: Datapath VDEV handle
4476  * Return: true on non data filter flag set
4477  */
4478 static bool dp_vdev_get_filter_non_data(struct cdp_vdev *vdev_handle)
4479 {
4480 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
4481 	struct dp_pdev *pdev;
4482 
4483 	pdev = vdev->pdev;
4484 
4485 	if ((pdev->fp_mgmt_filter & FILTER_MGMT_ALL) ||
4486 	    (pdev->mo_mgmt_filter & FILTER_MGMT_ALL)) {
4487 		if ((pdev->fp_ctrl_filter & FILTER_CTRL_ALL) ||
4488 		    (pdev->mo_ctrl_filter & FILTER_CTRL_ALL)) {
4489 			return true;
4490 		}
4491 	}
4492 
4493 	return false;
4494 }
4495 
4496 #ifdef MESH_MODE_SUPPORT
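/*
 * dp_peer_set_mesh_mode() - flag the vdev as a mesh vdev
 * @vdev_hdl: virtual device object
 * @val: value to be set
 *
 * Return: void
 */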
4497 void dp_peer_set_mesh_mode(struct cdp_vdev *vdev_hdl, uint32_t val)
4498 {
4499 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_hdl;
4500 
4501 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
4502 		FL("val %d"), val);
4503 	vdev->mesh_vdev = val;
4504 }
4505 
4506 /*
4507  * dp_peer_set_mesh_rx_filter() - to set the mesh rx filter
4508  * @vdev_hdl: virtual device object
4509  * @val: value to be set
4510  *
4511  * Return: void
4512  */
4513 void dp_peer_set_mesh_rx_filter(struct cdp_vdev *vdev_hdl, uint32_t val)
4514 {
4515 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_hdl;
4516 
4517 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
4518 		FL("val %d"), val);
4519 	vdev->mesh_rx_filter = val;
4520 }
4521 #endif
4522 
4523 /*
 * dp_aggregate_pdev_ctrl_frames_stats() - function to aggregate peer stats
 * Current scope is BAR received count
 *
 * @pdev: DP_PDEV handle
4528  *
4529  * Return: void
4530  */
4531 #define STATS_PROC_TIMEOUT        (HZ/1000)
4532 
4533 static void
4534 dp_aggregate_pdev_ctrl_frames_stats(struct dp_pdev *pdev)
4535 {
4536 	struct dp_vdev *vdev;
4537 	struct dp_peer *peer;
4538 	uint32_t waitcnt;
4539 
4540 	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
4541 		TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
4542 			if (!peer) {
4543 				QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
					FL("DP Invalid Peer reference"));
4545 				return;
4546 			}
4547 
4548 			if (peer->delete_in_progress) {
4549 				QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
4550 					FL("DP Peer deletion in progress"));
4551 				continue;
4552 			}
4553 
4554 			qdf_atomic_inc(&peer->ref_cnt);
4555 			waitcnt = 0;
4556 			dp_peer_rxtid_stats(peer, dp_rx_bar_stats_cb, pdev);
4557 			while (!(qdf_atomic_read(&(pdev->stats_cmd_complete)))
4558 				&& waitcnt < 10) {
4559 				schedule_timeout_interruptible(
4560 						STATS_PROC_TIMEOUT);
4561 				waitcnt++;
4562 			}
4563 			qdf_atomic_set(&(pdev->stats_cmd_complete), 0);
4564 			dp_peer_unref_delete(peer);
4565 		}
4566 	}
4567 }
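
/*
 * The poll loop above waits for dp_rx_bar_stats_cb() (below) to set
 * stats_cmd_complete, bounded to 10 iterations of STATS_PROC_TIMEOUT so
 * a lost REO status response cannot stall the stats dump indefinitely.
 */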
4568 
4569 /**
4570  * dp_rx_bar_stats_cb(): BAR received stats callback
4571  * @soc: SOC handle
4572  * @cb_ctxt: Call back context
4573  * @reo_status: Reo status
4574  *
4575  * return: void
4576  */
4577 void dp_rx_bar_stats_cb(struct dp_soc *soc, void *cb_ctxt,
4578 	union hal_reo_status *reo_status)
4579 {
4580 	struct dp_pdev *pdev = (struct dp_pdev *)cb_ctxt;
4581 	struct hal_reo_queue_status *queue_status = &(reo_status->queue_status);
4582 
4583 	if (queue_status->header.status != HAL_REO_CMD_SUCCESS) {
		DP_TRACE_STATS(FATAL, "REO stats failure %d\n",
4585 			queue_status->header.status);
4586 		qdf_atomic_set(&(pdev->stats_cmd_complete), 1);
4587 		return;
4588 	}
4589 
4590 	pdev->stats.rx.bar_recv_cnt += queue_status->bar_rcvd_cnt;
4591 	qdf_atomic_set(&(pdev->stats_cmd_complete), 1);
4592 
4593 }
4594 
4595 /**
4596  * dp_aggregate_vdev_stats(): Consolidate stats at VDEV level
4597  * @vdev: DP VDEV handle
4598  *
4599  * return: void
4600  */
4601 void dp_aggregate_vdev_stats(struct dp_vdev *vdev)
4602 {
4603 	struct dp_peer *peer = NULL;
4604 	struct dp_soc *soc = vdev->pdev->soc;
4605 
4606 	qdf_mem_set(&(vdev->stats.tx), sizeof(vdev->stats.tx), 0x0);
4607 	qdf_mem_set(&(vdev->stats.rx), sizeof(vdev->stats.rx), 0x0);
4608 
4609 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem)
4610 		DP_UPDATE_STATS(vdev, peer);
4611 
4612 	if (soc->cdp_soc.ol_ops->update_dp_stats)
4613 		soc->cdp_soc.ol_ops->update_dp_stats(vdev->pdev->osif_pdev,
4614 			&vdev->stats, (uint16_t) vdev->vdev_id,
4615 			UPDATE_VDEV_STATS);
4616 
4617 }
4618 
4619 /**
4620  * dp_aggregate_pdev_stats(): Consolidate stats at PDEV level
4621  * @pdev: DP PDEV handle
4622  *
4623  * return: void
4624  */
4625 static inline void dp_aggregate_pdev_stats(struct dp_pdev *pdev)
4626 {
4627 	struct dp_vdev *vdev = NULL;
4628 	struct dp_soc *soc = pdev->soc;
4629 
4630 	qdf_mem_set(&(pdev->stats.tx), sizeof(pdev->stats.tx), 0x0);
4631 	qdf_mem_set(&(pdev->stats.rx), sizeof(pdev->stats.rx), 0x0);
4632 	qdf_mem_set(&(pdev->stats.tx_i), sizeof(pdev->stats.tx_i), 0x0);
4633 
4634 	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
4635 
4636 		dp_aggregate_vdev_stats(vdev);
4637 		DP_UPDATE_STATS(pdev, vdev);
4638 
4639 		DP_STATS_AGGR_PKT(pdev, vdev, tx_i.nawds_mcast);
4640 
4641 		DP_STATS_AGGR_PKT(pdev, vdev, tx_i.rcvd);
4642 		DP_STATS_AGGR_PKT(pdev, vdev, tx_i.processed);
4643 		DP_STATS_AGGR_PKT(pdev, vdev, tx_i.reinject_pkts);
4644 		DP_STATS_AGGR_PKT(pdev, vdev, tx_i.inspect_pkts);
4645 		DP_STATS_AGGR_PKT(pdev, vdev, tx_i.raw.raw_pkt);
4646 		DP_STATS_AGGR(pdev, vdev, tx_i.raw.dma_map_error);
4647 		DP_STATS_AGGR_PKT(pdev, vdev, tx_i.tso.tso_pkt);
4648 		DP_STATS_AGGR(pdev, vdev, tx_i.tso.dropped_host);
4649 		DP_STATS_AGGR(pdev, vdev, tx_i.tso.dropped_target);
4650 		DP_STATS_AGGR(pdev, vdev, tx_i.sg.dropped_host);
4651 		DP_STATS_AGGR(pdev, vdev, tx_i.sg.dropped_target);
4652 		DP_STATS_AGGR_PKT(pdev, vdev, tx_i.sg.sg_pkt);
4653 		DP_STATS_AGGR_PKT(pdev, vdev, tx_i.mcast_en.mcast_pkt);
4654 		DP_STATS_AGGR(pdev, vdev,
4655 				tx_i.mcast_en.dropped_map_error);
4656 		DP_STATS_AGGR(pdev, vdev,
4657 				tx_i.mcast_en.dropped_self_mac);
4658 		DP_STATS_AGGR(pdev, vdev,
4659 				tx_i.mcast_en.dropped_send_fail);
4660 		DP_STATS_AGGR(pdev, vdev, tx_i.mcast_en.ucast);
4661 		DP_STATS_AGGR(pdev, vdev, tx_i.dropped.dma_error);
4662 		DP_STATS_AGGR(pdev, vdev, tx_i.dropped.ring_full);
4663 		DP_STATS_AGGR(pdev, vdev, tx_i.dropped.enqueue_fail);
4664 		DP_STATS_AGGR(pdev, vdev, tx_i.dropped.desc_na);
4665 		DP_STATS_AGGR(pdev, vdev, tx_i.dropped.res_full);
4666 		DP_STATS_AGGR(pdev, vdev, tx_i.cce_classified);
4667 		DP_STATS_AGGR(pdev, vdev, tx_i.cce_classified_raw);
4668 		DP_STATS_AGGR(pdev, vdev, tx_i.mesh.exception_fw);
4669 		DP_STATS_AGGR(pdev, vdev, tx_i.mesh.completion_fw);
4670 
4671 		pdev->stats.tx_i.dropped.dropped_pkt.num =
4672 			pdev->stats.tx_i.dropped.dma_error +
4673 			pdev->stats.tx_i.dropped.ring_full +
4674 			pdev->stats.tx_i.dropped.enqueue_fail +
4675 			pdev->stats.tx_i.dropped.desc_na +
4676 			pdev->stats.tx_i.dropped.res_full;
4677 
4678 		pdev->stats.tx.last_ack_rssi =
4679 			vdev->stats.tx.last_ack_rssi;
4680 		pdev->stats.tx_i.tso.num_seg =
4681 			vdev->stats.tx_i.tso.num_seg;
4682 	}
4683 	if (soc->cdp_soc.ol_ops->update_dp_stats)
4684 		soc->cdp_soc.ol_ops->update_dp_stats(pdev->osif_pdev,
4685 				&pdev->stats, pdev->pdev_id, UPDATE_PDEV_STATS);
4686 
4687 }
4688 
4689 /**
4690  * dp_print_pdev_tx_stats(): Print Pdev level TX stats
4691  * @pdev: DP_PDEV Handle
4692  *
4693  * Return:void
4694  */
4695 static inline void
4696 dp_print_pdev_tx_stats(struct dp_pdev *pdev)
4697 {
4698 	uint8_t index = 0;
4699 	DP_PRINT_STATS("PDEV Tx Stats:\n");
4700 	DP_PRINT_STATS("Received From Stack:");
4701 	DP_PRINT_STATS("	Packets = %d",
4702 			pdev->stats.tx_i.rcvd.num);
4703 	DP_PRINT_STATS("	Bytes = %llu",
4704 			pdev->stats.tx_i.rcvd.bytes);
4705 	DP_PRINT_STATS("Processed:");
4706 	DP_PRINT_STATS("	Packets = %d",
4707 			pdev->stats.tx_i.processed.num);
4708 	DP_PRINT_STATS("	Bytes = %llu",
4709 			pdev->stats.tx_i.processed.bytes);
4710 	DP_PRINT_STATS("Total Completions:");
4711 	DP_PRINT_STATS("	Packets = %u",
4712 			pdev->stats.tx.comp_pkt.num);
4713 	DP_PRINT_STATS("	Bytes = %llu",
4714 			pdev->stats.tx.comp_pkt.bytes);
4715 	DP_PRINT_STATS("Successful Completions:");
4716 	DP_PRINT_STATS("	Packets = %u",
4717 			pdev->stats.tx.tx_success.num);
4718 	DP_PRINT_STATS("	Bytes = %llu",
4719 			pdev->stats.tx.tx_success.bytes);
4720 	DP_PRINT_STATS("Dropped:");
4721 	DP_PRINT_STATS("	Total = %d",
4722 			pdev->stats.tx_i.dropped.dropped_pkt.num);
4723 	DP_PRINT_STATS("	Dma_map_error = %d",
4724 			pdev->stats.tx_i.dropped.dma_error);
4725 	DP_PRINT_STATS("	Ring Full = %d",
4726 			pdev->stats.tx_i.dropped.ring_full);
4727 	DP_PRINT_STATS("	Descriptor Not available = %d",
4728 			pdev->stats.tx_i.dropped.desc_na);
4729 	DP_PRINT_STATS("	HW enqueue failed= %d",
4730 			pdev->stats.tx_i.dropped.enqueue_fail);
4731 	DP_PRINT_STATS("	Resources Full = %d",
4732 			pdev->stats.tx_i.dropped.res_full);
4733 	DP_PRINT_STATS("	FW removed = %d",
4734 			pdev->stats.tx.dropped.fw_rem);
4735 	DP_PRINT_STATS("	FW removed transmitted = %d",
4736 			pdev->stats.tx.dropped.fw_rem_tx);
4737 	DP_PRINT_STATS("	FW removed untransmitted = %d",
4738 			pdev->stats.tx.dropped.fw_rem_notx);
4739 	DP_PRINT_STATS("	FW removed untransmitted fw_reason1 = %d",
4740 			pdev->stats.tx.dropped.fw_reason1);
4741 	DP_PRINT_STATS("	FW removed untransmitted fw_reason2 = %d",
4742 			pdev->stats.tx.dropped.fw_reason2);
4743 	DP_PRINT_STATS("	FW removed untransmitted fw_reason3 = %d",
4744 			pdev->stats.tx.dropped.fw_reason3);
4745 	DP_PRINT_STATS("	Aged Out from msdu/mpdu queues = %d",
4746 			pdev->stats.tx.dropped.age_out);
4747 	DP_PRINT_STATS("Scatter Gather:");
4748 	DP_PRINT_STATS("	Packets = %d",
4749 			pdev->stats.tx_i.sg.sg_pkt.num);
4750 	DP_PRINT_STATS("	Bytes = %llu",
4751 			pdev->stats.tx_i.sg.sg_pkt.bytes);
4752 	DP_PRINT_STATS("	Dropped By Host = %d",
4753 			pdev->stats.tx_i.sg.dropped_host);
4754 	DP_PRINT_STATS("	Dropped By Target = %d",
4755 			pdev->stats.tx_i.sg.dropped_target);
4756 	DP_PRINT_STATS("TSO:");
4757 	DP_PRINT_STATS("	Number of Segments = %d",
4758 			pdev->stats.tx_i.tso.num_seg);
4759 	DP_PRINT_STATS("	Packets = %d",
4760 			pdev->stats.tx_i.tso.tso_pkt.num);
4761 	DP_PRINT_STATS("	Bytes = %llu",
4762 			pdev->stats.tx_i.tso.tso_pkt.bytes);
4763 	DP_PRINT_STATS("	Dropped By Host = %d",
4764 			pdev->stats.tx_i.tso.dropped_host);
4765 	DP_PRINT_STATS("Mcast Enhancement:");
4766 	DP_PRINT_STATS("	Packets = %d",
4767 			pdev->stats.tx_i.mcast_en.mcast_pkt.num);
4768 	DP_PRINT_STATS("	Bytes = %llu",
4769 			pdev->stats.tx_i.mcast_en.mcast_pkt.bytes);
4770 	DP_PRINT_STATS("	Dropped: Map Errors = %d",
4771 			pdev->stats.tx_i.mcast_en.dropped_map_error);
4772 	DP_PRINT_STATS("	Dropped: Self Mac = %d",
4773 			pdev->stats.tx_i.mcast_en.dropped_self_mac);
4774 	DP_PRINT_STATS("	Dropped: Send Fail = %d",
4775 			pdev->stats.tx_i.mcast_en.dropped_send_fail);
4776 	DP_PRINT_STATS("	Unicast sent = %d",
4777 			pdev->stats.tx_i.mcast_en.ucast);
4778 	DP_PRINT_STATS("Raw:");
4779 	DP_PRINT_STATS("	Packets = %d",
4780 			pdev->stats.tx_i.raw.raw_pkt.num);
4781 	DP_PRINT_STATS("	Bytes = %llu",
4782 			pdev->stats.tx_i.raw.raw_pkt.bytes);
4783 	DP_PRINT_STATS("	DMA map error = %d",
4784 			pdev->stats.tx_i.raw.dma_map_error);
4785 	DP_PRINT_STATS("Reinjected:");
4786 	DP_PRINT_STATS("	Packets = %d",
4787 			pdev->stats.tx_i.reinject_pkts.num);
4788 	DP_PRINT_STATS("Bytes = %llu\n",
4789 				pdev->stats.tx_i.reinject_pkts.bytes);
4790 	DP_PRINT_STATS("Inspected:");
4791 	DP_PRINT_STATS("	Packets = %d",
4792 			pdev->stats.tx_i.inspect_pkts.num);
4793 	DP_PRINT_STATS("	Bytes = %llu",
4794 			pdev->stats.tx_i.inspect_pkts.bytes);
4795 	DP_PRINT_STATS("Nawds Multicast:");
4796 	DP_PRINT_STATS("	Packets = %d",
4797 			pdev->stats.tx_i.nawds_mcast.num);
4798 	DP_PRINT_STATS("	Bytes = %llu",
4799 			pdev->stats.tx_i.nawds_mcast.bytes);
4800 	DP_PRINT_STATS("CCE Classified:");
4801 	DP_PRINT_STATS("	CCE Classified Packets: %u",
4802 			pdev->stats.tx_i.cce_classified);
4803 	DP_PRINT_STATS("	RAW CCE Classified Packets: %u",
4804 			pdev->stats.tx_i.cce_classified_raw);
4805 	DP_PRINT_STATS("Mesh stats:");
4806 	DP_PRINT_STATS("	frames to firmware: %u",
4807 			pdev->stats.tx_i.mesh.exception_fw);
4808 	DP_PRINT_STATS("	completions from fw: %u",
4809 			pdev->stats.tx_i.mesh.completion_fw);
4810 	DP_PRINT_STATS("PPDU stats counter");
4811 	for (index = 0; index < CDP_PPDU_STATS_MAX_TAG; index++) {
4812 		DP_PRINT_STATS("	Tag[%d] = %llu", index,
4813 				pdev->stats.ppdu_stats_counter[index]);
4814 	}
4815 }
4816 
4817 /**
4818  * dp_print_pdev_rx_stats(): Print Pdev level RX stats
4819  * @pdev: DP_PDEV Handle
4820  *
4821  * Return: void
4822  */
4823 static inline void
4824 dp_print_pdev_rx_stats(struct dp_pdev *pdev)
4825 {
4826 	DP_PRINT_STATS("PDEV Rx Stats:\n");
4827 	DP_PRINT_STATS("Received From HW (Per Rx Ring):");
4828 	DP_PRINT_STATS("	Packets = %d %d %d %d",
4829 			pdev->stats.rx.rcvd_reo[0].num,
4830 			pdev->stats.rx.rcvd_reo[1].num,
4831 			pdev->stats.rx.rcvd_reo[2].num,
4832 			pdev->stats.rx.rcvd_reo[3].num);
4833 	DP_PRINT_STATS("	Bytes = %llu %llu %llu %llu",
4834 			pdev->stats.rx.rcvd_reo[0].bytes,
4835 			pdev->stats.rx.rcvd_reo[1].bytes,
4836 			pdev->stats.rx.rcvd_reo[2].bytes,
4837 			pdev->stats.rx.rcvd_reo[3].bytes);
4838 	DP_PRINT_STATS("Replenished:");
4839 	DP_PRINT_STATS("	Packets = %d",
4840 			pdev->stats.replenish.pkts.num);
4841 	DP_PRINT_STATS("	Bytes = %llu",
4842 			pdev->stats.replenish.pkts.bytes);
4843 	DP_PRINT_STATS("	Buffers Added To Freelist = %d",
4844 			pdev->stats.buf_freelist);
4845 	DP_PRINT_STATS("	Low threshold intr = %d",
4846 			pdev->stats.replenish.low_thresh_intrs);
4847 	DP_PRINT_STATS("Dropped:");
4848 	DP_PRINT_STATS("	msdu_not_done = %d",
4849 			pdev->stats.dropped.msdu_not_done);
	DP_PRINT_STATS("	mon_rx_drop = %d",
4851 			pdev->stats.dropped.mon_rx_drop);
4852 	DP_PRINT_STATS("Sent To Stack:");
4853 	DP_PRINT_STATS("	Packets = %d",
4854 			pdev->stats.rx.to_stack.num);
4855 	DP_PRINT_STATS("	Bytes = %llu",
4856 			pdev->stats.rx.to_stack.bytes);
4857 	DP_PRINT_STATS("Multicast/Broadcast:");
4858 	DP_PRINT_STATS("	Packets = %d",
4859 			pdev->stats.rx.multicast.num);
4860 	DP_PRINT_STATS("	Bytes = %llu",
4861 			pdev->stats.rx.multicast.bytes);
4862 	DP_PRINT_STATS("Errors:");
	DP_PRINT_STATS("	Rxdma Ring Un-initialized = %d",
4864 			pdev->stats.replenish.rxdma_err);
4865 	DP_PRINT_STATS("	Desc Alloc Failed: = %d",
4866 			pdev->stats.err.desc_alloc_fail);
4867 
4868 	/* Get bar_recv_cnt */
4869 	dp_aggregate_pdev_ctrl_frames_stats(pdev);
4870 	DP_PRINT_STATS("BAR Received Count: = %d",
4871 			pdev->stats.rx.bar_recv_cnt);
4872 
4873 }
4874 
4875 /**
 * dp_print_soc_tx_stats(): Print SOC level Tx stats
 * @soc: DP_SOC Handle
4878  *
4879  * Return: void
4880  */
4881 static inline void
4882 dp_print_soc_tx_stats(struct dp_soc *soc)
4883 {
4884 	DP_PRINT_STATS("SOC Tx Stats:\n");
4885 	DP_PRINT_STATS("Tx Descriptors In Use = %d",
4886 			soc->stats.tx.desc_in_use);
4887 	DP_PRINT_STATS("Invalid peer:");
4888 	DP_PRINT_STATS("	Packets = %d",
4889 			soc->stats.tx.tx_invalid_peer.num);
4890 	DP_PRINT_STATS("	Bytes = %llu",
4891 			soc->stats.tx.tx_invalid_peer.bytes);
4892 	DP_PRINT_STATS("Packets dropped due to TCL ring full = %d %d %d",
4893 			soc->stats.tx.tcl_ring_full[0],
4894 			soc->stats.tx.tcl_ring_full[1],
4895 			soc->stats.tx.tcl_ring_full[2]);
4896 
4897 }
4898 
4899 
4900 /**
 * dp_print_soc_rx_stats(): Print SOC level Rx stats
4902  * @soc: DP_SOC Handle
4903  *
4904  * Return:void
4905  */
4906 static inline void
4907 dp_print_soc_rx_stats(struct dp_soc *soc)
4908 {
4909 	uint32_t i;
4910 	char reo_error[DP_REO_ERR_LENGTH];
4911 	char rxdma_error[DP_RXDMA_ERR_LENGTH];
4912 	uint8_t index = 0;
4913 
4914 	DP_PRINT_STATS("SOC Rx Stats:\n");
4915 	DP_PRINT_STATS("Errors:\n");
4916 	DP_PRINT_STATS("Rx Decrypt Errors = %d",
4917 			(soc->stats.rx.err.rxdma_error[HAL_RXDMA_ERR_DECRYPT] +
4918 			soc->stats.rx.err.rxdma_error[HAL_RXDMA_ERR_TKIP_MIC]));
4919 	DP_PRINT_STATS("Invalid RBM = %d",
4920 			soc->stats.rx.err.invalid_rbm);
4921 	DP_PRINT_STATS("Invalid Vdev = %d",
4922 			soc->stats.rx.err.invalid_vdev);
4923 	DP_PRINT_STATS("Invalid Pdev = %d",
4924 			soc->stats.rx.err.invalid_pdev);
4925 	DP_PRINT_STATS("Invalid Peer = %d",
4926 			soc->stats.rx.err.rx_invalid_peer.num);
4927 	DP_PRINT_STATS("HAL Ring Access Fail = %d",
4928 			soc->stats.rx.err.hal_ring_access_fail);
4929 
4930 	for (i = 0; i < HAL_RXDMA_ERR_MAX; i++) {
4931 		index += qdf_snprint(&rxdma_error[index],
4932 				DP_RXDMA_ERR_LENGTH - index,
4933 				" %d", soc->stats.rx.err.rxdma_error[i]);
4934 	}
4935 	DP_PRINT_STATS("RXDMA Error (0-31):%s",
4936 			rxdma_error);
4937 
4938 	index = 0;
4939 	for (i = 0; i < HAL_REO_ERR_MAX; i++) {
4940 		index += qdf_snprint(&reo_error[index],
4941 				DP_REO_ERR_LENGTH - index,
4942 				" %d", soc->stats.rx.err.reo_error[i]);
4943 	}
4944 	DP_PRINT_STATS("REO Error(0-14):%s",
4945 			reo_error);
4946 }
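
/*
 * Buffer sizing note: DP_RXDMA_ERR_LENGTH and DP_REO_ERR_LENGTH reserve
 * six characters per counter, matching the " %d" format (a space plus up
 * to five digits) accumulated by qdf_snprint() above.
 */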
4947 
4948 
4949 /**
4950  * dp_print_ring_stat_from_hal(): Print hal level ring stats
4951  * @soc: DP_SOC handle
4952  * @srng: DP_SRNG handle
4953  * @ring_name: SRNG name
4954  *
4955  * Return: void
4956  */
4957 static inline void
4958 dp_print_ring_stat_from_hal(struct dp_soc *soc,  struct dp_srng *srng,
4959 	char *ring_name)
4960 {
4961 	uint32_t tailp;
4962 	uint32_t headp;
4963 
4964 	if (srng->hal_srng != NULL) {
4965 		hal_api_get_tphp(soc->hal_soc, srng->hal_srng, &tailp, &headp);
4966 		DP_PRINT_STATS("%s : Head pointer = %d  Tail Pointer = %d\n",
4967 				ring_name, headp, tailp);
4968 	}
4969 }
4970 
4971 /**
4972  * dp_print_ring_stats(): Print tail and head pointer
4973  * @pdev: DP_PDEV handle
4974  *
4975  * Return:void
4976  */
4977 static inline void
4978 dp_print_ring_stats(struct dp_pdev *pdev)
4979 {
4980 	uint32_t i;
4981 	char ring_name[STR_MAXLEN + 1];
4982 
4983 	dp_print_ring_stat_from_hal(pdev->soc,
4984 			&pdev->soc->reo_exception_ring,
4985 			"Reo Exception Ring");
4986 	dp_print_ring_stat_from_hal(pdev->soc,
4987 			&pdev->soc->reo_reinject_ring,
4988 			"Reo Inject Ring");
4989 	dp_print_ring_stat_from_hal(pdev->soc,
4990 			&pdev->soc->reo_cmd_ring,
4991 			"Reo Command Ring");
4992 	dp_print_ring_stat_from_hal(pdev->soc,
4993 			&pdev->soc->reo_status_ring,
4994 			"Reo Status Ring");
4995 	dp_print_ring_stat_from_hal(pdev->soc,
4996 			&pdev->soc->rx_rel_ring,
4997 			"Rx Release ring");
4998 	dp_print_ring_stat_from_hal(pdev->soc,
4999 			&pdev->soc->tcl_cmd_ring,
5000 			"Tcl command Ring");
5001 	dp_print_ring_stat_from_hal(pdev->soc,
5002 			&pdev->soc->tcl_status_ring,
5003 			"Tcl Status Ring");
5004 	dp_print_ring_stat_from_hal(pdev->soc,
5005 			&pdev->soc->wbm_desc_rel_ring,
5006 			"Wbm Desc Rel Ring");
5007 	for (i = 0; i < MAX_REO_DEST_RINGS; i++) {
5008 		snprintf(ring_name, STR_MAXLEN, "Reo Dest Ring %d", i);
5009 		dp_print_ring_stat_from_hal(pdev->soc,
5010 				&pdev->soc->reo_dest_ring[i],
5011 				ring_name);
5012 	}
5013 	for (i = 0; i < pdev->soc->num_tcl_data_rings; i++) {
5014 		snprintf(ring_name, STR_MAXLEN, "Tcl Data Ring %d", i);
5015 		dp_print_ring_stat_from_hal(pdev->soc,
5016 				&pdev->soc->tcl_data_ring[i],
5017 				ring_name);
5018 	}
5019 	for (i = 0; i < MAX_TCL_DATA_RINGS; i++) {
5020 		snprintf(ring_name, STR_MAXLEN, "Tx Comp Ring %d", i);
5021 		dp_print_ring_stat_from_hal(pdev->soc,
5022 				&pdev->soc->tx_comp_ring[i],
5023 				ring_name);
5024 	}
5025 	dp_print_ring_stat_from_hal(pdev->soc,
5026 			&pdev->rx_refill_buf_ring,
5027 			"Rx Refill Buf Ring");
5028 
5029 	dp_print_ring_stat_from_hal(pdev->soc,
5030 			&pdev->rx_refill_buf_ring2,
5031 			"Second Rx Refill Buf Ring");
5032 
5033 	dp_print_ring_stat_from_hal(pdev->soc,
5034 			&pdev->rxdma_mon_buf_ring,
5035 			"Rxdma Mon Buf Ring");
5036 	dp_print_ring_stat_from_hal(pdev->soc,
5037 			&pdev->rxdma_mon_dst_ring,
5038 			"Rxdma Mon Dst Ring");
5039 	dp_print_ring_stat_from_hal(pdev->soc,
5040 			&pdev->rxdma_mon_status_ring,
5041 			"Rxdma Mon Status Ring");
5042 	dp_print_ring_stat_from_hal(pdev->soc,
5043 			&pdev->rxdma_mon_desc_ring,
5044 			"Rxdma mon desc Ring");
5045 
5046 	for (i = 0; i < MAX_RX_MAC_RINGS; i++) {
5047 		snprintf(ring_name, STR_MAXLEN, "Rxdma err dst ring %d", i);
5048 		dp_print_ring_stat_from_hal(pdev->soc,
5049 			&pdev->rxdma_err_dst_ring[i],
5050 			ring_name);
5051 	}
5052 
5053 	for (i = 0; i < MAX_RX_MAC_RINGS; i++) {
5054 		snprintf(ring_name, STR_MAXLEN, "Rx mac buf ring %d", i);
5055 		dp_print_ring_stat_from_hal(pdev->soc,
5056 				&pdev->rx_mac_buf_ring[i],
5057 				ring_name);
5058 	}
5059 }
5060 
5061 /**
5062  * dp_txrx_host_stats_clr(): Reinitialize the txrx stats
5063  * @vdev: DP_VDEV handle
5064  *
5065  * Return:void
5066  */
5067 static inline void
5068 dp_txrx_host_stats_clr(struct dp_vdev *vdev)
5069 {
5070 	struct dp_peer *peer = NULL;
5071 	struct dp_soc *soc = (struct dp_soc *)vdev->pdev->soc;
5072 
5073 	DP_STATS_CLR(vdev->pdev);
5074 	DP_STATS_CLR(vdev->pdev->soc);
5075 	DP_STATS_CLR(vdev);
5076 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
5077 		if (!peer)
5078 			return;
5079 		DP_STATS_CLR(peer);
5080 
5081 		if (soc->cdp_soc.ol_ops->update_dp_stats) {
5082 			soc->cdp_soc.ol_ops->update_dp_stats(
5083 					vdev->pdev->osif_pdev,
5084 					&peer->stats,
5085 					peer->peer_ids[0],
5086 					UPDATE_PEER_STATS);
5087 		}
5088 
5089 	}
5090 
5091 	if (soc->cdp_soc.ol_ops->update_dp_stats)
5092 		soc->cdp_soc.ol_ops->update_dp_stats(vdev->pdev->osif_pdev,
5093 				&vdev->stats, (uint16_t)vdev->vdev_id,
5094 				UPDATE_VDEV_STATS);
5095 }
5096 
5097 /**
5098  * dp_print_rx_rates(): Print Rx rate stats
5099  * @vdev: DP_VDEV handle
5100  *
5101  * Return:void
5102  */
5103 static inline void
5104 dp_print_rx_rates(struct dp_vdev *vdev)
5105 {
5106 	struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;
5107 	uint8_t i, mcs, pkt_type;
5108 	uint8_t index = 0;
5109 	char nss[DP_NSS_LENGTH];
5110 
5111 	DP_PRINT_STATS("Rx Rate Info:\n");
5112 
5113 	for (pkt_type = 0; pkt_type < DOT11_MAX; pkt_type++) {
5114 		index = 0;
5115 		for (mcs = 0; mcs < MAX_MCS; mcs++) {
5116 			if (!dp_rate_string[pkt_type][mcs].valid)
5117 				continue;
5118 
5119 			DP_PRINT_STATS("	%s = %d",
5120 					dp_rate_string[pkt_type][mcs].mcs_type,
5121 					pdev->stats.rx.pkt_type[pkt_type].
5122 					mcs_count[mcs]);
5123 		}
5124 
5125 		DP_PRINT_STATS("\n");
5126 	}
5127 
5128 	index = 0;
5129 	for (i = 0; i < SS_COUNT; i++) {
5130 		index += qdf_snprint(&nss[index], DP_NSS_LENGTH - index,
5131 				" %d", pdev->stats.rx.nss[i]);
5132 	}
5133 	DP_PRINT_STATS("NSS(1-8) = %s",
5134 			nss);
5135 
5136 	DP_PRINT_STATS("SGI ="
5137 			" 0.8us %d,"
5138 			" 0.4us %d,"
5139 			" 1.6us %d,"
5140 			" 3.2us %d,",
5141 			pdev->stats.rx.sgi_count[0],
5142 			pdev->stats.rx.sgi_count[1],
5143 			pdev->stats.rx.sgi_count[2],
5144 			pdev->stats.rx.sgi_count[3]);
5145 	DP_PRINT_STATS("BW Counts = 20MHZ %d, 40MHZ %d, 80MHZ %d, 160MHZ %d",
5146 			pdev->stats.rx.bw[0], pdev->stats.rx.bw[1],
5147 			pdev->stats.rx.bw[2], pdev->stats.rx.bw[3]);
5148 	DP_PRINT_STATS("Reception Type ="
5149 			" SU: %d,"
5150 			" MU_MIMO:%d,"
5151 			" MU_OFDMA:%d,"
5152 			" MU_OFDMA_MIMO:%d\n",
5153 			pdev->stats.rx.reception_type[0],
5154 			pdev->stats.rx.reception_type[1],
5155 			pdev->stats.rx.reception_type[2],
5156 			pdev->stats.rx.reception_type[3]);
5157 	DP_PRINT_STATS("Aggregation:\n");
5158 	DP_PRINT_STATS("Number of Msdu's Part of Ampdus = %d",
5159 			pdev->stats.rx.ampdu_cnt);
5160 	DP_PRINT_STATS("Number of Msdu's With No Mpdu Level Aggregation : %d",
5161 			pdev->stats.rx.non_ampdu_cnt);
5162 	DP_PRINT_STATS("Number of Msdu's Part of Amsdu: %d",
5163 			pdev->stats.rx.amsdu_cnt);
5164 	DP_PRINT_STATS("Number of Msdu's With No Msdu Level Aggregation: %d",
5165 			pdev->stats.rx.non_amsdu_cnt);
5166 }
5167 
5168 /**
5169  * dp_print_tx_rates(): Print tx rates
5170  * @vdev: DP_VDEV handle
5171  *
5172  * Return:void
5173  */
5174 static inline void
5175 dp_print_tx_rates(struct dp_vdev *vdev)
5176 {
5177 	struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;
5178 	uint8_t mcs, pkt_type;
5179 	uint32_t index;
5180 
5181 	DP_PRINT_STATS("Tx Rate Info:\n");
5182 
5183 	for (pkt_type = 0; pkt_type < DOT11_MAX; pkt_type++) {
5184 		index = 0;
5185 		for (mcs = 0; mcs < MAX_MCS; mcs++) {
5186 			if (!dp_rate_string[pkt_type][mcs].valid)
5187 				continue;
5188 
5189 			DP_PRINT_STATS("	%s = %d",
5190 					dp_rate_string[pkt_type][mcs].mcs_type,
5191 					pdev->stats.tx.pkt_type[pkt_type].
5192 					mcs_count[mcs]);
5193 		}
5194 
5195 		DP_PRINT_STATS("\n");
5196 	}
5197 
5198 	DP_PRINT_STATS("SGI ="
5199 			" 0.8us %d"
5200 			" 0.4us %d"
5201 			" 1.6us %d"
5202 			" 3.2us %d",
5203 			pdev->stats.tx.sgi_count[0],
5204 			pdev->stats.tx.sgi_count[1],
5205 			pdev->stats.tx.sgi_count[2],
5206 			pdev->stats.tx.sgi_count[3]);
5207 
5208 	DP_PRINT_STATS("BW Counts = 20MHZ %d, 40MHZ %d, 80MHZ %d, 160MHZ %d",
5209 			pdev->stats.tx.bw[0], pdev->stats.tx.bw[1],
5210 			pdev->stats.tx.bw[2], pdev->stats.tx.bw[3]);
5211 
5212 	DP_PRINT_STATS("OFDMA = %d", pdev->stats.tx.ofdma);
5213 	DP_PRINT_STATS("STBC = %d", pdev->stats.tx.stbc);
5214 	DP_PRINT_STATS("LDPC = %d", pdev->stats.tx.ldpc);
5215 	DP_PRINT_STATS("Retries = %d", pdev->stats.tx.retries);
5216 	DP_PRINT_STATS("Last ack rssi = %d\n", pdev->stats.tx.last_ack_rssi);
5217 
5218 	DP_PRINT_STATS("Aggregation:\n");
5219 	DP_PRINT_STATS("Number of Msdu's Part of Amsdu = %d",
5220 			pdev->stats.tx.amsdu_cnt);
5221 	DP_PRINT_STATS("Number of Msdu's With No Msdu Level Aggregation = %d",
5222 			pdev->stats.tx.non_amsdu_cnt);
5223 }
5224 
5225 /**
 * dp_print_peer_stats(): Print peer stats
 * @peer: DP_PEER handle
 *
 * Return: void
5230  */
5231 static inline void dp_print_peer_stats(struct dp_peer *peer)
5232 {
5233 	uint8_t i, mcs, pkt_type;
5234 	uint32_t index;
5235 	char nss[DP_NSS_LENGTH];
5236 	DP_PRINT_STATS("Node Tx Stats:\n");
5237 	DP_PRINT_STATS("Total Packet Completions = %d",
5238 			peer->stats.tx.comp_pkt.num);
5239 	DP_PRINT_STATS("Total Bytes Completions = %llu",
5240 			peer->stats.tx.comp_pkt.bytes);
5241 	DP_PRINT_STATS("Success Packets = %d",
5242 			peer->stats.tx.tx_success.num);
5243 	DP_PRINT_STATS("Success Bytes = %llu",
5244 			peer->stats.tx.tx_success.bytes);
5245 	DP_PRINT_STATS("Unicast Success Packets = %d",
5246 			peer->stats.tx.ucast.num);
5247 	DP_PRINT_STATS("Unicast Success Bytes = %llu",
5248 			peer->stats.tx.ucast.bytes);
5249 	DP_PRINT_STATS("Multicast Success Packets = %d",
5250 			peer->stats.tx.mcast.num);
5251 	DP_PRINT_STATS("Multicast Success Bytes = %llu",
5252 			peer->stats.tx.mcast.bytes);
5253 	DP_PRINT_STATS("Broadcast Success Packets = %d",
5254 			peer->stats.tx.bcast.num);
5255 	DP_PRINT_STATS("Broadcast Success Bytes = %llu",
5256 			peer->stats.tx.bcast.bytes);
5257 	DP_PRINT_STATS("Packets Failed = %d",
5258 			peer->stats.tx.tx_failed);
5259 	DP_PRINT_STATS("Packets In OFDMA = %d",
5260 			peer->stats.tx.ofdma);
5261 	DP_PRINT_STATS("Packets In STBC = %d",
5262 			peer->stats.tx.stbc);
5263 	DP_PRINT_STATS("Packets In LDPC = %d",
5264 			peer->stats.tx.ldpc);
5265 	DP_PRINT_STATS("Packet Retries = %d",
5266 			peer->stats.tx.retries);
5267 	DP_PRINT_STATS("MSDU's Part of AMSDU = %d",
5268 			peer->stats.tx.amsdu_cnt);
5269 	DP_PRINT_STATS("Last Packet RSSI = %d",
5270 			peer->stats.tx.last_ack_rssi);
5271 	DP_PRINT_STATS("Dropped At FW: Removed = %d",
5272 			peer->stats.tx.dropped.fw_rem);
5273 	DP_PRINT_STATS("Dropped At FW: Removed transmitted = %d",
5274 			peer->stats.tx.dropped.fw_rem_tx);
5275 	DP_PRINT_STATS("Dropped At FW: Removed Untransmitted = %d",
5276 			peer->stats.tx.dropped.fw_rem_notx);
5277 	DP_PRINT_STATS("Dropped : Age Out = %d",
5278 			peer->stats.tx.dropped.age_out);
5279 	DP_PRINT_STATS("NAWDS : ");
5280 	DP_PRINT_STATS("	Nawds multicast Drop Tx Packet = %d",
5281 			peer->stats.tx.nawds_mcast_drop);
5282 	DP_PRINT_STATS("	Nawds multicast Tx Packet Count = %d",
5283 			peer->stats.tx.nawds_mcast.num);
5284 	DP_PRINT_STATS("	Nawds multicast Tx Packet Bytes = %llu",
5285 			peer->stats.tx.nawds_mcast.bytes);
5286 
5287 	DP_PRINT_STATS("Rate Info:");
5288 
5289 	for (pkt_type = 0; pkt_type < DOT11_MAX; pkt_type++) {
5290 		index = 0;
5291 		for (mcs = 0; mcs < MAX_MCS; mcs++) {
5292 			if (!dp_rate_string[pkt_type][mcs].valid)
5293 				continue;
5294 
5295 			DP_PRINT_STATS("	%s = %d",
5296 					dp_rate_string[pkt_type][mcs].mcs_type,
5297 					peer->stats.tx.pkt_type[pkt_type].
5298 					mcs_count[mcs]);
5299 		}
5300 
5301 		DP_PRINT_STATS("\n");
5302 	}
5303 
5304 	DP_PRINT_STATS("SGI = "
5305 			" 0.8us %d"
5306 			" 0.4us %d"
5307 			" 1.6us %d"
5308 			" 3.2us %d",
5309 			peer->stats.tx.sgi_count[0],
5310 			peer->stats.tx.sgi_count[1],
5311 			peer->stats.tx.sgi_count[2],
5312 			peer->stats.tx.sgi_count[3]);
5313 	DP_PRINT_STATS("Excess Retries per AC ");
5314 	DP_PRINT_STATS("	 Best effort = %d",
5315 			peer->stats.tx.excess_retries_per_ac[0]);
5316 	DP_PRINT_STATS("	 Background = %d",
5317 			peer->stats.tx.excess_retries_per_ac[1]);
5318 	DP_PRINT_STATS("	 Video = %d",
5319 			peer->stats.tx.excess_retries_per_ac[2]);
5320 	DP_PRINT_STATS("	 Voice = %d",
5321 			peer->stats.tx.excess_retries_per_ac[3]);
5322 	DP_PRINT_STATS("BW Counts = 20MHZ %d 40MHZ %d 80MHZ %d 160MHZ %d\n",
5323 			peer->stats.tx.bw[2], peer->stats.tx.bw[3],
5324 			peer->stats.tx.bw[4], peer->stats.tx.bw[5]);
5325 
5326 	index = 0;
5327 	for (i = 0; i < SS_COUNT; i++) {
5328 		index += qdf_snprint(&nss[index], DP_NSS_LENGTH - index,
5329 				" %d", peer->stats.tx.nss[i]);
5330 	}
5331 	DP_PRINT_STATS("NSS(1-8) = %s",
5332 			nss);
5333 
5334 	DP_PRINT_STATS("Aggregation:");
5335 	DP_PRINT_STATS("	Number of Msdu's Part of Amsdu = %d",
5336 			peer->stats.tx.amsdu_cnt);
5337 	DP_PRINT_STATS("	Number of Msdu's With No Msdu Level Aggregation = %d\n",
5338 			peer->stats.tx.non_amsdu_cnt);
5339 
5340 	DP_PRINT_STATS("Node Rx Stats:");
5341 	DP_PRINT_STATS("Packets Sent To Stack = %d",
5342 			peer->stats.rx.to_stack.num);
5343 	DP_PRINT_STATS("Bytes Sent To Stack = %llu",
5344 			peer->stats.rx.to_stack.bytes);
5345 	for (i = 0; i <  CDP_MAX_RX_RINGS; i++) {
5346 		DP_PRINT_STATS("Ring Id = %d", i);
5347 		DP_PRINT_STATS("	Packets Received = %d",
5348 				peer->stats.rx.rcvd_reo[i].num);
5349 		DP_PRINT_STATS("	Bytes Received = %llu",
5350 				peer->stats.rx.rcvd_reo[i].bytes);
5351 	}
5352 	DP_PRINT_STATS("Multicast Packets Received = %d",
5353 			peer->stats.rx.multicast.num);
5354 	DP_PRINT_STATS("Multicast Bytes Received = %llu",
5355 			peer->stats.rx.multicast.bytes);
5356 	DP_PRINT_STATS("Broadcast Packets Received = %d",
5357 			peer->stats.rx.bcast.num);
5358 	DP_PRINT_STATS("Broadcast Bytes Received = %llu",
5359 			peer->stats.rx.bcast.bytes);
5360 	DP_PRINT_STATS("Intra BSS Packets Received = %d",
5361 			peer->stats.rx.intra_bss.pkts.num);
5362 	DP_PRINT_STATS("Intra BSS Bytes Received = %llu",
5363 			peer->stats.rx.intra_bss.pkts.bytes);
5364 	DP_PRINT_STATS("Raw Packets Received = %d",
5365 			peer->stats.rx.raw.num);
5366 	DP_PRINT_STATS("Raw Bytes Received = %llu",
5367 			peer->stats.rx.raw.bytes);
5368 	DP_PRINT_STATS("Errors: MIC Errors = %d",
5369 			peer->stats.rx.err.mic_err);
5370 	DP_PRINT_STATS("Erros: Decryption Errors = %d",
5371 			peer->stats.rx.err.decrypt_err);
5372 	DP_PRINT_STATS("Msdu's Received As Part of Ampdu = %d",
5373 			peer->stats.rx.non_ampdu_cnt);
5374 	DP_PRINT_STATS("Msdu's Recived As Ampdu = %d",
5375 			peer->stats.rx.ampdu_cnt);
5376 	DP_PRINT_STATS("Msdu's Received Not Part of Amsdu's = %d",
5377 			peer->stats.rx.non_amsdu_cnt);
5378 	DP_PRINT_STATS("MSDUs Received As Part of Amsdu = %d",
5379 			peer->stats.rx.amsdu_cnt);
5380 	DP_PRINT_STATS("NAWDS : ");
5381 	DP_PRINT_STATS("	Nawds multicast Drop Rx Packet = %d",
5382 			peer->stats.rx.nawds_mcast_drop.num);
5383 	DP_PRINT_STATS("	Nawds multicast Drop Rx Packet Bytes = %llu",
5384 			peer->stats.rx.nawds_mcast_drop.bytes);
5385 	DP_PRINT_STATS("SGI ="
5386 			" 0.8us %d"
5387 			" 0.4us %d"
5388 			" 1.6us %d"
5389 			" 3.2us %d",
5390 			peer->stats.rx.sgi_count[0],
5391 			peer->stats.rx.sgi_count[1],
5392 			peer->stats.rx.sgi_count[2],
5393 			peer->stats.rx.sgi_count[3]);
5394 	DP_PRINT_STATS("BW Counts = 20MHZ %d 40MHZ %d 80MHZ %d 160MHZ %d",
5395 			peer->stats.rx.bw[0], peer->stats.rx.bw[1],
5396 			peer->stats.rx.bw[2], peer->stats.rx.bw[3]);
5397 	DP_PRINT_STATS("Reception Type ="
5398 			" SU %d,"
5399 			" MU_MIMO %d,"
5400 			" MU_OFDMA %d,"
5401 			" MU_OFDMA_MIMO %d",
5402 			peer->stats.rx.reception_type[0],
5403 			peer->stats.rx.reception_type[1],
5404 			peer->stats.rx.reception_type[2],
5405 			peer->stats.rx.reception_type[3]);
5406 
5407 
5408 	for (pkt_type = 0; pkt_type < DOT11_MAX; pkt_type++) {
5409 		index = 0;
5410 		for (mcs = 0; mcs < MAX_MCS; mcs++) {
5411 			if (!dp_rate_string[pkt_type][mcs].valid)
5412 				continue;
5413 
5414 			DP_PRINT_STATS("	%s = %d",
5415 					dp_rate_string[pkt_type][mcs].mcs_type,
5416 					peer->stats.rx.pkt_type[pkt_type].
5417 					mcs_count[mcs]);
5418 		}
5419 
5420 		DP_PRINT_STATS("\n");
5421 	}
5422 
5423 	index = 0;
5424 	for (i = 0; i < SS_COUNT; i++) {
5425 		index += qdf_snprint(&nss[index], DP_NSS_LENGTH - index,
5426 				" %d", peer->stats.rx.nss[i]);
5427 	}
5428 	DP_PRINT_STATS("NSS(1-8) = %s",
5429 			nss);
5430 
5431 	DP_PRINT_STATS("Aggregation:");
5432 	DP_PRINT_STATS("	Msdu's Part of Ampdu = %d",
5433 			peer->stats.rx.ampdu_cnt);
5434 	DP_PRINT_STATS("	Msdu's With No Mpdu Level Aggregation = %d",
5435 			peer->stats.rx.non_ampdu_cnt);
5436 	DP_PRINT_STATS("	Msdu's Part of Amsdu = %d",
5437 			peer->stats.rx.amsdu_cnt);
5438 	DP_PRINT_STATS("	Msdu's With No Msdu Level Aggregation = %d",
5439 			peer->stats.rx.non_amsdu_cnt);
5440 }
5441 
5442 /**
5443  * dp_print_host_stats()- Function to print the stats aggregated at host
5444  * @vdev_handle: DP_VDEV handle
5445  * @type: host stats type
5446  *
5447  * Available Stat types
5448  * TXRX_CLEAR_STATS  : Clear the stats
5449  * TXRX_RX_RATE_STATS: Print Rx Rate Info
5450  * TXRX_TX_RATE_STATS: Print Tx Rate Info
5451  * TXRX_TX_HOST_STATS: Print Tx Stats
5452  * TXRX_RX_HOST_STATS: Print Rx Stats
5453  * TXRX_AST_STATS: Print AST Stats
5454  * TXRX_SRNG_PTR_STATS: Print SRNG ring pointer stats
5455  *
5456  * Return: 0 on success; an error message is printed for invalid types
5457  */
5458 static int
5459 dp_print_host_stats(struct cdp_vdev *vdev_handle, enum cdp_host_txrx_stats type)
5460 {
5461 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
5462 	struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;
5463 
5464 	dp_aggregate_pdev_stats(pdev);
5465 
5466 	switch (type) {
5467 	case TXRX_CLEAR_STATS:
5468 		dp_txrx_host_stats_clr(vdev);
5469 		break;
5470 	case TXRX_RX_RATE_STATS:
5471 		dp_print_rx_rates(vdev);
5472 		break;
5473 	case TXRX_TX_RATE_STATS:
5474 		dp_print_tx_rates(vdev);
5475 		break;
5476 	case TXRX_TX_HOST_STATS:
5477 		dp_print_pdev_tx_stats(pdev);
5478 		dp_print_soc_tx_stats(pdev->soc);
5479 		break;
5480 	case TXRX_RX_HOST_STATS:
5481 		dp_print_pdev_rx_stats(pdev);
5482 		dp_print_soc_rx_stats(pdev->soc);
5483 		break;
5484 	case TXRX_AST_STATS:
5485 		dp_print_ast_stats(pdev->soc);
5486 		break;
5487 	case TXRX_SRNG_PTR_STATS:
5488 		 dp_print_ring_stats(pdev);
5489 		 break;
5490 	default:
5491 		DP_TRACE(FATAL, "Wrong Input For TxRx Host Stats");
5492 		break;
5493 	}
5494 	return 0;
5495 }
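
/*
 * Usage sketch (illustrative only, not driver code): dumping the Rx
 * rate table for a vdev through the dispatcher above. Assumes a valid
 * cdp_vdev handle obtained from the attach path.
 *
 *	struct cdp_vdev *vdev_handle;	// e.g. from dp_vdev_attach_wifi3()
 *
 *	dp_print_host_stats(vdev_handle, TXRX_RX_RATE_STATS);
 */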
5496 
5497 /*
5498  * dp_get_host_peer_stats()- function to print peer stats
5499  * @pdev_handle: DP_PDEV handle
5500  * @mac_addr: mac address of the peer
5501  *
5502  * Return: void
5503  */
5504 static void
5505 dp_get_host_peer_stats(struct cdp_pdev *pdev_handle, char *mac_addr)
5506 {
5507 	struct dp_peer *peer;
5508 	uint8_t local_id;
5509 	peer = (struct dp_peer *)dp_find_peer_by_addr(pdev_handle, mac_addr,
5510 			&local_id);
5511 
5512 	if (!peer) {
5513 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
5514 			"%s: Invalid peer\n", __func__);
5515 		return;
5516 	}
5517 
5518 	dp_print_peer_stats(peer);
5519 	dp_peer_rxtid_stats(peer, dp_rx_tid_stats_cb, NULL);
5521 }
5522 
5523 /*
5524  * dp_ppdu_ring_reset()- Reset PPDU Stats ring
5525  * @pdev: DP_PDEV handle
5526  *
5527  * Return: void
5528  */
5529 static void
5530 dp_ppdu_ring_reset(struct dp_pdev *pdev)
5531 {
5532 	struct htt_rx_ring_tlv_filter htt_tlv_filter;
5533 
5534 	qdf_mem_set(&(htt_tlv_filter), sizeof(htt_tlv_filter), 0x0);
5535 
5536 	htt_h2t_rx_ring_cfg(pdev->soc->htt_handle, pdev->pdev_id,
5537 		pdev->rxdma_mon_status_ring.hal_srng, RXDMA_MONITOR_STATUS,
5538 		RX_BUFFER_SIZE, &htt_tlv_filter);
5539 
5540 }
5541 
5542 /*
5543  * dp_ppdu_ring_cfg()- Configure PPDU Stats ring
5544  * @pdev: DP_PDEV handle
5545  *
5546  * Return: void
5547  */
5548 static void
5549 dp_ppdu_ring_cfg(struct dp_pdev *pdev)
5550 {
5551 	struct htt_rx_ring_tlv_filter htt_tlv_filter = {0};
5552 
5553 	htt_tlv_filter.mpdu_start = 0;
5554 	htt_tlv_filter.msdu_start = 0;
5555 	htt_tlv_filter.packet = 0;
5556 	htt_tlv_filter.msdu_end = 0;
5557 	htt_tlv_filter.mpdu_end = 0;
5558 	htt_tlv_filter.packet_header = 1;
5559 	htt_tlv_filter.attention = 1;
5560 	htt_tlv_filter.ppdu_start = 1;
5561 	htt_tlv_filter.ppdu_end = 1;
5562 	htt_tlv_filter.ppdu_end_user_stats = 1;
5563 	htt_tlv_filter.ppdu_end_user_stats_ext = 1;
5564 	htt_tlv_filter.ppdu_end_status_done = 1;
5565 	htt_tlv_filter.enable_fp = 1;
5566 	htt_tlv_filter.enable_md = 0;
5567 	if (pdev->mcopy_mode)
5568 		htt_tlv_filter.enable_mo = 1;
5569 	htt_tlv_filter.fp_mgmt_filter = FILTER_MGMT_ALL;
5570 	htt_tlv_filter.fp_ctrl_filter = FILTER_CTRL_ALL;
5571 	htt_tlv_filter.fp_data_filter = FILTER_DATA_ALL;
5572 	htt_tlv_filter.mo_mgmt_filter = FILTER_MGMT_ALL;
5573 	htt_tlv_filter.mo_ctrl_filter = FILTER_CTRL_ALL;
5574 	htt_tlv_filter.mo_data_filter = FILTER_DATA_ALL;
5575 
5576 	htt_h2t_rx_ring_cfg(pdev->soc->htt_handle, pdev->pdev_id,
5577 		pdev->rxdma_mon_status_ring.hal_srng, RXDMA_MONITOR_STATUS,
5578 		RX_BUFFER_SIZE, &htt_tlv_filter);
5579 }
5580 
5581 /*
5582  * dp_config_debug_sniffer()- API to enable/disable debug sniffer
5583  * @pdev_handle: DP_PDEV handle
5584  * @val: 0 - disable, 1 - enable tx sniffer, 2 - enable M-copy mode
5585  *
5586  * Return: void
5587  */
5588 static void
5589 dp_config_debug_sniffer(struct cdp_pdev *pdev_handle, int val)
5590 {
5591 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
5592 
5593 	switch (val) {
5594 	case 0:
5595 		pdev->tx_sniffer_enable = 0;
5596 		pdev->mcopy_mode = 0;
5597 
5598 		if (!pdev->pktlog_ppdu_stats && !pdev->enhanced_stats_en) {
5599 			dp_h2t_cfg_stats_msg_send(pdev, 0, pdev->pdev_id);
5600 			dp_ppdu_ring_reset(pdev);
5601 		} else if (pdev->enhanced_stats_en) {
5602 			dp_h2t_cfg_stats_msg_send(pdev,
5603 				DP_PPDU_STATS_CFG_ENH_STATS, pdev->pdev_id);
5604 		}
5605 		break;
5606 
5607 	case 1:
5608 		pdev->tx_sniffer_enable = 1;
5609 		pdev->mcopy_mode = 0;
5610 
5611 		if (!pdev->pktlog_ppdu_stats)
5612 			dp_h2t_cfg_stats_msg_send(pdev,
5613 				DP_PPDU_STATS_CFG_SNIFFER, pdev->pdev_id);
5614 		break;
5615 	case 2:
5616 		pdev->mcopy_mode = 1;
5617 		pdev->tx_sniffer_enable = 0;
5618 		if (!pdev->enhanced_stats_en)
5619 			dp_ppdu_ring_cfg(pdev);
5620 
5621 		if (!pdev->pktlog_ppdu_stats)
5622 			dp_h2t_cfg_stats_msg_send(pdev,
5623 				DP_PPDU_STATS_CFG_SNIFFER, pdev->pdev_id);
5624 		break;
5625 	default:
5626 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
5627 			"Invalid value\n");
5628 		break;
5629 	}
5630 }
5631 
5632 /*
5633  * dp_enable_enhanced_stats()- API to enable enhanced statistics
5634  * @pdev_handle: DP_PDEV handle
5635  *
5636  * Return: void
5637  */
5638 static void
5639 dp_enable_enhanced_stats(struct cdp_pdev *pdev_handle)
5640 {
5641 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
5642 	pdev->enhanced_stats_en = 1;
5643 
5644 	if (!pdev->mcopy_mode)
5645 		dp_ppdu_ring_cfg(pdev);
5646 
5647 	if (!pdev->pktlog_ppdu_stats && !pdev->tx_sniffer_enable &&
			!pdev->mcopy_mode)
5648 		dp_h2t_cfg_stats_msg_send(pdev, DP_PPDU_STATS_CFG_ENH_STATS,
				pdev->pdev_id);
5649 }
5650 
5651 /*
5652  * dp_disable_enhanced_stats()- API to disable enhanced statistics
5653  * @pdev_handle: DP_PDEV handle
5654  *
5655  * Return: void
5656  */
5657 static void
5658 dp_disable_enhanced_stats(struct cdp_pdev *pdev_handle)
5659 {
5660 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
5661 
5662 	pdev->enhanced_stats_en = 0;
5663 
5664 	if (!pdev->pktlog_ppdu_stats && !pdev->tx_sniffer_enable &&
			!pdev->mcopy_mode)
5665 		dp_h2t_cfg_stats_msg_send(pdev, 0, pdev->pdev_id);
5666 
5667 	if (!pdev->mcopy_mode)
5668 		dp_ppdu_ring_reset(pdev);
5669 }
5670 
5671 /*
5672  * dp_get_fw_peer_stats()- function to print peer stats
5673  * @pdev_handle: DP_PDEV handle
5674  * @mac_addr: mac address of the peer
5675  * @cap: Type of htt stats requested
5676  *
5677  * Currently supports only MAC ID based requests:
5678  *	1: HTT_PEER_STATS_REQ_MODE_NO_QUERY
5679  *	2: HTT_PEER_STATS_REQ_MODE_QUERY_TQM
5680  *	3: HTT_PEER_STATS_REQ_MODE_FLUSH_TQM
5681  *
5682  * Return: void
5683  */
5684 static void
5685 dp_get_fw_peer_stats(struct cdp_pdev *pdev_handle, uint8_t *mac_addr,
5686 		uint32_t cap)
5687 {
5688 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
5689 	int i;
5690 	uint32_t config_param0 = 0;
5691 	uint32_t config_param1 = 0;
5692 	uint32_t config_param2 = 0;
5693 	uint32_t config_param3 = 0;
5694 
5695 	HTT_DBG_EXT_STATS_PEER_INFO_IS_MAC_ADDR_SET(config_param0, 1);
5696 	config_param0 |= (1 << (cap + 1));
5697 
5698 	for (i = 0; i < HTT_PEER_STATS_MAX_TLV; i++) {
5699 		config_param1 |= (1 << i);
5700 	}
5701 
5702 	config_param2 |= (mac_addr[0] & 0x000000ff);
5703 	config_param2 |= ((mac_addr[1] << 8) & 0x0000ff00);
5704 	config_param2 |= ((mac_addr[2] << 16) & 0x00ff0000);
5705 	config_param2 |= ((mac_addr[3] << 24) & 0xff000000);
5706 
5707 	config_param3 |= (mac_addr[4] & 0x000000ff);
5708 	config_param3 |= ((mac_addr[5] << 8) & 0x0000ff00);
5709 
5710 	dp_h2t_ext_stats_msg_send(pdev, HTT_DBG_EXT_STATS_PEER_INFO,
5711 			config_param0, config_param1, config_param2,
5712 			config_param3, 0, 0, 0);
5713 
5714 }
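
/*
 * Worked example of the packing above (illustrative): for peer MAC
 * 00:03:7f:12:34:56 with cap = HTT_PEER_STATS_REQ_MODE_NO_QUERY (1),
 * the OR operations above produce
 *
 *	config_param0 |= (1 << 2);		// cap + 1
 *	config_param1  = all TLV bits set	// request every TLV
 *	config_param2  = 0x127f0300;		// mac[3]..mac[0], mac[0] in LSB
 *	config_param3  = 0x00005634;		// mac[5], mac[4]
 *
 * and FW reassembles the address from the two 32-bit words.
 */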
5715 
5716 /* This struct definition will be removed from here
5717  * once it gets added to the FW headers */
5718 struct httstats_cmd_req {
5719 	uint32_t    config_param0;
5720 	uint32_t    config_param1;
5721 	uint32_t    config_param2;
5722 	uint32_t    config_param3;
5723 	int         cookie;
5724 	u_int8_t    stats_id;
5725 };
5726 
5727 /*
5728  * dp_get_htt_stats: function to process the HTT stats request
5729  * @pdev_handle: DP pdev handle
5730  * @data: pointer to request data
5731  * @data_len: length for request data
5732  *
5733  * return: void
5734  */
5735 static void
5736 dp_get_htt_stats(struct cdp_pdev *pdev_handle, void *data, uint32_t data_len)
5737 {
5738 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
5739 	struct httstats_cmd_req *req = (struct httstats_cmd_req *)data;
5740 
5741 	QDF_ASSERT(data_len == sizeof(struct httstats_cmd_req));
5742 	dp_h2t_ext_stats_msg_send(pdev, req->stats_id,
5743 				req->config_param0, req->config_param1,
5744 				req->config_param2, req->config_param3,
5745 				req->cookie, 0, 0);
5746 }
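
/*
 * Usage sketch (illustrative): building a request for dp_get_htt_stats().
 * The stats_id chosen here is an arbitrary example.
 *
 *	struct httstats_cmd_req req = {0};
 *
 *	req.stats_id = HTT_DBG_EXT_STATS_PDEV_TX;
 *	req.cookie = 0;
 *	dp_get_htt_stats(pdev_handle, &req, sizeof(req));
 */
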
5747 /*
5748  * dp_set_pdev_param: function to set parameters in pdev
5749  * @pdev_handle: DP pdev handle
5750  * @param: parameter type to be set
5751  * @val: value of parameter to be set
5752  *
5753  * return: void
5754  */
5755 static void dp_set_pdev_param(struct cdp_pdev *pdev_handle,
5756 		enum cdp_pdev_param_type param, uint8_t val)
5757 {
5758 	switch (param) {
5759 	case CDP_CONFIG_DEBUG_SNIFFER:
5760 		dp_config_debug_sniffer(pdev_handle, val);
5761 		break;
5762 	default:
5763 		break;
5764 	}
5765 }
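
/*
 * Usage sketch (illustrative): enabling M-copy mode (val = 2) through
 * the generic pdev-param path, which lands in dp_config_debug_sniffer()
 * defined earlier.
 *
 *	dp_set_pdev_param(pdev_handle, CDP_CONFIG_DEBUG_SNIFFER, 2);
 */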
5766 
5767 /*
5768  * dp_set_vdev_param: function to set parameters in vdev
5769  * @param: parameter type to be set
5770  * @val: value of parameter to be set
5771  *
5772  * return: void
5773  */
5774 static void dp_set_vdev_param(struct cdp_vdev *vdev_handle,
5775 		enum cdp_vdev_param_type param, uint32_t val)
5776 {
5777 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
5778 	switch (param) {
5779 	case CDP_ENABLE_WDS:
5780 		vdev->wds_enabled = val;
5781 		break;
5782 	case CDP_ENABLE_NAWDS:
5783 		vdev->nawds_enabled = val;
5784 		break;
5785 	case CDP_ENABLE_MCAST_EN:
5786 		vdev->mcast_enhancement_en = val;
5787 		break;
5788 	case CDP_ENABLE_PROXYSTA:
5789 		vdev->proxysta_vdev = val;
5790 		break;
5791 	case CDP_UPDATE_TDLS_FLAGS:
5792 		vdev->tdls_link_connected = val;
5793 		break;
5794 	case CDP_CFG_WDS_AGING_TIMER:
5795 		if (val == 0)
5796 			qdf_timer_stop(&vdev->pdev->soc->wds_aging_timer);
5797 		else if (val != vdev->wds_aging_timer_val)
5798 			qdf_timer_mod(&vdev->pdev->soc->wds_aging_timer, val);
5799 
5800 		vdev->wds_aging_timer_val = val;
5801 		break;
5802 	case CDP_ENABLE_AP_BRIDGE:
5803 		if (wlan_op_mode_sta != vdev->opmode)
5804 			vdev->ap_bridge_enabled = val;
5805 		else
5806 			vdev->ap_bridge_enabled = false;
5807 		break;
5808 	case CDP_ENABLE_CIPHER:
5809 		vdev->sec_type = val;
5810 		break;
5811 	case CDP_ENABLE_QWRAP_ISOLATION:
5812 		vdev->isolation_vdev = val;
5813 		break;
5814 	default:
5815 		break;
5816 	}
5817 
5818 	dp_tx_vdev_update_search_flags(vdev);
5819 }
5820 
5821 /**
5822  * dp_peer_set_nawds: set nawds bit in peer
5823  * @peer_handle: pointer to peer
5824  * @value: enable/disable nawds
5825  *
5826  * return: void
5827  */
5828 static void dp_peer_set_nawds(struct cdp_peer *peer_handle, uint8_t value)
5829 {
5830 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
5831 	peer->nawds_enabled = value;
5832 }
5833 
5834 /*
5835  * dp_set_vdev_dscp_tid_map_wifi3(): Update Map ID selected for particular vdev
5836  * @vdev_handle: DP_VDEV handle
5837  * @map_id:ID of map that needs to be updated
5838  *
5839  * Return: void
5840  */
5841 static void dp_set_vdev_dscp_tid_map_wifi3(struct cdp_vdev *vdev_handle,
5842 		uint8_t map_id)
5843 {
5844 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
5845 	vdev->dscp_tid_map_id = map_id;
5847 }
5848 
5849 /*
5850  * dp_txrx_stats_publish(): publish pdev stats into a buffer
5851  * @pdev_handle: DP_PDEV handle
5852  * @buf: to hold pdev_stats
5853  *
5854  * Return: int
5855  */
5856 static int
5857 dp_txrx_stats_publish(struct cdp_pdev *pdev_handle, void *buf)
5858 {
5859 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
5860 	struct cdp_pdev_stats *buffer = (struct cdp_pdev_stats *) buf;
5861 	struct cdp_txrx_stats_req req = {0,};
5862 
5863 	dp_aggregate_pdev_stats(pdev);
5864 	req.stats = HTT_DBG_EXT_STATS_PDEV_TX;
5865 	req.cookie_val = 1;
5866 	dp_h2t_ext_stats_msg_send(pdev, req.stats, req.param0,
5867 				req.param1, req.param2, req.param3, 0,
5868 				req.cookie_val, 0);
5869 
5870 	msleep(DP_MAX_SLEEP_TIME);
5871 
5872 	req.stats = HTT_DBG_EXT_STATS_PDEV_RX;
5873 	req.cookie_val = 1;
5874 	dp_h2t_ext_stats_msg_send(pdev, req.stats, req.param0,
5875 				req.param1, req.param2, req.param3, 0,
5876 				req.cookie_val, 0);
5877 
5878 	msleep(DP_MAX_SLEEP_TIME);
5879 	qdf_mem_copy(buffer, &pdev->stats, sizeof(pdev->stats));
5880 
5881 	return TXRX_STATS_LEVEL;
5882 }
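
/*
 * Usage sketch (illustrative): the caller owns the output buffer and it
 * must be large enough to hold a struct cdp_pdev_stats.
 *
 *	struct cdp_pdev_stats stats_buf;
 *
 *	dp_txrx_stats_publish(pdev_handle, &stats_buf);
 */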
5883 
5884 /**
5885  * dp_set_pdev_dscp_tid_map_wifi3(): update dscp tid map in pdev
5886  * @pdev: DP_PDEV handle
5887  * @map_id: ID of map that needs to be updated
5888  * @tos: index value in map
5889  * @tid: tid value passed by the user
5890  *
5891  * Return: void
5892  */
5893 static void dp_set_pdev_dscp_tid_map_wifi3(struct cdp_pdev *pdev_handle,
5894 		uint8_t map_id, uint8_t tos, uint8_t tid)
5895 {
5896 	uint8_t dscp;
5897 	struct dp_pdev *pdev = (struct dp_pdev *) pdev_handle;
5898 	dscp = (tos >> DP_IP_DSCP_SHIFT) & DP_IP_DSCP_MASK;
5899 	pdev->dscp_tid_map[map_id][dscp] = tid;
5900 	if (map_id < HAL_MAX_HW_DSCP_TID_MAPS)
5901 		hal_tx_update_dscp_tid(pdev->soc->hal_soc, tid,
5902 			map_id, dscp);
5904 }
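
/*
 * Worked example (illustrative), assuming the usual DP_IP_DSCP_SHIFT of
 * 2 and DP_IP_DSCP_MASK of 0x3f: for tos = 0xb8 (DSCP 46, i.e. EF),
 *
 *	dscp = (0xb8 >> 2) & 0x3f = 46
 *
 * so dp_set_pdev_dscp_tid_map_wifi3(pdev_handle, 0, 0xb8, 6) steers
 * EF-marked traffic on map 0 to TID 6, both in the soft map and, since
 * map 0 is below HAL_MAX_HW_DSCP_TID_MAPS, in the HW map.
 */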
5905 
5906 /**
5907  * dp_fw_stats_process(): Process TxRX FW stats request
5908  * @vdev_handle: DP VDEV handle
5909  * @req: stats request
5910  *
5911  * return: int
5912  */
5913 static int dp_fw_stats_process(struct cdp_vdev *vdev_handle,
5914 		struct cdp_txrx_stats_req *req)
5915 {
5916 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
5917 	struct dp_pdev *pdev = NULL;
5918 	uint32_t stats = req->stats;
5919 	uint8_t channel = req->channel;
5920 
5921 	if (!vdev) {
5922 		DP_TRACE(NONE, "VDEV not found");
5923 		return 1;
5924 	}
5925 	pdev = vdev->pdev;
5926 
5927 	/*
5928 	 * For HTT_DBG_EXT_STATS_RESET command, FW need to config
5929 	 * from param0 to param3 according to below rule:
5930 	 *
5931 	 * PARAM:
5932 	 *   - config_param0 : start_offset (stats type)
5933 	 *   - config_param1 : stats bmask from start offset
5934 	 *   - config_param2 : stats bmask from start offset + 32
5935 	 *   - config_param3 : stats bmask from start offset + 64
5936 	 */
5937 	if (req->stats == CDP_TXRX_STATS_0) {
5938 		req->param0 = HTT_DBG_EXT_STATS_PDEV_TX;
5939 		req->param1 = 0xFFFFFFFF;
5940 		req->param2 = 0xFFFFFFFF;
5941 		req->param3 = 0xFFFFFFFF;
5942 	}
5943 
5944 	return dp_h2t_ext_stats_msg_send(pdev, stats, req->param0,
5945 				req->param1, req->param2, req->param3,
5946 				0, 0, channel);
5947 }
5948 
5949 /**
5950  * dp_txrx_stats_request - function to map to firmware and host stats
5951  * @vdev: virtual handle
5952  * @req: stats request
5953  *
5954  * Return: integer
5955  */
5956 static int dp_txrx_stats_request(struct cdp_vdev *vdev,
5957 		struct cdp_txrx_stats_req *req)
5958 {
5959 	int host_stats;
5960 	int fw_stats;
5961 	enum cdp_stats stats;
5962 
5963 	if (!vdev || !req) {
5964 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
5965 				"Invalid vdev/req instance");
5966 		return 0;
5967 	}
5968 
5969 	stats = req->stats;
5970 	if (stats >= CDP_TXRX_MAX_STATS)
5971 		return 0;
5972 
5973 	/*
5974 	 * DP_CURR_FW_STATS_AVAIL: number of FW stats currently available;
5975 	 * it has to be updated whenever new FW HTT stats are added
5976 	 */
5977 	if (stats > CDP_TXRX_STATS_HTT_MAX)
5978 		stats = stats + DP_CURR_FW_STATS_AVAIL - DP_HTT_DBG_EXT_STATS_MAX;
5979 	fw_stats = dp_stats_mapping_table[stats][STATS_FW];
5980 	host_stats = dp_stats_mapping_table[stats][STATS_HOST];
5981 
5982 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
5983 		 "stats: %u fw_stats_type: %d host_stats_type: %d",
5984 		  stats, fw_stats, host_stats);
5985 
5986 	if (fw_stats != TXRX_FW_STATS_INVALID) {
5987 		/* update request with FW stats type */
5988 		req->stats = fw_stats;
5989 		return dp_fw_stats_process(vdev, req);
5990 	}
5991 
5992 	if ((host_stats != TXRX_HOST_STATS_INVALID) &&
5993 			(host_stats <= TXRX_HOST_STATS_MAX))
5994 		return dp_print_host_stats(vdev, host_stats);
5995 	else
5996 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
5997 				"Wrong Input for TxRx Stats");
5998 
5999 	return 0;
6000 }
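
/*
 * Worked example of the remap above (illustrative): with
 * DP_CURR_FW_STATS_AVAIL = 19 and DP_HTT_DBG_EXT_STATS_MAX = 256, any
 * stats id above CDP_TXRX_STATS_HTT_MAX is shifted down by
 * 256 - 19 = 237, compacting the sparse enum so dp_stats_mapping_table
 * only needs entries for stats that actually exist.
 */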
6001 
6002 /*
6003  * dp_print_napi_stats(): NAPI stats
6004  * @soc - soc handle
6005  */
6006 static void dp_print_napi_stats(struct dp_soc *soc)
6007 {
6008 	hif_print_napi_stats(soc->hif_handle);
6009 }
6010 
6011 /*
6012  * dp_print_per_ring_stats(): Packet count per ring
6013  * @soc - soc handle
6014  */
6015 static void dp_print_per_ring_stats(struct dp_soc *soc)
6016 {
6017 	uint8_t ring;
6018 	uint16_t core;
6019 	uint64_t total_packets;
6020 
6021 	DP_TRACE(FATAL, "Reo packets per ring:");
6022 	for (ring = 0; ring < MAX_REO_DEST_RINGS; ring++) {
6023 		total_packets = 0;
6024 		DP_TRACE(FATAL, "Packets on ring %u:", ring);
6025 		for (core = 0; core < NR_CPUS; core++) {
6026 			DP_TRACE(FATAL, "Packets arriving on core %u: %llu",
6027 				core, soc->stats.rx.ring_packets[core][ring]);
6028 			total_packets += soc->stats.rx.ring_packets[core][ring];
6029 		}
6030 		DP_TRACE(FATAL, "Total packets on ring %u: %llu",
6031 			ring, total_packets);
6032 	}
6033 }
6034 
6035 /*
6036  * dp_txrx_path_stats() - Function to display tx/rx path statistics
6037  * @soc - soc handle
6038  *
6039  * return: none
6040  */
6041 static void dp_txrx_path_stats(struct dp_soc *soc)
6042 {
6043 	uint8_t error_code;
6044 	uint8_t loop_pdev;
6045 	struct dp_pdev *pdev;
6046 	uint8_t i;
6047 
6048 	for (loop_pdev = 0; loop_pdev < soc->pdev_count; loop_pdev++) {
6049 
6050 		pdev = soc->pdev_list[loop_pdev];
6051 		dp_aggregate_pdev_stats(pdev);
6052 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
6053 			"Tx path Statistics:");
6054 
6055 		DP_TRACE(FATAL, "from stack: %u msdus (%llu bytes)",
6056 			pdev->stats.tx_i.rcvd.num,
6057 			pdev->stats.tx_i.rcvd.bytes);
6058 		DP_TRACE(FATAL, "processed from host: %u msdus (%llu bytes)",
6059 			pdev->stats.tx_i.processed.num,
6060 			pdev->stats.tx_i.processed.bytes);
6061 		DP_TRACE(FATAL, "successfully transmitted: %u msdus (%llu bytes)",
6062 			pdev->stats.tx.tx_success.num,
6063 			pdev->stats.tx.tx_success.bytes);
6064 
6065 		DP_TRACE(FATAL, "Dropped in host:");
6066 		DP_TRACE(FATAL, "Total packets dropped: %u,",
6067 			pdev->stats.tx_i.dropped.dropped_pkt.num);
6068 		DP_TRACE(FATAL, "Descriptor not available: %u",
6069 			pdev->stats.tx_i.dropped.desc_na);
6070 		DP_TRACE(FATAL, "Ring full: %u",
6071 			pdev->stats.tx_i.dropped.ring_full);
6072 		DP_TRACE(FATAL, "Enqueue fail: %u",
6073 			pdev->stats.tx_i.dropped.enqueue_fail);
6074 		DP_TRACE(FATAL, "DMA Error: %u",
6075 			pdev->stats.tx_i.dropped.dma_error);
6076 
6077 		DP_TRACE(FATAL, "Dropped in hardware:");
6078 		DP_TRACE(FATAL, "total packets dropped: %u",
6079 			pdev->stats.tx.tx_failed);
6080 		DP_TRACE(FATAL, "mpdu age out: %u",
6081 			pdev->stats.tx.dropped.age_out);
6082 		DP_TRACE(FATAL, "firmware removed: %u",
6083 			pdev->stats.tx.dropped.fw_rem);
6084 		DP_TRACE(FATAL, "firmware removed tx: %u",
6085 			pdev->stats.tx.dropped.fw_rem_tx);
6086 		DP_TRACE(FATAL, "firmware removed notx %u",
6087 			pdev->stats.tx.dropped.fw_rem_notx);
6088 		DP_TRACE(FATAL, "peer_invalid: %u",
6089 			pdev->soc->stats.tx.tx_invalid_peer.num);
6090 
6091 
6092 		DP_TRACE(FATAL, "Tx packets sent per interrupt:");
6093 		DP_TRACE(FATAL, "Single Packet: %u",
6094 			pdev->stats.tx_comp_histogram.pkts_1);
6095 		DP_TRACE(FATAL, "2-20 Packets:  %u",
6096 			pdev->stats.tx_comp_histogram.pkts_2_20);
6097 		DP_TRACE(FATAL, "21-40 Packets: %u",
6098 			pdev->stats.tx_comp_histogram.pkts_21_40);
6099 		DP_TRACE(FATAL, "41-60 Packets: %u",
6100 			pdev->stats.tx_comp_histogram.pkts_41_60);
6101 		DP_TRACE(FATAL, "61-80 Packets: %u",
6102 			pdev->stats.tx_comp_histogram.pkts_61_80);
6103 		DP_TRACE(FATAL, "81-100 Packets: %u",
6104 			pdev->stats.tx_comp_histogram.pkts_81_100);
6105 		DP_TRACE(FATAL, "101-200 Packets: %u",
6106 			pdev->stats.tx_comp_histogram.pkts_101_200);
6107 		DP_TRACE(FATAL, "   201+ Packets: %u",
6108 			pdev->stats.tx_comp_histogram.pkts_201_plus);
6109 
6110 		DP_TRACE(FATAL, "Rx path statistics");
6111 
6112 		DP_TRACE(FATAL, "delivered %u msdus ( %llu bytes),",
6113 			pdev->stats.rx.to_stack.num,
6114 			pdev->stats.rx.to_stack.bytes);
6115 		for (i = 0; i <  CDP_MAX_RX_RINGS; i++)
6116 			DP_TRACE(FATAL, "received on reo[%d] %u msdus ( %llu bytes),",
6117 					i, pdev->stats.rx.rcvd_reo[i].num,
6118 					pdev->stats.rx.rcvd_reo[i].bytes);
6119 		DP_TRACE(FATAL, "intra-bss packets %u msdus ( %llu bytes),",
6120 			pdev->stats.rx.intra_bss.pkts.num,
6121 			pdev->stats.rx.intra_bss.pkts.bytes);
6122 		DP_TRACE(FATAL, "intra-bss fails %u msdus ( %llu bytes),",
6123 			pdev->stats.rx.intra_bss.fail.num,
6124 			pdev->stats.rx.intra_bss.fail.bytes);
6125 		DP_TRACE(FATAL, "raw packets %u msdus ( %llu bytes),",
6126 			pdev->stats.rx.raw.num,
6127 			pdev->stats.rx.raw.bytes);
6128 		DP_TRACE(FATAL, "dropped: error %u msdus",
6129 			pdev->stats.rx.err.mic_err);
6130 		DP_TRACE(FATAL, "peer invalid %u",
6131 			pdev->soc->stats.rx.err.rx_invalid_peer.num);
6132 
6133 		DP_TRACE(FATAL, "Reo Statistics");
6134 		DP_TRACE(FATAL, "rbm error: %u msdus",
6135 			pdev->soc->stats.rx.err.invalid_rbm);
6136 		DP_TRACE(FATAL, "hal ring access fail: %u msdus",
6137 			pdev->soc->stats.rx.err.hal_ring_access_fail);
6138 
6139 		DP_TRACE(FATAL, "Reo errors");
6140 
6141 		for (error_code = 0; error_code < HAL_REO_ERR_MAX;
6142 				error_code++) {
6143 			DP_TRACE(FATAL, "Reo error number (%u): %u msdus",
6144 				error_code,
6145 				pdev->soc->stats.rx.err.reo_error[error_code]);
6146 		}
6147 
6148 		for (error_code = 0; error_code < HAL_RXDMA_ERR_MAX;
6149 				error_code++) {
6150 			DP_TRACE(FATAL, "Rxdma error number (%u): %u msdus",
6151 				error_code,
6152 				pdev->soc->stats.rx.err
6153 				.rxdma_error[error_code]);
6154 		}
6155 
6156 		DP_TRACE(FATAL, "Rx packets reaped per interrupt:");
6157 		DP_TRACE(FATAL, "Single Packet: %u",
6158 			 pdev->stats.rx_ind_histogram.pkts_1);
6159 		DP_TRACE(FATAL, "2-20 Packets:  %u",
6160 			 pdev->stats.rx_ind_histogram.pkts_2_20);
6161 		DP_TRACE(FATAL, "21-40 Packets: %u",
6162 			 pdev->stats.rx_ind_histogram.pkts_21_40);
6163 		DP_TRACE(FATAL, "41-60 Packets: %u",
6164 			 pdev->stats.rx_ind_histogram.pkts_41_60);
6165 		DP_TRACE(FATAL, "61-80 Packets: %u",
6166 			 pdev->stats.rx_ind_histogram.pkts_61_80);
6167 		DP_TRACE(FATAL, "81-100 Packets: %u",
6168 			 pdev->stats.rx_ind_histogram.pkts_81_100);
6169 		DP_TRACE(FATAL, "101-200 Packets: %u",
6170 			 pdev->stats.rx_ind_histogram.pkts_101_200);
6171 		DP_TRACE(FATAL, "   201+ Packets: %u",
6172 			 pdev->stats.rx_ind_histogram.pkts_201_plus);
6173 
6174 		DP_TRACE_STATS(ERROR, "%s: tso_enable: %u lro_enable: %u rx_hash: %u napi_enable: %u",
6175 			__func__,
6176 			pdev->soc->wlan_cfg_ctx->tso_enabled,
6177 			pdev->soc->wlan_cfg_ctx->lro_enabled,
6178 			pdev->soc->wlan_cfg_ctx->rx_hash,
6179 			pdev->soc->wlan_cfg_ctx->napi_enabled);
6180 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
6181 		DP_TRACE_STATS(ERROR, "%s: Tx flow stop queue: %u tx flow start queue offset: %u",
6182 			__func__,
6183 			pdev->soc->wlan_cfg_ctx->tx_flow_stop_queue_threshold,
6184 			pdev->soc->wlan_cfg_ctx->tx_flow_start_queue_offset);
6185 #endif
6186 	}
6187 }
6188 
6189 /*
6190  * dp_txrx_dump_stats() - Dump statistics
6191  * @psoc - soc handle, @value - statistics option, @level - verbosity
6192  */
6193 static QDF_STATUS dp_txrx_dump_stats(void *psoc, uint16_t value,
6194 				     enum qdf_stats_verbosity_level level)
6195 {
6196 	struct dp_soc *soc =
6197 		(struct dp_soc *)psoc;
6198 	QDF_STATUS status = QDF_STATUS_SUCCESS;
6199 
6200 	if (!soc) {
6201 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
6202 			"%s: soc is NULL", __func__);
6203 		return QDF_STATUS_E_INVAL;
6204 	}
6205 
6206 	switch (value) {
6207 	case CDP_TXRX_PATH_STATS:
6208 		dp_txrx_path_stats(soc);
6209 		break;
6210 
6211 	case CDP_RX_RING_STATS:
6212 		dp_print_per_ring_stats(soc);
6213 		break;
6214 
6215 	case CDP_TXRX_TSO_STATS:
6216 		/* TODO: NOT IMPLEMENTED */
6217 		break;
6218 
6219 	case CDP_DUMP_TX_FLOW_POOL_INFO:
6220 		cdp_dump_flow_pool_info((struct cdp_soc_t *)soc);
6221 		break;
6222 
6223 	case CDP_DP_NAPI_STATS:
6224 		dp_print_napi_stats(soc);
6225 		break;
6226 
6227 	case CDP_TXRX_DESC_STATS:
6228 		/* TODO: NOT IMPLEMENTED */
6229 		break;
6230 
6231 	default:
6232 		status = QDF_STATUS_E_INVAL;
6233 		break;
6234 	}
6235 
6236 	return status;
6237 
6238 }
6239 
6240 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
6241 /**
6242  * dp_update_flow_control_parameters() - API to store datapath
6243  *                            config parameters
6244  * @soc: soc handle
6245  * @params: ini parameter handle
6246  *
6247  * Return: void
6248  */
6249 static inline
6250 void dp_update_flow_control_parameters(struct dp_soc *soc,
6251 				struct cdp_config_params *params)
6252 {
6253 	soc->wlan_cfg_ctx->tx_flow_stop_queue_threshold =
6254 					params->tx_flow_stop_queue_threshold;
6255 	soc->wlan_cfg_ctx->tx_flow_start_queue_offset =
6256 					params->tx_flow_start_queue_offset;
6257 }
6258 #else
6259 static inline
6260 void dp_update_flow_control_parameters(struct dp_soc *soc,
6261 				struct cdp_config_params *params)
6262 {
6263 }
6264 #endif
6265 
6266 /**
6267  * dp_update_config_parameters() - API to store datapath
6268  *                            config parameters
6269  * @psoc: soc handle
6270  * @params: ini parameter handle
6271  *
6272  * Return: status
6273  */
6274 static
6275 QDF_STATUS dp_update_config_parameters(struct cdp_soc *psoc,
6276 				struct cdp_config_params *params)
6277 {
6278 	struct dp_soc *soc = (struct dp_soc *)psoc;
6279 
6280 	if (!(soc)) {
6281 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
6282 				"%s: Invalid handle", __func__);
6283 		return QDF_STATUS_E_INVAL;
6284 	}
6285 
6286 	soc->wlan_cfg_ctx->tso_enabled = params->tso_enable;
6287 	soc->wlan_cfg_ctx->lro_enabled = params->lro_enable;
6288 	soc->wlan_cfg_ctx->rx_hash = params->flow_steering_enable;
6289 	soc->wlan_cfg_ctx->tcp_udp_checksumoffload =
6290 				params->tcp_udp_checksumoffload;
6291 	soc->wlan_cfg_ctx->napi_enabled = params->napi_enable;
6292 
6293 	dp_update_flow_control_parameters(soc, params);
6294 
6295 	return QDF_STATUS_SUCCESS;
6296 }
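
/*
 * Usage sketch (illustrative): populating cdp_config_params from INI
 * values before handing them to the datapath.
 *
 *	struct cdp_config_params params = {0};
 *
 *	params.tso_enable = 1;
 *	params.lro_enable = 1;
 *	params.napi_enable = 1;
 *	dp_update_config_parameters((struct cdp_soc *)soc, &params);
 */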
6297 
6298 /**
6299  * dp_txrx_set_wds_rx_policy() - API to set the WDS rx
6300  *                            filter policy for a vdev
6301  * @vdev_handle - datapath vdev handle
6302  * @val: WDS rx policy bitmap (WDS_POLICY_RX_*)
6303  *
6304  * Return: void
6305  */
6306 #ifdef WDS_VENDOR_EXTENSION
6307 void
6308 dp_txrx_set_wds_rx_policy(
6309 		struct cdp_vdev *vdev_handle,
6310 		u_int32_t val)
6311 {
6312 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
6313 	struct dp_peer *peer;
6314 	if (vdev->opmode == wlan_op_mode_ap) {
6315 		/* for ap, set it on bss_peer */
6316 		TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
6317 			if (peer->bss_peer) {
6318 				peer->wds_ecm.wds_rx_filter = 1;
6319 				peer->wds_ecm.wds_rx_ucast_4addr = (val & WDS_POLICY_RX_UCAST_4ADDR) ? 1:0;
6320 				peer->wds_ecm.wds_rx_mcast_4addr = (val & WDS_POLICY_RX_MCAST_4ADDR) ? 1:0;
6321 				break;
6322 			}
6323 		}
6324 	} else if (vdev->opmode == wlan_op_mode_sta) {
6325 		peer = TAILQ_FIRST(&vdev->peer_list);
		/* a sta vdev may not have a peer yet */
		if (!peer)
			return;
6326 		peer->wds_ecm.wds_rx_filter = 1;
6327 		peer->wds_ecm.wds_rx_ucast_4addr = (val & WDS_POLICY_RX_UCAST_4ADDR) ? 1:0;
6328 		peer->wds_ecm.wds_rx_mcast_4addr = (val & WDS_POLICY_RX_MCAST_4ADDR) ? 1:0;
6329 	}
6330 }
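
/*
 * Usage sketch (illustrative): enabling 4-address rx for both unicast
 * and multicast on a vdev.
 *
 *	dp_txrx_set_wds_rx_policy(vdev_handle,
 *			WDS_POLICY_RX_UCAST_4ADDR | WDS_POLICY_RX_MCAST_4ADDR);
 */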
6331 
6332 /**
6333  * dp_txrx_peer_wds_tx_policy_update() - API to set tx wds policy
6334  *
6335  * @peer_handle - datapath peer handle
6336  * @wds_tx_ucast: policy for unicast transmission
6337  * @wds_tx_mcast: policy for multicast transmission
6338  *
6339  * Return: void
6340  */
6341 void
6342 dp_txrx_peer_wds_tx_policy_update(struct cdp_peer *peer_handle,
6343 		int wds_tx_ucast, int wds_tx_mcast)
6344 {
6345 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
6346 	if (wds_tx_ucast || wds_tx_mcast) {
6347 		peer->wds_enabled = 1;
6348 		peer->wds_ecm.wds_tx_ucast_4addr = wds_tx_ucast;
6349 		peer->wds_ecm.wds_tx_mcast_4addr = wds_tx_mcast;
6350 	} else {
6351 		peer->wds_enabled = 0;
6352 		peer->wds_ecm.wds_tx_ucast_4addr = 0;
6353 		peer->wds_ecm.wds_tx_mcast_4addr = 0;
6354 	}
6355 
6356 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
6357 			FL("Policy Update set to :\
6358 				peer->wds_enabled %d\
6359 				peer->wds_ecm.wds_tx_ucast_4addr %d\
6360 				peer->wds_ecm.wds_tx_mcast_4addr %d\n"),
6361 				peer->wds_enabled, peer->wds_ecm.wds_tx_ucast_4addr,
6362 				peer->wds_ecm.wds_tx_mcast_4addr);
6363 	return;
6364 }
6365 #endif
6366 
6367 static struct cdp_wds_ops dp_ops_wds = {
6368 	.vdev_set_wds = dp_vdev_set_wds,
6369 #ifdef WDS_VENDOR_EXTENSION
6370 	.txrx_set_wds_rx_policy = dp_txrx_set_wds_rx_policy,
6371 	.txrx_wds_peer_tx_policy_update = dp_txrx_peer_wds_tx_policy_update,
6372 #endif
6373 };
6374 
6375 /*
6376  * dp_peer_delete_ast_entries(): Delete all AST entries for a peer
6377  * @soc - datapath soc handle
6378  * @peer - datapath peer handle
6379  *
6380  * Delete the AST entries belonging to a peer
6381  */
6382 #ifdef FEATURE_WDS
6383 static inline void dp_peer_delete_ast_entries(struct dp_soc *soc,
6384 		struct dp_peer *peer)
6385 {
6386 	struct dp_ast_entry *ast_entry, *temp_ast_entry;
6387 
6388 	qdf_spin_lock_bh(&soc->ast_lock);
6389 	DP_PEER_ITERATE_ASE_LIST(peer, ast_entry, temp_ast_entry)
6390 		dp_peer_del_ast(soc, ast_entry);
6391 
6392 	qdf_spin_unlock_bh(&soc->ast_lock);
6393 }
6394 #else
6395 static inline void dp_peer_delete_ast_entries(struct dp_soc *soc,
6396 		struct dp_peer *peer)
6397 {
6398 }
6399 #endif
6400 
6401 /*
6402  * dp_txrx_data_tx_cb_set(): set the callback for non standard tx
6403  * @vdev_handle - datapath vdev handle
6404  * @callback - callback function
6405  * @ctxt: callback context
6406  *
6407  */
6408 static void
6409 dp_txrx_data_tx_cb_set(struct cdp_vdev *vdev_handle,
6410 		       ol_txrx_data_tx_cb callback, void *ctxt)
6411 {
6412 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
6413 
6414 	vdev->tx_non_std_data_callback.func = callback;
6415 	vdev->tx_non_std_data_callback.ctxt = ctxt;
6416 }
6417 
6418 /**
6419  * dp_pdev_get_dp_txrx_handle() - get dp handle from pdev
6420  * @pdev_hdl: datapath pdev handle
6421  *
6422  * Return: opaque pointer to dp txrx handle
6423  */
6424 static void *dp_pdev_get_dp_txrx_handle(struct cdp_pdev *pdev_hdl)
6425 {
6426 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_hdl;
6427 
6428 	return pdev->dp_txrx_handle;
6429 }
6430 
6431 /**
6432  * dp_pdev_set_dp_txrx_handle() - set dp handle in pdev
6433  * @pdev_hdl: datapath pdev handle
6434  * @dp_txrx_hdl: opaque pointer for dp_txrx_handle
6435  *
6436  * Return: void
6437  */
6438 static void
6439 dp_pdev_set_dp_txrx_handle(struct cdp_pdev *pdev_hdl, void *dp_txrx_hdl)
6440 {
6441 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_hdl;
6442 
6443 	pdev->dp_txrx_handle = dp_txrx_hdl;
6444 }
6445 
6446 /**
6447  * dp_soc_get_dp_txrx_handle() - get context for external-dp from dp soc
6448  * @soc_handle: datapath soc handle
6449  *
6450  * Return: opaque pointer to external dp (non-core DP)
6451  */
6452 static void *dp_soc_get_dp_txrx_handle(struct cdp_soc *soc_handle)
6453 {
6454 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
6455 
6456 	return soc->external_txrx_handle;
6457 }
6458 
6459 /**
6460  * dp_soc_set_dp_txrx_handle() - set external dp handle in soc
6461  * @soc_handle: datapath soc handle
6462  * @txrx_handle: opaque pointer to external dp (non-core DP)
6463  *
6464  * Return: void
6465  */
6466 static void
6467 dp_soc_set_dp_txrx_handle(struct cdp_soc *soc_handle, void *txrx_handle)
6468 {
6469 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
6470 
6471 	soc->external_txrx_handle = txrx_handle;
6472 }
6473 
6474 #ifdef CONFIG_WIN
6475 static void dp_peer_teardown_wifi3(struct cdp_vdev *vdev_hdl, void *peer_hdl)
6476 {
6477 	struct dp_vdev *vdev = (struct dp_vdev *) vdev_hdl;
6478 	struct dp_peer *peer = (struct dp_peer *) peer_hdl;
6479 	struct dp_soc *soc = (struct dp_soc *) vdev->pdev->soc;
6480 
6481 	peer->delete_in_progress = true;
6482 	dp_peer_delete_ast_entries(soc, peer);
6483 }
6484 #endif
6485 
6486 #ifdef ATH_SUPPORT_NAC_RSSI
6487 static QDF_STATUS dp_config_for_nac_rssi(struct cdp_vdev *vdev_handle,
6488 		enum cdp_nac_param_cmd cmd, char *bssid, char *client_macaddr,
6489 		uint8_t chan_num)
6490 {
6491 
6492 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
6493 	struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;
6494 	struct dp_soc *soc = (struct dp_soc *) vdev->pdev->soc;
6495 
6496 	pdev->nac_rssi_filtering = 1;
6497 	/* Store address of NAC (neighbour peer) which will be checked
6498 	 * against TA of received packets.
6499 	 */
6500 
6501 	if (cmd == CDP_NAC_PARAM_ADD) {
6502 		qdf_mem_copy(vdev->cdp_nac_rssi.client_mac,
6503 				client_macaddr, DP_MAC_ADDR_LEN);
6504 		vdev->cdp_nac_rssi_enabled = 1;
6505 	} else if (cmd == CDP_NAC_PARAM_DEL) {
6506 		if (!qdf_mem_cmp(vdev->cdp_nac_rssi.client_mac,
6507 			client_macaddr, DP_MAC_ADDR_LEN)) {
6508 				/* delete this peer from the list */
6509 			qdf_mem_zero(vdev->cdp_nac_rssi.client_mac,
6510 				DP_MAC_ADDR_LEN);
6511 		}
6512 		vdev->cdp_nac_rssi_enabled = 0;
6513 	}
6514 
6515 	if (soc->cdp_soc.ol_ops->config_bssid_in_fw_for_nac_rssi)
6516 		soc->cdp_soc.ol_ops->config_bssid_in_fw_for_nac_rssi
6517 			(vdev->pdev->osif_pdev, vdev->vdev_id, cmd, bssid);
6518 
6519 	return QDF_STATUS_SUCCESS;
6520 }
6521 #endif
6522 
6523 static struct cdp_cmn_ops dp_ops_cmn = {
6524 	.txrx_soc_attach_target = dp_soc_attach_target_wifi3,
6525 	.txrx_vdev_attach = dp_vdev_attach_wifi3,
6526 	.txrx_vdev_detach = dp_vdev_detach_wifi3,
6527 	.txrx_pdev_attach = dp_pdev_attach_wifi3,
6528 	.txrx_pdev_detach = dp_pdev_detach_wifi3,
6529 	.txrx_peer_create = dp_peer_create_wifi3,
6530 	.txrx_peer_setup = dp_peer_setup_wifi3,
6531 #ifdef CONFIG_WIN
6532 	.txrx_peer_teardown = dp_peer_teardown_wifi3,
6533 #else
6534 	.txrx_peer_teardown = NULL,
6535 #endif
6536 	.txrx_peer_add_ast = dp_peer_add_ast_wifi3,
6537 	.txrx_peer_del_ast = dp_peer_del_ast_wifi3,
6538 	.txrx_peer_update_ast = dp_peer_update_ast_wifi3,
6539 	.txrx_peer_ast_hash_find = dp_peer_ast_hash_find_wifi3,
6540 	.txrx_peer_ast_get_pdev_id = dp_peer_ast_get_pdev_id_wifi3,
6541 	.txrx_peer_ast_get_next_hop = dp_peer_ast_get_next_hop_wifi3,
6542 	.txrx_peer_ast_set_type = dp_peer_ast_set_type_wifi3,
6543 	.txrx_peer_delete = dp_peer_delete_wifi3,
6544 	.txrx_vdev_register = dp_vdev_register_wifi3,
6545 	.txrx_soc_detach = dp_soc_detach_wifi3,
6546 	.txrx_get_vdev_mac_addr = dp_get_vdev_mac_addr_wifi3,
6547 	.txrx_get_vdev_from_vdev_id = dp_get_vdev_from_vdev_id_wifi3,
6548 	.txrx_get_ctrl_pdev_from_vdev = dp_get_ctrl_pdev_from_vdev_wifi3,
6549 	.addba_requestprocess = dp_addba_requestprocess_wifi3,
6550 	.addba_responsesetup = dp_addba_responsesetup_wifi3,
6551 	.delba_process = dp_delba_process_wifi3,
6552 	.set_addba_response = dp_set_addba_response,
6553 	.get_peer_mac_addr_frm_id = dp_get_peer_mac_addr_frm_id,
6554 	.flush_cache_rx_queue = NULL,
6555 	/* TODO: get API's for dscp-tid need to be added*/
6556 	.set_vdev_dscp_tid_map = dp_set_vdev_dscp_tid_map_wifi3,
6557 	.set_pdev_dscp_tid_map = dp_set_pdev_dscp_tid_map_wifi3,
6558 	.txrx_stats_request = dp_txrx_stats_request,
6559 	.txrx_set_monitor_mode = dp_vdev_set_monitor_mode,
6560 	.txrx_get_pdev_id_frm_pdev = dp_get_pdev_id_frm_pdev,
6561 	.txrx_set_nac = dp_set_nac,
6562 	.txrx_get_tx_pending = dp_get_tx_pending,
6563 	.txrx_set_pdev_tx_capture = dp_config_debug_sniffer,
6564 	.txrx_get_peer_mac_from_peer_id = dp_get_peer_mac_from_peer_id,
6565 	.display_stats = dp_txrx_dump_stats,
6566 	.txrx_soc_set_nss_cfg = dp_soc_set_nss_cfg_wifi3,
6567 	.txrx_soc_get_nss_cfg = dp_soc_get_nss_cfg_wifi3,
6568 #ifdef DP_INTR_POLL_BASED
6569 	.txrx_intr_attach = dp_soc_interrupt_attach_wrapper,
6570 #else
6571 	.txrx_intr_attach = dp_soc_interrupt_attach,
6572 #endif
6573 	.txrx_intr_detach = dp_soc_interrupt_detach,
6574 	.set_pn_check = dp_set_pn_check_wifi3,
6575 	.update_config_parameters = dp_update_config_parameters,
6576 	/* TODO: Add other functions */
6577 	.txrx_data_tx_cb_set = dp_txrx_data_tx_cb_set,
6578 	.get_dp_txrx_handle = dp_pdev_get_dp_txrx_handle,
6579 	.set_dp_txrx_handle = dp_pdev_set_dp_txrx_handle,
6580 	.get_soc_dp_txrx_handle = dp_soc_get_dp_txrx_handle,
6581 	.set_soc_dp_txrx_handle = dp_soc_set_dp_txrx_handle,
6582 	.tx_send = dp_tx_send,
6583 };
6584 
6585 static struct cdp_ctrl_ops dp_ops_ctrl = {
6586 	.txrx_peer_authorize = dp_peer_authorize,
6587 #ifdef QCA_SUPPORT_SON
6588 	.txrx_set_inact_params = dp_set_inact_params,
6589 	.txrx_start_inact_timer = dp_start_inact_timer,
6590 	.txrx_set_overload = dp_set_overload,
6591 	.txrx_peer_is_inact = dp_peer_is_inact,
6592 	.txrx_mark_peer_inact = dp_mark_peer_inact,
6593 #endif
6594 	.txrx_set_vdev_rx_decap_type = dp_set_vdev_rx_decap_type,
6595 	.txrx_set_tx_encap_type = dp_set_vdev_tx_encap_type,
6596 #ifdef MESH_MODE_SUPPORT
6597 	.txrx_set_mesh_mode  = dp_peer_set_mesh_mode,
6598 	.txrx_set_mesh_rx_filter = dp_peer_set_mesh_rx_filter,
6599 #endif
6600 	.txrx_set_vdev_param = dp_set_vdev_param,
6601 	.txrx_peer_set_nawds = dp_peer_set_nawds,
6602 	.txrx_set_pdev_reo_dest = dp_set_pdev_reo_dest,
6603 	.txrx_get_pdev_reo_dest = dp_get_pdev_reo_dest,
6604 	.txrx_set_filter_neighbour_peers = dp_set_filter_neighbour_peers,
6605 	.txrx_update_filter_neighbour_peers =
6606 		dp_update_filter_neighbour_peers,
6607 	.txrx_get_sec_type = dp_get_sec_type,
6608 	/* TODO: Add other functions */
6609 	.txrx_wdi_event_sub = dp_wdi_event_sub,
6610 	.txrx_wdi_event_unsub = dp_wdi_event_unsub,
6611 #ifdef WDI_EVENT_ENABLE
6612 	.txrx_get_pldev = dp_get_pldev,
6613 #endif
6614 	.txrx_set_pdev_param = dp_set_pdev_param,
6615 #ifdef ATH_SUPPORT_NAC_RSSI
6616 	.txrx_vdev_config_for_nac_rssi = dp_config_for_nac_rssi,
6617 #endif
6618 };
6619 
6620 static struct cdp_me_ops dp_ops_me = {
6621 #ifdef ATH_SUPPORT_IQUE
6622 	.tx_me_alloc_descriptor = dp_tx_me_alloc_descriptor,
6623 	.tx_me_free_descriptor = dp_tx_me_free_descriptor,
6624 	.tx_me_convert_ucast = dp_tx_me_send_convert_ucast,
6625 #endif
6626 };
6627 
6628 static struct cdp_mon_ops dp_ops_mon = {
6629 	.txrx_monitor_set_filter_ucast_data = NULL,
6630 	.txrx_monitor_set_filter_mcast_data = NULL,
6631 	.txrx_monitor_set_filter_non_data = NULL,
6632 	.txrx_monitor_get_filter_ucast_data = dp_vdev_get_filter_ucast_data,
6633 	.txrx_monitor_get_filter_mcast_data = dp_vdev_get_filter_mcast_data,
6634 	.txrx_monitor_get_filter_non_data = dp_vdev_get_filter_non_data,
6635 	.txrx_reset_monitor_mode = dp_reset_monitor_mode,
6636 	/* Added support for HK advance filter */
6637 	.txrx_set_advance_monitor_filter = dp_pdev_set_advance_monitor_filter,
6638 };
6639 
6640 static struct cdp_host_stats_ops dp_ops_host_stats = {
6641 	.txrx_per_peer_stats = dp_get_host_peer_stats,
6642 	.get_fw_peer_stats = dp_get_fw_peer_stats,
6643 	.get_htt_stats = dp_get_htt_stats,
6644 	.txrx_enable_enhanced_stats = dp_enable_enhanced_stats,
6645 	.txrx_disable_enhanced_stats = dp_disable_enhanced_stats,
6646 	.txrx_stats_publish = dp_txrx_stats_publish,
6647 	/* TODO */
6648 };
6649 
6650 static struct cdp_raw_ops dp_ops_raw = {
6651 	/* TODO */
6652 };
6653 
6654 #ifdef CONFIG_WIN
6655 static struct cdp_pflow_ops dp_ops_pflow = {
6656 	/* TODO */
6657 };
6658 #endif /* CONFIG_WIN */
6659 
6660 #ifdef FEATURE_RUNTIME_PM
6661 /**
6662  * dp_runtime_suspend() - ensure DP is ready to runtime suspend
6663  * @opaque_pdev: DP pdev context
6664  *
6665  * DP is ready to runtime suspend if there are no pending TX packets.
6666  *
6667  * Return: QDF_STATUS
6668  */
6669 static QDF_STATUS dp_runtime_suspend(struct cdp_pdev *opaque_pdev)
6670 {
6671 	struct dp_pdev *pdev = (struct dp_pdev *)opaque_pdev;
6672 	struct dp_soc *soc = pdev->soc;
6673 
6674 	/* Call DP TX flow control API to check if there are any
6675 	   pending packets */
6676 
6677 	if (soc->intr_mode == DP_INTR_POLL)
6678 		qdf_timer_stop(&soc->int_timer);
6679 
6680 	return QDF_STATUS_SUCCESS;
6681 }
6682 
6683 /**
6684  * dp_runtime_resume() - ensure DP is ready to runtime resume
6685  * @opaque_pdev: DP pdev context
6686  *
6687  * Resume DP for runtime PM.
6688  *
6689  * Return: QDF_STATUS
6690  */
6691 static QDF_STATUS dp_runtime_resume(struct cdp_pdev *opaque_pdev)
6692 {
6693 	struct dp_pdev *pdev = (struct dp_pdev *)opaque_pdev;
6694 	struct dp_soc *soc = pdev->soc;
6695 	void *hal_srng;
6696 	int i;
6697 
6698 	if (soc->intr_mode == DP_INTR_POLL)
6699 		qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
6700 
6701 	for (i = 0; i < MAX_TCL_DATA_RINGS; i++) {
6702 		hal_srng = soc->tcl_data_ring[i].hal_srng;
6703 		if (hal_srng) {
6704 			/* We actually only need to acquire the lock */
6705 			hal_srng_access_start(soc->hal_soc, hal_srng);
6706 			/* Update SRC ring head pointer for HW to send
6707 			   all pending packets */
6708 			hal_srng_access_end(soc->hal_soc, hal_srng);
6709 		}
6710 	}
6711 
6712 	return QDF_STATUS_SUCCESS;
6713 }
6714 #endif /* FEATURE_RUNTIME_PM */
6715 
6716 static QDF_STATUS dp_bus_suspend(struct cdp_pdev *opaque_pdev)
6717 {
6718 	struct dp_pdev *pdev = (struct dp_pdev *)opaque_pdev;
6719 	struct dp_soc *soc = pdev->soc;
6720 
6721 	if (soc->intr_mode == DP_INTR_POLL)
6722 		qdf_timer_stop(&soc->int_timer);
6723 
6724 	return QDF_STATUS_SUCCESS;
6725 }
6726 
6727 static QDF_STATUS dp_bus_resume(struct cdp_pdev *opaque_pdev)
6728 {
6729 	struct dp_pdev *pdev = (struct dp_pdev *)opaque_pdev;
6730 	struct dp_soc *soc = pdev->soc;
6731 
6732 	if (soc->intr_mode == DP_INTR_POLL)
6733 		qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
6734 
6735 	return QDF_STATUS_SUCCESS;
6736 }
6737 
6738 #ifndef CONFIG_WIN
6739 static struct cdp_misc_ops dp_ops_misc = {
6740 	.tx_non_std = dp_tx_non_std,
6741 	.get_opmode = dp_get_opmode,
6742 #ifdef FEATURE_RUNTIME_PM
6743 	.runtime_suspend = dp_runtime_suspend,
6744 	.runtime_resume = dp_runtime_resume,
6745 #endif /* FEATURE_RUNTIME_PM */
6746 	.pkt_log_init = dp_pkt_log_init,
6747 	.pkt_log_con_service = dp_pkt_log_con_service,
6748 };
6749 
6750 static struct cdp_flowctl_ops dp_ops_flowctl = {
6751 	/* WIFI 3.0 DP implement as required. */
6752 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
6753 	.register_pause_cb = dp_txrx_register_pause_cb,
6754 	.dump_flow_pool_info = dp_tx_dump_flow_pool_info,
6755 #endif /* QCA_LL_TX_FLOW_CONTROL_V2 */
6756 };
6757 
6758 static struct cdp_lflowctl_ops dp_ops_l_flowctl = {
6759 	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
6760 };
6761 
6762 #ifdef IPA_OFFLOAD
6763 static struct cdp_ipa_ops dp_ops_ipa = {
6764 	.ipa_get_resource = dp_ipa_get_resource,
6765 	.ipa_set_doorbell_paddr = dp_ipa_set_doorbell_paddr,
6766 	.ipa_op_response = dp_ipa_op_response,
6767 	.ipa_register_op_cb = dp_ipa_register_op_cb,
6768 	.ipa_get_stat = dp_ipa_get_stat,
6769 	.ipa_tx_data_frame = dp_tx_send_ipa_data_frame,
6770 	.ipa_enable_autonomy = dp_ipa_enable_autonomy,
6771 	.ipa_disable_autonomy = dp_ipa_disable_autonomy,
6772 	.ipa_setup = dp_ipa_setup,
6773 	.ipa_cleanup = dp_ipa_cleanup,
6774 	.ipa_setup_iface = dp_ipa_setup_iface,
6775 	.ipa_cleanup_iface = dp_ipa_cleanup_iface,
6776 	.ipa_enable_pipes = dp_ipa_enable_pipes,
6777 	.ipa_disable_pipes = dp_ipa_disable_pipes,
6778 	.ipa_set_perf_level = dp_ipa_set_perf_level
6779 };
6780 #endif
6781 
6782 static struct cdp_bus_ops dp_ops_bus = {
6783 	.bus_suspend = dp_bus_suspend,
6784 	.bus_resume = dp_bus_resume
6785 };
6786 
6787 static struct cdp_ocb_ops dp_ops_ocb = {
6788 	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
6789 };
6790 
6791 
6792 static struct cdp_throttle_ops dp_ops_throttle = {
6793 	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
6794 };
6795 
6796 static struct cdp_mob_stats_ops dp_ops_mob_stats = {
6797 	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
6798 };
6799 
6800 static struct cdp_cfg_ops dp_ops_cfg = {
6801 	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
6802 };
6803 
6804 /*
6805  * dp_wrapper_peer_get_ref_by_addr - wrapper to look up a peer by address
6806  * @dev: physical device instance
6807  * @peer_mac_addr: peer mac address
6808  * @local_id: local id for the peer
6809  * @debug_id: to track enum peer access
6810  *
6811  * Return: peer instance pointer
6812  */
6813 static inline void *
6814 dp_wrapper_peer_get_ref_by_addr(struct cdp_pdev *dev, u8 *peer_mac_addr,
6815 				u8 *local_id,
6816 				enum peer_debug_id_type debug_id)
6817 {
6818 	/*
6819 	 * Currently this function does not implement the "get ref"
6820 	 * functionality and is mapped to dp_find_peer_by_addr which does not
6821 	 * increment the peer ref count. So the peer state is uncertain after
6822 	 * calling this API. The functionality needs to be implemented.
6823 	 * Accordingly the corresponding release_ref function is NULL.
6824 	 */
6825 	return dp_find_peer_by_addr(dev, peer_mac_addr, local_id);
6826 }
6827 
6828 static struct cdp_peer_ops dp_ops_peer = {
6829 	.register_peer = dp_register_peer,
6830 	.clear_peer = dp_clear_peer,
6831 	.find_peer_by_addr = dp_find_peer_by_addr,
6832 	.find_peer_by_addr_and_vdev = dp_find_peer_by_addr_and_vdev,
6833 	.peer_get_ref_by_addr = dp_wrapper_peer_get_ref_by_addr,
6834 	.peer_release_ref = NULL,
6835 	.local_peer_id = dp_local_peer_id,
6836 	.peer_find_by_local_id = dp_peer_find_by_local_id,
6837 	.peer_state_update = dp_peer_state_update,
6838 	.get_vdevid = dp_get_vdevid,
6839 	.get_vdev_by_sta_id = dp_get_vdev_by_sta_id,
6840 	.peer_get_peer_mac_addr = dp_peer_get_peer_mac_addr,
6841 	.get_vdev_for_peer = dp_get_vdev_for_peer,
6842 	.get_peer_state = dp_get_peer_state,
6843 	.last_assoc_received = dp_get_last_assoc_received,
6844 	.last_disassoc_received = dp_get_last_disassoc_received,
6845 	.last_deauth_received = dp_get_last_deauth_received,
6846 };
6847 #endif
6848 
6849 static struct cdp_ops dp_txrx_ops = {
6850 	.cmn_drv_ops = &dp_ops_cmn,
6851 	.ctrl_ops = &dp_ops_ctrl,
6852 	.me_ops = &dp_ops_me,
6853 	.mon_ops = &dp_ops_mon,
6854 	.host_stats_ops = &dp_ops_host_stats,
6855 	.wds_ops = &dp_ops_wds,
6856 	.raw_ops = &dp_ops_raw,
6857 #ifdef CONFIG_WIN
6858 	.pflow_ops = &dp_ops_pflow,
6859 #endif /* CONFIG_WIN */
6860 #ifndef CONFIG_WIN
6861 	.misc_ops = &dp_ops_misc,
6862 	.cfg_ops = &dp_ops_cfg,
6863 	.flowctl_ops = &dp_ops_flowctl,
6864 	.l_flowctl_ops = &dp_ops_l_flowctl,
6865 #ifdef IPA_OFFLOAD
6866 	.ipa_ops = &dp_ops_ipa,
6867 #endif
6868 	.bus_ops = &dp_ops_bus,
6869 	.ocb_ops = &dp_ops_ocb,
6870 	.peer_ops = &dp_ops_peer,
6871 	.throttle_ops = &dp_ops_throttle,
6872 	.mob_stats_ops = &dp_ops_mob_stats,
6873 #endif
6874 };
6875 
6876 /*
6877  * dp_soc_set_txrx_ring_map()
6878  * @dp_soc: DP handler for soc
6879  *
6880  * Return: Void
6881  */
6882 static void dp_soc_set_txrx_ring_map(struct dp_soc *soc)
6883 {
6884 	uint32_t i;
6885 	for (i = 0; i < WLAN_CFG_INT_NUM_CONTEXTS; i++) {
6886 		soc->tx_ring_map[i] = dp_cpu_ring_map[DP_DEFAULT_MAP][i];
6887 	}
6888 }
6889 
6890 /*
6891  * dp_soc_attach_wifi3() - Attach txrx SOC
6892  * @ctrl_psoc:	Opaque SOC handle from control plane
6893  * @htc_handle:	Opaque HTC handle
6894  * @hif_handle:	Opaque HIF handle
6895  * @qdf_osdev:	QDF device
6896  *
6897  * Return: DP SOC handle on success, NULL on failure
6898  */
6899 /*
6900  * Local prototype added to temporarily address warning caused by
6901  * -Wmissing-prototypes. A more correct solution, namely to expose
6902  * a prototype in an appropriate header file, will come later.
6903  */
6904 void *dp_soc_attach_wifi3(void *ctrl_psoc, void *hif_handle,
6905 	HTC_HANDLE htc_handle, qdf_device_t qdf_osdev,
6906 	struct ol_if_ops *ol_ops);
6907 void *dp_soc_attach_wifi3(void *ctrl_psoc, void *hif_handle,
6908 	HTC_HANDLE htc_handle, qdf_device_t qdf_osdev,
6909 	struct ol_if_ops *ol_ops)
6910 {
6911 	struct dp_soc *soc = qdf_mem_malloc(sizeof(*soc));
6912 
6913 	if (!soc) {
6914 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
6915 			FL("DP SOC memory allocation failed"));
6916 		goto fail0;
6917 	}
6918 
6919 	soc->cdp_soc.ops = &dp_txrx_ops;
6920 	soc->cdp_soc.ol_ops = ol_ops;
6921 	soc->ctrl_psoc = ctrl_psoc;
6922 	soc->osdev = qdf_osdev;
6923 	soc->hif_handle = hif_handle;
6924 
6925 	soc->hal_soc = hif_get_hal_handle(hif_handle);
6926 	soc->htt_handle = htt_soc_attach(soc, ctrl_psoc, htc_handle,
6927 		soc->hal_soc, qdf_osdev);
6928 	if (!soc->htt_handle) {
6929 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
6930 			FL("HTT attach failed"));
6931 		goto fail1;
6932 	}
6933 
6934 	soc->wlan_cfg_ctx = wlan_cfg_soc_attach();
6935 	if (!soc->wlan_cfg_ctx) {
6936 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
6937 				FL("wlan_cfg_soc_attach failed"));
6938 		goto fail2;
6939 	}
6940 
6941 	wlan_cfg_set_rx_hash(soc->wlan_cfg_ctx, rx_hash);
6942 	soc->cce_disable = false;
6943 
6944 	if (soc->cdp_soc.ol_ops->get_dp_cfg_param) {
6945 		int ret = soc->cdp_soc.ol_ops->get_dp_cfg_param(soc->ctrl_psoc,
6946 				CDP_CFG_MAX_PEER_ID);
6947 
6948 		if (ret != -EINVAL) {
6949 			wlan_cfg_set_max_peer_id(soc->wlan_cfg_ctx, ret);
6950 		}
6951 
6952 		ret = soc->cdp_soc.ol_ops->get_dp_cfg_param(soc->ctrl_psoc,
6953 				CDP_CFG_CCE_DISABLE);
6954 		if (ret == 1)
6955 			soc->cce_disable = true;
6956 	}
6957 
6958 	qdf_spinlock_create(&soc->peer_ref_mutex);
6959 
6960 	qdf_spinlock_create(&soc->reo_desc_freelist_lock);
6961 	qdf_list_create(&soc->reo_desc_freelist, REO_DESC_FREELIST_SIZE);
6962 
	/* Fill the TX/RX CPU ring map */
6964 	dp_soc_set_txrx_ring_map(soc);
6965 
6966 	qdf_spinlock_create(&soc->htt_stats.lock);
6967 	/* initialize work queue for stats processing */
6968 	qdf_create_work(0, &soc->htt_stats.work, htt_t2h_stats_handler, soc);
6969 
	/* Initialize the inactivity timer for WiFi SON */
6971 	dp_init_inact_timer(soc);
6972 
6973 	return (void *)soc;
6974 
6975 fail2:
6976 	htt_soc_detach(soc->htt_handle);
6977 fail1:
6978 	qdf_mem_free(soc);
6979 fail0:
6980 	return NULL;
6981 }
6982 
6983 /*
6984  * dp_get_pdev_for_mac_id() -  Return pdev for mac_id
6985  *
6986  * @soc: handle to DP soc
6987  * @mac_id: MAC id
6988  *
6989  * Return: Return pdev corresponding to MAC
6990  */
6991 void *dp_get_pdev_for_mac_id(struct dp_soc *soc, uint32_t mac_id)
6992 {
6993 	if (wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx))
6994 		return soc->pdev_list[mac_id];
6995 
	/* Typically for MCL, as there is only one PDEV */
6997 	return soc->pdev_list[0];
6998 }
6999 
7000 /*
7001  * dp_get_ring_id_for_mac_id() -  Return pdev for mac_id
7002  *
7003  * @soc: handle to DP soc
7004  * @mac_id: MAC id
7005  *
7006  * Return: ring id
7007  */
7008 int dp_get_ring_id_for_mac_id(struct dp_soc *soc, uint32_t mac_id)
7009 {
7010 	/*
7011 	 * Single pdev using both MACs will operate on both MAC rings,
7012 	 * which is the case for MCL.
7013 	 */
7014 	if (!wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx))
7015 		return mac_id;
7016 
7017 	/* For WIN each PDEV will operate one ring, so index is zero. */
7018 	return 0;
7019 }
7020 
7021 /*
7022  * dp_is_hw_dbs_enable() - Procedure to check if DBS is supported
7023  * @soc:		DP SoC context
7024  * @max_mac_rings:	No of MAC rings
7025  *
7026  * Return: None
7027  */
7028 static
7029 void dp_is_hw_dbs_enable(struct dp_soc *soc,
7030 				int *max_mac_rings)
7031 {
	bool dbs_enable = false;

	if (soc->cdp_soc.ol_ops->is_hw_dbs_2x2_capable)
		dbs_enable = soc->cdp_soc.ol_ops->
				is_hw_dbs_2x2_capable(soc->ctrl_psoc);

	*max_mac_rings = dbs_enable ? (*max_mac_rings) : 1;
7038 }
7039 
7040 /*
7041 * dp_set_pktlog_wifi3() - attach txrx vdev
7042 * @pdev: Datapath PDEV handle
7043 * @event: which event's notifications are being subscribed to
7044 * @enable: WDI event subscribe or not. (True or False)
7045 *
7046 * Return: Success, NULL on failure
7047 */
7048 #ifdef WDI_EVENT_ENABLE
7049 int dp_set_pktlog_wifi3(struct dp_pdev *pdev, uint32_t event,
7050 	bool enable)
7051 {
7052 	struct dp_soc *soc = pdev->soc;
7053 	struct htt_rx_ring_tlv_filter htt_tlv_filter = {0};
7054 	int max_mac_rings = wlan_cfg_get_num_mac_rings
7055 					(pdev->wlan_cfg_ctx);
7056 	uint8_t mac_id = 0;
7057 
7058 	dp_is_hw_dbs_enable(soc, &max_mac_rings);
7059 
	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
			FL("Max_mac_rings %d\n"),
			max_mac_rings);
7063 
7064 	if (enable) {
7065 		switch (event) {
7066 		case WDI_EVENT_RX_DESC:
7067 			if (pdev->monitor_vdev) {
7068 				/* Nothing needs to be done if monitor mode is
7069 				 * enabled
7070 				 */
7071 				return 0;
7072 			}
7073 			if (pdev->rx_pktlog_mode != DP_RX_PKTLOG_FULL) {
7074 				pdev->rx_pktlog_mode = DP_RX_PKTLOG_FULL;
7075 				htt_tlv_filter.mpdu_start = 1;
7076 				htt_tlv_filter.msdu_start = 1;
7077 				htt_tlv_filter.msdu_end = 1;
7078 				htt_tlv_filter.mpdu_end = 1;
7079 				htt_tlv_filter.packet_header = 1;
7080 				htt_tlv_filter.attention = 1;
7081 				htt_tlv_filter.ppdu_start = 1;
7082 				htt_tlv_filter.ppdu_end = 1;
7083 				htt_tlv_filter.ppdu_end_user_stats = 1;
7084 				htt_tlv_filter.ppdu_end_user_stats_ext = 1;
7085 				htt_tlv_filter.ppdu_end_status_done = 1;
7086 				htt_tlv_filter.enable_fp = 1;
7087 				htt_tlv_filter.fp_mgmt_filter = FILTER_MGMT_ALL;
7088 				htt_tlv_filter.fp_ctrl_filter = FILTER_CTRL_ALL;
7089 				htt_tlv_filter.fp_data_filter = FILTER_DATA_ALL;
7090 				htt_tlv_filter.mo_mgmt_filter = FILTER_MGMT_ALL;
7091 				htt_tlv_filter.mo_ctrl_filter = FILTER_CTRL_ALL;
7092 				htt_tlv_filter.mo_data_filter = FILTER_DATA_ALL;
7093 
7094 				for (mac_id = 0; mac_id < max_mac_rings;
7095 								mac_id++) {
7096 					htt_h2t_rx_ring_cfg(soc->htt_handle,
7097 							pdev->pdev_id + mac_id,
7098 							pdev->rxdma_mon_status_ring
7099 							.hal_srng,
7100 							RXDMA_MONITOR_STATUS,
7101 							RX_BUFFER_SIZE,
7102 							&htt_tlv_filter);
7103 
7104 				}
7105 
7106 				if (soc->reap_timer_init)
7107 					qdf_timer_mod(&soc->mon_reap_timer,
7108 					DP_INTR_POLL_TIMER_MS);
7109 			}
7110 			break;
7111 
7112 		case WDI_EVENT_LITE_RX:
7113 			if (pdev->monitor_vdev) {
7114 				/* Nothing needs to be done if monitor mode is
7115 				 * enabled
7116 				 */
7117 				return 0;
7118 			}
7119 
7120 			if (pdev->rx_pktlog_mode != DP_RX_PKTLOG_LITE) {
7121 				pdev->rx_pktlog_mode = DP_RX_PKTLOG_LITE;
7122 
7123 				htt_tlv_filter.ppdu_start = 1;
7124 				htt_tlv_filter.ppdu_end = 1;
7125 				htt_tlv_filter.ppdu_end_user_stats = 1;
7126 				htt_tlv_filter.ppdu_end_user_stats_ext = 1;
7127 				htt_tlv_filter.ppdu_end_status_done = 1;
7128 				htt_tlv_filter.mpdu_start = 1;
7129 				htt_tlv_filter.enable_fp = 1;
7130 				htt_tlv_filter.fp_mgmt_filter = FILTER_MGMT_ALL;
7131 				htt_tlv_filter.fp_ctrl_filter = FILTER_CTRL_ALL;
7132 				htt_tlv_filter.fp_data_filter = FILTER_DATA_ALL;
7133 				htt_tlv_filter.mo_mgmt_filter = FILTER_MGMT_ALL;
7134 				htt_tlv_filter.mo_ctrl_filter = FILTER_CTRL_ALL;
7135 				htt_tlv_filter.mo_data_filter = FILTER_DATA_ALL;
7136 
7137 				for (mac_id = 0; mac_id < max_mac_rings;
7138 								mac_id++) {
7139 					htt_h2t_rx_ring_cfg(soc->htt_handle,
7140 					pdev->pdev_id + mac_id,
7141 					pdev->rxdma_mon_status_ring
7142 					.hal_srng,
7143 					RXDMA_MONITOR_STATUS,
7144 					RX_BUFFER_SIZE_PKTLOG_LITE,
7145 					&htt_tlv_filter);
7146 				}
7147 
7148 				if (soc->reap_timer_init)
7149 					qdf_timer_mod(&soc->mon_reap_timer,
7150 					DP_INTR_POLL_TIMER_MS);
7151 			}
7152 			break;
7153 
7154 		case WDI_EVENT_LITE_T2H:
7155 			if (pdev->monitor_vdev) {
7156 				/* Nothing needs to be done if monitor mode is
7157 				 * enabled
7158 				 */
7159 				return 0;
7160 			}
			/* Enable HTT_H2T_MSG_TYPE_PPDU_STATS_CFG in the FW
			 * by passing the value 0xffff. Once proper macros
			 * are defined in the HTT header file, use them
			 * instead of this raw value.
			 */
7165 			for (mac_id = 0; mac_id < max_mac_rings; mac_id++) {
7166 				pdev->pktlog_ppdu_stats = true;
7167 				dp_h2t_cfg_stats_msg_send(pdev, 0xffff,
7168 						pdev->pdev_id + mac_id);
7169 			}
7170 			break;
7171 
7172 		default:
7173 			/* Nothing needs to be done for other pktlog types */
7174 			break;
7175 		}
7176 	} else {
7177 		switch (event) {
7178 		case WDI_EVENT_RX_DESC:
7179 		case WDI_EVENT_LITE_RX:
7180 			if (pdev->monitor_vdev) {
7181 				/* Nothing needs to be done if monitor mode is
7182 				 * enabled
7183 				 */
7184 				return 0;
7185 			}
7186 			if (pdev->rx_pktlog_mode != DP_RX_PKTLOG_DISABLED) {
7187 				pdev->rx_pktlog_mode = DP_RX_PKTLOG_DISABLED;
7188 
7189 				for (mac_id = 0; mac_id < max_mac_rings;
7190 								mac_id++) {
7191 					htt_h2t_rx_ring_cfg(soc->htt_handle,
7192 							pdev->pdev_id + mac_id,
7193 							pdev->rxdma_mon_status_ring
7194 							.hal_srng,
7195 							RXDMA_MONITOR_STATUS,
7196 							RX_BUFFER_SIZE,
7197 							&htt_tlv_filter);
7198 				}
7199 
7200 				if (soc->reap_timer_init)
7201 					qdf_timer_stop(&soc->mon_reap_timer);
7202 			}
7203 			break;
7204 		case WDI_EVENT_LITE_T2H:
7205 			if (pdev->monitor_vdev) {
7206 				/* Nothing needs to be done if monitor mode is
7207 				 * enabled
7208 				 */
7209 				return 0;
7210 			}
			/* Disable HTT_H2T_MSG_TYPE_PPDU_STATS_CFG in the FW
			 * by passing the value 0. Once proper macros are
			 * defined in the HTT header file, use them instead
			 * of this raw value.
			 */
7215 			for (mac_id = 0; mac_id < max_mac_rings; mac_id++) {
7216 				pdev->pktlog_ppdu_stats = false;
				if (!pdev->enhanced_stats_en &&
				    !pdev->tx_sniffer_enable &&
				    !pdev->mcopy_mode) {
					dp_h2t_cfg_stats_msg_send(pdev, 0,
							pdev->pdev_id + mac_id);
				} else if (pdev->tx_sniffer_enable ||
					   pdev->mcopy_mode) {
					dp_h2t_cfg_stats_msg_send(pdev,
						DP_PPDU_STATS_CFG_SNIFFER,
						pdev->pdev_id + mac_id);
				} else if (pdev->enhanced_stats_en) {
					dp_h2t_cfg_stats_msg_send(pdev,
						DP_PPDU_STATS_CFG_ENH_STATS,
						pdev->pdev_id + mac_id);
				}
7227 			}
7228 
7229 			break;
7230 		default:
7231 			/* Nothing needs to be done for other pktlog types */
7232 			break;
7233 		}
7234 	}
7235 	return 0;
7236 }
7237 #endif
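
/*
 * Hedged usage sketch for dp_set_pktlog_wifi3(); the caller context is
 * an assumption, only the function and the WDI event IDs come from this
 * file:
 *
 *	enable:	 dp_set_pktlog_wifi3(pdev, WDI_EVENT_LITE_RX, true);
 *	disable: dp_set_pktlog_wifi3(pdev, WDI_EVENT_LITE_RX, false);
 */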
7238 
7239 #ifdef CONFIG_MCL
7240 /*
7241  * dp_service_mon_rings()- timer to reap monitor rings
7242  * reqd as we are not getting ppdu end interrupts
7243  * @arg: SoC Handle
7244  *
7245  * Return:
7246  *
7247  */
7248 static void dp_service_mon_rings(void *arg)
7249 {
7250 	struct dp_soc *soc = (struct dp_soc *) arg;
7251 	int ring = 0, work_done;
7252 
7253 	work_done = dp_mon_process(soc, ring, QCA_NAPI_BUDGET);
7254 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
7255 		FL("Reaped %d descs from Monitor rings"), work_done);
7256 
7257 	qdf_timer_mod(&soc->mon_reap_timer, DP_INTR_POLL_TIMER_MS);
7258 }
7259 
7260 #ifndef REMOVE_PKT_LOG
7261 /**
7262  * dp_pkt_log_init() - API to initialize packet log
7263  * @ppdev: physical device handle
7264  * @scn: HIF context
7265  *
7266  * Return: none
7267  */
7268 void dp_pkt_log_init(struct cdp_pdev *ppdev, void *scn)
7269 {
7270 	struct dp_pdev *handle = (struct dp_pdev *)ppdev;
7271 
7272 	if (handle->pkt_log_init) {
7273 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
7274 			 "%s: Packet log not initialized", __func__);
7275 		return;
7276 	}
7277 
7278 	pktlog_sethandle(&handle->pl_dev, scn);
7279 	pktlog_set_callback_regtype(PKTLOG_LITE_CALLBACK_REGISTRATION);
7280 
7281 	if (pktlogmod_init(scn)) {
7282 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
7283 			 "%s: pktlogmod_init failed", __func__);
7284 		handle->pkt_log_init = false;
7285 	} else {
7286 		handle->pkt_log_init = true;
7287 	}
7288 }
7289 
7290 /**
7291  * dp_pkt_log_con_service() - connect packet log service
7292  * @ppdev: physical device handle
7293  * @scn: device context
7294  *
7295  * Return: none
7296  */
7297 static void dp_pkt_log_con_service(struct cdp_pdev *ppdev, void *scn)
7298 {
7299 	struct dp_pdev *pdev = (struct dp_pdev *)ppdev;
7300 
7301 	dp_pkt_log_init((struct cdp_pdev *)pdev, scn);
7302 	pktlog_htc_attach();
7303 }
7304 
7305 /**
7306  * dp_pktlogmod_exit() - API to cleanup pktlog info
7307  * @handle: Pdev handle
7308  *
7309  * Return: none
7310  */
7311 static void dp_pktlogmod_exit(struct dp_pdev *handle)
7312 {
7313 	void *scn = (void *)handle->soc->hif_handle;
7314 
7315 	if (!scn) {
7316 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
7317 			 "%s: Invalid hif(scn) handle", __func__);
7318 		return;
7319 	}
7320 
7321 	pktlogmod_exit(scn);
7322 	handle->pkt_log_init = false;
7323 }
7324 #endif
7325 #else
7326 static void dp_pktlogmod_exit(struct dp_pdev *handle) { }
7327 #endif
7328 
7329