xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/dp_main.c (revision c8e2987f9325baadee03d0265544a08c4a0217b0)
1 /*
2  * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include <qdf_types.h>
20 #include <qdf_lock.h>
21 #include <qdf_net_types.h>
22 #include <qdf_lro.h>
23 #include <qdf_module.h>
24 #include <hal_api.h>
25 #include <hif.h>
26 #include <htt.h>
27 #include <wdi_event.h>
28 #include <queue.h>
29 #include "dp_htt.h"
30 #include "dp_types.h"
31 #include "dp_internal.h"
32 #include "dp_tx.h"
33 #include "dp_tx_desc.h"
34 #include "dp_rx.h"
35 #include <cdp_txrx_handle.h>
36 #include <wlan_cfg.h>
37 #include "cdp_txrx_cmn_struct.h"
38 #include "cdp_txrx_stats_struct.h"
39 #include <qdf_util.h>
40 #include "dp_peer.h"
41 #include "dp_rx_mon.h"
42 #include "htt_stats.h"
43 #include "qdf_mem.h"   /* qdf_mem_malloc,free */
44 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
45 #include "cdp_txrx_flow_ctrl_v2.h"
46 #else
47 static inline void
48 cdp_dump_flow_pool_info(struct cdp_soc_t *soc)
49 {
50 	return;
51 }
52 #endif
53 #include "dp_ipa.h"
54 
55 #ifdef CONFIG_MCL
56 static void dp_service_mon_rings(void *arg);
57 #ifndef REMOVE_PKT_LOG
58 #include <pktlog_ac_api.h>
59 #include <pktlog_ac.h>
60 static void dp_pkt_log_con_service(struct cdp_pdev *ppdev, void *scn);
61 #endif
62 #endif
63 static void dp_pktlogmod_exit(struct dp_pdev *handle);
64 static void *dp_peer_create_wifi3(struct cdp_vdev *vdev_handle,
65 					uint8_t *peer_mac_addr);
66 static void dp_peer_delete_wifi3(void *peer_handle, uint32_t bitmap);
67 
68 #define DP_INTR_POLL_TIMER_MS	10
69 #define DP_WDS_AGING_TIMER_DEFAULT_MS	120000
70 #define DP_MCS_LENGTH (6*MAX_MCS)
71 #define DP_NSS_LENGTH (6*SS_COUNT)
72 #define DP_RXDMA_ERR_LENGTH (6*HAL_RXDMA_ERR_MAX)
73 #define DP_REO_ERR_LENGTH (6*HAL_REO_ERR_MAX)
74 #define DP_MAX_MCS_STRING_LEN 30
75 #define DP_CURR_FW_STATS_AVAIL 19
76 #define DP_HTT_DBG_EXT_STATS_MAX 256
77 #define DP_MAX_SLEEP_TIME 100
78 
79 #ifdef IPA_OFFLOAD
80 /* Exclude IPA rings from the interrupt context */
81 #define TX_RING_MASK_VAL	0xb
82 #define RX_RING_MASK_VAL	0x7
83 #else
84 #define TX_RING_MASK_VAL	0xF
85 #define RX_RING_MASK_VAL	0xF
86 #endif
87 
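/* rx_hash: module parameter to enable/disable RX hashing (default: enabled) */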
88 bool rx_hash = 1;
89 qdf_declare_param(rx_hash, bool);
90 
91 #define STR_MAXLEN	64
92 
93 #define DP_PPDU_STATS_CFG_ALL 0xFFFF
94 
95 /* PPDU stats mask sent to FW to enable enhanced stats */
96 #define DP_PPDU_STATS_CFG_ENH_STATS 0xE67
97 /* PPDU stats mask sent to FW to support debug sniffer feature */
98 #define DP_PPDU_STATS_CFG_SNIFFER 0x2FFF
99 /**
100  * default_dscp_tid_map - Default DSCP-TID mapping
101  *
102  * DSCP        TID
103  * 000000      0
104  * 001000      1
105  * 010000      2
106  * 011000      3
107  * 100000      4
108  * 101000      5
109  * 110000      6
110  * 111000      7
111  */
112 static uint8_t default_dscp_tid_map[DSCP_TID_MAP_MAX] = {
113 	0, 0, 0, 0, 0, 0, 0, 0,
114 	1, 1, 1, 1, 1, 1, 1, 1,
115 	2, 2, 2, 2, 2, 2, 2, 2,
116 	3, 3, 3, 3, 3, 3, 3, 3,
117 	4, 4, 4, 4, 4, 4, 4, 4,
118 	5, 5, 5, 5, 5, 5, 5, 5,
119 	6, 6, 6, 6, 6, 6, 6, 6,
120 	7, 7, 7, 7, 7, 7, 7, 7,
121 };
122 
123 /*
124  * struct dp_rate_debug
125  *
126  * @mcs_type: print string for a given mcs
127  * @valid: valid mcs rate?
128  */
129 struct dp_rate_debug {
130 	char mcs_type[DP_MAX_MCS_STRING_LEN];
131 	uint8_t valid;
132 };
133 
134 #define MCS_VALID 1
135 #define MCS_INVALID 0
136 
137 static const struct dp_rate_debug dp_rate_string[DOT11_MAX][MAX_MCS] = {
138 
139 	{
140 		{"OFDM 48 Mbps", MCS_VALID},
141 		{"OFDM 24 Mbps", MCS_VALID},
142 		{"OFDM 12 Mbps", MCS_VALID},
143 		{"OFDM 6 Mbps ", MCS_VALID},
144 		{"OFDM 54 Mbps", MCS_VALID},
145 		{"OFDM 36 Mbps", MCS_VALID},
146 		{"OFDM 18 Mbps", MCS_VALID},
147 		{"OFDM 9 Mbps ", MCS_VALID},
148 		{"INVALID ", MCS_INVALID},
149 		{"INVALID ", MCS_INVALID},
150 		{"INVALID ", MCS_INVALID},
151 		{"INVALID ", MCS_INVALID},
152 		{"INVALID ", MCS_VALID},
153 	},
154 	{
155 		{"CCK 11 Mbps Long  ", MCS_VALID},
156 		{"CCK 5.5 Mbps Long ", MCS_VALID},
157 		{"CCK 2 Mbps Long   ", MCS_VALID},
158 		{"CCK 1 Mbps Long   ", MCS_VALID},
159 		{"CCK 11 Mbps Short ", MCS_VALID},
160 		{"CCK 5.5 Mbps Short", MCS_VALID},
161 		{"CCK 2 Mbps Short  ", MCS_VALID},
162 		{"INVALID ", MCS_INVALID},
163 		{"INVALID ", MCS_INVALID},
164 		{"INVALID ", MCS_INVALID},
165 		{"INVALID ", MCS_INVALID},
166 		{"INVALID ", MCS_INVALID},
167 		{"INVALID ", MCS_VALID},
168 	},
169 	{
170 		{"HT MCS 0 (BPSK 1/2)  ", MCS_VALID},
171 		{"HT MCS 1 (QPSK 1/2)  ", MCS_VALID},
172 		{"HT MCS 2 (QPSK 3/4)  ", MCS_VALID},
173 		{"HT MCS 3 (16-QAM 1/2)", MCS_VALID},
174 		{"HT MCS 4 (16-QAM 3/4)", MCS_VALID},
175 		{"HT MCS 5 (64-QAM 2/3)", MCS_VALID},
176 		{"HT MCS 6 (64-QAM 3/4)", MCS_VALID},
177 		{"HT MCS 7 (64-QAM 5/6)", MCS_VALID},
178 		{"INVALID ", MCS_INVALID},
179 		{"INVALID ", MCS_INVALID},
180 		{"INVALID ", MCS_INVALID},
181 		{"INVALID ", MCS_INVALID},
182 		{"INVALID ", MCS_VALID},
183 	},
184 	{
185 		{"VHT MCS 0 (BPSK 1/2)     ", MCS_VALID},
186 		{"VHT MCS 1 (QPSK 1/2)     ", MCS_VALID},
187 		{"VHT MCS 2 (QPSK 3/4)     ", MCS_VALID},
188 		{"VHT MCS 3 (16-QAM 1/2)   ", MCS_VALID},
189 		{"VHT MCS 4 (16-QAM 3/4)   ", MCS_VALID},
190 		{"VHT MCS 5 (64-QAM 2/3)   ", MCS_VALID},
191 		{"VHT MCS 6 (64-QAM 3/4)   ", MCS_VALID},
192 		{"VHT MCS 7 (64-QAM 5/6)   ", MCS_VALID},
193 		{"VHT MCS 8 (256-QAM 3/4)  ", MCS_VALID},
194 		{"VHT MCS 9 (256-QAM 5/6)  ", MCS_VALID},
195 		{"VHT MCS 10 (1024-QAM 3/4)", MCS_VALID},
196 		{"VHT MCS 11 (1024-QAM 5/6)", MCS_VALID},
197 		{"INVALID ", MCS_VALID},
198 	},
199 	{
200 		{"HE MCS 0 (BPSK 1/2)     ", MCS_VALID},
201 		{"HE MCS 1 (QPSK 1/2)     ", MCS_VALID},
202 		{"HE MCS 2 (QPSK 3/4)     ", MCS_VALID},
203 		{"HE MCS 3 (16-QAM 1/2)   ", MCS_VALID},
204 		{"HE MCS 4 (16-QAM 3/4)   ", MCS_VALID},
205 		{"HE MCS 5 (64-QAM 2/3)   ", MCS_VALID},
206 		{"HE MCS 6 (64-QAM 3/4)   ", MCS_VALID},
207 		{"HE MCS 7 (64-QAM 5/6)   ", MCS_VALID},
208 		{"HE MCS 8 (256-QAM 3/4)  ", MCS_VALID},
209 		{"HE MCS 9 (256-QAM 5/6)  ", MCS_VALID},
210 		{"HE MCS 10 (1024-QAM 3/4)", MCS_VALID},
211 		{"HE MCS 11 (1024-QAM 5/6)", MCS_VALID},
212 		{"INVALID ", MCS_VALID},
213 	}
214 };
215 
216 /**
217  * @brief Cpu ring map types
218  */
219 enum dp_cpu_ring_map_types {
220 	DP_DEFAULT_MAP,
221 	DP_NSS_FIRST_RADIO_OFFLOADED_MAP,
222 	DP_NSS_SECOND_RADIO_OFFLOADED_MAP,
223 	DP_NSS_ALL_RADIO_OFFLOADED_MAP,
224 	DP_CPU_RING_MAP_MAX
225 };
226 
227 /**
228  * @brief CPU to Tx ring map, indexed by enum dp_cpu_ring_map_types
229  */
230 static uint8_t dp_cpu_ring_map[DP_CPU_RING_MAP_MAX][WLAN_CFG_INT_NUM_CONTEXTS] = {
231 	{0x0, 0x1, 0x2, 0x0},
232 	{0x1, 0x2, 0x1, 0x2},
233 	{0x0, 0x2, 0x0, 0x2},
234 	{0x2, 0x2, 0x2, 0x2}
235 };
236 
237 /**
238  * @brief Select the type of statistics
239  */
240 enum dp_stats_type {
241 	STATS_FW = 0,
242 	STATS_HOST = 1,
243 	STATS_TYPE_MAX = 2,
244 };
245 
246 /**
247  * @brief General Firmware statistics options
248  *
249  */
250 enum dp_fw_stats {
251 	TXRX_FW_STATS_INVALID	= -1,
252 };
253 
254 /**
255  * dp_stats_mapping_table - Firmware and Host statistics
256  * currently supported
257  */
258 const int dp_stats_mapping_table[][STATS_TYPE_MAX] = {
259 	{HTT_DBG_EXT_STATS_RESET, TXRX_HOST_STATS_INVALID},
260 	{HTT_DBG_EXT_STATS_PDEV_TX, TXRX_HOST_STATS_INVALID},
261 	{HTT_DBG_EXT_STATS_PDEV_RX, TXRX_HOST_STATS_INVALID},
262 	{HTT_DBG_EXT_STATS_PDEV_TX_HWQ, TXRX_HOST_STATS_INVALID},
263 	{HTT_DBG_EXT_STATS_PDEV_TX_SCHED, TXRX_HOST_STATS_INVALID},
264 	{HTT_DBG_EXT_STATS_PDEV_ERROR, TXRX_HOST_STATS_INVALID},
265 	{HTT_DBG_EXT_STATS_PDEV_TQM, TXRX_HOST_STATS_INVALID},
266 	{HTT_DBG_EXT_STATS_TQM_CMDQ, TXRX_HOST_STATS_INVALID},
267 	{HTT_DBG_EXT_STATS_TX_DE_INFO, TXRX_HOST_STATS_INVALID},
268 	{HTT_DBG_EXT_STATS_PDEV_TX_RATE, TXRX_HOST_STATS_INVALID},
269 	{HTT_DBG_EXT_STATS_PDEV_RX_RATE, TXRX_HOST_STATS_INVALID},
270 	{TXRX_FW_STATS_INVALID, TXRX_HOST_STATS_INVALID},
271 	{HTT_DBG_EXT_STATS_TX_SELFGEN_INFO, TXRX_HOST_STATS_INVALID},
272 	{HTT_DBG_EXT_STATS_TX_MU_HWQ, TXRX_HOST_STATS_INVALID},
273 	{HTT_DBG_EXT_STATS_RING_IF_INFO, TXRX_HOST_STATS_INVALID},
274 	{HTT_DBG_EXT_STATS_SRNG_INFO, TXRX_HOST_STATS_INVALID},
275 	{HTT_DBG_EXT_STATS_SFM_INFO, TXRX_HOST_STATS_INVALID},
276 	{HTT_DBG_EXT_STATS_PDEV_TX_MU, TXRX_HOST_STATS_INVALID},
277 	{HTT_DBG_EXT_STATS_ACTIVE_PEERS_LIST, TXRX_HOST_STATS_INVALID},
278 	/* Last ENUM for HTT FW STATS */
279 	{DP_HTT_DBG_EXT_STATS_MAX, TXRX_HOST_STATS_INVALID},
280 	{TXRX_FW_STATS_INVALID, TXRX_CLEAR_STATS},
281 	{TXRX_FW_STATS_INVALID, TXRX_RX_RATE_STATS},
282 	{TXRX_FW_STATS_INVALID, TXRX_TX_RATE_STATS},
283 	{TXRX_FW_STATS_INVALID, TXRX_TX_HOST_STATS},
284 	{TXRX_FW_STATS_INVALID, TXRX_RX_HOST_STATS},
285 	{TXRX_FW_STATS_INVALID, TXRX_AST_STATS},
286 	{TXRX_FW_STATS_INVALID, TXRX_SRNG_PTR_STATS},
287 };
288 
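/**
 * dp_peer_add_ast_wifi3() - cdp wrapper to add an AST entry for a peer
 * @soc_hdl: opaque datapath soc handle
 * @peer_hdl: opaque peer handle
 * @mac_addr: MAC address to be mapped in the AST entry
 * @type: AST entry type
 * @flags: flags passed through to dp_peer_add_ast()
 *
 * Return: status returned by dp_peer_add_ast()
 */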
289 static int dp_peer_add_ast_wifi3(struct cdp_soc_t *soc_hdl,
290 					struct cdp_peer *peer_hdl,
291 					uint8_t *mac_addr,
292 					enum cdp_txrx_ast_entry_type type,
293 					uint32_t flags)
294 {
295 
296 	return dp_peer_add_ast((struct dp_soc *)soc_hdl,
297 				(struct dp_peer *)peer_hdl,
298 				mac_addr,
299 				type,
300 				flags);
301 }
302 
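/**
 * dp_peer_del_ast_wifi3() - cdp wrapper to delete an AST entry
 * @soc_hdl: opaque datapath soc handle
 * @ast_entry_hdl: AST entry to delete
 *
 * Acquires ast_lock around dp_peer_del_ast().
 *
 * Return: void
 */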
303 static void dp_peer_del_ast_wifi3(struct cdp_soc_t *soc_hdl,
304 					 void *ast_entry_hdl)
305 {
306 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
307 	qdf_spin_lock_bh(&soc->ast_lock);
308 	dp_peer_del_ast((struct dp_soc *)soc_hdl,
309 			(struct dp_ast_entry *)ast_entry_hdl);
310 	qdf_spin_unlock_bh(&soc->ast_lock);
311 }
312 
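/**
 * dp_peer_update_ast_wifi3() - cdp wrapper to update an AST entry
 * @soc_hdl: opaque datapath soc handle
 * @peer_hdl: peer that owns the AST entry
 * @ast_entry_hdl: AST entry to update
 * @flags: flags passed through to dp_peer_update_ast()
 *
 * Acquires ast_lock around dp_peer_update_ast().
 *
 * Return: status returned by dp_peer_update_ast()
 */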
313 static int dp_peer_update_ast_wifi3(struct cdp_soc_t *soc_hdl,
314 						struct cdp_peer *peer_hdl,
315 						void *ast_entry_hdl,
316 						uint32_t flags)
317 {
318 	int status;
319 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
320 	qdf_spin_lock_bh(&soc->ast_lock);
321 	status = dp_peer_update_ast(soc,
322 					(struct dp_peer *)peer_hdl,
323 					(struct dp_ast_entry *)ast_entry_hdl,
324 					flags);
325 	qdf_spin_unlock_bh(&soc->ast_lock);
326 	return status;
327 }
328 
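/**
 * dp_peer_ast_hash_find_wifi3() - cdp wrapper to look up an AST entry by MAC
 * @soc_hdl: opaque datapath soc handle
 * @ast_mac_addr: MAC address to look up
 *
 * Acquires ast_lock around dp_peer_ast_hash_find().
 *
 * Return: opaque AST entry handle (NULL if no matching entry is found)
 */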
329 static void *dp_peer_ast_hash_find_wifi3(struct cdp_soc_t *soc_hdl,
330 						uint8_t *ast_mac_addr)
331 {
332 	struct dp_ast_entry *ast_entry;
333 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
334 	qdf_spin_lock_bh(&soc->ast_lock);
335 	ast_entry = dp_peer_ast_hash_find(soc, ast_mac_addr);
336 	qdf_spin_unlock_bh(&soc->ast_lock);
337 	return (void *)ast_entry;
338 }
339 
340 static uint8_t dp_peer_ast_get_pdev_id_wifi3(struct cdp_soc_t *soc_hdl,
341 							void *ast_entry_hdl)
342 {
343 	return dp_peer_ast_get_pdev_id((struct dp_soc *)soc_hdl,
344 					(struct dp_ast_entry *)ast_entry_hdl);
345 }
346 
347 static uint8_t dp_peer_ast_get_next_hop_wifi3(struct cdp_soc_t *soc_hdl,
348 							void *ast_entry_hdl)
349 {
350 	return dp_peer_ast_get_next_hop((struct dp_soc *)soc_hdl,
351 					(struct dp_ast_entry *)ast_entry_hdl);
352 }
353 
354 static void dp_peer_ast_set_type_wifi3(
355 					struct cdp_soc_t *soc_hdl,
356 					void *ast_entry_hdl,
357 					enum cdp_txrx_ast_entry_type type)
358 {
359 	dp_peer_ast_set_type((struct dp_soc *)soc_hdl,
360 				(struct dp_ast_entry *)ast_entry_hdl,
361 				type);
362 }
363 
364 
365 
366 /**
367  * dp_srng_find_ring_in_mask() - find which ext_group a ring belongs to
368  * @ring_num: ring num of the ring being queried
369  * @grp_mask: the grp_mask array for the ring type in question.
370  *
371  * The grp_mask array is indexed by group number and the bit fields correspond
372  * to ring numbers.  We are finding which interrupt group a ring belongs to.
373  *
374  * Return: the index in the grp_mask array with the ring number.
375  * -QDF_STATUS_E_NOENT if no entry is found
376  */
377 static int dp_srng_find_ring_in_mask(int ring_num, int *grp_mask)
378 {
379 	int ext_group_num;
380 	int mask = 1 << ring_num;
381 
382 	for (ext_group_num = 0; ext_group_num < WLAN_CFG_INT_NUM_CONTEXTS;
383 	     ext_group_num++) {
384 		if (mask & grp_mask[ext_group_num])
385 			return ext_group_num;
386 	}
387 
388 	return -QDF_STATUS_E_NOENT;
389 }
390 
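/**
 * dp_srng_calculate_msi_group() - map a ring to its interrupt (ext) group
 * @soc: datapath soc handle
 * @ring_type: SRNG ring type
 * @ring_num: ring number within the ring type
 *
 * Picks the wlan_cfg group mask that corresponds to the ring type and then
 * uses dp_srng_find_ring_in_mask() to locate the interrupt group servicing
 * this ring.
 *
 * Return: interrupt group index, or -QDF_STATUS_E_NOENT if the ring is not
 * part of any interrupt group
 */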
391 static int dp_srng_calculate_msi_group(struct dp_soc *soc,
392 				       enum hal_ring_type ring_type,
393 				       int ring_num)
394 {
395 	int *grp_mask;
396 
397 	switch (ring_type) {
398 	case WBM2SW_RELEASE:
399 		/* dp_tx_comp_handler - soc->tx_comp_ring */
400 		if (ring_num < 3)
401 			grp_mask = &soc->wlan_cfg_ctx->int_tx_ring_mask[0];
402 
403 		/* dp_rx_wbm_err_process - soc->rx_rel_ring */
404 		else if (ring_num == 3) {
405 			/* sw treats this as a separate ring type */
406 			grp_mask = &soc->wlan_cfg_ctx->
407 				int_rx_wbm_rel_ring_mask[0];
408 			ring_num = 0;
409 		} else {
410 			qdf_assert(0);
411 			return -QDF_STATUS_E_NOENT;
412 		}
413 	break;
414 
415 	case REO_EXCEPTION:
416 		/* dp_rx_err_process - &soc->reo_exception_ring */
417 		grp_mask = &soc->wlan_cfg_ctx->int_rx_err_ring_mask[0];
418 	break;
419 
420 	case REO_DST:
421 		/* dp_rx_process - soc->reo_dest_ring */
422 		grp_mask = &soc->wlan_cfg_ctx->int_rx_ring_mask[0];
423 	break;
424 
425 	case REO_STATUS:
426 		/* dp_reo_status_ring_handler - soc->reo_status_ring */
427 		grp_mask = &soc->wlan_cfg_ctx->int_reo_status_ring_mask[0];
428 	break;
429 
430 	/* dp_rx_mon_status_srng_process - pdev->rxdma_mon_status_ring*/
431 	case RXDMA_MONITOR_STATUS:
432 	/* dp_rx_mon_dest_process - pdev->rxdma_mon_dst_ring */
433 	case RXDMA_MONITOR_DST:
434 		/* dp_mon_process */
435 		grp_mask = &soc->wlan_cfg_ctx->int_rx_mon_ring_mask[0];
436 	break;
437 	case RXDMA_DST:
438 		/* dp_rxdma_err_process */
439 		grp_mask = &soc->wlan_cfg_ctx->int_rxdma2host_ring_mask[0];
440 	break;
441 
442 	case RXDMA_BUF:
443 		grp_mask = &soc->wlan_cfg_ctx->int_host2rxdma_ring_mask[0];
444 	break;
445 
446 	case RXDMA_MONITOR_BUF:
447 		/* TODO: support low_thresh interrupt */
448 		return -QDF_STATUS_E_NOENT;
449 	break;
450 
451 	case TCL_DATA:
452 	case TCL_CMD:
453 	case REO_CMD:
454 	case SW2WBM_RELEASE:
455 	case WBM_IDLE_LINK:
456 		/* normally empty SW_TO_HW rings */
457 		return -QDF_STATUS_E_NOENT;
458 	break;
459 
460 	case TCL_STATUS:
461 	case REO_REINJECT:
462 		/* misc unused rings */
463 		return -QDF_STATUS_E_NOENT;
464 	break;
465 
466 	case CE_SRC:
467 	case CE_DST:
468 	case CE_DST_STATUS:
469 		/* CE_rings - currently handled by hif */
470 	default:
471 		return -QDF_STATUS_E_NOENT;
472 	break;
473 	}
474 
475 	return dp_srng_find_ring_in_mask(ring_num, grp_mask);
476 }
477 
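/**
 * dp_srng_msi_setup() - fill MSI address/data for a ring, if applicable
 * @soc: datapath soc handle
 * @ring_params: SRNG parameters to be updated with MSI settings
 * @ring_type: SRNG ring type
 * @ring_num: ring number within the ring type
 *
 * If the platform provides a user MSI assignment for "DP", the MSI data is
 * derived from the ring's interrupt group:
 *   msi_data = (msi_group_number % msi_data_count) + msi_data_start
 * Rings that are not part of any interrupt group are left without MSI.
 *
 * Return: void
 */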
478 static void dp_srng_msi_setup(struct dp_soc *soc, struct hal_srng_params
479 			      *ring_params, int ring_type, int ring_num)
480 {
481 	int msi_group_number;
482 	int msi_data_count;
483 	int ret;
484 	uint32_t msi_data_start, msi_irq_start, addr_low, addr_high;
485 
486 	ret = pld_get_user_msi_assignment(soc->osdev->dev, "DP",
487 					    &msi_data_count, &msi_data_start,
488 					    &msi_irq_start);
489 
490 	if (ret)
491 		return;
492 
493 	msi_group_number = dp_srng_calculate_msi_group(soc, ring_type,
494 						       ring_num);
495 	if (msi_group_number < 0) {
496 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW,
497 			FL("ring not part of an ext_group; ring_type: %d,ring_num %d"),
498 			ring_type, ring_num);
499 		ring_params->msi_addr = 0;
500 		ring_params->msi_data = 0;
501 		return;
502 	}
503 
504 	if (msi_group_number > msi_data_count) {
505 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_WARN,
506 			FL("2 msi_groups will share an msi; msi_group_num %d"),
507 			msi_group_number);
508 
509 		QDF_ASSERT(0);
510 	}
511 
512 	pld_get_msi_address(soc->osdev->dev, &addr_low, &addr_high);
513 
514 	ring_params->msi_addr = addr_low;
515 	ring_params->msi_addr |= (qdf_dma_addr_t)(((uint64_t)addr_high) << 32);
516 	ring_params->msi_data = (msi_group_number % msi_data_count)
517 		+ msi_data_start;
518 	ring_params->flags |= HAL_SRNG_MSI_INTR;
519 }
520 
521 /**
522  * dp_print_ast_stats() - Dump AST table contents
523  * @soc: Datapath soc handle
524  *
525  * Return: void
526  */
527 #ifdef FEATURE_WDS
528 static void dp_print_ast_stats(struct dp_soc *soc)
529 {
530 	uint8_t i;
531 	uint8_t num_entries = 0;
532 	struct dp_vdev *vdev;
533 	struct dp_pdev *pdev;
534 	struct dp_peer *peer;
535 	struct dp_ast_entry *ase, *tmp_ase;
536 
537 	DP_PRINT_STATS("AST Stats:");
538 	DP_PRINT_STATS("	Entries Added   = %d", soc->stats.ast.added);
539 	DP_PRINT_STATS("	Entries Deleted = %d", soc->stats.ast.deleted);
540 	DP_PRINT_STATS("	Entries Agedout = %d", soc->stats.ast.aged_out);
541 	DP_PRINT_STATS("AST Table:");
542 	for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) {
543 		pdev = soc->pdev_list[i];
544 		DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
545 			DP_VDEV_ITERATE_PEER_LIST(vdev, peer) {
546 				DP_PEER_ITERATE_ASE_LIST(peer, ase, tmp_ase) {
547 					DP_PRINT_STATS("%6d mac_addr = %pM"
548 							" peer_mac_addr = %pM"
549 							" type = %d"
550 							" next_hop = %d"
551 							" is_active = %d"
552 							" is_bss = %d"
553 							" ast_idx = %d"
554 							" pdev_id = %d"
555 							" vdev_id = %d",
556 							++num_entries,
557 							ase->mac_addr.raw,
558 							ase->peer->mac_addr.raw,
559 							ase->type,
560 							ase->next_hop,
561 							ase->is_active,
562 							ase->is_bss,
563 							ase->ast_idx,
564 							ase->pdev_id,
565 							ase->vdev_id);
566 				}
567 			}
568 		}
569 	}
570 }
571 #else
572 static void dp_print_ast_stats(struct dp_soc *soc)
573 {
574 	DP_PRINT_STATS("AST Stats not available. Enable FEATURE_WDS");
575 	return;
576 }
577 #endif
578 
579 /*
580  * dp_srng_setup - Internal function to setup SRNG rings used by data path
581  */
582 static int dp_srng_setup(struct dp_soc *soc, struct dp_srng *srng,
583 	int ring_type, int ring_num, int mac_id, uint32_t num_entries)
584 {
585 	void *hal_soc = soc->hal_soc;
586 	uint32_t entry_size = hal_srng_get_entrysize(hal_soc, ring_type);
587 	/* TODO: See if we should get align size from hal */
588 	uint32_t ring_base_align = 8;
589 	struct hal_srng_params ring_params;
590 	uint32_t max_entries = hal_srng_max_entries(hal_soc, ring_type);
591 
592 	/* TODO: Currently hal layer takes care of endianness related settings.
593 	 * See if these settings need to passed from DP layer
594 	 */
595 	ring_params.flags = 0;
596 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW,
597 		  FL("Ring type: %d, num:%d"), ring_type, ring_num);
598 
599 	num_entries = (num_entries > max_entries) ? max_entries : num_entries;
600 	srng->hal_srng = NULL;
601 	srng->alloc_size = (num_entries * entry_size) + ring_base_align - 1;
602 	srng->num_entries = num_entries;
603 	srng->base_vaddr_unaligned = qdf_mem_alloc_consistent(
604 		soc->osdev, soc->osdev->dev, srng->alloc_size,
605 		&(srng->base_paddr_unaligned));
606 
607 	if (!srng->base_vaddr_unaligned) {
608 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
609 			FL("alloc failed - ring_type: %d, ring_num %d"),
610 			ring_type, ring_num);
611 		return QDF_STATUS_E_NOMEM;
612 	}
613 
614 	ring_params.ring_base_vaddr = srng->base_vaddr_unaligned +
615 		((unsigned long)srng->base_vaddr_unaligned % ring_base_align);
616 	ring_params.ring_base_paddr = srng->base_paddr_unaligned +
617 		((unsigned long)(ring_params.ring_base_vaddr) -
618 		(unsigned long)srng->base_vaddr_unaligned);
619 	ring_params.num_entries = num_entries;
620 
621 	if (soc->intr_mode == DP_INTR_MSI) {
622 		dp_srng_msi_setup(soc, &ring_params, ring_type, ring_num);
623 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
624 			FL("Using MSI for ring_type: %d, ring_num %d"),
625 			ring_type, ring_num);
626 
627 	} else {
628 		ring_params.msi_data = 0;
629 		ring_params.msi_addr = 0;
630 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
631 			FL("Skipping MSI for ring_type: %d, ring_num %d"),
632 			ring_type, ring_num);
633 	}
634 
635 	/*
636 	 * Setup interrupt timer and batch counter thresholds for
637 	 * interrupt mitigation based on ring type
638 	 */
639 	if (ring_type == REO_DST) {
640 		ring_params.intr_timer_thres_us =
641 			wlan_cfg_get_int_timer_threshold_rx(soc->wlan_cfg_ctx);
642 		ring_params.intr_batch_cntr_thres_entries =
643 			wlan_cfg_get_int_batch_threshold_rx(soc->wlan_cfg_ctx);
644 	} else if (ring_type == WBM2SW_RELEASE && (ring_num < 3)) {
645 		ring_params.intr_timer_thres_us =
646 			wlan_cfg_get_int_timer_threshold_tx(soc->wlan_cfg_ctx);
647 		ring_params.intr_batch_cntr_thres_entries =
648 			wlan_cfg_get_int_batch_threshold_tx(soc->wlan_cfg_ctx);
649 	} else {
650 		ring_params.intr_timer_thres_us =
651 			wlan_cfg_get_int_timer_threshold_other(soc->wlan_cfg_ctx);
652 		ring_params.intr_batch_cntr_thres_entries =
653 			wlan_cfg_get_int_batch_threshold_other(soc->wlan_cfg_ctx);
654 	}
655 
656 	/* Enable low threshold interrupts for rx buffer rings (regular and
657 	 * monitor buffer rings).
658 	 * TODO: See if this is required for any other ring
659 	 */
660 	if ((ring_type == RXDMA_BUF) || (ring_type == RXDMA_MONITOR_BUF) ||
661 		(ring_type == RXDMA_MONITOR_STATUS)) {
662 		/* TODO: Setting low threshold to 1/8th of ring size
663 		 * see if this needs to be configurable
664 		 */
665 		ring_params.low_threshold = num_entries >> 3;
666 		ring_params.flags |= HAL_SRNG_LOW_THRES_INTR_ENABLE;
667 		ring_params.intr_timer_thres_us = 0x1000;
668 	}
669 
670 	srng->hal_srng = hal_srng_setup(hal_soc, ring_type, ring_num,
671 		mac_id, &ring_params);
672 
673 	if (!srng->hal_srng) {
674 		qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
675 				srng->alloc_size,
676 				srng->base_vaddr_unaligned,
677 				srng->base_paddr_unaligned, 0);
678 	}
679 
680 	return 0;
681 }
682 
683 /**
684  * dp_srng_cleanup - Internal function to cleanup SRNG rings used by data path
685  * Any buffers allocated and attached to ring entries are expected to be freed
686  * before calling this function.
687  */
688 static void dp_srng_cleanup(struct dp_soc *soc, struct dp_srng *srng,
689 	int ring_type, int ring_num)
690 {
691 	if (!srng->hal_srng) {
692 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
693 			FL("Ring type: %d, num:%d not setup"),
694 			ring_type, ring_num);
695 		return;
696 	}
697 
698 	hal_srng_cleanup(soc->hal_soc, srng->hal_srng);
699 
700 	qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
701 				srng->alloc_size,
702 				srng->base_vaddr_unaligned,
703 				srng->base_paddr_unaligned, 0);
704 	srng->hal_srng = NULL;
705 }
706 
707 /* TODO: Need this interface from HIF */
708 void *hif_get_hal_handle(void *hif_handle);
709 
710 /*
711  * dp_service_srngs() - Top level interrupt handler for DP Ring interrupts
712  * @dp_ctx: DP interrupt context handle
713  * @dp_budget: Number of frames/descriptors that can be processed in one shot
714  *
715  * Return: budget consumed (number of frames/descriptors processed)
716  */
717 static uint32_t dp_service_srngs(void *dp_ctx, uint32_t dp_budget)
718 {
719 	struct dp_intr *int_ctx = (struct dp_intr *)dp_ctx;
720 	struct dp_soc *soc = int_ctx->soc;
721 	int ring = 0;
722 	uint32_t work_done  = 0;
723 	int budget = dp_budget;
724 	uint8_t tx_mask = int_ctx->tx_ring_mask;
725 	uint8_t rx_mask = int_ctx->rx_ring_mask;
726 	uint8_t rx_err_mask = int_ctx->rx_err_ring_mask;
727 	uint8_t rx_wbm_rel_mask = int_ctx->rx_wbm_rel_ring_mask;
728 	uint8_t reo_status_mask = int_ctx->reo_status_ring_mask;
729 	uint32_t remaining_quota = dp_budget;
730 	struct dp_pdev *pdev = NULL;
731 
732 	/* Process Tx completion interrupts first to return back buffers */
733 	while (tx_mask) {
734 		if (tx_mask & 0x1) {
735 			work_done = dp_tx_comp_handler(soc,
736 					soc->tx_comp_ring[ring].hal_srng,
737 					remaining_quota);
738 
739 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
740 				"tx mask 0x%x ring %d, budget %d, work_done %d",
741 				tx_mask, ring, budget, work_done);
742 
743 			budget -= work_done;
744 			if (budget <= 0)
745 				goto budget_done;
746 
747 			remaining_quota = budget;
748 		}
749 		tx_mask = tx_mask >> 1;
750 		ring++;
751 	}
752 
753 
754 	/* Process REO Exception ring interrupt */
755 	if (rx_err_mask) {
756 		work_done = dp_rx_err_process(soc,
757 				soc->reo_exception_ring.hal_srng,
758 				remaining_quota);
759 
760 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
761 			"REO Exception Ring: work_done %d budget %d",
762 			work_done, budget);
763 
764 		budget -=  work_done;
765 		if (budget <= 0) {
766 			goto budget_done;
767 		}
768 		remaining_quota = budget;
769 	}
770 
771 	/* Process Rx WBM release ring interrupt */
772 	if (rx_wbm_rel_mask) {
773 		work_done = dp_rx_wbm_err_process(soc,
774 				soc->rx_rel_ring.hal_srng, remaining_quota);
775 
776 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
777 			"WBM Release Ring: work_done %d budget %d",
778 			work_done, budget);
779 
780 		budget -=  work_done;
781 		if (budget <= 0) {
782 			goto budget_done;
783 		}
784 		remaining_quota = budget;
785 	}
786 
787 	/* Process Rx interrupts */
788 	if (rx_mask) {
789 		for (ring = 0; ring < soc->num_reo_dest_rings; ring++) {
790 			if (rx_mask & (1 << ring)) {
791 				work_done = dp_rx_process(int_ctx,
792 					    soc->reo_dest_ring[ring].hal_srng,
793 					    remaining_quota);
794 
795 				QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
796 					"rx mask 0x%x ring %d, work_done %d budget %d",
797 					rx_mask, ring, work_done, budget);
798 
799 				budget -=  work_done;
800 				if (budget <= 0)
801 					goto budget_done;
802 				remaining_quota = budget;
803 			}
804 		}
805 		for (ring = 0; ring < MAX_RX_MAC_RINGS; ring++) {
806 			/* TODO: check why this is required */
807 			work_done = dp_rxdma_err_process(soc, ring,
808 						remaining_quota);
809 			budget -= work_done;
810 		}
811 	}
812 
813 	if (reo_status_mask)
814 		dp_reo_status_ring_handler(soc);
815 
816 	/* Process LMAC interrupts */
817 	for  (ring = 0 ; ring < MAX_PDEV_CNT; ring++) {
818 		pdev = soc->pdev_list[ring];
819 		if (pdev == NULL)
820 			continue;
821 		if (int_ctx->rx_mon_ring_mask & (1 << ring)) {
822 			work_done = dp_mon_process(soc, ring, remaining_quota);
823 			budget -= work_done;
824 			if (budget <= 0)
825 				goto budget_done;
826 			remaining_quota = budget;
827 		}
828 
829 		if (int_ctx->rxdma2host_ring_mask & (1 << ring)) {
830 			work_done = dp_rxdma_err_process(soc, ring,
831 						remaining_quota);
832 			budget -=  work_done;
833 			if (budget <= 0)
834 				goto budget_done;
835 			remaining_quota = budget;
836 		}
837 
838 		if (int_ctx->host2rxdma_ring_mask & (1 << ring)) {
839 			union dp_rx_desc_list_elem_t *desc_list = NULL;
840 			union dp_rx_desc_list_elem_t *tail = NULL;
841 			struct dp_srng *rx_refill_buf_ring =
842 				&pdev->rx_refill_buf_ring;
843 
844 			DP_STATS_INC(pdev, replenish.low_thresh_intrs, 1);
845 			dp_rx_buffers_replenish(soc, ring,
846 				rx_refill_buf_ring,
847 				&soc->rx_desc_buf[ring], 0,
848 				&desc_list, &tail, HAL_RX_BUF_RBM_SW3_BM);
849 		}
850 	}
851 
852 	qdf_lro_flush(int_ctx->lro_ctx);
853 
854 budget_done:
855 	return dp_budget - budget;
856 }
857 
858 #ifdef DP_INTR_POLL_BASED
859 /* dp_interrupt_timer()- timer poll for interrupts
860  *
861  * @arg: SoC Handle
862  *
863  * Return: void
864  *
865  */
866 static void dp_interrupt_timer(void *arg)
867 {
868 	struct dp_soc *soc = (struct dp_soc *) arg;
869 	int i;
870 
871 	if (qdf_atomic_read(&soc->cmn_init_done)) {
872 		for (i = 0;
873 			i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++)
874 			dp_service_srngs(&soc->intr_ctx[i], 0xffff);
875 
876 		qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
877 	}
878 }
879 
880 /*
881  * dp_soc_interrupt_attach_poll() - Register handlers for DP interrupts
882  * @txrx_soc: DP SOC handle
883  *
884  * The host driver registers "DP_NUM_INTERRUPT_CONTEXTS" NAPI contexts.
885  * Each NAPI context will have a tx_ring_mask, rx_ring_mask, and
886  * rx_mon_ring_mask to indicate the rings that are processed by the handler.
887  *
888  * Return: 0 for success. nonzero for failure.
889  */
890 static QDF_STATUS dp_soc_interrupt_attach_poll(void *txrx_soc)
891 {
892 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
893 	int i;
894 
895 	soc->intr_mode = DP_INTR_POLL;
896 
897 	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
898 		soc->intr_ctx[i].dp_intr_id = i;
899 		soc->intr_ctx[i].tx_ring_mask =
900 			wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, i);
901 		soc->intr_ctx[i].rx_ring_mask =
902 			wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, i);
903 		soc->intr_ctx[i].rx_mon_ring_mask =
904 			wlan_cfg_get_rx_mon_ring_mask(soc->wlan_cfg_ctx, i);
905 		soc->intr_ctx[i].rx_err_ring_mask =
906 			wlan_cfg_get_rx_err_ring_mask(soc->wlan_cfg_ctx, i);
907 		soc->intr_ctx[i].rx_wbm_rel_ring_mask =
908 			wlan_cfg_get_rx_wbm_rel_ring_mask(soc->wlan_cfg_ctx, i);
909 		soc->intr_ctx[i].reo_status_ring_mask =
910 			wlan_cfg_get_reo_status_ring_mask(soc->wlan_cfg_ctx, i);
911 		soc->intr_ctx[i].rxdma2host_ring_mask =
912 			wlan_cfg_get_rxdma2host_ring_mask(soc->wlan_cfg_ctx, i);
913 		soc->intr_ctx[i].soc = soc;
914 		soc->intr_ctx[i].lro_ctx = qdf_lro_init();
915 	}
916 
917 	qdf_timer_init(soc->osdev, &soc->int_timer,
918 			dp_interrupt_timer, (void *)soc,
919 			QDF_TIMER_TYPE_WAKE_APPS);
920 
921 	return QDF_STATUS_SUCCESS;
922 }
923 
924 #if defined(CONFIG_MCL)
925 extern int con_mode_monitor;
926 static QDF_STATUS dp_soc_interrupt_attach(void *txrx_soc);
927 /*
928  * dp_soc_interrupt_attach_wrapper() - Register handlers for DP interrupts
929  * @txrx_soc: DP SOC handle
930  *
931  * Call the appropriate attach function based on the mode of operation.
932  * This is a WAR for enabling monitor mode.
933  *
934  * Return: 0 for success. nonzero for failure.
935  */
936 static QDF_STATUS dp_soc_interrupt_attach_wrapper(void *txrx_soc)
937 {
938 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
939 
940 	if (!(soc->wlan_cfg_ctx->napi_enabled) ||
941 	     con_mode_monitor == QDF_GLOBAL_MONITOR_MODE) {
942 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
943 				  "%s: Poll mode", __func__);
944 		return dp_soc_interrupt_attach_poll(txrx_soc);
945 	} else {
946 
947 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
948 				  "%s: Interrupt  mode", __func__);
949 		return dp_soc_interrupt_attach(txrx_soc);
950 	}
951 }
952 #else
953 static QDF_STATUS dp_soc_interrupt_attach_wrapper(void *txrx_soc)
954 {
955 	return dp_soc_interrupt_attach_poll(txrx_soc);
956 }
957 #endif
958 #endif
959 
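/**
 * dp_soc_interrupt_map_calculate_integrated() - build legacy IRQ id map
 * @soc: datapath soc handle
 * @intr_ctx_num: interrupt context number
 * @irq_id_map: output array of IRQ ids handled by this interrupt context
 * @num_irq_r: output count of entries filled in @irq_id_map
 *
 * For integrated (non-MSI) targets, walk the per-context ring masks from
 * wlan_cfg and translate each enabled ring bit into its hardware IRQ id.
 *
 * Return: void
 */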
960 static void dp_soc_interrupt_map_calculate_integrated(struct dp_soc *soc,
961 		int intr_ctx_num, int *irq_id_map, int *num_irq_r)
962 {
963 	int j;
964 	int num_irq = 0;
965 
966 	int tx_mask =
967 		wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
968 	int rx_mask =
969 		wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
970 	int rx_mon_mask =
971 		wlan_cfg_get_rx_mon_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
972 	int rx_err_ring_mask = wlan_cfg_get_rx_err_ring_mask(
973 					soc->wlan_cfg_ctx, intr_ctx_num);
974 	int rx_wbm_rel_ring_mask = wlan_cfg_get_rx_wbm_rel_ring_mask(
975 					soc->wlan_cfg_ctx, intr_ctx_num);
976 	int reo_status_ring_mask = wlan_cfg_get_reo_status_ring_mask(
977 					soc->wlan_cfg_ctx, intr_ctx_num);
978 	int rxdma2host_ring_mask = wlan_cfg_get_rxdma2host_ring_mask(
979 					soc->wlan_cfg_ctx, intr_ctx_num);
980 	int host2rxdma_ring_mask = wlan_cfg_get_host2rxdma_ring_mask(
981 					soc->wlan_cfg_ctx, intr_ctx_num);
982 
983 	for (j = 0; j < HIF_MAX_GRP_IRQ; j++) {
984 
985 		if (tx_mask & (1 << j)) {
986 			irq_id_map[num_irq++] =
987 				(wbm2host_tx_completions_ring1 - j);
988 		}
989 
990 		if (rx_mask & (1 << j)) {
991 			irq_id_map[num_irq++] =
992 				(reo2host_destination_ring1 - j);
993 		}
994 
995 		if (rxdma2host_ring_mask & (1 << j)) {
996 			irq_id_map[num_irq++] =
997 				rxdma2host_destination_ring_mac1 -
998 				wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, j);
999 		}
1000 
1001 		if (host2rxdma_ring_mask & (1 << j)) {
1002 			irq_id_map[num_irq++] =
1003 				host2rxdma_host_buf_ring_mac1 -
1004 				wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, j);
1005 		}
1006 
1007 		if (rx_mon_mask & (1 << j)) {
1008 			irq_id_map[num_irq++] =
1009 				ppdu_end_interrupts_mac1 -
1010 				wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, j);
1011 			irq_id_map[num_irq++] =
1012 				rxdma2host_monitor_status_ring_mac1 -
1013 				wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, j);
1014 		}
1015 
1016 		if (rx_wbm_rel_ring_mask & (1 << j))
1017 			irq_id_map[num_irq++] = wbm2host_rx_release;
1018 
1019 		if (rx_err_ring_mask & (1 << j))
1020 			irq_id_map[num_irq++] = reo2host_exception;
1021 
1022 		if (reo_status_ring_mask & (1 << j))
1023 			irq_id_map[num_irq++] = reo2host_status;
1024 
1025 	}
1026 	*num_irq_r = num_irq;
1027 }
1028 
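/**
 * dp_soc_interrupt_map_calculate_msi() - build MSI IRQ id map
 * @soc: datapath soc handle
 * @intr_ctx_num: interrupt context number
 * @irq_id_map: output array of IRQ ids handled by this interrupt context
 * @num_irq_r: output count of entries filled in @irq_id_map
 * @msi_vector_count: number of MSI vectors assigned to "DP"
 * @msi_vector_start: first MSI vector assigned to "DP"
 *
 * Each interrupt context that services at least one ring is mapped to the
 * MSI vector (intr_ctx_num % msi_vector_count) + msi_vector_start.
 *
 * Return: void
 */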
1029 static void dp_soc_interrupt_map_calculate_msi(struct dp_soc *soc,
1030 		int intr_ctx_num, int *irq_id_map, int *num_irq_r,
1031 		int msi_vector_count, int msi_vector_start)
1032 {
1033 	int tx_mask = wlan_cfg_get_tx_ring_mask(
1034 					soc->wlan_cfg_ctx, intr_ctx_num);
1035 	int rx_mask = wlan_cfg_get_rx_ring_mask(
1036 					soc->wlan_cfg_ctx, intr_ctx_num);
1037 	int rx_mon_mask = wlan_cfg_get_rx_mon_ring_mask(
1038 					soc->wlan_cfg_ctx, intr_ctx_num);
1039 	int rx_err_ring_mask = wlan_cfg_get_rx_err_ring_mask(
1040 					soc->wlan_cfg_ctx, intr_ctx_num);
1041 	int rx_wbm_rel_ring_mask = wlan_cfg_get_rx_wbm_rel_ring_mask(
1042 					soc->wlan_cfg_ctx, intr_ctx_num);
1043 	int reo_status_ring_mask = wlan_cfg_get_reo_status_ring_mask(
1044 					soc->wlan_cfg_ctx, intr_ctx_num);
1045 	int rxdma2host_ring_mask = wlan_cfg_get_rxdma2host_ring_mask(
1046 					soc->wlan_cfg_ctx, intr_ctx_num);
1047 
1048 	unsigned int vector =
1049 		(intr_ctx_num % msi_vector_count) + msi_vector_start;
1050 	int num_irq = 0;
1051 
1052 	soc->intr_mode = DP_INTR_MSI;
1053 
1054 	if (tx_mask | rx_mask | rx_mon_mask | rx_err_ring_mask |
1055 	    rx_wbm_rel_ring_mask | reo_status_ring_mask | rxdma2host_ring_mask)
1056 		irq_id_map[num_irq++] =
1057 			pld_get_msi_irq(soc->osdev->dev, vector);
1058 
1059 	*num_irq_r = num_irq;
1060 }
1061 
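/**
 * dp_soc_interrupt_map_calculate() - build the IRQ id map for a context
 * @soc: datapath soc handle
 * @intr_ctx_num: interrupt context number
 * @irq_id_map: output array of IRQ ids handled by this interrupt context
 * @num_irq: output count of entries filled in @irq_id_map
 *
 * Uses the MSI map when the platform provides a user MSI assignment for
 * "DP", and falls back to the integrated (legacy IRQ) map otherwise.
 *
 * Return: void
 */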
1062 static void dp_soc_interrupt_map_calculate(struct dp_soc *soc, int intr_ctx_num,
1063 				    int *irq_id_map, int *num_irq)
1064 {
1065 	int msi_vector_count, ret;
1066 	uint32_t msi_base_data, msi_vector_start;
1067 
1068 	ret = pld_get_user_msi_assignment(soc->osdev->dev, "DP",
1069 					    &msi_vector_count,
1070 					    &msi_base_data,
1071 					    &msi_vector_start);
1072 	if (ret)
1073 		return dp_soc_interrupt_map_calculate_integrated(soc,
1074 				intr_ctx_num, irq_id_map, num_irq);
1075 
1076 	else
1077 		dp_soc_interrupt_map_calculate_msi(soc,
1078 				intr_ctx_num, irq_id_map, num_irq,
1079 				msi_vector_count, msi_vector_start);
1080 }
1081 
1082 /*
1083  * dp_soc_interrupt_attach() - Register handlers for DP interrupts
1084  * @txrx_soc: DP SOC handle
1085  *
1086  * The host driver registers "DP_NUM_INTERRUPT_CONTEXTS" NAPI contexts.
1087  * Each NAPI context will have a tx_ring_mask, rx_ring_mask, and
1088  * rx_mon_ring_mask to indicate the rings that are processed by the handler.
1089  *
1090  * Return: 0 for success. nonzero for failure.
1091  */
1092 static QDF_STATUS dp_soc_interrupt_attach(void *txrx_soc)
1093 {
1094 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
1095 
1096 	int i = 0;
1097 	int num_irq = 0;
1098 
1099 	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
1100 		int ret = 0;
1101 
1102 		/* Map of IRQ ids registered with one interrupt context */
1103 		int irq_id_map[HIF_MAX_GRP_IRQ];
1104 
1105 		int tx_mask =
1106 			wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, i);
1107 		int rx_mask =
1108 			wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, i);
1109 		int rx_mon_mask =
1110 			wlan_cfg_get_rx_mon_ring_mask(soc->wlan_cfg_ctx, i);
1111 		int rx_err_ring_mask =
1112 			wlan_cfg_get_rx_err_ring_mask(soc->wlan_cfg_ctx, i);
1113 		int rx_wbm_rel_ring_mask =
1114 			wlan_cfg_get_rx_wbm_rel_ring_mask(soc->wlan_cfg_ctx, i);
1115 		int reo_status_ring_mask =
1116 			wlan_cfg_get_reo_status_ring_mask(soc->wlan_cfg_ctx, i);
1117 		int rxdma2host_ring_mask =
1118 			wlan_cfg_get_rxdma2host_ring_mask(soc->wlan_cfg_ctx, i);
1119 		int host2rxdma_ring_mask =
1120 			wlan_cfg_get_host2rxdma_ring_mask(soc->wlan_cfg_ctx, i);
1121 
1122 
1123 		soc->intr_ctx[i].dp_intr_id = i;
1124 		soc->intr_ctx[i].tx_ring_mask = tx_mask;
1125 		soc->intr_ctx[i].rx_ring_mask = rx_mask;
1126 		soc->intr_ctx[i].rx_mon_ring_mask = rx_mon_mask;
1127 		soc->intr_ctx[i].rx_err_ring_mask = rx_err_ring_mask;
1128 		soc->intr_ctx[i].rxdma2host_ring_mask = rxdma2host_ring_mask;
1129 		soc->intr_ctx[i].host2rxdma_ring_mask = host2rxdma_ring_mask;
1130 		soc->intr_ctx[i].rx_wbm_rel_ring_mask = rx_wbm_rel_ring_mask;
1131 		soc->intr_ctx[i].reo_status_ring_mask = reo_status_ring_mask;
1132 
1133 		soc->intr_ctx[i].soc = soc;
1134 
1135 		num_irq = 0;
1136 
1137 		dp_soc_interrupt_map_calculate(soc, i, &irq_id_map[0],
1138 					       &num_irq);
1139 
1140 		ret = hif_register_ext_group(soc->hif_handle,
1141 				num_irq, irq_id_map, dp_service_srngs,
1142 				&soc->intr_ctx[i], "dp_intr",
1143 				HIF_EXEC_NAPI_TYPE, QCA_NAPI_DEF_SCALE_BIN_SHIFT);
1144 
1145 		if (ret) {
1146 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1147 			FL("failed, ret = %d"), ret);
1148 
1149 			return QDF_STATUS_E_FAILURE;
1150 		}
1151 		soc->intr_ctx[i].lro_ctx = qdf_lro_init();
1152 	}
1153 
1154 	hif_configure_ext_group_interrupts(soc->hif_handle);
1155 
1156 	return QDF_STATUS_SUCCESS;
1157 }
1158 
1159 /*
1160  * dp_soc_interrupt_detach() - Detach interrupt handlers and free related allocations
1161  * @txrx_soc: DP SOC handle
1162  *
1163  * Return: void
1164  */
1165 static void dp_soc_interrupt_detach(void *txrx_soc)
1166 {
1167 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
1168 	int i;
1169 
1170 	if (soc->intr_mode == DP_INTR_POLL) {
1171 		qdf_timer_stop(&soc->int_timer);
1172 		qdf_timer_free(&soc->int_timer);
1173 	} else {
1174 		hif_deregister_exec_group(soc->hif_handle, "dp_intr");
1175 	}
1176 
1177 	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
1178 		soc->intr_ctx[i].tx_ring_mask = 0;
1179 		soc->intr_ctx[i].rx_ring_mask = 0;
1180 		soc->intr_ctx[i].rx_mon_ring_mask = 0;
1181 		soc->intr_ctx[i].rx_err_ring_mask = 0;
1182 		soc->intr_ctx[i].rx_wbm_rel_ring_mask = 0;
1183 		soc->intr_ctx[i].reo_status_ring_mask = 0;
1184 		soc->intr_ctx[i].rxdma2host_ring_mask = 0;
1185 		soc->intr_ctx[i].host2rxdma_ring_mask = 0;
1186 
1187 		qdf_lro_deinit(soc->intr_ctx[i].lro_ctx);
1188 	}
1189 }
1190 
1191 #define AVG_MAX_MPDUS_PER_TID 128
1192 #define AVG_TIDS_PER_CLIENT 2
1193 #define AVG_FLOWS_PER_TID 2
1194 #define AVG_MSDUS_PER_FLOW 128
1195 #define AVG_MSDUS_PER_MPDU 4
1196 
1197 /*
1198  * Allocate and setup link descriptor pool that will be used by HW for
1199  * various link and queue descriptors and managed by WBM
1200  */
1201 static int dp_hw_link_desc_pool_setup(struct dp_soc *soc)
1202 {
1203 	int link_desc_size = hal_get_link_desc_size(soc->hal_soc);
1204 	int link_desc_align = hal_get_link_desc_align(soc->hal_soc);
1205 	uint32_t max_clients = wlan_cfg_get_max_clients(soc->wlan_cfg_ctx);
1206 	uint32_t num_mpdus_per_link_desc =
1207 		hal_num_mpdus_per_link_desc(soc->hal_soc);
1208 	uint32_t num_msdus_per_link_desc =
1209 		hal_num_msdus_per_link_desc(soc->hal_soc);
1210 	uint32_t num_mpdu_links_per_queue_desc =
1211 		hal_num_mpdu_links_per_queue_desc(soc->hal_soc);
1212 	uint32_t max_alloc_size = wlan_cfg_max_alloc_size(soc->wlan_cfg_ctx);
1213 	uint32_t total_link_descs, total_mem_size;
1214 	uint32_t num_mpdu_link_descs, num_mpdu_queue_descs;
1215 	uint32_t num_tx_msdu_link_descs, num_rx_msdu_link_descs;
1216 	uint32_t num_link_desc_banks;
1217 	uint32_t last_bank_size = 0;
1218 	uint32_t entry_size, num_entries;
1219 	int i;
1220 	uint32_t desc_id = 0;
1221 
1222 	/* Only Tx queue descriptors are allocated from the common link descriptor
1223 	 * pool. Rx queue descriptors (REO queue extension descriptors) are not
1224 	 * included here because they are expected to be allocated contiguously
1225 	 * with the REO queue descriptors.
1226 	 */
1227 	num_mpdu_link_descs = (max_clients * AVG_TIDS_PER_CLIENT *
1228 		AVG_MAX_MPDUS_PER_TID) / num_mpdus_per_link_desc;
1229 
1230 	num_mpdu_queue_descs = num_mpdu_link_descs /
1231 		num_mpdu_links_per_queue_desc;
1232 
1233 	num_tx_msdu_link_descs = (max_clients * AVG_TIDS_PER_CLIENT *
1234 		AVG_FLOWS_PER_TID * AVG_MSDUS_PER_FLOW) /
1235 		num_msdus_per_link_desc;
1236 
1237 	num_rx_msdu_link_descs = (max_clients * AVG_TIDS_PER_CLIENT *
1238 		AVG_MAX_MPDUS_PER_TID * AVG_MSDUS_PER_MPDU) / 6;
1239 
1240 	num_entries = num_mpdu_link_descs + num_mpdu_queue_descs +
1241 		num_tx_msdu_link_descs + num_rx_msdu_link_descs;
1242 
1243 	/* Round up to power of 2 */
1244 	total_link_descs = 1;
1245 	while (total_link_descs < num_entries)
1246 		total_link_descs <<= 1;
1247 
1248 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
1249 		FL("total_link_descs: %u, link_desc_size: %d"),
1250 		total_link_descs, link_desc_size);
1251 	total_mem_size =  total_link_descs * link_desc_size;
1252 
1253 	total_mem_size += link_desc_align;
1254 
1255 	if (total_mem_size <= max_alloc_size) {
1256 		num_link_desc_banks = 0;
1257 		last_bank_size = total_mem_size;
1258 	} else {
1259 		num_link_desc_banks = (total_mem_size) /
1260 			(max_alloc_size - link_desc_align);
1261 		last_bank_size = total_mem_size %
1262 			(max_alloc_size - link_desc_align);
1263 	}
1264 
1265 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
1266 		FL("total_mem_size: %d, num_link_desc_banks: %u"),
1267 		total_mem_size, num_link_desc_banks);
1268 
1269 	for (i = 0; i < num_link_desc_banks; i++) {
1270 		soc->link_desc_banks[i].base_vaddr_unaligned =
1271 			qdf_mem_alloc_consistent(soc->osdev, soc->osdev->dev,
1272 			max_alloc_size,
1273 			&(soc->link_desc_banks[i].base_paddr_unaligned));
1274 		soc->link_desc_banks[i].size = max_alloc_size;
1275 
1276 		soc->link_desc_banks[i].base_vaddr = (void *)((unsigned long)(
1277 			soc->link_desc_banks[i].base_vaddr_unaligned) +
1278 			((unsigned long)(
1279 			soc->link_desc_banks[i].base_vaddr_unaligned) %
1280 			link_desc_align));
1281 
1282 		soc->link_desc_banks[i].base_paddr = (unsigned long)(
1283 			soc->link_desc_banks[i].base_paddr_unaligned) +
1284 			((unsigned long)(soc->link_desc_banks[i].base_vaddr) -
1285 			(unsigned long)(
1286 			soc->link_desc_banks[i].base_vaddr_unaligned));
1287 
1288 		if (!soc->link_desc_banks[i].base_vaddr_unaligned) {
1289 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1290 				FL("Link descriptor memory alloc failed"));
1291 			goto fail;
1292 		}
1293 	}
1294 
1295 	if (last_bank_size) {
1296 		/* Allocate the last bank in case the total memory required is not
1297 		 * an exact multiple of max_alloc_size
1298 		 */
1299 		soc->link_desc_banks[i].base_vaddr_unaligned =
1300 			qdf_mem_alloc_consistent(soc->osdev, soc->osdev->dev,
1301 			last_bank_size,
1302 			&(soc->link_desc_banks[i].base_paddr_unaligned));
1303 		soc->link_desc_banks[i].size = last_bank_size;
1304 
1305 		soc->link_desc_banks[i].base_vaddr = (void *)((unsigned long)
1306 			(soc->link_desc_banks[i].base_vaddr_unaligned) +
1307 			((unsigned long)(
1308 			soc->link_desc_banks[i].base_vaddr_unaligned) %
1309 			link_desc_align));
1310 
1311 		soc->link_desc_banks[i].base_paddr =
1312 			(unsigned long)(
1313 			soc->link_desc_banks[i].base_paddr_unaligned) +
1314 			((unsigned long)(soc->link_desc_banks[i].base_vaddr) -
1315 			(unsigned long)(
1316 			soc->link_desc_banks[i].base_vaddr_unaligned));
1317 	}
1318 
1319 
1320 	/* Allocate and setup link descriptor idle list for HW internal use */
1321 	entry_size = hal_srng_get_entrysize(soc->hal_soc, WBM_IDLE_LINK);
1322 	total_mem_size = entry_size * total_link_descs;
1323 
1324 	if (total_mem_size <= max_alloc_size) {
1325 		void *desc;
1326 
1327 		if (dp_srng_setup(soc, &soc->wbm_idle_link_ring,
1328 			WBM_IDLE_LINK, 0, 0, total_link_descs)) {
1329 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1330 				FL("Link desc idle ring setup failed"));
1331 			goto fail;
1332 		}
1333 
1334 		hal_srng_access_start_unlocked(soc->hal_soc,
1335 			soc->wbm_idle_link_ring.hal_srng);
1336 
1337 		for (i = 0; i < MAX_LINK_DESC_BANKS &&
1338 			soc->link_desc_banks[i].base_paddr; i++) {
1339 			uint32_t num_entries = (soc->link_desc_banks[i].size -
1340 				((unsigned long)(
1341 				soc->link_desc_banks[i].base_vaddr) -
1342 				(unsigned long)(
1343 				soc->link_desc_banks[i].base_vaddr_unaligned)))
1344 				/ link_desc_size;
1345 			unsigned long paddr = (unsigned long)(
1346 				soc->link_desc_banks[i].base_paddr);
1347 
1348 			while (num_entries && (desc = hal_srng_src_get_next(
1349 				soc->hal_soc,
1350 				soc->wbm_idle_link_ring.hal_srng))) {
1351 				hal_set_link_desc_addr(desc,
1352 					LINK_DESC_COOKIE(desc_id, i), paddr);
1353 				num_entries--;
1354 				desc_id++;
1355 				paddr += link_desc_size;
1356 			}
1357 		}
1358 		hal_srng_access_end_unlocked(soc->hal_soc,
1359 			soc->wbm_idle_link_ring.hal_srng);
1360 	} else {
1361 		uint32_t num_scatter_bufs;
1362 		uint32_t num_entries_per_buf;
1363 		uint32_t rem_entries;
1364 		uint8_t *scatter_buf_ptr;
1365 		uint16_t scatter_buf_num;
1366 
1367 		soc->wbm_idle_scatter_buf_size =
1368 			hal_idle_list_scatter_buf_size(soc->hal_soc);
1369 		num_entries_per_buf = hal_idle_scatter_buf_num_entries(
1370 			soc->hal_soc, soc->wbm_idle_scatter_buf_size);
1371 		num_scatter_bufs = hal_idle_list_num_scatter_bufs(
1372 					soc->hal_soc, total_mem_size,
1373 					soc->wbm_idle_scatter_buf_size);
1374 
1375 		for (i = 0; i < num_scatter_bufs; i++) {
1376 			soc->wbm_idle_scatter_buf_base_vaddr[i] =
1377 				qdf_mem_alloc_consistent(soc->osdev, soc->osdev->dev,
1378 				soc->wbm_idle_scatter_buf_size,
1379 				&(soc->wbm_idle_scatter_buf_base_paddr[i]));
1380 			if (soc->wbm_idle_scatter_buf_base_vaddr[i] == NULL) {
1381 				QDF_TRACE(QDF_MODULE_ID_DP,
1382 					QDF_TRACE_LEVEL_ERROR,
1383 					FL("Scatter list memory alloc failed"));
1384 				goto fail;
1385 			}
1386 		}
1387 
1388 		/* Populate idle list scatter buffers with link descriptor
1389 		 * pointers
1390 		 */
1391 		scatter_buf_num = 0;
1392 		scatter_buf_ptr = (uint8_t *)(
1393 			soc->wbm_idle_scatter_buf_base_vaddr[scatter_buf_num]);
1394 		rem_entries = num_entries_per_buf;
1395 
1396 		for (i = 0; i < MAX_LINK_DESC_BANKS &&
1397 			soc->link_desc_banks[i].base_paddr; i++) {
1398 			uint32_t num_link_descs =
1399 				(soc->link_desc_banks[i].size -
1400 				((unsigned long)(
1401 				soc->link_desc_banks[i].base_vaddr) -
1402 				(unsigned long)(
1403 				soc->link_desc_banks[i].base_vaddr_unaligned)))
1404 				/ link_desc_size;
1405 			unsigned long paddr = (unsigned long)(
1406 				soc->link_desc_banks[i].base_paddr);
1407 
1408 			while (num_link_descs) {
1409 				hal_set_link_desc_addr((void *)scatter_buf_ptr,
1410 					LINK_DESC_COOKIE(desc_id, i), paddr);
1411 				num_link_descs--;
1412 				desc_id++;
1413 				paddr += link_desc_size;
1414 				rem_entries--;
1415 				if (rem_entries) {
1416 					scatter_buf_ptr += entry_size;
1417 				} else {
1418 					rem_entries = num_entries_per_buf;
1419 					scatter_buf_num++;
1420 
1421 					if (scatter_buf_num >= num_scatter_bufs)
1422 						break;
1423 
1424 					scatter_buf_ptr = (uint8_t *)(
1425 						soc->wbm_idle_scatter_buf_base_vaddr[
1426 						scatter_buf_num]);
1427 				}
1428 			}
1429 		}
1430 		/* Setup link descriptor idle list in HW */
1431 		hal_setup_link_idle_list(soc->hal_soc,
1432 			soc->wbm_idle_scatter_buf_base_paddr,
1433 			soc->wbm_idle_scatter_buf_base_vaddr,
1434 			num_scatter_bufs, soc->wbm_idle_scatter_buf_size,
1435 			(uint32_t)(scatter_buf_ptr -
1436 			(uint8_t *)(soc->wbm_idle_scatter_buf_base_vaddr[
1437 			scatter_buf_num-1])), total_link_descs);
1438 	}
1439 	return 0;
1440 
1441 fail:
1442 	if (soc->wbm_idle_link_ring.hal_srng) {
1443 		dp_srng_cleanup(soc, &soc->wbm_idle_link_ring,
1444 			WBM_IDLE_LINK, 0);
1445 	}
1446 
1447 	for (i = 0; i < MAX_IDLE_SCATTER_BUFS; i++) {
1448 		if (soc->wbm_idle_scatter_buf_base_vaddr[i]) {
1449 			qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
1450 				soc->wbm_idle_scatter_buf_size,
1451 				soc->wbm_idle_scatter_buf_base_vaddr[i],
1452 				soc->wbm_idle_scatter_buf_base_paddr[i], 0);
1453 			soc->wbm_idle_scatter_buf_base_vaddr[i] = NULL;
1454 		}
1455 	}
1456 
1457 	for (i = 0; i < MAX_LINK_DESC_BANKS; i++) {
1458 		if (soc->link_desc_banks[i].base_vaddr_unaligned) {
1459 			qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
1460 				soc->link_desc_banks[i].size,
1461 				soc->link_desc_banks[i].base_vaddr_unaligned,
1462 				soc->link_desc_banks[i].base_paddr_unaligned,
1463 				0);
1464 			soc->link_desc_banks[i].base_vaddr_unaligned = NULL;
1465 		}
1466 	}
1467 	return QDF_STATUS_E_FAILURE;
1468 }
1469 
1470 /*
1471  * Free the link descriptor pool that was set up for HW use
1472  */
1473 static void dp_hw_link_desc_pool_cleanup(struct dp_soc *soc)
1474 {
1475 	int i;
1476 
1477 	if (soc->wbm_idle_link_ring.hal_srng) {
1478 		dp_srng_cleanup(soc, &soc->wbm_idle_link_ring,
1479 			WBM_IDLE_LINK, 0);
1480 	}
1481 
1482 	for (i = 0; i < MAX_IDLE_SCATTER_BUFS; i++) {
1483 		if (soc->wbm_idle_scatter_buf_base_vaddr[i]) {
1484 			qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
1485 				soc->wbm_idle_scatter_buf_size,
1486 				soc->wbm_idle_scatter_buf_base_vaddr[i],
1487 				soc->wbm_idle_scatter_buf_base_paddr[i], 0);
1488 			soc->wbm_idle_scatter_buf_base_vaddr[i] = NULL;
1489 		}
1490 	}
1491 
1492 	for (i = 0; i < MAX_LINK_DESC_BANKS; i++) {
1493 		if (soc->link_desc_banks[i].base_vaddr_unaligned) {
1494 			qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
1495 				soc->link_desc_banks[i].size,
1496 				soc->link_desc_banks[i].base_vaddr_unaligned,
1497 				soc->link_desc_banks[i].base_paddr_unaligned,
1498 				0);
1499 			soc->link_desc_banks[i].base_vaddr_unaligned = NULL;
1500 		}
1501 	}
1502 }
1503 
1504 /* TODO: Following should be configurable */
1505 #define WBM_RELEASE_RING_SIZE 64
1506 #define TCL_CMD_RING_SIZE 32
1507 #define TCL_STATUS_RING_SIZE 32
1508 #if defined(QCA_WIFI_QCA6290)
1509 #define REO_DST_RING_SIZE 1024
1510 #else
1511 #define REO_DST_RING_SIZE 2048
1512 #endif
1513 #define REO_REINJECT_RING_SIZE 32
1514 #define RX_RELEASE_RING_SIZE 1024
1515 #define REO_EXCEPTION_RING_SIZE 128
1516 #define REO_CMD_RING_SIZE 64
1517 #define REO_STATUS_RING_SIZE 128
1518 #define RXDMA_BUF_RING_SIZE 1024
1519 #define RXDMA_REFILL_RING_SIZE 4096
1520 #define RXDMA_MONITOR_BUF_RING_SIZE 4096
1521 #define RXDMA_MONITOR_DST_RING_SIZE 2048
1522 #define RXDMA_MONITOR_STATUS_RING_SIZE 1024
1523 #define RXDMA_MONITOR_DESC_RING_SIZE 4096
1524 #define RXDMA_ERR_DST_RING_SIZE 1024
1525 
1526 /*
1527  * dp_wds_aging_timer_fn() - Timer callback function for WDS aging
1528  * @soc: Datapath SOC handle
1529  *
1530  * This is a timer function used to age out stale WDS nodes from
1531  * the AST table
1532  */
1533 #ifdef FEATURE_WDS
1534 static void dp_wds_aging_timer_fn(void *soc_hdl)
1535 {
1536 	struct dp_soc *soc = (struct dp_soc *) soc_hdl;
1537 	struct dp_pdev *pdev;
1538 	struct dp_vdev *vdev;
1539 	struct dp_peer *peer;
1540 	struct dp_ast_entry *ase, *temp_ase;
1541 	int i;
1542 
1543 	qdf_spin_lock_bh(&soc->ast_lock);
1544 
1545 	for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) {
1546 		pdev = soc->pdev_list[i];
1547 		DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
1548 			DP_VDEV_ITERATE_PEER_LIST(vdev, peer) {
1549 				DP_PEER_ITERATE_ASE_LIST(peer, ase, temp_ase) {
1550 					/*
1551 					 * Do not expire static ast entries
1552 					 * and HM WDS entries
1553 					 */
1554 					if (ase->type ==
1555 						CDP_TXRX_AST_TYPE_STATIC ||
1556 						ase->type ==
1557 						CDP_TXRX_AST_TYPE_WDS_HM)
1558 						continue;
1559 
1560 					if (ase->is_active) {
1561 						ase->is_active = FALSE;
1562 						continue;
1563 					}
1564 
1565 					DP_STATS_INC(soc, ast.aged_out, 1);
1566 					dp_peer_del_ast(soc, ase);
1567 				}
1568 			}
1569 		}
1570 
1571 	}
1572 
1573 	qdf_spin_unlock_bh(&soc->ast_lock);
1574 
1575 	if (qdf_atomic_read(&soc->cmn_init_done))
1576 		qdf_timer_mod(&soc->wds_aging_timer, DP_WDS_AGING_TIMER_DEFAULT_MS);
1577 }
1578 
1579 /*
1580  * dp_soc_wds_attach() - Setup WDS timer and AST table
1581  * @soc:		Datapath SOC handle
1582  *
1583  * Return: None
1584  */
1585 static void dp_soc_wds_attach(struct dp_soc *soc)
1586 {
1587 	qdf_timer_init(soc->osdev, &soc->wds_aging_timer,
1588 			dp_wds_aging_timer_fn, (void *)soc,
1589 			QDF_TIMER_TYPE_WAKE_APPS);
1590 
1591 	qdf_timer_mod(&soc->wds_aging_timer, DP_WDS_AGING_TIMER_DEFAULT_MS);
1592 }
1593 
1594 /*
1595  * dp_soc_wds_detach() - Detach WDS data structures and timers
1596  * @txrx_soc: DP SOC handle
1597  *
1598  * Return: None
1599  */
1600 static void dp_soc_wds_detach(struct dp_soc *soc)
1601 {
1602 	qdf_timer_stop(&soc->wds_aging_timer);
1603 	qdf_timer_free(&soc->wds_aging_timer);
1604 }
1605 #else
1606 static void dp_soc_wds_attach(struct dp_soc *soc)
1607 {
1608 }
1609 
1610 static void dp_soc_wds_detach(struct dp_soc *soc)
1611 {
1612 }
1613 #endif
1614 
1615 /*
1616  * dp_soc_reset_cpu_ring_map() - Reset CPU ring map
1617  * @soc: Datapath soc handle
1618  *
1619  * This API resets the CPU-to-Tx-ring map based on the NSS offload config
1620  */
1621 
1622 static void dp_soc_reset_cpu_ring_map(struct dp_soc *soc)
1623 {
1624 	uint8_t i;
1625 	int nss_config = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
1626 
1627 	for (i = 0; i < WLAN_CFG_INT_NUM_CONTEXTS; i++) {
1628 		if (nss_config == 1) {
1629 			/*
1630 			 * Setting Tx ring map for one nss offloaded radio
1631 			 */
1632 			soc->tx_ring_map[i] = dp_cpu_ring_map[DP_NSS_FIRST_RADIO_OFFLOADED_MAP][i];
1633 		} else if (nss_config == 2) {
1634 			/*
1635 			 * Setting Tx ring for two nss offloaded radios
1636 			 */
1637 			soc->tx_ring_map[i] = dp_cpu_ring_map[DP_NSS_SECOND_RADIO_OFFLOADED_MAP][i];
1638 		} else {
1639 			/*
1640 			 * Setting Tx ring map for all nss offloaded radios
1641 			 */
1642 			soc->tx_ring_map[i] = dp_cpu_ring_map[DP_NSS_ALL_RADIO_OFFLOADED_MAP][i];
1643 		}
1644 	}
1645 }
1646 
1647 /*
1648  * dp_soc_ring_if_nss_offloaded() - find if ring is offloaded to NSS
1649  * @dp_soc - DP soc handle
1650  * @ring_type - ring type
1651  * @ring_num - ring_num
1652  *
1653  * Return: non-zero if the ring is handled by NSS offload, 0 otherwise
1654  */
1655 static uint8_t dp_soc_ring_if_nss_offloaded(struct dp_soc *soc, enum hal_ring_type ring_type, int ring_num)
1656 {
1657 	uint8_t nss_config = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
1658 	uint8_t status = 0;
1659 
1660 	switch (ring_type) {
1661 	case WBM2SW_RELEASE:
1662 	case REO_DST:
1663 	case RXDMA_BUF:
1664 		status = ((nss_config) & (1 << ring_num));
1665 		break;
1666 	default:
1667 		break;
1668 	}
1669 
1670 	return status;
1671 }
1672 
1673 /*
1674  * dp_soc_reset_intr_mask() - reset interrupt mask
1675  * @dp_soc - DP Soc handle
1676  *
1677  * Return: void
1678  */
1679 static void dp_soc_reset_intr_mask(struct dp_soc *soc)
1680 {
1681 	uint8_t j;
1682 	int *grp_mask = NULL;
1683 	int group_number, mask, num_ring;
1684 
1685 	/* number of tx ring */
1686 	num_ring = wlan_cfg_num_tcl_data_rings(soc->wlan_cfg_ctx);
1687 
1688 	/*
1689 	 * group mask for tx completion  ring.
1690 	 */
1691 	grp_mask =  &soc->wlan_cfg_ctx->int_tx_ring_mask[0];
1692 
1693 	/* loop and reset the mask for only offloaded ring */
1694 	for (j = 0; j < num_ring; j++) {
1695 		if (!dp_soc_ring_if_nss_offloaded(soc, WBM2SW_RELEASE, j)) {
1696 			continue;
1697 		}
1698 
1699 		/*
1700 		 * Group number corresponding to tx offloaded ring.
1701 		 */
1702 		group_number = dp_srng_find_ring_in_mask(j, grp_mask);
1703 		if (group_number < 0) {
1704 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
1705 					FL("ring not part of any group; ring_type: %d,ring_num %d"),
1706 					WBM2SW_RELEASE, j);
1707 			return;
1708 		}
1709 
1710 		/* reset the tx mask for offloaded ring */
1711 		mask = wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, group_number);
1712 		mask &= (~(1 << j));
1713 
1714 		/*
1715 		 * reset the interrupt mask for offloaded ring.
1716 		 */
1717 		wlan_cfg_set_tx_ring_mask(soc->wlan_cfg_ctx, group_number, mask);
1718 	}
1719 
1720 	/* number of rx rings */
1721 	num_ring = wlan_cfg_num_reo_dest_rings(soc->wlan_cfg_ctx);
1722 
1723 	/*
1724 	 * group mask for reo destination ring.
1725 	 */
1726 	grp_mask = &soc->wlan_cfg_ctx->int_rx_ring_mask[0];
1727 
1728 	/* loop and reset the mask for only offloaded ring */
1729 	for (j = 0; j < num_ring; j++) {
1730 		if (!dp_soc_ring_if_nss_offloaded(soc, REO_DST, j)) {
1731 			continue;
1732 		}
1733 
1734 		/*
1735 		 * Group number corresponding to rx offloaded ring.
1736 		 */
1737 		group_number = dp_srng_find_ring_in_mask(j, grp_mask);
1738 		if (group_number < 0) {
1739 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
1740 					FL("ring not part of any group; ring_type: %d,ring_num %d"),
1741 					REO_DST, j);
1742 			return;
1743 		}
1744 
1745 		/* reset the rx mask for offloaded ring */
1746 		mask =  wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, group_number);
1747 		mask &= (~(1 << j));
1748 
1749 		/*
1750 		 * set the interrupt mask to zero for rx offloaded radio.
1751 		 */
1752 		wlan_cfg_set_rx_ring_mask(soc->wlan_cfg_ctx, group_number, mask);
1753 	}
1754 
1755 	/*
1756 	 * group mask for Rx buffer refill ring
1757 	 */
1758 	grp_mask = &soc->wlan_cfg_ctx->int_host2rxdma_ring_mask[0];
1759 
1760 	/* loop and reset the mask for only offloaded ring */
1761 	for (j = 0; j < MAX_PDEV_CNT; j++) {
1762 		if (!dp_soc_ring_if_nss_offloaded(soc, RXDMA_BUF, j)) {
1763 			continue;
1764 		}
1765 
1766 		/*
1767 		 * Group number corresponding to rx offloaded ring.
1768 		 */
1769 		group_number = dp_srng_find_ring_in_mask(j, grp_mask);
1770 		if (group_number < 0) {
1771 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
1772 					FL("ring not part of any group; ring_type: %d,ring_num %d"),
1773 					RXDMA_BUF, j);
1774 			return;
1775 		}
1776 
1777 		/* reset the host2rxdma mask for offloaded ring */
1778 		mask =  wlan_cfg_get_host2rxdma_ring_mask(soc->wlan_cfg_ctx,
1779 				group_number);
1780 		mask &= (~(1 << j));
1781 
1782 		/*
1783 		 * set the interrupt mask to zero for rx offloaded radio.
1784 		 */
1785 		wlan_cfg_set_host2rxdma_ring_mask(soc->wlan_cfg_ctx,
1786 			group_number, mask);
1787 	}
1788 }
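
/*
 * Sketch of the mask arithmetic used in all three loops above (numbers are
 * assumptions for illustration): if the group owning ring 1 currently has
 * mask 0x7 (rings 0-2) and ring 1 is offloaded to NSS, then
 *
 *	mask = 0x7;
 *	mask &= ~(1 << 1);	 mask is now 0x5
 *
 * so the host stops taking interrupts for the offloaded ring while the other
 * rings in that interrupt group are left untouched.
 */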
1789 
1790 #ifdef IPA_OFFLOAD
1791 /**
1792  * dp_reo_remap_config() - configure reo remap register value based
1793  *                         on the nss configuration.
1794  *		Based on the offload_radio value, the remap configuration
1795  *		below is applied:
1796  *		0 - both Radios handled by host (remap rings 1, 2, 3 & 4)
1797  *		1 - 1st Radio handled by NSS (remap rings 2, 3 & 4)
1798  *		2 - 2nd Radio handled by NSS (remap rings 1, 2 & 4)
1799  *		3 - both Radios handled by NSS (remap not required)
1800  *		4 - IPA OFFLOAD enabled (remap rings 1,2 & 3)
1801  *
1802  * @remap1: output parameter indicates reo remap 1 register value
1803  * @remap2: output parameter indicates reo remap 2 register value
1804  * Return: bool type, true if remap is configured else false.
1805  */
1806 static bool dp_reo_remap_config(struct dp_soc *soc,
1807 				uint32_t *remap1,
1808 				uint32_t *remap2)
1809 {
1810 
1811 	*remap1 = ((0x1 << 0) | (0x2 << 3) | (0x3 << 6) | (0x1 << 9) |
1812 		(0x2 << 12) | (0x3 << 15) | (0x1 << 18) | (0x2 << 21)) << 8;
1813 
1814 	*remap2 = ((0x3 << 0) | (0x1 << 3) | (0x2 << 6) | (0x3 << 9) |
1815 		(0x1 << 12) | (0x2 << 15) | (0x3 << 18) | (0x1 << 21)) << 8;
1816 
1817 	return true;
1818 }
1819 #else
1820 static bool dp_reo_remap_config(struct dp_soc *soc,
1821 				uint32_t *remap1,
1822 				uint32_t *remap2)
1823 {
1824 	uint8_t offload_radio = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
1825 
1826 	switch (offload_radio) {
1827 	case 0:
1828 		*remap1 = ((0x1 << 0) | (0x2 << 3) | (0x3 << 6) |
1829 			(0x4 << 9) | (0x1 << 12) | (0x2 << 15) |
1830 			(0x3 << 18) | (0x4 << 21)) << 8;
1831 
1832 		*remap2 = ((0x1 << 0) | (0x2 << 3) | (0x3 << 6) |
1833 			(0x4 << 9) | (0x1 << 12) | (0x2 << 15) |
1834 			(0x3 << 18) | (0x4 << 21)) << 8;
1835 		break;
1836 
1837 	case 1:
1838 		*remap1 = ((0x2 << 0) | (0x3 << 3) | (0x4 << 6) |
1839 			(0x2 << 9) | (0x3 << 12) | (0x4 << 15) |
1840 			(0x2 << 18) | (0x3 << 21)) << 8;
1841 
1842 		*remap2 = ((0x4 << 0) | (0x2 << 3) | (0x3 << 6) |
1843 			(0x4 << 9) | (0x2 << 12) | (0x3 << 15) |
1844 			(0x4 << 18) | (0x2 << 21)) << 8;
1845 		break;
1846 
1847 	case 2:
1848 		*remap1 = ((0x1 << 0) | (0x3 << 3) | (0x4 << 6) |
1849 			(0x1 << 9) | (0x3 << 12) | (0x4 << 15) |
1850 			(0x1 << 18) | (0x3 << 21)) << 8;
1851 
1852 		*remap2 = ((0x4 << 0) | (0x1 << 3) | (0x3 << 6) |
1853 			(0x4 << 9) | (0x1 << 12) | (0x3 << 15) |
1854 			(0x4 << 18) | (0x1 << 21)) << 8;
1855 		break;
1856 
1857 	case 3:
1858 		/* return false if both radios are offloaded to NSS */
1859 		return false;
1860 	}
1861 	return true;
1862 }
1863 #endif
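
/*
 * Illustrative decode of the remap values above (derived from the shift
 * pattern in this function, not from an authoritative register description):
 * each remap word carries eight 3-bit REO destination-ring fields, and the
 * packed pattern is shifted left by 8 before being handed to hal_reo_setup().
 * For offload_radio == 0, remap1 packs the sequence 1,2,3,4,1,2,3,4 so hash
 * buckets are spread round-robin over the four REO2SW rings:
 *
 *	(0x1 << 0) | (0x2 << 3) | (0x3 << 6) | (0x4 << 9) | ... , then << 8
 *
 * When a radio is offloaded to NSS its ring is dropped from the sequence
 * (case 1 uses only 2,3,4; case 2 uses 1,3,4), and the IPA build uses only
 * rings 1,2,3, presumably leaving ring 4 to the IPA path.
 */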
1864 
1865 /*
1866  * dp_reo_frag_dst_set() - configure reo register to set the
1867  *                        fragment destination ring
1868  * @soc : Datapath soc
1869  * @frag_dst_ring : output parameter to set fragment destination ring
1870  *
1871  * Based on the offload_radio value, the fragment destination ring is selected:
1872  * 0 - TCL
1873  * 1 - SW1
1874  * 2 - SW2
1875  * 3 - SW3
1876  * 4 - SW4
1877  * 5 - Release
1878  * 6 - FW
1879  * 7 - alternate select
1880  *
1881  * return: void
1882  */
1883 static void dp_reo_frag_dst_set(struct dp_soc *soc, uint8_t *frag_dst_ring)
1884 {
1885 	uint8_t offload_radio = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
1886 
1887 	switch (offload_radio) {
1888 	case 0:
1889 		*frag_dst_ring = HAL_SRNG_REO_EXCEPTION;
1890 		break;
1891 	case 3:
1892 		*frag_dst_ring = HAL_SRNG_REO_ALTERNATE_SELECT;
1893 		break;
1894 	default:
1895 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1896 				FL("dp_reo_frag_dst_set invalid offload radio config"));
1897 		break;
1898 	}
1899 }
1900 
1901 /*
1902  * dp_soc_cmn_setup() - Common SoC level initializion
1903  * @soc:		Datapath SOC handle
1904  *
1905  * This is an internal function used to setup common SOC data structures,
1906  * to be called from PDEV attach after receiving HW mode capabilities from FW
1907  */
1908 static int dp_soc_cmn_setup(struct dp_soc *soc)
1909 {
1910 	int i;
1911 	struct hal_reo_params reo_params;
1912 	int tx_ring_size;
1913 	int tx_comp_ring_size;
1914 
1915 	if (qdf_atomic_read(&soc->cmn_init_done))
1916 		return 0;
1917 
1918 	if (dp_peer_find_attach(soc))
1919 		goto fail0;
1920 
1921 	if (dp_hw_link_desc_pool_setup(soc))
1922 		goto fail1;
1923 
1924 	/* Setup SRNG rings */
1925 	/* Common rings */
1926 	if (dp_srng_setup(soc, &soc->wbm_desc_rel_ring, SW2WBM_RELEASE, 0, 0,
1927 		WBM_RELEASE_RING_SIZE)) {
1928 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1929 			FL("dp_srng_setup failed for wbm_desc_rel_ring"));
1930 		goto fail1;
1931 	}
1932 
1933 
1934 	soc->num_tcl_data_rings = 0;
1935 	/* Tx data rings */
1936 	if (!wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
1937 		soc->num_tcl_data_rings =
1938 			wlan_cfg_num_tcl_data_rings(soc->wlan_cfg_ctx);
1939 		tx_comp_ring_size =
1940 			wlan_cfg_tx_comp_ring_size(soc->wlan_cfg_ctx);
1941 		tx_ring_size =
1942 			wlan_cfg_tx_ring_size(soc->wlan_cfg_ctx);
1943 		for (i = 0; i < soc->num_tcl_data_rings; i++) {
1944 			if (dp_srng_setup(soc, &soc->tcl_data_ring[i],
1945 				TCL_DATA, i, 0, tx_ring_size)) {
1946 				QDF_TRACE(QDF_MODULE_ID_DP,
1947 					QDF_TRACE_LEVEL_ERROR,
1948 					FL("dp_srng_setup failed for tcl_data_ring[%d]"), i);
1949 				goto fail1;
1950 			}
1951 			/*
1952 			 * TBD: Set IPA WBM ring size with ini IPA UC tx buffer
1953 			 * count
1954 			 */
1955 			if (dp_srng_setup(soc, &soc->tx_comp_ring[i],
1956 				WBM2SW_RELEASE, i, 0, tx_comp_ring_size)) {
1957 				QDF_TRACE(QDF_MODULE_ID_DP,
1958 					QDF_TRACE_LEVEL_ERROR,
1959 					FL("dp_srng_setup failed for tx_comp_ring[%d]"), i);
1960 				goto fail1;
1961 			}
1962 		}
1963 	} else {
1964 		/* This will be incremented during per pdev ring setup */
1965 		soc->num_tcl_data_rings = 0;
1966 	}
1967 
1968 	if (dp_tx_soc_attach(soc)) {
1969 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1970 				FL("dp_tx_soc_attach failed"));
1971 		goto fail1;
1972 	}
1973 
1974 	/* TCL command and status rings */
1975 	if (dp_srng_setup(soc, &soc->tcl_cmd_ring, TCL_CMD, 0, 0,
1976 		TCL_CMD_RING_SIZE)) {
1977 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1978 			FL("dp_srng_setup failed for tcl_cmd_ring"));
1979 		goto fail1;
1980 	}
1981 
1982 	if (dp_srng_setup(soc, &soc->tcl_status_ring, TCL_STATUS, 0, 0,
1983 		TCL_STATUS_RING_SIZE)) {
1984 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1985 			FL("dp_srng_setup failed for tcl_status_ring"));
1986 		goto fail1;
1987 	}
1988 
1989 
1990 	/* TBD: call dp_tx_init to setup Tx SW descriptors and MSDU extension
1991 	 * descriptors
1992 	 */
1993 
1994 	/* Rx data rings */
1995 	if (!wlan_cfg_per_pdev_rx_ring(soc->wlan_cfg_ctx)) {
1996 		soc->num_reo_dest_rings =
1997 			wlan_cfg_num_reo_dest_rings(soc->wlan_cfg_ctx);
1998 		QDF_TRACE(QDF_MODULE_ID_DP,
1999 			QDF_TRACE_LEVEL_ERROR,
2000 			FL("num_reo_dest_rings %d\n"), soc->num_reo_dest_rings);
2001 		for (i = 0; i < soc->num_reo_dest_rings; i++) {
2002 			if (dp_srng_setup(soc, &soc->reo_dest_ring[i], REO_DST,
2003 				i, 0, REO_DST_RING_SIZE)) {
2004 				QDF_TRACE(QDF_MODULE_ID_DP,
2005 					QDF_TRACE_LEVEL_ERROR,
2006 					FL("dp_srng_setup failed for reo_dest_ring[%d]"), i);
2007 				goto fail1;
2008 			}
2009 		}
2010 	} else {
2011 		/* This will be incremented during per pdev ring setup */
2012 		soc->num_reo_dest_rings = 0;
2013 	}
2014 
2015 	/* LMAC RxDMA to SW Rings configuration */
2016 	if (!wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx)) {
2017 		/* Only valid for MCL */
2018 		struct dp_pdev *pdev = soc->pdev_list[0];
2019 
2020 		for (i = 0; i < MAX_RX_MAC_RINGS; i++) {
2021 			if (dp_srng_setup(soc, &pdev->rxdma_err_dst_ring[i],
2022 				RXDMA_DST, 0, i, RXDMA_ERR_DST_RING_SIZE)) {
2023 				QDF_TRACE(QDF_MODULE_ID_DP,
2024 					QDF_TRACE_LEVEL_ERROR,
2025 					FL("dp_srng_setup failed for rxdma_err_dst_ring"));
2026 				goto fail1;
2027 			}
2028 		}
2029 	}
2030 	/* TBD: call dp_rx_init to setup Rx SW descriptors */
2031 
2032 	/* REO reinjection ring */
2033 	if (dp_srng_setup(soc, &soc->reo_reinject_ring, REO_REINJECT, 0, 0,
2034 		REO_REINJECT_RING_SIZE)) {
2035 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2036 			FL("dp_srng_setup failed for reo_reinject_ring"));
2037 		goto fail1;
2038 	}
2039 
2040 
2041 	/* Rx release ring */
2042 	if (dp_srng_setup(soc, &soc->rx_rel_ring, WBM2SW_RELEASE, 3, 0,
2043 		RX_RELEASE_RING_SIZE)) {
2044 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2045 			FL("dp_srng_setup failed for rx_rel_ring"));
2046 		goto fail1;
2047 	}
2048 
2049 
2050 	/* Rx exception ring */
2051 	if (dp_srng_setup(soc, &soc->reo_exception_ring, REO_EXCEPTION, 0,
2052 		MAX_REO_DEST_RINGS, REO_EXCEPTION_RING_SIZE)) {
2053 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2054 			FL("dp_srng_setup failed for reo_exception_ring"));
2055 		goto fail1;
2056 	}
2057 
2058 
2059 	/* REO command and status rings */
2060 	if (dp_srng_setup(soc, &soc->reo_cmd_ring, REO_CMD, 0, 0,
2061 		REO_CMD_RING_SIZE)) {
2062 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2063 			FL("dp_srng_setup failed for reo_cmd_ring"));
2064 		goto fail1;
2065 	}
2066 
2067 	hal_reo_init_cmd_ring(soc->hal_soc, soc->reo_cmd_ring.hal_srng);
2068 	TAILQ_INIT(&soc->rx.reo_cmd_list);
2069 	qdf_spinlock_create(&soc->rx.reo_cmd_lock);
2070 
2071 	if (dp_srng_setup(soc, &soc->reo_status_ring, REO_STATUS, 0, 0,
2072 		REO_STATUS_RING_SIZE)) {
2073 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2074 			FL("dp_srng_setup failed for reo_status_ring"));
2075 		goto fail1;
2076 	}
2077 
2078 	qdf_spinlock_create(&soc->ast_lock);
2079 	dp_soc_wds_attach(soc);
2080 
2081 	/* Reset the cpu ring map if radio is NSS offloaded */
2082 	if (wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx)) {
2083 		dp_soc_reset_cpu_ring_map(soc);
2084 		dp_soc_reset_intr_mask(soc);
2085 	}
2086 
2087 	/* Setup HW REO */
2088 	qdf_mem_zero(&reo_params, sizeof(reo_params));
2089 
2090 	if (wlan_cfg_is_rx_hash_enabled(soc->wlan_cfg_ctx)) {
2091 
2092 		/*
2093 		 * Reo ring remap is not required if both radios
2094 		 * are offloaded to NSS
2095 		 */
2096 		if (!dp_reo_remap_config(soc,
2097 					&reo_params.remap1,
2098 					&reo_params.remap2))
2099 			goto out;
2100 
2101 		reo_params.rx_hash_enabled = true;
2102 	}
2103 
2104 	/* setup the global rx defrag waitlist */
2105 	TAILQ_INIT(&soc->rx.defrag.waitlist);
2106 	soc->rx.defrag.timeout_ms =
2107 		wlan_cfg_get_rx_defrag_min_timeout(soc->wlan_cfg_ctx);
2108 	soc->rx.flags.defrag_timeout_check =
2109 		wlan_cfg_get_defrag_timeout_check(soc->wlan_cfg_ctx);
2110 
2111 out:
2112 	/*
2113 	 * set the fragment destination ring
2114 	 */
2115 	dp_reo_frag_dst_set(soc, &reo_params.frag_dst_ring);
2116 
2117 	hal_reo_setup(soc->hal_soc, &reo_params);
2118 
2119 	qdf_atomic_set(&soc->cmn_init_done, 1);
2120 	qdf_nbuf_queue_init(&soc->htt_stats.msg);
2121 	return 0;
2122 fail1:
2123 	/*
2124 	 * Cleanup will be done as part of soc_detach, which will
2125 	 * be called on pdev attach failure
2126 	 */
2127 fail0:
2128 	return QDF_STATUS_E_FAILURE;
2129 }
2130 
2131 static void dp_pdev_detach_wifi3(struct cdp_pdev *txrx_pdev, int force);
2132 
2133 static void dp_lro_hash_setup(struct dp_soc *soc)
2134 {
2135 	struct cdp_lro_hash_config lro_hash;
2136 
2137 	if (!wlan_cfg_is_lro_enabled(soc->wlan_cfg_ctx) &&
2138 		!wlan_cfg_is_rx_hash_enabled(soc->wlan_cfg_ctx)) {
2139 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2140 			 FL("LRO disabled RX hash disabled"));
2141 		return;
2142 	}
2143 
2144 	qdf_mem_zero(&lro_hash, sizeof(lro_hash));
2145 
2146 	if (wlan_cfg_is_lro_enabled(soc->wlan_cfg_ctx)) {
2147 		lro_hash.lro_enable = 1;
2148 		lro_hash.tcp_flag = QDF_TCPHDR_ACK;
2149 		lro_hash.tcp_flag_mask = QDF_TCPHDR_FIN | QDF_TCPHDR_SYN |
2150 			 QDF_TCPHDR_RST | QDF_TCPHDR_ACK | QDF_TCPHDR_URG |
2151 			 QDF_TCPHDR_ECE | QDF_TCPHDR_CWR;
2152 	}
2153 
2154 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW, FL("enabled"));
2155 	qdf_get_random_bytes(lro_hash.toeplitz_hash_ipv4,
2156 		 (sizeof(lro_hash.toeplitz_hash_ipv4[0]) *
2157 		 LRO_IPV4_SEED_ARR_SZ));
2158 	qdf_get_random_bytes(lro_hash.toeplitz_hash_ipv6,
2159 		 (sizeof(lro_hash.toeplitz_hash_ipv6[0]) *
2160 		 LRO_IPV6_SEED_ARR_SZ));
2161 
2162 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW,
2163 		 "lro_hash: lro_enable: 0x%x tcp_flag 0x%x tcp_flag_mask 0x%x",
2164 		 lro_hash.lro_enable, lro_hash.tcp_flag,
2165 		 lro_hash.tcp_flag_mask);
2166 
2167 	qdf_trace_hex_dump(QDF_MODULE_ID_DP,
2168 		 QDF_TRACE_LEVEL_ERROR,
2169 		 (void *)lro_hash.toeplitz_hash_ipv4,
2170 		 (sizeof(lro_hash.toeplitz_hash_ipv4[0]) *
2171 		 LRO_IPV4_SEED_ARR_SZ));
2172 
2173 	qdf_trace_hex_dump(QDF_MODULE_ID_DP,
2174 		 QDF_TRACE_LEVEL_ERROR,
2175 		 (void *)lro_hash.toeplitz_hash_ipv6,
2176 		 (sizeof(lro_hash.toeplitz_hash_ipv6[0]) *
2177 		 LRO_IPV6_SEED_ARR_SZ));
2178 
2179 	qdf_assert(soc->cdp_soc.ol_ops->lro_hash_config);
2180 
2181 	if (soc->cdp_soc.ol_ops->lro_hash_config)
2182 		(void)soc->cdp_soc.ol_ops->lro_hash_config
2183 			(soc->ctrl_psoc, &lro_hash);
2184 }
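
/*
 * Editorial sketch of what the registered control-path callback is expected
 * to do with this structure (an assumption based on the callback name, not a
 * documented contract): ol_ops->lro_hash_config() pushes the random Toeplitz
 * seeds and the TCP flag mask to the target, so the FW computes the same flow
 * hash the host relies on for LRO and RX-hash based REO steering. If the
 * callback is not registered, the qdf_assert() above trips on debug builds
 * and the seeds remain host-only.
 */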
2185 
2186 /*
2187 * dp_rxdma_ring_setup() - configure the RX DMA rings
2188 * @soc: data path SoC handle
2189 * @pdev: Physical device handle
2190 *
2191 * Return: 0 - success, > 0 - failure
2192 */
2193 #ifdef QCA_HOST2FW_RXBUF_RING
2194 static int dp_rxdma_ring_setup(struct dp_soc *soc,
2195 	 struct dp_pdev *pdev)
2196 {
2197 	int max_mac_rings =
2198 		 wlan_cfg_get_num_mac_rings
2199 			(pdev->wlan_cfg_ctx);
2200 	int i;
2201 
2202 	for (i = 0; i < max_mac_rings; i++) {
2203 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
2204 			 "%s: pdev_id %d mac_id %d\n",
2205 			 __func__, pdev->pdev_id, i);
2206 		if (dp_srng_setup(soc, &pdev->rx_mac_buf_ring[i],
2207 			 RXDMA_BUF, 1, i, RXDMA_BUF_RING_SIZE)) {
2208 			QDF_TRACE(QDF_MODULE_ID_DP,
2209 				 QDF_TRACE_LEVEL_ERROR,
2210 				 FL("failed rx mac ring setup"));
2211 			return QDF_STATUS_E_FAILURE;
2212 		}
2213 	}
2214 	return QDF_STATUS_SUCCESS;
2215 }
2216 #else
2217 static int dp_rxdma_ring_setup(struct dp_soc *soc,
2218 	 struct dp_pdev *pdev)
2219 {
2220 	return QDF_STATUS_SUCCESS;
2221 }
2222 #endif
2223 
2224 /**
2225  * dp_dscp_tid_map_setup(): Initialize the dscp-tid maps
2226  * @pdev - DP_PDEV handle
2227  *
2228  * Return: void
2229  */
2230 static inline void
2231 dp_dscp_tid_map_setup(struct dp_pdev *pdev)
2232 {
2233 	uint8_t map_id;
2234 	for (map_id = 0; map_id < DP_MAX_TID_MAPS; map_id++) {
2235 		qdf_mem_copy(pdev->dscp_tid_map[map_id], default_dscp_tid_map,
2236 				sizeof(default_dscp_tid_map));
2237 	}
2238 	for (map_id = 0; map_id < HAL_MAX_HW_DSCP_TID_MAPS; map_id++) {
2239 		hal_tx_set_dscp_tid_map(pdev->soc->hal_soc,
2240 				pdev->dscp_tid_map[map_id],
2241 				map_id);
2242 	}
2243 }
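
/*
 * Usage note (hypothetical example, not part of the attach path): all
 * DP_MAX_TID_MAPS software tables start out as copies of default_dscp_tid_map
 * and the first HAL_MAX_HW_DSCP_TID_MAPS of them are programmed into hardware
 * through hal_tx_set_dscp_tid_map(). A caller that later wants to steer one
 * DSCP codepoint to a different TID would update both copies, for example:
 *
 *	pdev->dscp_tid_map[map_id][dscp] = tid;
 *	hal_tx_update_dscp_tid(pdev->soc->hal_soc, tid, map_id, dscp);
 *
 * hal_tx_update_dscp_tid() is assumed to be the HAL helper for single-entry
 * updates; regardless, the per-map_id tables set up here are the ones
 * consulted per packet at Tx classification time.
 */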
2244 
2245 #ifdef QCA_SUPPORT_SON
2246 /**
2247  * dp_mark_peer_inact(): Update peer inactivity status
2248  * @peer_handle - datapath peer handle
2249  * @inactive - inactivity status to be set for the peer
2250  * Return: void
2251  */
2252 void dp_mark_peer_inact(void *peer_handle, bool inactive)
2253 {
2254 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
2255 	struct dp_pdev *pdev;
2256 	struct dp_soc *soc;
2257 	bool inactive_old;
2258 
2259 	if (!peer)
2260 		return;
2261 
2262 	pdev = peer->vdev->pdev;
2263 	soc = pdev->soc;
2264 
2265 	inactive_old = peer->peer_bs_inact_flag == 1;
2266 	if (!inactive)
2267 		peer->peer_bs_inact = soc->pdev_bs_inact_reload;
2268 	peer->peer_bs_inact_flag = inactive ? 1 : 0;
2269 
2270 	if (inactive_old != inactive) {
2271 		/**
2272 		 * Note: a node lookup can happen in RX datapath context
2273 		 * when a node changes from inactive to active (at most once
2274 		 * per inactivity timeout threshold)
2275 		 */
2276 		if (soc->cdp_soc.ol_ops->record_act_change) {
2277 			soc->cdp_soc.ol_ops->record_act_change(pdev->osif_pdev,
2278 					peer->mac_addr.raw, !inactive);
2279 		}
2280 	}
2281 }
2282 
2283 /**
2284  * dp_txrx_peer_find_inact_timeout_handler(): Inactivity timeout function
2285  *
2286  * Periodically checks the inactivity status
2287  */
2288 static os_timer_func(dp_txrx_peer_find_inact_timeout_handler)
2289 {
2290 	struct dp_pdev *pdev;
2291 	struct dp_vdev *vdev;
2292 	struct dp_peer *peer;
2293 	struct dp_soc *soc;
2294 	int i;
2295 
2296 	OS_GET_TIMER_ARG(soc, struct dp_soc *);
2297 
2298 	qdf_spin_lock(&soc->peer_ref_mutex);
2299 
2300 	for (i = 0; i < soc->pdev_count; i++) {
2301 	pdev = soc->pdev_list[i];
2302 	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
2303 		if (vdev->opmode != wlan_op_mode_ap)
2304 			continue;
2305 
2306 		TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
2307 			if (!peer->authorize) {
2308 				/**
2309 				 * Inactivity check only interested in
2310 				 * connected node
2311 				 */
2312 				continue;
2313 			}
2314 			if (peer->peer_bs_inact > soc->pdev_bs_inact_reload) {
2315 				/**
2316 				 * This check ensures we do not wait extra long
2317 				 * due to the potential race condition
2318 				 */
2319 				peer->peer_bs_inact = soc->pdev_bs_inact_reload;
2320 			}
2321 			if (peer->peer_bs_inact > 0) {
2322 				/* Do not let it wrap around */
2323 				peer->peer_bs_inact--;
2324 			}
2325 			if (peer->peer_bs_inact == 0)
2326 				dp_mark_peer_inact(peer, true);
2327 		}
2328 	}
2329 	}
2330 
2331 	qdf_spin_unlock(&soc->peer_ref_mutex);
2332 	qdf_timer_mod(&soc->pdev_bs_inact_timer,
2333 		      soc->pdev_bs_inact_interval * 1000);
2334 }
2335 
2336 
2337 /**
2338  * dp_free_inact_timer(): free inact timer
2339  * @soc - DP SOC handle
2340  *
2341  * Return: void
2342  */
2343 void dp_free_inact_timer(struct dp_soc *soc)
2344 {
2345 	qdf_timer_free(&soc->pdev_bs_inact_timer);
2346 }
2347 #else
2348 
2349 void dp_mark_peer_inact(void *peer, bool inactive)
2350 {
2351 	return;
2352 }
2353 
2354 void dp_free_inact_timer(struct dp_soc *soc)
2355 {
2356 	return;
2357 }
2358 
2359 #endif
2360 
2361 #ifdef IPA_OFFLOAD
2362 /**
2363  * dp_setup_ipa_rx_refill_buf_ring - Setup second Rx refill buffer ring
2364  * @soc: data path instance
2365  * @pdev: core txrx pdev context
2366  *
2367  * Return: QDF_STATUS_SUCCESS: success
2368  *         QDF_STATUS_E_FAILURE: Error return
2369  */
2370 static int dp_setup_ipa_rx_refill_buf_ring(struct dp_soc *soc,
2371 					   struct dp_pdev *pdev)
2372 {
2373 	/* Setup second Rx refill buffer ring */
2374 	if (dp_srng_setup(soc, &pdev->rx_refill_buf_ring2, RXDMA_BUF,
2375 			  IPA_RX_REFILL_BUF_RING_IDX,
2376 			  pdev->pdev_id, RXDMA_REFILL_RING_SIZE)) {
2377 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2378 			FL("dp_srng_setup failed second rx refill ring"));
2379 		return QDF_STATUS_E_FAILURE;
2380 	}
2381 	return QDF_STATUS_SUCCESS;
2382 }
2383 
2384 /**
2385  * dp_cleanup_ipa_rx_refill_buf_ring - Cleanup second Rx refill buffer ring
2386  * @soc: data path instance
2387  * @pdev: core txrx pdev context
2388  *
2389  * Return: void
2390  */
2391 static void dp_cleanup_ipa_rx_refill_buf_ring(struct dp_soc *soc,
2392 					      struct dp_pdev *pdev)
2393 {
2394 	dp_srng_cleanup(soc, &pdev->rx_refill_buf_ring2, RXDMA_BUF,
2395 			IPA_RX_REFILL_BUF_RING_IDX);
2396 }
2397 
2398 #else
2399 
2400 static int dp_setup_ipa_rx_refill_buf_ring(struct dp_soc *soc,
2401 					   struct dp_pdev *pdev)
2402 {
2403 	return QDF_STATUS_SUCCESS;
2404 }
2405 
2406 static void dp_cleanup_ipa_rx_refill_buf_ring(struct dp_soc *soc,
2407 					      struct dp_pdev *pdev)
2408 {
2409 }
2410 
2411 #endif
2412 
2413 /*
2414 * dp_pdev_attach_wifi3() - attach txrx pdev
2415 * @txrx_soc: Datapath SOC handle
2416 * @ctrl_pdev: Opaque PDEV object
2417 * @htc_handle: HTC handle for host-target interface
2418 * @qdf_osdev: QDF OS device
2419 * @pdev_id: PDEV ID
2420 *
2421 * Return: DP PDEV handle on success, NULL on failure
2422 */
2423 static struct cdp_pdev *dp_pdev_attach_wifi3(struct cdp_soc_t *txrx_soc,
2424 	struct cdp_cfg *ctrl_pdev,
2425 	HTC_HANDLE htc_handle, qdf_device_t qdf_osdev, uint8_t pdev_id)
2426 {
2427 	int tx_ring_size;
2428 	int tx_comp_ring_size;
2429 
2430 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
2431 	struct dp_pdev *pdev = qdf_mem_malloc(sizeof(*pdev));
2432 
2433 	if (!pdev) {
2434 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2435 			FL("DP PDEV memory allocation failed"));
2436 		goto fail0;
2437 	}
2438 
2439 	pdev->wlan_cfg_ctx = wlan_cfg_pdev_attach();
2440 
2441 	if (!pdev->wlan_cfg_ctx) {
2442 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2443 			FL("pdev cfg_attach failed"));
2444 
2445 		qdf_mem_free(pdev);
2446 		goto fail0;
2447 	}
2448 
2449 	/*
2450 	 * set nss pdev config based on soc config
2451 	 */
2452 	wlan_cfg_set_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx,
2453 			(wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx) & (1 << pdev_id)));
2454 
2455 	pdev->soc = soc;
2456 	pdev->osif_pdev = ctrl_pdev;
2457 	pdev->pdev_id = pdev_id;
2458 	soc->pdev_list[pdev_id] = pdev;
2459 	soc->pdev_count++;
2460 
2461 	TAILQ_INIT(&pdev->vdev_list);
2462 	pdev->vdev_count = 0;
2463 
2464 	qdf_spinlock_create(&pdev->tx_mutex);
2465 	qdf_spinlock_create(&pdev->neighbour_peer_mutex);
2466 	TAILQ_INIT(&pdev->neighbour_peers_list);
2467 
2468 	if (dp_soc_cmn_setup(soc)) {
2469 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2470 			FL("dp_soc_cmn_setup failed"));
2471 		goto fail1;
2472 	}
2473 
2474 	/* Setup per PDEV TCL rings if configured */
2475 	if (wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
2476 		tx_ring_size =
2477 			wlan_cfg_tx_ring_size(soc->wlan_cfg_ctx);
2478 		tx_comp_ring_size =
2479 			wlan_cfg_tx_comp_ring_size(soc->wlan_cfg_ctx);
2480 
2481 		if (dp_srng_setup(soc, &soc->tcl_data_ring[pdev_id], TCL_DATA,
2482 			pdev_id, pdev_id, tx_ring_size)) {
2483 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2484 				FL("dp_srng_setup failed for tcl_data_ring"));
2485 			goto fail1;
2486 		}
2487 		if (dp_srng_setup(soc, &soc->tx_comp_ring[pdev_id],
2488 			WBM2SW_RELEASE, pdev_id, pdev_id, tx_comp_ring_size)) {
2489 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2490 				FL("dp_srng_setup failed for tx_comp_ring"));
2491 			goto fail1;
2492 		}
2493 		soc->num_tcl_data_rings++;
2494 	}
2495 
2496 	/* Tx specific init */
2497 	if (dp_tx_pdev_attach(pdev)) {
2498 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2499 			FL("dp_tx_pdev_attach failed"));
2500 		goto fail1;
2501 	}
2502 
2503 	/* Setup per PDEV REO rings if configured */
2504 	if (wlan_cfg_per_pdev_rx_ring(soc->wlan_cfg_ctx)) {
2505 		if (dp_srng_setup(soc, &soc->reo_dest_ring[pdev_id], REO_DST,
2506 			pdev_id, pdev_id, REO_DST_RING_SIZE)) {
2507 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2508 				FL("dp_srng_setup failed for reo_dest_ring"));
2509 			goto fail1;
2510 		}
2511 		soc->num_reo_dest_rings++;
2512 
2513 	}
2514 	if (dp_srng_setup(soc, &pdev->rx_refill_buf_ring, RXDMA_BUF, 0, pdev_id,
2515 		RXDMA_REFILL_RING_SIZE)) {
2516 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2517 			 FL("dp_srng_setup failed rx refill ring"));
2518 		goto fail1;
2519 	}
2520 
2521 	if (dp_rxdma_ring_setup(soc, pdev)) {
2522 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2523 			 FL("RXDMA ring config failed"));
2524 		goto fail1;
2525 	}
2526 
2527 	if (dp_srng_setup(soc, &pdev->rxdma_mon_buf_ring, RXDMA_MONITOR_BUF, 0,
2528 		pdev_id, RXDMA_MONITOR_BUF_RING_SIZE)) {
2529 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2530 			FL("dp_srng_setup failed for rxdma_mon_buf_ring"));
2531 		goto fail1;
2532 	}
2533 
2534 	if (dp_srng_setup(soc, &pdev->rxdma_mon_dst_ring, RXDMA_MONITOR_DST, 0,
2535 		pdev_id, RXDMA_MONITOR_DST_RING_SIZE)) {
2536 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2537 			FL("dp_srng_setup failed for rxdma_mon_dst_ring"));
2538 		goto fail1;
2539 	}
2540 
2541 
2542 	if (dp_srng_setup(soc, &pdev->rxdma_mon_status_ring,
2543 		RXDMA_MONITOR_STATUS, 0, pdev_id,
2544 		RXDMA_MONITOR_STATUS_RING_SIZE)) {
2545 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2546 			FL("dp_srng_setup failed for rxdma_mon_status_ring"));
2547 		goto fail1;
2548 	}
2549 
2550 	if (dp_srng_setup(soc, &pdev->rxdma_mon_desc_ring,
2551 		RXDMA_MONITOR_DESC, 0, pdev_id, RXDMA_MONITOR_DESC_RING_SIZE)) {
2552 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2553 			"dp_srng_setup failed for rxdma_mon_desc_ring\n");
2554 		goto fail1;
2555 	}
2556 
2557 	if (wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx)) {
2558 		if (dp_srng_setup(soc, &pdev->rxdma_err_dst_ring[0], RXDMA_DST,
2559 				  0, pdev_id, RXDMA_ERR_DST_RING_SIZE)) {
2560 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2561 				FL("dp_srng_setup failed for rxdma_err_dst_ring"));
2562 			goto fail1;
2563 		}
2564 	}
2565 
2566 	if (dp_setup_ipa_rx_refill_buf_ring(soc, pdev))
2567 		goto fail1;
2568 
2569 	if (dp_ipa_ring_resource_setup(soc, pdev))
2570 		goto fail1;
2571 
2572 	if (dp_ipa_uc_attach(soc, pdev) != QDF_STATUS_SUCCESS) {
2573 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2574 			FL("dp_ipa_uc_attach failed"));
2575 		goto fail1;
2576 	}
2577 
2578 	/* Rx specific init */
2579 	if (dp_rx_pdev_attach(pdev)) {
2580 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2581 			FL("dp_rx_pdev_attach failed"));
2582 		goto fail0;
2583 	}
2584 	DP_STATS_INIT(pdev);
2585 
2586 	/* Monitor filter init */
2587 	pdev->mon_filter_mode = MON_FILTER_ALL;
2588 	pdev->fp_mgmt_filter = FILTER_MGMT_ALL;
2589 	pdev->fp_ctrl_filter = FILTER_CTRL_ALL;
2590 	pdev->fp_data_filter = FILTER_DATA_ALL;
2591 	pdev->mo_mgmt_filter = FILTER_MGMT_ALL;
2592 	pdev->mo_ctrl_filter = FILTER_CTRL_ALL;
2593 	pdev->mo_data_filter = FILTER_DATA_ALL;
2594 
2595 #ifndef CONFIG_WIN
2596 	/* MCL */
2597 	dp_local_peer_id_pool_init(pdev);
2598 #endif
2599 	dp_dscp_tid_map_setup(pdev);
2600 
2601 	/* Rx monitor mode specific init */
2602 	if (dp_rx_pdev_mon_attach(pdev)) {
2603 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
2604 				"dp_rx_pdev_mon_attach failed\n");
2605 		goto fail1;
2606 	}
2607 
2608 	if (dp_wdi_event_attach(pdev)) {
2609 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
2610 				"dp_wdi_event_attach failed\n");
2611 		goto fail1;
2612 	}
2613 
2614 	/* set the reo destination during initialization */
2615 	pdev->reo_dest = pdev->pdev_id + 1;
2616 
2617 	/*
2618 	 * initialize ppdu tlv list
2619 	 */
2620 	TAILQ_INIT(&pdev->ppdu_info_list);
2621 	pdev->tlv_count = 0;
2622 	pdev->list_depth = 0;
2623 
2624 	return (struct cdp_pdev *)pdev;
2625 
2626 fail1:
2627 	dp_pdev_detach_wifi3((struct cdp_pdev *)pdev, 0);
2628 
2629 fail0:
2630 	return NULL;
2631 }
2632 
2633 /*
2634 * dp_rxdma_ring_cleanup() - cleanup the RX DMA rings
2635 * @soc: data path SoC handle
2636 * @pdev: Physical device handle
2637 *
2638 * Return: void
2639 */
2640 #ifdef QCA_HOST2FW_RXBUF_RING
2641 static void dp_rxdma_ring_cleanup(struct dp_soc *soc,
2642 	 struct dp_pdev *pdev)
2643 {
2644 	int max_mac_rings =
2645 		 wlan_cfg_get_num_mac_rings(pdev->wlan_cfg_ctx);
2646 	int i;
2647 
2648 	max_mac_rings = max_mac_rings < MAX_RX_MAC_RINGS ?
2649 				max_mac_rings : MAX_RX_MAC_RINGS;
2650 	for (i = 0; i < max_mac_rings; i++)
2651 		dp_srng_cleanup(soc, &pdev->rx_mac_buf_ring[i],
2652 			 RXDMA_BUF, 1);
2653 
2654 	qdf_timer_free(&soc->mon_reap_timer);
2655 }
2656 #else
2657 static void dp_rxdma_ring_cleanup(struct dp_soc *soc,
2658 	 struct dp_pdev *pdev)
2659 {
2660 }
2661 #endif
2662 
2663 /*
2664  * dp_neighbour_peers_detach() - Detach neighbour peers(nac clients)
2665  * @pdev: device object
2666  *
2667  * Return: void
2668  */
2669 static void dp_neighbour_peers_detach(struct dp_pdev *pdev)
2670 {
2671 	struct dp_neighbour_peer *peer = NULL;
2672 	struct dp_neighbour_peer *temp_peer = NULL;
2673 
2674 	TAILQ_FOREACH_SAFE(peer, &pdev->neighbour_peers_list,
2675 			neighbour_peer_list_elem, temp_peer) {
2676 		/* delete this peer from the list */
2677 		TAILQ_REMOVE(&pdev->neighbour_peers_list,
2678 				peer, neighbour_peer_list_elem);
2679 		qdf_mem_free(peer);
2680 	}
2681 
2682 	qdf_spinlock_destroy(&pdev->neighbour_peer_mutex);
2683 }
2684 
2685 /*
2686 * dp_pdev_detach_wifi3() - detach txrx pdev
2687 * @txrx_pdev: Datapath PDEV handle
2688 * @force: Force detach
2689 *
2690 */
2691 static void dp_pdev_detach_wifi3(struct cdp_pdev *txrx_pdev, int force)
2692 {
2693 	struct dp_pdev *pdev = (struct dp_pdev *)txrx_pdev;
2694 	struct dp_soc *soc = pdev->soc;
2695 	qdf_nbuf_t curr_nbuf, next_nbuf;
2696 
2697 	dp_wdi_event_detach(pdev);
2698 
2699 	dp_tx_pdev_detach(pdev);
2700 
2701 	if (wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
2702 		dp_srng_cleanup(soc, &soc->tcl_data_ring[pdev->pdev_id],
2703 			TCL_DATA, pdev->pdev_id);
2704 		dp_srng_cleanup(soc, &soc->tx_comp_ring[pdev->pdev_id],
2705 			WBM2SW_RELEASE, pdev->pdev_id);
2706 	}
2707 
2708 	dp_pktlogmod_exit(pdev);
2709 
2710 	dp_rx_pdev_detach(pdev);
2711 
2712 	dp_rx_pdev_mon_detach(pdev);
2713 
2714 	dp_neighbour_peers_detach(pdev);
2715 	qdf_spinlock_destroy(&pdev->tx_mutex);
2716 
2717 	dp_ipa_uc_detach(soc, pdev);
2718 
2719 	dp_cleanup_ipa_rx_refill_buf_ring(soc, pdev);
2720 
2721 	/* Cleanup per PDEV REO rings if configured */
2722 	if (wlan_cfg_per_pdev_rx_ring(soc->wlan_cfg_ctx)) {
2723 		dp_srng_cleanup(soc, &soc->reo_dest_ring[pdev->pdev_id],
2724 			REO_DST, pdev->pdev_id);
2725 	}
2726 
2727 	dp_srng_cleanup(soc, &pdev->rx_refill_buf_ring, RXDMA_BUF, 0);
2728 
2729 	dp_rxdma_ring_cleanup(soc, pdev);
2730 
2731 	dp_srng_cleanup(soc, &pdev->rxdma_mon_buf_ring, RXDMA_MONITOR_BUF, 0);
2732 
2733 	dp_srng_cleanup(soc, &pdev->rxdma_mon_dst_ring, RXDMA_MONITOR_DST, 0);
2734 
2735 	dp_srng_cleanup(soc, &pdev->rxdma_mon_status_ring,
2736 		RXDMA_MONITOR_STATUS, 0);
2737 
2738 	dp_srng_cleanup(soc, &pdev->rxdma_mon_desc_ring,
2739 		RXDMA_MONITOR_DESC, 0);
2740 
2741 	if (wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx)) {
2742 		dp_srng_cleanup(soc, &pdev->rxdma_err_dst_ring[0], RXDMA_DST, 0);
2743 	} else {
2744 		int i;
2745 
2746 		for (i = 0; i < MAX_RX_MAC_RINGS; i++)
2747 			dp_srng_cleanup(soc, &pdev->rxdma_err_dst_ring[i],
2748 				RXDMA_DST, 0);
2749 	}
2750 
2751 	curr_nbuf = pdev->invalid_peer_head_msdu;
2752 	while (curr_nbuf) {
2753 		next_nbuf = qdf_nbuf_next(curr_nbuf);
2754 		qdf_nbuf_free(curr_nbuf);
2755 		curr_nbuf = next_nbuf;
2756 	}
2757 
2758 	soc->pdev_list[pdev->pdev_id] = NULL;
2759 	soc->pdev_count--;
2760 	wlan_cfg_pdev_detach(pdev->wlan_cfg_ctx);
2761 	qdf_mem_free(pdev->dp_txrx_handle);
2762 	qdf_mem_free(pdev);
2763 }
2764 
2765 /*
2766  * dp_reo_desc_freelist_destroy() - Flush REO descriptors from deferred freelist
2767  * @soc: DP SOC handle
2768  */
2769 static inline void dp_reo_desc_freelist_destroy(struct dp_soc *soc)
2770 {
2771 	struct reo_desc_list_node *desc;
2772 	struct dp_rx_tid *rx_tid;
2773 
2774 	qdf_spin_lock_bh(&soc->reo_desc_freelist_lock);
2775 	while (qdf_list_remove_front(&soc->reo_desc_freelist,
2776 		(qdf_list_node_t **)&desc) == QDF_STATUS_SUCCESS) {
2777 		rx_tid = &desc->rx_tid;
2778 		qdf_mem_unmap_nbytes_single(soc->osdev,
2779 			rx_tid->hw_qdesc_paddr,
2780 			QDF_DMA_BIDIRECTIONAL,
2781 			rx_tid->hw_qdesc_alloc_size);
2782 		qdf_mem_free(rx_tid->hw_qdesc_vaddr_unaligned);
2783 		qdf_mem_free(desc);
2784 	}
2785 	qdf_spin_unlock_bh(&soc->reo_desc_freelist_lock);
2786 	qdf_list_destroy(&soc->reo_desc_freelist);
2787 	qdf_spinlock_destroy(&soc->reo_desc_freelist_lock);
2788 }
2789 
2790 /*
2791  * dp_soc_detach_wifi3() - Detach txrx SOC
2792  * @txrx_soc: DP SOC handle, struct cdp_soc_t is first element of struct dp_soc.
2793  */
2794 static void dp_soc_detach_wifi3(void *txrx_soc)
2795 {
2796 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
2797 	int i;
2798 
2799 	qdf_atomic_set(&soc->cmn_init_done, 0);
2800 
2801 	qdf_flush_work(&soc->htt_stats.work);
2802 	qdf_disable_work(&soc->htt_stats.work);
2803 
2804 	/* Free pending htt stats messages */
2805 	qdf_nbuf_queue_free(&soc->htt_stats.msg);
2806 
2807 	dp_free_inact_timer(soc);
2808 
2809 	for (i = 0; i < MAX_PDEV_CNT; i++) {
2810 		if (soc->pdev_list[i])
2811 			dp_pdev_detach_wifi3(
2812 				(struct cdp_pdev *)soc->pdev_list[i], 1);
2813 	}
2814 
2815 	dp_peer_find_detach(soc);
2816 
2817 	/* TBD: Call Tx and Rx cleanup functions to free buffers and
2818 	 * SW descriptors
2819 	 */
2820 
2821 	/* Free the ring memories */
2822 	/* Common rings */
2823 	dp_srng_cleanup(soc, &soc->wbm_desc_rel_ring, SW2WBM_RELEASE, 0);
2824 
2825 	dp_tx_soc_detach(soc);
2826 	/* Tx data rings */
2827 	if (!wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
2828 		for (i = 0; i < soc->num_tcl_data_rings; i++) {
2829 			dp_srng_cleanup(soc, &soc->tcl_data_ring[i],
2830 				TCL_DATA, i);
2831 			dp_srng_cleanup(soc, &soc->tx_comp_ring[i],
2832 				WBM2SW_RELEASE, i);
2833 		}
2834 	}
2835 
2836 	/* TCL command and status rings */
2837 	dp_srng_cleanup(soc, &soc->tcl_cmd_ring, TCL_CMD, 0);
2838 	dp_srng_cleanup(soc, &soc->tcl_status_ring, TCL_STATUS, 0);
2839 
2840 	/* Rx data rings */
2841 	if (!wlan_cfg_per_pdev_rx_ring(soc->wlan_cfg_ctx)) {
2842 		soc->num_reo_dest_rings =
2843 			wlan_cfg_num_reo_dest_rings(soc->wlan_cfg_ctx);
2844 		for (i = 0; i < soc->num_reo_dest_rings; i++) {
2845 			/* TODO: Get number of rings and ring sizes
2846 			 * from wlan_cfg
2847 			 */
2848 			dp_srng_cleanup(soc, &soc->reo_dest_ring[i],
2849 				REO_DST, i);
2850 		}
2851 	}
2852 	/* REO reinjection ring */
2853 	dp_srng_cleanup(soc, &soc->reo_reinject_ring, REO_REINJECT, 0);
2854 
2855 	/* Rx release ring */
2856 	dp_srng_cleanup(soc, &soc->rx_rel_ring, WBM2SW_RELEASE, 0);
2857 
2858 	/* Rx exception ring */
2859 	/* TODO: Better to store ring_type and ring_num in
2860 	 * dp_srng during setup
2861 	 */
2862 	dp_srng_cleanup(soc, &soc->reo_exception_ring, REO_EXCEPTION, 0);
2863 
2864 	/* REO command and status rings */
2865 	dp_srng_cleanup(soc, &soc->reo_cmd_ring, REO_CMD, 0);
2866 	dp_srng_cleanup(soc, &soc->reo_status_ring, REO_STATUS, 0);
2867 	dp_hw_link_desc_pool_cleanup(soc);
2868 
2869 	qdf_spinlock_destroy(&soc->peer_ref_mutex);
2870 	qdf_spinlock_destroy(&soc->htt_stats.lock);
2871 
2872 	htt_soc_detach(soc->htt_handle);
2873 
2874 	dp_reo_cmdlist_destroy(soc);
2875 	qdf_spinlock_destroy(&soc->rx.reo_cmd_lock);
2876 	dp_reo_desc_freelist_destroy(soc);
2877 
2878 	wlan_cfg_soc_detach(soc->wlan_cfg_ctx);
2879 
2880 	dp_soc_wds_detach(soc);
2881 	qdf_spinlock_destroy(&soc->ast_lock);
2882 
2883 	qdf_mem_free(soc);
2884 }
2885 
2886 /*
2887  * dp_rxdma_ring_config() - configure the RX DMA rings
2888  *
2889  * This function is used to configure the MAC rings.
2890  * On MCL, the host provides buffers in the Host2FW ring;
2891  * FW refills (copies) buffers to the ring and updates
2892  * ring_idx in the register.
2893  *
2894  * @soc: data path SoC handle
2895  *
2896  * Return: void
2897  */
2898 #ifdef QCA_HOST2FW_RXBUF_RING
2899 static void dp_rxdma_ring_config(struct dp_soc *soc)
2900 {
2901 	int i;
2902 
2903 	for (i = 0; i < MAX_PDEV_CNT; i++) {
2904 		struct dp_pdev *pdev = soc->pdev_list[i];
2905 
2906 		if (pdev) {
2907 			int mac_id = 0;
2908 			int j;
2909 			bool dbs_enable = 0;
2910 			int max_mac_rings =
2911 				 wlan_cfg_get_num_mac_rings
2912 				(pdev->wlan_cfg_ctx);
2913 
2914 			htt_srng_setup(soc->htt_handle, 0,
2915 				 pdev->rx_refill_buf_ring.hal_srng,
2916 				 RXDMA_BUF);
2917 
2918 			if (pdev->rx_refill_buf_ring2.hal_srng)
2919 				htt_srng_setup(soc->htt_handle, 0,
2920 					pdev->rx_refill_buf_ring2.hal_srng,
2921 					RXDMA_BUF);
2922 
2923 			if (soc->cdp_soc.ol_ops->
2924 				is_hw_dbs_2x2_capable) {
2925 				dbs_enable = soc->cdp_soc.ol_ops->
2926 					is_hw_dbs_2x2_capable(soc->ctrl_psoc);
2927 			}
2928 
2929 			if (dbs_enable) {
2930 				QDF_TRACE(QDF_MODULE_ID_TXRX,
2931 				QDF_TRACE_LEVEL_ERROR,
2932 				FL("DBS enabled max_mac_rings %d\n"),
2933 					 max_mac_rings);
2934 			} else {
2935 				max_mac_rings = 1;
2936 				QDF_TRACE(QDF_MODULE_ID_TXRX,
2937 					 QDF_TRACE_LEVEL_ERROR,
2938 					 FL("DBS disabled, max_mac_rings %d\n"),
2939 					 max_mac_rings);
2940 			}
2941 
2942 			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
2943 					 FL("pdev_id %d max_mac_rings %d\n"),
2944 					 pdev->pdev_id, max_mac_rings);
2945 
2946 			for (j = 0; j < max_mac_rings; j++) {
2947 				QDF_TRACE(QDF_MODULE_ID_TXRX,
2948 					 QDF_TRACE_LEVEL_ERROR,
2949 					 FL("mac_id %d\n"), mac_id);
2950 				htt_srng_setup(soc->htt_handle, mac_id,
2951 					 pdev->rx_mac_buf_ring[j]
2952 						.hal_srng,
2953 					 RXDMA_BUF);
2954 				htt_srng_setup(soc->htt_handle, mac_id,
2955 					pdev->rxdma_err_dst_ring[j]
2956 						.hal_srng,
2957 					RXDMA_DST);
2958 				mac_id++;
2959 			}
2960 
2961 			/* Configure monitor mode rings */
2962 			htt_srng_setup(soc->htt_handle, i,
2963 					pdev->rxdma_mon_buf_ring.hal_srng,
2964 					RXDMA_MONITOR_BUF);
2965 
2966 			htt_srng_setup(soc->htt_handle, i,
2967 					pdev->rxdma_mon_dst_ring.hal_srng,
2968 					RXDMA_MONITOR_DST);
2969 
2970 			htt_srng_setup(soc->htt_handle, i,
2971 				pdev->rxdma_mon_status_ring.hal_srng,
2972 				RXDMA_MONITOR_STATUS);
2973 
2974 			htt_srng_setup(soc->htt_handle, i,
2975 				pdev->rxdma_mon_desc_ring.hal_srng,
2976 				RXDMA_MONITOR_DESC);
2977 		}
2978 	}
2979 
2980 	/*
2981 	 * Timer to reap rxdma status rings.
2982 	 * Needed until we enable ppdu end interrupts
2983 	 */
2984 	qdf_timer_init(soc->osdev, &soc->mon_reap_timer,
2985 			dp_service_mon_rings, (void *)soc,
2986 			QDF_TIMER_TYPE_WAKE_APPS);
2987 	soc->reap_timer_init = 1;
2988 }
2989 #else
2990 static void dp_rxdma_ring_config(struct dp_soc *soc)
2991 {
2992 	int i;
2993 
2994 	for (i = 0; i < MAX_PDEV_CNT; i++) {
2995 		struct dp_pdev *pdev = soc->pdev_list[i];
2996 
2997 		if (pdev) {
2998 			int ring_idx = dp_get_ring_id_for_mac_id(soc, i);
2999 
3000 			htt_srng_setup(soc->htt_handle, i,
3001 				pdev->rx_refill_buf_ring.hal_srng, RXDMA_BUF);
3002 
3003 			htt_srng_setup(soc->htt_handle, i,
3004 					pdev->rxdma_mon_buf_ring.hal_srng,
3005 					RXDMA_MONITOR_BUF);
3006 			htt_srng_setup(soc->htt_handle, i,
3007 					pdev->rxdma_mon_dst_ring.hal_srng,
3008 					RXDMA_MONITOR_DST);
3009 			htt_srng_setup(soc->htt_handle, i,
3010 				pdev->rxdma_mon_status_ring.hal_srng,
3011 				RXDMA_MONITOR_STATUS);
3012 			htt_srng_setup(soc->htt_handle, i,
3013 				pdev->rxdma_mon_desc_ring.hal_srng,
3014 				RXDMA_MONITOR_DESC);
3015 			htt_srng_setup(soc->htt_handle, i,
3016 				pdev->rxdma_err_dst_ring[ring_idx].hal_srng,
3017 				RXDMA_DST);
3018 		}
3019 	}
3020 }
3021 #endif
3022 
3023 /*
3024  * dp_soc_attach_target_wifi3() - SOC initialization in the target
3025  * @txrx_soc: Datapath SOC handle
3026  */
3027 static int dp_soc_attach_target_wifi3(struct cdp_soc_t *cdp_soc)
3028 {
3029 	struct dp_soc *soc = (struct dp_soc *)cdp_soc;
3030 
3031 	htt_soc_attach_target(soc->htt_handle);
3032 
3033 	dp_rxdma_ring_config(soc);
3034 
3035 	DP_STATS_INIT(soc);
3036 
3037 	/* initialize work queue for stats processing */
3038 	qdf_create_work(0, &soc->htt_stats.work, htt_t2h_stats_handler, soc);
3039 
3040 	return 0;
3041 }
3042 
3043 /*
3044  * dp_soc_get_nss_cfg_wifi3() - SOC get nss config
3045  * @txrx_soc: Datapath SOC handle
3046  */
3047 static int dp_soc_get_nss_cfg_wifi3(struct cdp_soc_t *cdp_soc)
3048 {
3049 	struct dp_soc *dsoc = (struct dp_soc *)cdp_soc;
3050 	return wlan_cfg_get_dp_soc_nss_cfg(dsoc->wlan_cfg_ctx);
3051 }
3052 /*
3053  * dp_soc_set_nss_cfg_wifi3() - SOC set nss config
3054  * @txrx_soc: Datapath SOC handle
3055  * @nss_cfg: nss config
3056  */
3057 static void dp_soc_set_nss_cfg_wifi3(struct cdp_soc_t *cdp_soc, int config)
3058 {
3059 	struct dp_soc *dsoc = (struct dp_soc *)cdp_soc;
3060 	struct wlan_cfg_dp_soc_ctxt *wlan_cfg_ctx = dsoc->wlan_cfg_ctx;
3061 
3062 	wlan_cfg_set_dp_soc_nss_cfg(wlan_cfg_ctx, config);
3063 
3064 	/*
3065 	 * TODO: masked out based on the per offloaded radio
3066 	 */
3067 	if (config == dp_nss_cfg_dbdc) {
3068 		wlan_cfg_set_num_tx_desc_pool(wlan_cfg_ctx, 0);
3069 		wlan_cfg_set_num_tx_ext_desc_pool(wlan_cfg_ctx, 0);
3070 		wlan_cfg_set_num_tx_desc(wlan_cfg_ctx, 0);
3071 		wlan_cfg_set_num_tx_ext_desc(wlan_cfg_ctx, 0);
3072 	}
3073 
3074 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3075 				FL("nss-wifi<0> nss config is enabled"));
3076 }
3077 /*
3078 * dp_vdev_attach_wifi3() - attach txrx vdev
3079 * @txrx_pdev: Datapath PDEV handle
3080 * @vdev_mac_addr: MAC address of the virtual interface
3081 * @vdev_id: VDEV Id
3082 * @wlan_op_mode: VDEV operating mode
3083 *
3084 * Return: DP VDEV handle on success, NULL on failure
3085 */
3086 static struct cdp_vdev *dp_vdev_attach_wifi3(struct cdp_pdev *txrx_pdev,
3087 	uint8_t *vdev_mac_addr, uint8_t vdev_id, enum wlan_op_mode op_mode)
3088 {
3089 	struct dp_pdev *pdev = (struct dp_pdev *)txrx_pdev;
3090 	struct dp_soc *soc = pdev->soc;
3091 	struct dp_vdev *vdev = qdf_mem_malloc(sizeof(*vdev));
3092 	int tx_ring_size;
3093 
3094 	if (!vdev) {
3095 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3096 			FL("DP VDEV memory allocation failed"));
3097 		goto fail0;
3098 	}
3099 
3100 	vdev->pdev = pdev;
3101 	vdev->vdev_id = vdev_id;
3102 	vdev->opmode = op_mode;
3103 	vdev->osdev = soc->osdev;
3104 
3105 	vdev->osif_rx = NULL;
3106 	vdev->osif_rsim_rx_decap = NULL;
3107 	vdev->osif_get_key = NULL;
3108 	vdev->osif_rx_mon = NULL;
3109 	vdev->osif_tx_free_ext = NULL;
3110 	vdev->osif_vdev = NULL;
3111 
3112 	vdev->delete.pending = 0;
3113 	vdev->safemode = 0;
3114 	vdev->drop_unenc = 1;
3115 	vdev->sec_type = cdp_sec_type_none;
3116 #ifdef notyet
3117 	vdev->filters_num = 0;
3118 #endif
3119 
3120 	qdf_mem_copy(
3121 		&vdev->mac_addr.raw[0], vdev_mac_addr, OL_TXRX_MAC_ADDR_LEN);
3122 
3123 	vdev->tx_encap_type = wlan_cfg_pkt_type(soc->wlan_cfg_ctx);
3124 	vdev->rx_decap_type = wlan_cfg_pkt_type(soc->wlan_cfg_ctx);
3125 	vdev->dscp_tid_map_id = 0;
3126 	vdev->mcast_enhancement_en = 0;
3127 	tx_ring_size = wlan_cfg_tx_ring_size(soc->wlan_cfg_ctx);
3128 
3129 	/* TODO: Initialize default HTT meta data that will be used in
3130 	 * TCL descriptors for packets transmitted from this VDEV
3131 	 */
3132 
3133 	TAILQ_INIT(&vdev->peer_list);
3134 
3135 	/* add this vdev into the pdev's list */
3136 	TAILQ_INSERT_TAIL(&pdev->vdev_list, vdev, vdev_list_elem);
3137 	pdev->vdev_count++;
3138 
3139 	dp_tx_vdev_attach(vdev);
3140 
3141 	if (QDF_STATUS_SUCCESS != dp_tx_flow_pool_map_handler(pdev, vdev_id,
3142 					FLOW_TYPE_VDEV, vdev_id, tx_ring_size))
3143 		goto fail1;
3144 
3145 
3146 	if ((soc->intr_mode == DP_INTR_POLL) &&
3147 			wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx) != 0) {
3148 		if (pdev->vdev_count == 1)
3149 			qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
3150 	}
3151 
3152 	dp_lro_hash_setup(soc);
3153 
3154 	/* LRO */
3155 	if (wlan_cfg_is_lro_enabled(soc->wlan_cfg_ctx) &&
3156 		wlan_op_mode_sta == vdev->opmode)
3157 		vdev->lro_enable = true;
3158 
3159 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
3160 		 "LRO: vdev_id %d lro_enable %d", vdev_id, vdev->lro_enable);
3161 
3162 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3163 		"Created vdev %pK (%pM)", vdev, vdev->mac_addr.raw);
3164 	DP_STATS_INIT(vdev);
3165 
3166 	if (wlan_op_mode_sta == vdev->opmode)
3167 		dp_peer_create_wifi3((struct cdp_vdev *)vdev,
3168 							vdev->mac_addr.raw);
3169 
3170 	return (struct cdp_vdev *)vdev;
3171 
3172 fail1:
3173 	dp_tx_vdev_detach(vdev);
3174 	qdf_mem_free(vdev);
3175 fail0:
3176 	return NULL;
3177 }
3178 
3179 /**
3180  * dp_vdev_register_wifi3() - Register VDEV operations from osif layer
3181  * @vdev: Datapath VDEV handle
3182  * @osif_vdev: OSIF vdev handle
3183  * @txrx_ops: Tx and Rx operations
3184  *
3185  * Return: void
3186  */
3187 static void dp_vdev_register_wifi3(struct cdp_vdev *vdev_handle,
3188 	void *osif_vdev,
3189 	struct ol_txrx_ops *txrx_ops)
3190 {
3191 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
3192 	vdev->osif_vdev = osif_vdev;
3193 	vdev->osif_rx = txrx_ops->rx.rx;
3194 	vdev->osif_rsim_rx_decap = txrx_ops->rx.rsim_rx_decap;
3195 	vdev->osif_get_key = txrx_ops->get_key;
3196 	vdev->osif_rx_mon = txrx_ops->rx.mon;
3197 	vdev->osif_tx_free_ext = txrx_ops->tx.tx_free_ext;
3198 #ifdef notyet
3199 #if ATH_SUPPORT_WAPI
3200 	vdev->osif_check_wai = txrx_ops->rx.wai_check;
3201 #endif
3202 #endif
3203 #ifdef UMAC_SUPPORT_PROXY_ARP
3204 	vdev->osif_proxy_arp = txrx_ops->proxy_arp;
3205 #endif
3206 	vdev->me_convert = txrx_ops->me_convert;
3207 
3208 	/* TODO: Enable the following once Tx code is integrated */
3209 	if (vdev->mesh_vdev)
3210 		txrx_ops->tx.tx = dp_tx_send_mesh;
3211 	else
3212 		txrx_ops->tx.tx = dp_tx_send;
3213 
3214 	txrx_ops->tx.tx_exception = dp_tx_send_exception;
3215 
3216 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW,
3217 		"DP Vdev Register success");
3218 }
3219 
3220 /**
3221  * dp_vdev_flush_peers() - Forcibly flush peers of vdev
3222  * @vdev: Datapath VDEV handle
3223  *
3224  * Return: void
3225  */
3226 static void dp_vdev_flush_peers(struct dp_vdev *vdev)
3227 {
3228 	struct dp_pdev *pdev = vdev->pdev;
3229 	struct dp_soc *soc = pdev->soc;
3230 	struct dp_peer *peer;
3231 	uint16_t *peer_ids;
3232 	uint8_t i = 0, j = 0;
3233 
3234 	peer_ids = qdf_mem_malloc(soc->max_peers * sizeof(peer_ids[0]));
3235 	if (!peer_ids) {
3236 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3237 			"DP alloc failure - unable to flush peers");
3238 		return;
3239 	}
3240 
3241 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
3242 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
3243 		for (i = 0; i < MAX_NUM_PEER_ID_PER_PEER; i++)
3244 			if (peer->peer_ids[i] != HTT_INVALID_PEER)
3245 				if (j < soc->max_peers)
3246 					peer_ids[j++] = peer->peer_ids[i];
3247 	}
3248 	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
3249 
3250 	for (i = 0; i < j ; i++)
3251 		dp_rx_peer_unmap_handler(soc, peer_ids[i]);
3252 
3253 	qdf_mem_free(peer_ids);
3254 
3255 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
3256 		FL("Flushed peers for vdev object %pK "), vdev);
3257 }
3258 
3259 /*
3260  * dp_vdev_detach_wifi3() - Detach txrx vdev
3261  * @txrx_vdev:		Datapath VDEV handle
3262  * @callback:		Callback OL_IF on completion of detach
3263  * @cb_context:	Callback context
3264  *
3265  */
3266 static void dp_vdev_detach_wifi3(struct cdp_vdev *vdev_handle,
3267 	ol_txrx_vdev_delete_cb callback, void *cb_context)
3268 {
3269 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
3270 	struct dp_pdev *pdev = vdev->pdev;
3271 	struct dp_soc *soc = pdev->soc;
3272 
3273 	/* preconditions */
3274 	qdf_assert(vdev);
3275 
3276 	/* remove the vdev from its parent pdev's list */
3277 	TAILQ_REMOVE(&pdev->vdev_list, vdev, vdev_list_elem);
3278 
3279 	if (wlan_op_mode_sta == vdev->opmode)
3280 		dp_peer_delete_wifi3(vdev->vap_bss_peer, 0);
3281 
3282 	/*
3283 	 * If Target is hung, flush all peers before detaching vdev
3284 	 * this will free all references held due to missing
3285 	 * unmap commands from Target
3286 	 */
3287 	if (hif_get_target_status(soc->hif_handle) == TARGET_STATUS_RESET)
3288 		dp_vdev_flush_peers(vdev);
3289 
3290 	/*
3291 	 * Use peer_ref_mutex while accessing peer_list, in case
3292 	 * a peer is in the process of being removed from the list.
3293 	 */
3294 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
3295 	/* check that the vdev has no peers allocated */
3296 	if (!TAILQ_EMPTY(&vdev->peer_list)) {
3297 		/* debug print - will be removed later */
3298 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_WARN,
3299 			FL("not deleting vdev object %pK (%pM)"
3300 			" until deletion finishes for all its peers"),
3301 			vdev, vdev->mac_addr.raw);
3302 		/* indicate that the vdev needs to be deleted */
3303 		vdev->delete.pending = 1;
3304 		vdev->delete.callback = callback;
3305 		vdev->delete.context = cb_context;
3306 		qdf_spin_unlock_bh(&soc->peer_ref_mutex);
3307 		return;
3308 	}
3309 	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
3310 
3311 	dp_tx_flow_pool_unmap_handler(pdev, vdev->vdev_id, FLOW_TYPE_VDEV,
3312 		vdev->vdev_id);
3313 	dp_tx_vdev_detach(vdev);
3314 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
3315 		FL("deleting vdev object %pK (%pM)"), vdev, vdev->mac_addr.raw);
3316 
3317 	qdf_mem_free(vdev);
3318 
3319 	if (callback)
3320 		callback(cb_context);
3321 }
3322 
3323 /*
3324  * dp_peer_create_wifi3() - attach txrx peer
3325  * @txrx_vdev: Datapath VDEV handle
3326  * @peer_mac_addr: Peer MAC address
3327  *
3328  * Return: DP peer handle on success, NULL on failure
3329  */
3330 static void *dp_peer_create_wifi3(struct cdp_vdev *vdev_handle,
3331 		uint8_t *peer_mac_addr)
3332 {
3333 	struct dp_peer *peer;
3334 	int i;
3335 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
3336 	struct dp_pdev *pdev;
3337 	struct dp_soc *soc;
3338 
3339 	/* preconditions */
3340 	qdf_assert(vdev);
3341 	qdf_assert(peer_mac_addr);
3342 
3343 	pdev = vdev->pdev;
3344 	soc = pdev->soc;
3345 #ifdef notyet
3346 	peer = (struct dp_peer *)qdf_mempool_alloc(soc->osdev,
3347 		soc->mempool_ol_ath_peer);
3348 #else
3349 	peer = (struct dp_peer *)qdf_mem_malloc(sizeof(*peer));
3350 #endif
3351 
3352 	if (!peer)
3353 		return NULL; /* failure */
3354 
3355 	qdf_mem_zero(peer, sizeof(struct dp_peer));
3356 
3357 	TAILQ_INIT(&peer->ast_entry_list);
3358 
3359 	/* store provided params */
3360 	peer->vdev = vdev;
3361 
3362 	dp_peer_add_ast(soc, peer, peer_mac_addr, CDP_TXRX_AST_TYPE_STATIC, 0);
3363 
3364 	qdf_spinlock_create(&peer->peer_info_lock);
3365 
3366 	qdf_mem_copy(
3367 		&peer->mac_addr.raw[0], peer_mac_addr, OL_TXRX_MAC_ADDR_LEN);
3368 
3369 	/* TODO: See if rx_opt_proc is really required */
3370 	peer->rx_opt_proc = soc->rx_opt_proc;
3371 
3372 	/* initialize the peer_id */
3373 	for (i = 0; i < MAX_NUM_PEER_ID_PER_PEER; i++)
3374 		peer->peer_ids[i] = HTT_INVALID_PEER;
3375 
3376 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
3377 
3378 	qdf_atomic_init(&peer->ref_cnt);
3379 
3380 	/* keep one reference for attach */
3381 	qdf_atomic_inc(&peer->ref_cnt);
3382 
3383 	/* add this peer into the vdev's list */
3384 	if (wlan_op_mode_sta == vdev->opmode)
3385 		TAILQ_INSERT_HEAD(&vdev->peer_list, peer, peer_list_elem);
3386 	else
3387 		TAILQ_INSERT_TAIL(&vdev->peer_list, peer, peer_list_elem);
3388 
3389 	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
3390 
3391 	/* TODO: See if hash based search is required */
3392 	dp_peer_find_hash_add(soc, peer);
3393 
3394 	/* Initialize the peer state */
3395 	peer->state = OL_TXRX_PEER_STATE_DISC;
3396 
3397 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
3398 		"vdev %pK created peer %pK (%pM) ref_cnt: %d",
3399 		vdev, peer, peer->mac_addr.raw,
3400 		qdf_atomic_read(&peer->ref_cnt));
3401 	/*
3402 	 * For every peer map message, search and set if bss_peer
3403 	 */
3404 	if (memcmp(peer->mac_addr.raw, vdev->mac_addr.raw, 6) == 0) {
3405 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
3406 			"vdev bss_peer!!!!");
3407 		peer->bss_peer = 1;
3408 		vdev->vap_bss_peer = peer;
3409 	}
3410 
3411 
3412 #ifndef CONFIG_WIN
3413 	dp_local_peer_id_alloc(pdev, peer);
3414 #endif
3415 	DP_STATS_INIT(peer);
3416 	return (void *)peer;
3417 }
3418 
3419 /*
3420  * dp_peer_setup_wifi3() - initialize the peer
3421  * @vdev_hdl: virtual device object
3422  * @peer: Peer object
3423  *
3424  * Return: void
3425  */
3426 static void dp_peer_setup_wifi3(struct cdp_vdev *vdev_hdl, void *peer_hdl)
3427 {
3428 	struct dp_peer *peer = (struct dp_peer *)peer_hdl;
3429 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_hdl;
3430 	struct dp_pdev *pdev;
3431 	struct dp_soc *soc;
3432 	bool hash_based = 0;
3433 	enum cdp_host_reo_dest_ring reo_dest;
3434 
3435 	/* preconditions */
3436 	qdf_assert(vdev);
3437 	qdf_assert(peer);
3438 
3439 	pdev = vdev->pdev;
3440 	soc = pdev->soc;
3441 
3442 	peer->last_assoc_rcvd = 0;
3443 	peer->last_disassoc_rcvd = 0;
3444 	peer->last_deauth_rcvd = 0;
3445 
3446 	/*
3447 	 * hash based steering is disabled for Radios which are offloaded
3448 	 * to NSS
3449 	 */
3450 	if (!wlan_cfg_get_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx))
3451 		hash_based = wlan_cfg_is_rx_hash_enabled(soc->wlan_cfg_ctx);
3452 
3453 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
3454 		FL("hash based steering for pdev: %d is %d\n"),
3455 		pdev->pdev_id, hash_based);
3456 
3457 	/*
3458 	 * The line below ensures the proper reo_dest ring is chosen
3459 	 * for cases where toeplitz hash cannot be generated (ex: non TCP/UDP)
3460 	 */
3461 	reo_dest = pdev->reo_dest;
3462 
3463 	if (soc->cdp_soc.ol_ops->peer_set_default_routing) {
3464 		/* TODO: Check the destination ring number to be passed to FW */
3465 		soc->cdp_soc.ol_ops->peer_set_default_routing(
3466 			pdev->osif_pdev, peer->mac_addr.raw,
3467 			 peer->vdev->vdev_id, hash_based, reo_dest);
3468 	}
3469 
3470 	dp_peer_rx_init(pdev, peer);
3471 	return;
3472 }
3473 
3474 /*
3475  * dp_set_vdev_tx_encap_type() - set the encap type of the vdev
3476  * @vdev_handle: virtual device object
3477  * @htt_pkt_type: type of pkt
3478  *
3479  * Return: void
3480  */
3481 static void dp_set_vdev_tx_encap_type(struct cdp_vdev *vdev_handle,
3482 	 enum htt_cmn_pkt_type val)
3483 {
3484 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
3485 	vdev->tx_encap_type = val;
3486 }
3487 
3488 /*
3489  * dp_set_vdev_rx_decap_type() - set the decap type of the vdev
3490  * @vdev_handle: virtual device object
3491  * @htt_pkt_type: type of pkt
3492  *
3493  * Return: void
3494  */
3495 static void dp_set_vdev_rx_decap_type(struct cdp_vdev *vdev_handle,
3496 	 enum htt_cmn_pkt_type val)
3497 {
3498 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
3499 	vdev->rx_decap_type = val;
3500 }
3501 
3502 /*
3503  * dp_set_pdev_reo_dest() - set the reo destination ring for this pdev
3504  * @pdev_handle: physical device object
3505  * @val: reo destination ring index (1 - 4)
3506  *
3507  * Return: void
3508  */
3509 static void dp_set_pdev_reo_dest(struct cdp_pdev *pdev_handle,
3510 	 enum cdp_host_reo_dest_ring val)
3511 {
3512 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
3513 
3514 	if (pdev)
3515 		pdev->reo_dest = val;
3516 }
3517 
3518 /*
3519  * dp_get_pdev_reo_dest() - get the reo destination for this pdev
3520  * @pdev_handle: physical device object
3521  *
3522  * Return: reo destination ring index
3523  */
3524 static enum cdp_host_reo_dest_ring
3525 dp_get_pdev_reo_dest(struct cdp_pdev *pdev_handle)
3526 {
3527 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
3528 
3529 	if (pdev)
3530 		return pdev->reo_dest;
3531 	else
3532 		return cdp_host_reo_dest_ring_unknown;
3533 }
3534 
3535 #ifdef QCA_SUPPORT_SON
3536 static void dp_son_peer_authorize(struct dp_peer *peer)
3537 {
3538 	struct dp_soc *soc;
3539 	soc = peer->vdev->pdev->soc;
3540 	peer->peer_bs_inact_flag = 0;
3541 	peer->peer_bs_inact = soc->pdev_bs_inact_reload;
3542 	return;
3543 }
3544 #else
3545 static void dp_son_peer_authorize(struct dp_peer *peer)
3546 {
3547 	return;
3548 }
3549 #endif
3550 /*
3551  * dp_set_filter_neighbour_peers() - set filter neighbour peers for smart mesh
3552  * @pdev_handle: device object
3553  * @val: value to be set
3554  *
3555  * Return: 0 on success
3556  */
3557 static int dp_set_filter_neighbour_peers(struct cdp_pdev *pdev_handle,
3558 	 uint32_t val)
3559 {
3560 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
3561 
3562 	/* Enable/Disable smart mesh filtering. This flag will be checked
3563 	 * during rx processing to check if packets are from NAC clients.
3564 	 */
3565 	pdev->filter_neighbour_peers = val;
3566 	return 0;
3567 }
3568 
3569 /*
3570  * dp_update_filter_neighbour_peers() - add/delete a neighbour peer (NAC client)
3571  * address for smart mesh filtering
3572  * @pdev_handle: device object
3573  * @cmd: Add/Del command
3574  * @macaddr: nac client mac address
3575  *
3576  * Return: 1 on success, 0 on failure
3577  */
3578 static int dp_update_filter_neighbour_peers(struct cdp_pdev *pdev_handle,
3579 	 uint32_t cmd, uint8_t *macaddr)
3580 {
3581 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
3582 	struct dp_neighbour_peer *peer = NULL;
3583 
3584 	if (!macaddr)
3585 		goto fail0;
3586 
3587 	/* Store address of NAC (neighbour peer) which will be checked
3588 	 * against TA of received packets.
3589 	 */
3590 	if (cmd == DP_NAC_PARAM_ADD) {
3591 		peer = (struct dp_neighbour_peer *) qdf_mem_malloc(
3592 				sizeof(*peer));
3593 
3594 		if (!peer) {
3595 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3596 				FL("DP neighbour peer node memory allocation failed"));
3597 			goto fail0;
3598 		}
3599 
3600 		qdf_mem_copy(&peer->neighbour_peers_macaddr.raw[0],
3601 			macaddr, DP_MAC_ADDR_LEN);
3602 
3603 
3604 		qdf_spin_lock_bh(&pdev->neighbour_peer_mutex);
3605 		/* add this neighbour peer into the list */
3606 		TAILQ_INSERT_TAIL(&pdev->neighbour_peers_list, peer,
3607 				neighbour_peer_list_elem);
3608 		qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex);
3609 
3610 		return 1;
3611 
3612 	} else if (cmd == DP_NAC_PARAM_DEL) {
3613 		qdf_spin_lock_bh(&pdev->neighbour_peer_mutex);
3614 		TAILQ_FOREACH(peer, &pdev->neighbour_peers_list,
3615 				neighbour_peer_list_elem) {
3616 			if (!qdf_mem_cmp(&peer->neighbour_peers_macaddr.raw[0],
3617 				macaddr, DP_MAC_ADDR_LEN)) {
3618 				/* delete this peer from the list */
3619 				TAILQ_REMOVE(&pdev->neighbour_peers_list,
3620 					peer, neighbour_peer_list_elem);
3621 				qdf_mem_free(peer);
3622 				break;
3623 			}
3624 		}
3625 		qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex);
3626 
3627 		return 1;
3628 
3629 	}
3630 
3631 fail0:
3632 	return 0;
3633 }
3634 
3635 /*
3636  * dp_get_sec_type() - Get the security type
3637  * @peer:		Datapath peer handle
3638  * @sec_idx:    Security id (mcast, ucast)
3639  *
3640  * return sec_type: Security type
3641  * Return: sec_type (security type)
3642 static int dp_get_sec_type(struct cdp_peer *peer, uint8_t sec_idx)
3643 {
3644 	struct dp_peer *dpeer = (struct dp_peer *)peer;
3645 
3646 	return dpeer->security[sec_idx].sec_type;
3647 }
3648 
3649 /*
3650  * dp_peer_authorize() - authorize txrx peer
3651  * @peer_handle:		Datapath peer handle
3652  * @authorize: flag to authorize/unauthorize the peer
3653  *
3654  */
3655 static void dp_peer_authorize(struct cdp_peer *peer_handle, uint32_t authorize)
3656 {
3657 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
3658 	struct dp_soc *soc;
3659 
3660 	if (peer != NULL) {
3661 		soc = peer->vdev->pdev->soc;
3662 		qdf_spin_lock_bh(&soc->peer_ref_mutex);
3663 		dp_son_peer_authorize(peer);
3664 		peer->authorize = authorize ? 1 : 0;
3665 		qdf_spin_unlock_bh(&soc->peer_ref_mutex);
3666 	}
3667 }
3668 
3669 #ifdef QCA_SUPPORT_SON
3670 /*
3671  * dp_txrx_update_inact_threshold() - Update inact timer threshold
3672  * @pdev_handle: Device handle
3673  * @new_threshold : updated threshold value
3674  *
3675  */
3676 static void
3677 dp_txrx_update_inact_threshold(struct cdp_pdev *pdev_handle,
3678 			       u_int16_t new_threshold)
3679 {
3680 	struct dp_vdev *vdev;
3681 	struct dp_peer *peer;
3682 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
3683 	struct dp_soc *soc = pdev->soc;
3684 	u_int16_t old_threshold = soc->pdev_bs_inact_reload;
3685 
3686 	if (old_threshold == new_threshold)
3687 		return;
3688 
3689 	soc->pdev_bs_inact_reload = new_threshold;
3690 
3691 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
3692 	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
3693 		if (vdev->opmode != wlan_op_mode_ap)
3694 			continue;
3695 
3696 		TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
3697 			if (!peer->authorize)
3698 				continue;
3699 
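			/*
			 * peer_bs_inact counts down from the reload value, so
			 * (old_threshold - peer_bs_inact) is the inactivity
			 * already accumulated. If that already meets the new
			 * threshold, mark the peer inactive now; otherwise
			 * carry the accumulated inactivity over to the new
			 * threshold scale.
			 */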
3700 			if (old_threshold - peer->peer_bs_inact >=
3701 					new_threshold) {
3702 				dp_mark_peer_inact((void *)peer, true);
3703 				peer->peer_bs_inact = 0;
3704 			} else {
3705 				peer->peer_bs_inact = new_threshold -
3706 					(old_threshold - peer->peer_bs_inact);
3707 			}
3708 		}
3709 	}
3710 	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
3711 }
3712 
3713 /**
3714  * dp_txrx_reset_inact_count(): Reset inact count
3715  * @pdev_handle - device handle
3716  *
3717  * Return: void
3718  */
3719 static void
3720 dp_txrx_reset_inact_count(struct cdp_pdev *pdev_handle)
3721 {
3722 	struct dp_vdev *vdev = NULL;
3723 	struct dp_peer *peer = NULL;
3724 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
3725 	struct dp_soc *soc = pdev->soc;
3726 
3727 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
3728 	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
3729 		if (vdev->opmode != wlan_op_mode_ap)
3730 			continue;
3731 
3732 		TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
3733 			if (!peer->authorize)
3734 				continue;
3735 
3736 			peer->peer_bs_inact = soc->pdev_bs_inact_reload;
3737 		}
3738 	}
3739 	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
3740 }
3741 
3742 /**
3743  * dp_set_inact_params(): set inactivity params
3744  * @pdev_handle - device handle
3745  * @inact_check_interval - inactivity interval
3746  * @inact_normal - inactivity threshold under normal load
3747  * @inact_overload - inactivity threshold when the radio is overloaded
3748  *
3749  * Return: bool
3750  */
3751 bool dp_set_inact_params(struct cdp_pdev *pdev_handle,
3752 			 u_int16_t inact_check_interval,
3753 			 u_int16_t inact_normal, u_int16_t inact_overload)
3754 {
3755 	struct dp_soc *soc;
3756 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
3757 
3758 	if (!pdev)
3759 		return false;
3760 
3761 	soc = pdev->soc;
3762 	if (!soc)
3763 		return false;
3764 
3765 	soc->pdev_bs_inact_interval = inact_check_interval;
3766 	soc->pdev_bs_inact_normal = inact_normal;
3767 	soc->pdev_bs_inact_overload = inact_overload;
3768 
3769 	dp_txrx_update_inact_threshold((struct cdp_pdev *)pdev,
3770 					soc->pdev_bs_inact_normal);
3771 
3772 	return true;
3773 }
3774 
3775 /**
3776  * dp_start_inact_timer(): Inactivity timer start
3777  * @pdev_handle - device handle
3778  * @enable - Inactivity timer start/stop
3779  *
3780  * Return: bool
3781  */
3782 bool dp_start_inact_timer(struct cdp_pdev *pdev_handle, bool enable)
3783 {
3784 	struct dp_soc *soc;
3785 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
3786 
3787 	if (!pdev)
3788 		return false;
3789 
3790 	soc = pdev->soc;
3791 	if (!soc)
3792 		return false;
3793 
3794 	if (enable) {
3795 		dp_txrx_reset_inact_count((struct cdp_pdev *)pdev);
3796 		qdf_timer_mod(&soc->pdev_bs_inact_timer,
3797 			      soc->pdev_bs_inact_interval * 1000);
3798 	} else {
3799 		qdf_timer_stop(&soc->pdev_bs_inact_timer);
3800 	}
3801 
3802 	return true;
3803 }
3804 
3805 /**
3806  * dp_set_overload(): Set inactivity overload
3807  * @pdev_handle - device handle
3808  * @overload - overload status
3809  *
3810  * Return: void
3811  */
3812 void dp_set_overload(struct cdp_pdev *pdev_handle, bool overload)
3813 {
3814 	struct dp_soc *soc;
3815 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
3816 
3817 	if (!pdev)
3818 		return;
3819 
3820 	soc = pdev->soc;
3821 	if (!soc)
3822 		return;
3823 
3824 	dp_txrx_update_inact_threshold((struct cdp_pdev *)pdev,
3825 			overload ? soc->pdev_bs_inact_overload :
3826 			soc->pdev_bs_inact_normal);
3827 }
3828 
3829 /**
3830  * dp_peer_is_inact(): check whether peer is inactive
3831  * @peer_handle - datapath peer handle
3832  *
3833  * Return: bool
3834  */
3835 bool dp_peer_is_inact(void *peer_handle)
3836 {
3837 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
3838 
3839 	if (!peer)
3840 		return false;
3841 
3842 	return peer->peer_bs_inact_flag == 1;
3843 }
3844 
3845 /**
3846  * dp_init_inact_timer(): initialize the inact timer
3847  * @soc - SOC handle
3848  *
3849  * Return: void
3850  */
3851 void dp_init_inact_timer(struct dp_soc *soc)
3852 {
3853 	qdf_timer_init(soc->osdev, &soc->pdev_bs_inact_timer,
3854 		dp_txrx_peer_find_inact_timeout_handler,
3855 		(void *)soc, QDF_TIMER_TYPE_WAKE_APPS);
3856 }
3857 
3858 #else
3859 
3860 bool dp_set_inact_params(struct cdp_pdev *pdev, u_int16_t inact_check_interval,
3861 			 u_int16_t inact_normal, u_int16_t inact_overload)
3862 {
3863 	return false;
3864 }
3865 
3866 bool dp_start_inact_timer(struct cdp_pdev *pdev, bool enable)
3867 {
3868 	return false;
3869 }
3870 
3871 void dp_set_overload(struct cdp_pdev *pdev, bool overload)
3872 {
3873 	return;
3874 }
3875 
3876 void dp_init_inact_timer(struct dp_soc *soc)
3877 {
3878 	return;
3879 }
3880 
3881 bool dp_peer_is_inact(void *peer)
3882 {
3883 	return false;
3884 }
3885 #endif
3886 
3887 /*
3888  * dp_peer_unref_delete() - unref and delete peer
3889  * @peer_handle:		Datapath peer handle
3890  *
3891  */
3892 void dp_peer_unref_delete(void *peer_handle)
3893 {
3894 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
3895 	struct dp_peer *bss_peer = NULL;
3896 	struct dp_vdev *vdev = peer->vdev;
3897 	struct dp_pdev *pdev = vdev->pdev;
3898 	struct dp_soc *soc = pdev->soc;
3899 	struct dp_peer *tmppeer;
3900 	int found = 0;
3901 	uint16_t peer_id;
3902 	uint16_t vdev_id;
3903 
3904 	/*
3905 	 * Hold the lock all the way from checking if the peer ref count
3906 	 * is zero until the peer references are removed from the hash
3907 	 * table and vdev list (if the peer ref count is zero).
3908 	 * This protects against a new HL tx operation starting to use the
3909 	 * peer object just after this function concludes it's done being used.
3910 	 * Furthermore, the lock needs to be held while checking whether the
3911 	 * vdev's list of peers is empty, to make sure that list is not modified
3912 	 * concurrently with the empty check.
3913 	 */
3914 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
3915 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
3916 		  "%s: peer %pK ref_cnt(before decrement): %d\n", __func__,
3917 		  peer, qdf_atomic_read(&peer->ref_cnt));
3918 	if (qdf_atomic_dec_and_test(&peer->ref_cnt)) {
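		/*
		 * vdev_id is cached here because the vdev may be freed below
		 * (when its last peer goes away and a vdev delete is pending)
		 * before it is needed for the flow pool unmap and the
		 * peer_unref_delete callback.
		 */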
3919 		peer_id = peer->peer_ids[0];
3920 		vdev_id = vdev->vdev_id;
3921 
3922 		/*
3923 		 * Make sure that the reference to the peer in
3924 		 * peer object map is removed
3925 		 */
3926 		if (peer_id != HTT_INVALID_PEER)
3927 			soc->peer_id_to_obj_map[peer_id] = NULL;
3928 
3929 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
3930 			"Deleting peer %pK (%pM)", peer, peer->mac_addr.raw);
3931 
3932 		/* remove the reference to the peer from the hash table */
3933 		dp_peer_find_hash_remove(soc, peer);
3934 
3935 		TAILQ_FOREACH(tmppeer, &peer->vdev->peer_list, peer_list_elem) {
3936 			if (tmppeer == peer) {
3937 				found = 1;
3938 				break;
3939 			}
3940 		}
3941 		if (found) {
3942 			TAILQ_REMOVE(&peer->vdev->peer_list, peer,
3943 				peer_list_elem);
3944 		} else {
3945 			/* Ignoring the remove operation as peer was not found */
3946 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_WARN,
3947 				"peer %pK not found in vdev (%pK)->peer_list:%pK",
3948 				peer, vdev, &peer->vdev->peer_list);
3949 		}
3950 
3951 		/* cleanup the peer data */
3952 		dp_peer_cleanup(vdev, peer);
3953 
3954 		/* check whether the parent vdev has no peers left */
3955 		if (TAILQ_EMPTY(&vdev->peer_list)) {
3956 			/*
3957 			 * Now that there are no references to the peer, we can
3958 			 * release the peer reference lock.
3959 			 */
3960 			qdf_spin_unlock_bh(&soc->peer_ref_mutex);
3961 			/*
3962 			 * Check if the parent vdev was waiting for its peers
3963 			 * to be deleted, in order for it to be deleted too.
3964 			 */
3965 			if (vdev->delete.pending) {
3966 				ol_txrx_vdev_delete_cb vdev_delete_cb =
3967 					vdev->delete.callback;
3968 				void *vdev_delete_context =
3969 					vdev->delete.context;
3970 
3971 				QDF_TRACE(QDF_MODULE_ID_DP,
3972 					QDF_TRACE_LEVEL_INFO_HIGH,
3973 					FL("deleting vdev object %pK (%pM)"
3974 					" - its last peer is done"),
3975 					vdev, vdev->mac_addr.raw);
3976 				/* all peers are gone, go ahead and delete it */
3977 				dp_tx_flow_pool_unmap_handler(pdev, vdev_id,
3978 								FLOW_TYPE_VDEV,
3979 								vdev_id);
3980 				dp_tx_vdev_detach(vdev);
3981 				QDF_TRACE(QDF_MODULE_ID_DP,
3982 					QDF_TRACE_LEVEL_INFO_HIGH,
3983 					FL("deleting vdev object %pK (%pM)"),
3984 					vdev, vdev->mac_addr.raw);
3985 
3986 				qdf_mem_free(vdev);
3987 				vdev = NULL;
3988 				if (vdev_delete_cb)
3989 					vdev_delete_cb(vdev_delete_context);
3990 			}
3991 		} else {
3992 			qdf_spin_unlock_bh(&soc->peer_ref_mutex);
3993 		}
3994 
3995 		if (vdev) {
3996 			if (vdev->vap_bss_peer == peer) {
3997 				vdev->vap_bss_peer = NULL;
3998 			}
3999 		}
4000 
4001 		if (soc->cdp_soc.ol_ops->peer_unref_delete) {
4002 			soc->cdp_soc.ol_ops->peer_unref_delete(pdev->osif_pdev,
4003 					vdev_id, peer->mac_addr.raw);
4004 		}
4005 
4006 		if (!vdev || !vdev->vap_bss_peer) {
4007 			goto free_peer;
4008 		}
4009 
4010 #ifdef notyet
4011 		qdf_mempool_free(soc->osdev, soc->mempool_ol_ath_peer, peer);
4012 #else
4013 		bss_peer = vdev->vap_bss_peer;
4014 		DP_UPDATE_STATS(bss_peer, peer);
4015 
4016 free_peer:
4017 		qdf_mem_free(peer);
4018 
4019 #endif
4020 	} else {
4021 		qdf_spin_unlock_bh(&soc->peer_ref_mutex);
4022 	}
4023 }
4024 
4025 /*
4026  * dp_peer_delete_wifi3() - Delete txrx peer
4027  * @peer_handle: Datapath peer handle
4028  * @bitmap: bitmap indicating special handling of request.
4029  *
4030  */
4031 static void dp_peer_delete_wifi3(void *peer_handle, uint32_t bitmap)
4032 {
4033 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
4034 
4035 	/* redirect the peer's rx delivery function to point to a
4036 	 * discard func
4037 	 */
4038 	peer->rx_opt_proc = dp_rx_discard;
4039 
4040 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
4041 		FL("peer %pK (%pM)"),  peer, peer->mac_addr.raw);
4042 
4043 #ifndef CONFIG_WIN
4044 	dp_local_peer_id_free(peer->vdev->pdev, peer);
4045 #endif
4046 	qdf_spinlock_destroy(&peer->peer_info_lock);
4047 
4048 	/*
4049 	 * Remove the reference added during peer_attach.
4050 	 * The peer will still be left allocated until the
4051 	 * PEER_UNMAP message arrives to remove the other
4052 	 * reference, added by the PEER_MAP message.
4053 	 */
4054 	dp_peer_unref_delete(peer_handle);
4055 }
4056 
4057 /*
4058  * dp_get_vdev_mac_addr_wifi3() - get vdev mac address
4059  * @pvdev:		Datapath VDEV handle
4060  *
4061  */
4062 static uint8 *dp_get_vdev_mac_addr_wifi3(struct cdp_vdev *pvdev)
4063 {
4064 	struct dp_vdev *vdev = (struct dp_vdev *)pvdev;
4065 	return vdev->mac_addr.raw;
4066 }
4067 
4068 /*
4069  * dp_vdev_set_wds() - Enable/Disable WDS on the vdev
4070  * @vdev_handle: DP VDEV handle
4071  * @val: WDS enable value
4072  *
4073  * Return: 0 on success
4074  */
4075 static int dp_vdev_set_wds(void *vdev_handle, uint32_t val)
4076 {
4077 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
4078 
4079 	vdev->wds_enabled = val;
4080 	return 0;
4081 }
4082 
4083 /*
4084  * dp_get_vdev_from_vdev_id_wifi3() - get vdev from vdev_id
4085  * @dev:		Datapath PDEV handle
4086  * @vdev_id: vdev id
4087  */
4088 static struct cdp_vdev *dp_get_vdev_from_vdev_id_wifi3(struct cdp_pdev *dev,
4089 						uint8_t vdev_id)
4090 {
4091 	struct dp_pdev *pdev = (struct dp_pdev *)dev;
4092 	struct dp_vdev *vdev = NULL;
4093 
4094 	if (qdf_unlikely(!pdev))
4095 		return NULL;
4096 
4097 	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
4098 		if (vdev->vdev_id == vdev_id)
4099 			break;
4100 	}
4101 
4102 	return (struct cdp_vdev *)vdev;
4103 }
4104 
4105 static int dp_get_opmode(struct cdp_vdev *vdev_handle)
4106 {
4107 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
4108 
4109 	return vdev->opmode;
4110 }
4111 
4112 static struct cdp_cfg *dp_get_ctrl_pdev_from_vdev_wifi3(struct cdp_vdev *pvdev)
4113 {
4114 	struct dp_vdev *vdev = (struct dp_vdev *)pvdev;
4115 	struct dp_pdev *pdev = vdev->pdev;
4116 
4117 	return (struct cdp_cfg *)pdev->wlan_cfg_ctx;
4118 }
4119 
4120 /**
4121  * dp_reset_monitor_mode() - Disable monitor mode
4122  * @pdev_handle: Datapath PDEV handle
4123  *
4124  * Return: 0 on success, not 0 on failure
4125  */
4126 static int dp_reset_monitor_mode(struct cdp_pdev *pdev_handle)
4127 {
4128 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
4129 	struct htt_rx_ring_tlv_filter htt_tlv_filter;
4130 	struct dp_soc *soc;
4131 	uint8_t pdev_id;
4132 
4133 	pdev_id = pdev->pdev_id;
4134 	soc = pdev->soc;
4135 
4136 	pdev->monitor_vdev = NULL;
4137 	qdf_mem_set(&(htt_tlv_filter), sizeof(htt_tlv_filter), 0x0);
4138 
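	/*
	 * Program an all-zero TLV filter on both monitor rings so that the
	 * target stops delivering packet and status TLVs to them.
	 */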
4139 	htt_h2t_rx_ring_cfg(soc->htt_handle, pdev_id,
4140 		pdev->rxdma_mon_buf_ring.hal_srng,
4141 		RXDMA_MONITOR_BUF, RX_BUFFER_SIZE, &htt_tlv_filter);
4142 
4143 	htt_h2t_rx_ring_cfg(soc->htt_handle, pdev_id,
4144 		pdev->rxdma_mon_status_ring.hal_srng, RXDMA_MONITOR_STATUS,
4145 		RX_BUFFER_SIZE, &htt_tlv_filter);
4146 
4147 	return 0;
4148 }
4149 
4150 /**
4151  * dp_set_nac() - set peer_nac
4152  * @peer_handle: Datapath PEER handle
4153  *
4154  * Return: void
4155  */
4156 static void dp_set_nac(struct cdp_peer *peer_handle)
4157 {
4158 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
4159 
4160 	peer->nac = 1;
4161 }
4162 
4163 /**
4164  * dp_get_tx_pending() - read pending tx
4165  * @pdev_handle: Datapath PDEV handle
4166  *
4167  * Return: outstanding tx
4168  */
4169 static int dp_get_tx_pending(struct cdp_pdev *pdev_handle)
4170 {
4171 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
4172 
4173 	return qdf_atomic_read(&pdev->num_tx_outstanding);
4174 }
4175 
4176 /**
4177  * dp_get_peer_mac_from_peer_id() - get peer mac
4178  * @pdev_handle: Datapath PDEV handle
4179  * @peer_id: Peer ID
4180  * @peer_mac: MAC addr of PEER
4181  *
4182  * Return: void
4183  */
4184 static void dp_get_peer_mac_from_peer_id(struct cdp_pdev *pdev_handle,
4185 	uint32_t peer_id, uint8_t *peer_mac)
4186 {
4187 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
4188 	struct dp_peer *peer;
4189 
4190 	if (pdev && peer_mac) {
4191 		peer = dp_peer_find_by_id(pdev->soc, (uint16_t)peer_id);
4192 		if (peer && peer->mac_addr.raw) {
4193 			qdf_mem_copy(peer_mac, peer->mac_addr.raw,
4194 					DP_MAC_ADDR_LEN);
4195 		}
4196 	}
4197 }
4198 
4199 /**
4200  * dp_vdev_set_monitor_mode() - Set DP VDEV to monitor mode
4201  * @vdev_handle: Datapath VDEV handle
4202  * @smart_monitor: Flag to denote if it is smart monitor mode
4203  *
4204  * Return: 0 on success, not 0 on failure
4205  */
4206 static int dp_vdev_set_monitor_mode(struct cdp_vdev *vdev_handle,
4207 		uint8_t smart_monitor)
4208 {
4209 	/* Many monitor VAPs can exist in a system, but only one can be up at
4210 	 * any time
4211 	 */
4212 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
4213 	struct dp_pdev *pdev;
4214 	struct htt_rx_ring_tlv_filter htt_tlv_filter;
4215 	struct dp_soc *soc;
4216 	uint8_t pdev_id;
4217 
4218 	qdf_assert(vdev);
4219 
4220 	pdev = vdev->pdev;
4221 	pdev_id = pdev->pdev_id;
4222 	soc = pdev->soc;
4223 
4224 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_WARN,
4225 		"pdev=%pK, pdev_id=%d, soc=%pK vdev=%pK\n",
4226 		pdev, pdev_id, soc, vdev);
4227 
4228 	/* Check if the current pdev's monitor_vdev exists */
4229 	if (pdev->monitor_vdev) {
4230 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
4231 			"vdev=%pK\n", vdev);
4232 		qdf_assert(vdev);
4233 	}
4234 
4235 	pdev->monitor_vdev = vdev;
4236 
4237 	/* If smart monitor mode, do not configure monitor ring */
4238 	if (smart_monitor)
4239 		return QDF_STATUS_SUCCESS;
4240 
4241 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
4242 		"MODE[%x] FP[%02x|%02x|%02x] MO[%02x|%02x|%02x]\n",
4243 		pdev->mon_filter_mode, pdev->fp_mgmt_filter,
4244 		pdev->fp_ctrl_filter, pdev->fp_data_filter,
4245 		pdev->mo_mgmt_filter, pdev->mo_ctrl_filter,
4246 		pdev->mo_data_filter);
4247 
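	/*
	 * Monitor buffer ring: request full packet contents with per-MSDU
	 * headers; PPDU level TLVs are not needed on this ring.
	 */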
4248 	htt_tlv_filter.mpdu_start = 1;
4249 	htt_tlv_filter.msdu_start = 1;
4250 	htt_tlv_filter.packet = 1;
4251 	htt_tlv_filter.msdu_end = 1;
4252 	htt_tlv_filter.mpdu_end = 1;
4253 	htt_tlv_filter.packet_header = 1;
4254 	htt_tlv_filter.attention = 1;
4255 	htt_tlv_filter.ppdu_start = 0;
4256 	htt_tlv_filter.ppdu_end = 0;
4257 	htt_tlv_filter.ppdu_end_user_stats = 0;
4258 	htt_tlv_filter.ppdu_end_user_stats_ext = 0;
4259 	htt_tlv_filter.ppdu_end_status_done = 0;
4260 	htt_tlv_filter.header_per_msdu = 1;
4261 	htt_tlv_filter.enable_fp =
4262 		(pdev->mon_filter_mode & MON_FILTER_PASS) ? 1 : 0;
4263 	htt_tlv_filter.enable_md = 0;
4264 	htt_tlv_filter.enable_mo =
4265 		(pdev->mon_filter_mode & MON_FILTER_OTHER) ? 1 : 0;
4266 	htt_tlv_filter.fp_mgmt_filter = pdev->fp_mgmt_filter;
4267 	htt_tlv_filter.fp_ctrl_filter = pdev->fp_ctrl_filter;
4268 	htt_tlv_filter.fp_data_filter = pdev->fp_data_filter;
4269 	htt_tlv_filter.mo_mgmt_filter = pdev->mo_mgmt_filter;
4270 	htt_tlv_filter.mo_ctrl_filter = pdev->mo_ctrl_filter;
4271 	htt_tlv_filter.mo_data_filter = pdev->mo_data_filter;
4272 
4273 	htt_h2t_rx_ring_cfg(soc->htt_handle, pdev_id,
4274 		pdev->rxdma_mon_buf_ring.hal_srng,
4275 		RXDMA_MONITOR_BUF, RX_BUFFER_SIZE, &htt_tlv_filter);
4276 
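	/*
	 * Monitor status ring: request only PPDU level TLVs (start/end and
	 * user stats); packet contents are not delivered on this ring.
	 */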
4277 	htt_tlv_filter.mpdu_start = 1;
4278 	htt_tlv_filter.msdu_start = 1;
4279 	htt_tlv_filter.packet = 0;
4280 	htt_tlv_filter.msdu_end = 1;
4281 	htt_tlv_filter.mpdu_end = 1;
4282 	htt_tlv_filter.packet_header = 1;
4283 	htt_tlv_filter.attention = 1;
4284 	htt_tlv_filter.ppdu_start = 1;
4285 	htt_tlv_filter.ppdu_end = 1;
4286 	htt_tlv_filter.ppdu_end_user_stats = 1;
4287 	htt_tlv_filter.ppdu_end_user_stats_ext = 1;
4288 	htt_tlv_filter.ppdu_end_status_done = 1;
4289 	htt_tlv_filter.header_per_msdu = 0;
4290 	htt_tlv_filter.enable_fp =
4291 		(pdev->mon_filter_mode & MON_FILTER_PASS) ? 1 : 0;
4292 	htt_tlv_filter.enable_md = 0;
4293 	htt_tlv_filter.enable_mo =
4294 		(pdev->mon_filter_mode & MON_FILTER_OTHER) ? 1 : 0;
4295 	htt_tlv_filter.fp_mgmt_filter = pdev->fp_mgmt_filter;
4296 	htt_tlv_filter.fp_ctrl_filter = pdev->fp_ctrl_filter;
4297 	htt_tlv_filter.fp_data_filter = pdev->fp_data_filter;
4298 	htt_tlv_filter.mo_mgmt_filter = pdev->mo_mgmt_filter;
4299 	htt_tlv_filter.mo_ctrl_filter = pdev->mo_ctrl_filter;
4300 	htt_tlv_filter.mo_data_filter = pdev->mo_data_filter;
4301 
4302 	htt_h2t_rx_ring_cfg(soc->htt_handle, pdev_id,
4303 		pdev->rxdma_mon_status_ring.hal_srng, RXDMA_MONITOR_STATUS,
4304 		RX_BUFFER_SIZE, &htt_tlv_filter);
4305 
4306 	return QDF_STATUS_SUCCESS;
4307 }
4308 
4309 /**
4310  * dp_pdev_set_advance_monitor_filter() - Set DP PDEV monitor filter
4311  * @pdev_handle: Datapath PDEV handle
4312  * @filter_val: monitor filter settings to be applied
4313  * Return: 0 on success, not 0 on failure
4314  */
4315 static int dp_pdev_set_advance_monitor_filter(struct cdp_pdev *pdev_handle,
4316 	struct cdp_monitor_filter *filter_val)
4317 {
4318 	/* Many monitor VAPs can exist in a system, but only one can be up at
4319 	 * any time
4320 	 */
4321 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
4322 	struct dp_vdev *vdev = pdev->monitor_vdev;
4323 	struct htt_rx_ring_tlv_filter htt_tlv_filter;
4324 	struct dp_soc *soc;
4325 	uint8_t pdev_id;
4326 
4327 	pdev_id = pdev->pdev_id;
4328 	soc = pdev->soc;
4329 
4330 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_WARN,
4331 		"pdev=%pK, pdev_id=%d, soc=%pK vdev=%pK\n",
4332 		pdev, pdev_id, soc, vdev);
4333 
4334 	/* Check if the current pdev's monitor_vdev exists */
4335 	if (!pdev->monitor_vdev) {
4336 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
4337 			"vdev=%pK\n", vdev);
4338 		qdf_assert(vdev);
4339 	}
4340 
4341 	/* update filter mode, type in pdev structure */
4342 	pdev->mon_filter_mode = filter_val->mode;
4343 	pdev->fp_mgmt_filter = filter_val->fp_mgmt;
4344 	pdev->fp_ctrl_filter = filter_val->fp_ctrl;
4345 	pdev->fp_data_filter = filter_val->fp_data;
4346 	pdev->mo_mgmt_filter = filter_val->mo_mgmt;
4347 	pdev->mo_ctrl_filter = filter_val->mo_ctrl;
4348 	pdev->mo_data_filter = filter_val->mo_data;
4349 
4350 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
4351 		"MODE[%x] FP[%02x|%02x|%02x] MO[%02x|%02x|%02x]\n",
4352 		pdev->mon_filter_mode, pdev->fp_mgmt_filter,
4353 		pdev->fp_ctrl_filter, pdev->fp_data_filter,
4354 		pdev->mo_mgmt_filter, pdev->mo_ctrl_filter,
4355 		pdev->mo_data_filter);
4356 
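	/*
	 * First push an all-zero TLV filter to both monitor rings to clear
	 * the existing filter configuration, then reprogram the rings with
	 * the updated filters below.
	 */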
4357 	qdf_mem_set(&(htt_tlv_filter), sizeof(htt_tlv_filter), 0x0);
4358 
4359 	htt_h2t_rx_ring_cfg(soc->htt_handle, pdev_id,
4360 		pdev->rxdma_mon_buf_ring.hal_srng,
4361 		RXDMA_MONITOR_BUF, RX_BUFFER_SIZE, &htt_tlv_filter);
4362 
4363 	htt_h2t_rx_ring_cfg(soc->htt_handle, pdev_id,
4364 		pdev->rxdma_mon_status_ring.hal_srng, RXDMA_MONITOR_STATUS,
4365 		RX_BUFFER_SIZE, &htt_tlv_filter);
4366 
4367 	htt_tlv_filter.mpdu_start = 1;
4368 	htt_tlv_filter.msdu_start = 1;
4369 	htt_tlv_filter.packet = 1;
4370 	htt_tlv_filter.msdu_end = 1;
4371 	htt_tlv_filter.mpdu_end = 1;
4372 	htt_tlv_filter.packet_header = 1;
4373 	htt_tlv_filter.attention = 1;
4374 	htt_tlv_filter.ppdu_start = 0;
4375 	htt_tlv_filter.ppdu_end = 0;
4376 	htt_tlv_filter.ppdu_end_user_stats = 0;
4377 	htt_tlv_filter.ppdu_end_user_stats_ext = 0;
4378 	htt_tlv_filter.ppdu_end_status_done = 0;
4379 	htt_tlv_filter.header_per_msdu = 1;
4380 	htt_tlv_filter.enable_fp =
4381 		(pdev->mon_filter_mode & MON_FILTER_PASS) ? 1 : 0;
4382 	htt_tlv_filter.enable_md = 0;
4383 	htt_tlv_filter.enable_mo =
4384 		(pdev->mon_filter_mode & MON_FILTER_OTHER) ? 1 : 0;
4385 	htt_tlv_filter.fp_mgmt_filter = pdev->fp_mgmt_filter;
4386 	htt_tlv_filter.fp_ctrl_filter = pdev->fp_ctrl_filter;
4387 	htt_tlv_filter.fp_data_filter = pdev->fp_data_filter;
4388 	htt_tlv_filter.mo_mgmt_filter = pdev->mo_mgmt_filter;
4389 	htt_tlv_filter.mo_ctrl_filter = pdev->mo_ctrl_filter;
4390 	htt_tlv_filter.mo_data_filter = pdev->mo_data_filter;
4391 
4392 	htt_h2t_rx_ring_cfg(soc->htt_handle, pdev_id,
4393 		pdev->rxdma_mon_buf_ring.hal_srng, RXDMA_MONITOR_BUF,
4394 		RX_BUFFER_SIZE, &htt_tlv_filter);
4395 
4396 	htt_tlv_filter.mpdu_start = 1;
4397 	htt_tlv_filter.msdu_start = 1;
4398 	htt_tlv_filter.packet = 0;
4399 	htt_tlv_filter.msdu_end = 1;
4400 	htt_tlv_filter.mpdu_end = 1;
4401 	htt_tlv_filter.packet_header = 1;
4402 	htt_tlv_filter.attention = 1;
4403 	htt_tlv_filter.ppdu_start = 1;
4404 	htt_tlv_filter.ppdu_end = 1;
4405 	htt_tlv_filter.ppdu_end_user_stats = 1;
4406 	htt_tlv_filter.ppdu_end_user_stats_ext = 1;
4407 	htt_tlv_filter.ppdu_end_status_done = 1;
4408 	htt_tlv_filter.header_per_msdu = 0;
4409 	htt_tlv_filter.enable_fp =
4410 		(pdev->mon_filter_mode & MON_FILTER_PASS) ? 1 : 0;
4411 	htt_tlv_filter.enable_md = 0;
4412 	htt_tlv_filter.enable_mo =
4413 		(pdev->mon_filter_mode & MON_FILTER_OTHER) ? 1 : 0;
4414 	htt_tlv_filter.fp_mgmt_filter = pdev->fp_mgmt_filter;
4415 	htt_tlv_filter.fp_ctrl_filter = pdev->fp_ctrl_filter;
4416 	htt_tlv_filter.fp_data_filter = pdev->fp_data_filter;
4417 	htt_tlv_filter.mo_mgmt_filter = pdev->mo_mgmt_filter;
4418 	htt_tlv_filter.mo_ctrl_filter = pdev->mo_ctrl_filter;
4419 	htt_tlv_filter.mo_data_filter = pdev->mo_data_filter;
4420 
4421 	htt_h2t_rx_ring_cfg(soc->htt_handle, pdev_id,
4422 		pdev->rxdma_mon_status_ring.hal_srng, RXDMA_MONITOR_STATUS,
4423 		RX_BUFFER_SIZE, &htt_tlv_filter);
4424 
4425 	return QDF_STATUS_SUCCESS;
4426 }
4427 
4428 /**
4429  * dp_get_pdev_id_frm_pdev() - get pdev_id
4430  * @pdev_handle: Datapath PDEV handle
4431  *
4432  * Return: pdev_id
4433  */
4434 static
4435 uint8_t dp_get_pdev_id_frm_pdev(struct cdp_pdev *pdev_handle)
4436 {
4437 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
4438 
4439 	return pdev->pdev_id;
4440 }
4441 
4442 /**
4443  * dp_vdev_get_filter_ucast_data() - get DP VDEV monitor ucast filter
4444  * @vdev_handle: Datapath VDEV handle
4445  * Return: true on ucast filter flag set
4446  */
4447 static bool dp_vdev_get_filter_ucast_data(struct cdp_vdev *vdev_handle)
4448 {
4449 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
4450 	struct dp_pdev *pdev;
4451 
4452 	pdev = vdev->pdev;
4453 
4454 	if ((pdev->fp_data_filter & FILTER_DATA_UCAST) ||
4455 	    (pdev->mo_data_filter & FILTER_DATA_UCAST))
4456 		return true;
4457 
4458 	return false;
4459 }
4460 
4461 /**
4462  * dp_vdev_get_filter_mcast_data() - get DP VDEV monitor mcast filter
4463  * @vdev_handle: Datapath VDEV handle
4464  * Return: true on mcast filter flag set
4465  */
4466 static bool dp_vdev_get_filter_mcast_data(struct cdp_vdev *vdev_handle)
4467 {
4468 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
4469 	struct dp_pdev *pdev;
4470 
4471 	pdev = vdev->pdev;
4472 
4473 	if ((pdev->fp_data_filter & FILTER_DATA_MCAST) ||
4474 	    (pdev->mo_data_filter & FILTER_DATA_MCAST))
4475 		return true;
4476 
4477 	return false;
4478 }
4479 
4480 /**
4481  * dp_vdev_get_filter_non_data() - get DP VDEV monitor non_data filter
4482  * @vdev_handle: Datapath VDEV handle
4483  * Return: true on non data filter flag set
4484  */
4485 static bool dp_vdev_get_filter_non_data(struct cdp_vdev *vdev_handle)
4486 {
4487 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
4488 	struct dp_pdev *pdev;
4489 
4490 	pdev = vdev->pdev;
4491 
4492 	if ((pdev->fp_mgmt_filter & FILTER_MGMT_ALL) ||
4493 	    (pdev->mo_mgmt_filter & FILTER_MGMT_ALL)) {
4494 		if ((pdev->fp_ctrl_filter & FILTER_CTRL_ALL) ||
4495 		    (pdev->mo_ctrl_filter & FILTER_CTRL_ALL)) {
4496 			return true;
4497 		}
4498 	}
4499 
4500 	return false;
4501 }
4502 
4503 #ifdef MESH_MODE_SUPPORT
4504 void dp_peer_set_mesh_mode(struct cdp_vdev *vdev_hdl, uint32_t val)
4505 {
4506 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_hdl;
4507 
4508 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
4509 		FL("val %d"), val);
4510 	vdev->mesh_vdev = val;
4511 }
4512 
4513 /*
4514  * dp_peer_set_mesh_rx_filter() - to set the mesh rx filter
4515  * @vdev_hdl: virtual device object
4516  * @val: value to be set
4517  *
4518  * Return: void
4519  */
4520 void dp_peer_set_mesh_rx_filter(struct cdp_vdev *vdev_hdl, uint32_t val)
4521 {
4522 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_hdl;
4523 
4524 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
4525 		FL("val %d"), val);
4526 	vdev->mesh_rx_filter = val;
4527 }
4528 #endif
4529 
4530 /*
4531  * dp_aggregate_pdev_ctrl_frames_stats() - function to aggregate peer stats;
4532  * current scope is BAR received count
4533  *
4534  * @pdev_handle: DP_PDEV handle
4535  *
4536  * Return: void
4537  */
4538 #define STATS_PROC_TIMEOUT        (HZ/1000)
4539 
4540 static void
4541 dp_aggregate_pdev_ctrl_frames_stats(struct dp_pdev *pdev)
4542 {
4543 	struct dp_vdev *vdev;
4544 	struct dp_peer *peer;
4545 	uint32_t waitcnt;
4546 
4547 	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
4548 		TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
4549 			if (!peer) {
4550 				QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
4551 					FL("DP Invalid Peer reference"));
4552 				return;
4553 			}
4554 
4555 			if (peer->delete_in_progress) {
4556 				QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
4557 					FL("DP Peer deletion in progress"));
4558 				continue;
4559 			}
4560 
4561 			qdf_atomic_inc(&peer->ref_cnt);
4562 			waitcnt = 0;
4563 			dp_peer_rxtid_stats(peer, dp_rx_bar_stats_cb, pdev);
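			/*
			 * dp_rx_bar_stats_cb() sets stats_cmd_complete when
			 * the REO queue stats command finishes; poll for it
			 * here, giving up after 10 timeouts.
			 */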
4564 			while (!(qdf_atomic_read(&(pdev->stats_cmd_complete)))
4565 				&& waitcnt < 10) {
4566 				schedule_timeout_interruptible(
4567 						STATS_PROC_TIMEOUT);
4568 				waitcnt++;
4569 			}
4570 			qdf_atomic_set(&(pdev->stats_cmd_complete), 0);
4571 			dp_peer_unref_delete(peer);
4572 		}
4573 	}
4574 }
4575 
4576 /**
4577  * dp_rx_bar_stats_cb(): BAR received stats callback
4578  * @soc: SOC handle
4579  * @cb_ctxt: Call back context
4580  * @reo_status: Reo status
4581  *
4582  * return: void
4583  */
4584 void dp_rx_bar_stats_cb(struct dp_soc *soc, void *cb_ctxt,
4585 	union hal_reo_status *reo_status)
4586 {
4587 	struct dp_pdev *pdev = (struct dp_pdev *)cb_ctxt;
4588 	struct hal_reo_queue_status *queue_status = &(reo_status->queue_status);
4589 
4590 	if (queue_status->header.status != HAL_REO_CMD_SUCCESS) {
4591 		DP_TRACE_STATS(FATAL, "REO stats failure %d \n",
4592 			queue_status->header.status);
4593 		qdf_atomic_set(&(pdev->stats_cmd_complete), 1);
4594 		return;
4595 	}
4596 
4597 	pdev->stats.rx.bar_recv_cnt += queue_status->bar_rcvd_cnt;
4598 	qdf_atomic_set(&(pdev->stats_cmd_complete), 1);
4599 
4600 }
4601 
4602 /**
4603  * dp_aggregate_vdev_stats(): Consolidate stats at VDEV level
4604  * @vdev: DP VDEV handle
4605  *
4606  * return: void
4607  */
4608 void dp_aggregate_vdev_stats(struct dp_vdev *vdev)
4609 {
4610 	struct dp_peer *peer = NULL;
4611 	struct dp_soc *soc = vdev->pdev->soc;
4612 
4613 	qdf_mem_set(&(vdev->stats.tx), sizeof(vdev->stats.tx), 0x0);
4614 	qdf_mem_set(&(vdev->stats.rx), sizeof(vdev->stats.rx), 0x0);
4615 
4616 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem)
4617 		DP_UPDATE_STATS(vdev, peer);
4618 
4619 	if (soc->cdp_soc.ol_ops->update_dp_stats)
4620 		soc->cdp_soc.ol_ops->update_dp_stats(vdev->pdev->osif_pdev,
4621 			&vdev->stats, (uint16_t) vdev->vdev_id,
4622 			UPDATE_VDEV_STATS);
4623 
4624 }
4625 
4626 /**
4627  * dp_aggregate_pdev_stats(): Consolidate stats at PDEV level
4628  * @pdev: DP PDEV handle
4629  *
4630  * return: void
4631  */
4632 static inline void dp_aggregate_pdev_stats(struct dp_pdev *pdev)
4633 {
4634 	struct dp_vdev *vdev = NULL;
4635 	struct dp_soc *soc = pdev->soc;
4636 
4637 	qdf_mem_set(&(pdev->stats.tx), sizeof(pdev->stats.tx), 0x0);
4638 	qdf_mem_set(&(pdev->stats.rx), sizeof(pdev->stats.rx), 0x0);
4639 	qdf_mem_set(&(pdev->stats.tx_i), sizeof(pdev->stats.tx_i), 0x0);
4640 
4641 	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
4642 
4643 		dp_aggregate_vdev_stats(vdev);
4644 		DP_UPDATE_STATS(pdev, vdev);
4645 
4646 		DP_STATS_AGGR_PKT(pdev, vdev, tx_i.nawds_mcast);
4647 
4648 		DP_STATS_AGGR_PKT(pdev, vdev, tx_i.rcvd);
4649 		DP_STATS_AGGR_PKT(pdev, vdev, tx_i.processed);
4650 		DP_STATS_AGGR_PKT(pdev, vdev, tx_i.reinject_pkts);
4651 		DP_STATS_AGGR_PKT(pdev, vdev, tx_i.inspect_pkts);
4652 		DP_STATS_AGGR_PKT(pdev, vdev, tx_i.raw.raw_pkt);
4653 		DP_STATS_AGGR(pdev, vdev, tx_i.raw.dma_map_error);
4654 		DP_STATS_AGGR_PKT(pdev, vdev, tx_i.tso.tso_pkt);
4655 		DP_STATS_AGGR(pdev, vdev, tx_i.tso.dropped_host);
4656 		DP_STATS_AGGR(pdev, vdev, tx_i.tso.dropped_target);
4657 		DP_STATS_AGGR(pdev, vdev, tx_i.sg.dropped_host);
4658 		DP_STATS_AGGR(pdev, vdev, tx_i.sg.dropped_target);
4659 		DP_STATS_AGGR_PKT(pdev, vdev, tx_i.sg.sg_pkt);
4660 		DP_STATS_AGGR_PKT(pdev, vdev, tx_i.mcast_en.mcast_pkt);
4661 		DP_STATS_AGGR(pdev, vdev,
4662 				tx_i.mcast_en.dropped_map_error);
4663 		DP_STATS_AGGR(pdev, vdev,
4664 				tx_i.mcast_en.dropped_self_mac);
4665 		DP_STATS_AGGR(pdev, vdev,
4666 				tx_i.mcast_en.dropped_send_fail);
4667 		DP_STATS_AGGR(pdev, vdev, tx_i.mcast_en.ucast);
4668 		DP_STATS_AGGR(pdev, vdev, tx_i.dropped.dma_error);
4669 		DP_STATS_AGGR(pdev, vdev, tx_i.dropped.ring_full);
4670 		DP_STATS_AGGR(pdev, vdev, tx_i.dropped.enqueue_fail);
4671 		DP_STATS_AGGR(pdev, vdev, tx_i.dropped.desc_na);
4672 		DP_STATS_AGGR(pdev, vdev, tx_i.dropped.res_full);
4673 		DP_STATS_AGGR(pdev, vdev, tx_i.cce_classified);
4674 		DP_STATS_AGGR(pdev, vdev, tx_i.cce_classified_raw);
4675 		DP_STATS_AGGR(pdev, vdev, tx_i.mesh.exception_fw);
4676 		DP_STATS_AGGR(pdev, vdev, tx_i.mesh.completion_fw);
4677 
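		/* Total ingress drop count is the sum of the individual
		 * drop reasons aggregated above.
		 */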
4678 		pdev->stats.tx_i.dropped.dropped_pkt.num =
4679 			pdev->stats.tx_i.dropped.dma_error +
4680 			pdev->stats.tx_i.dropped.ring_full +
4681 			pdev->stats.tx_i.dropped.enqueue_fail +
4682 			pdev->stats.tx_i.dropped.desc_na +
4683 			pdev->stats.tx_i.dropped.res_full;
4684 
4685 		pdev->stats.tx.last_ack_rssi =
4686 			vdev->stats.tx.last_ack_rssi;
4687 		pdev->stats.tx_i.tso.num_seg =
4688 			vdev->stats.tx_i.tso.num_seg;
4689 	}
4690 	if (soc->cdp_soc.ol_ops->update_dp_stats)
4691 		soc->cdp_soc.ol_ops->update_dp_stats(pdev->osif_pdev,
4692 				&pdev->stats, pdev->pdev_id, UPDATE_PDEV_STATS);
4693 
4694 }
4695 
4696 /**
4697  * dp_print_pdev_tx_stats(): Print Pdev level TX stats
4698  * @pdev: DP_PDEV Handle
4699  *
4700  * Return:void
4701  */
4702 static inline void
4703 dp_print_pdev_tx_stats(struct dp_pdev *pdev)
4704 {
4705 	uint8_t index = 0;
4706 	DP_PRINT_STATS("PDEV Tx Stats:\n");
4707 	DP_PRINT_STATS("Received From Stack:");
4708 	DP_PRINT_STATS("	Packets = %d",
4709 			pdev->stats.tx_i.rcvd.num);
4710 	DP_PRINT_STATS("	Bytes = %llu",
4711 			pdev->stats.tx_i.rcvd.bytes);
4712 	DP_PRINT_STATS("Processed:");
4713 	DP_PRINT_STATS("	Packets = %d",
4714 			pdev->stats.tx_i.processed.num);
4715 	DP_PRINT_STATS("	Bytes = %llu",
4716 			pdev->stats.tx_i.processed.bytes);
4717 	DP_PRINT_STATS("Total Completions:");
4718 	DP_PRINT_STATS("	Packets = %u",
4719 			pdev->stats.tx.comp_pkt.num);
4720 	DP_PRINT_STATS("	Bytes = %llu",
4721 			pdev->stats.tx.comp_pkt.bytes);
4722 	DP_PRINT_STATS("Successful Completions:");
4723 	DP_PRINT_STATS("	Packets = %u",
4724 			pdev->stats.tx.tx_success.num);
4725 	DP_PRINT_STATS("	Bytes = %llu",
4726 			pdev->stats.tx.tx_success.bytes);
4727 	DP_PRINT_STATS("Dropped:");
4728 	DP_PRINT_STATS("	Total = %d",
4729 			pdev->stats.tx_i.dropped.dropped_pkt.num);
4730 	DP_PRINT_STATS("	Dma_map_error = %d",
4731 			pdev->stats.tx_i.dropped.dma_error);
4732 	DP_PRINT_STATS("	Ring Full = %d",
4733 			pdev->stats.tx_i.dropped.ring_full);
4734 	DP_PRINT_STATS("	Descriptor Not available = %d",
4735 			pdev->stats.tx_i.dropped.desc_na);
4736 	DP_PRINT_STATS("	HW enqueue failed= %d",
4737 			pdev->stats.tx_i.dropped.enqueue_fail);
4738 	DP_PRINT_STATS("	Resources Full = %d",
4739 			pdev->stats.tx_i.dropped.res_full);
4740 	DP_PRINT_STATS("	FW removed = %d",
4741 			pdev->stats.tx.dropped.fw_rem);
4742 	DP_PRINT_STATS("	FW removed transmitted = %d",
4743 			pdev->stats.tx.dropped.fw_rem_tx);
4744 	DP_PRINT_STATS("	FW removed untransmitted = %d",
4745 			pdev->stats.tx.dropped.fw_rem_notx);
4746 	DP_PRINT_STATS("	FW removed untransmitted fw_reason1 = %d",
4747 			pdev->stats.tx.dropped.fw_reason1);
4748 	DP_PRINT_STATS("	FW removed untransmitted fw_reason2 = %d",
4749 			pdev->stats.tx.dropped.fw_reason2);
4750 	DP_PRINT_STATS("	FW removed untransmitted fw_reason3 = %d",
4751 			pdev->stats.tx.dropped.fw_reason3);
4752 	DP_PRINT_STATS("	Aged Out from msdu/mpdu queues = %d",
4753 			pdev->stats.tx.dropped.age_out);
4754 	DP_PRINT_STATS("Scatter Gather:");
4755 	DP_PRINT_STATS("	Packets = %d",
4756 			pdev->stats.tx_i.sg.sg_pkt.num);
4757 	DP_PRINT_STATS("	Bytes = %llu",
4758 			pdev->stats.tx_i.sg.sg_pkt.bytes);
4759 	DP_PRINT_STATS("	Dropped By Host = %d",
4760 			pdev->stats.tx_i.sg.dropped_host);
4761 	DP_PRINT_STATS("	Dropped By Target = %d",
4762 			pdev->stats.tx_i.sg.dropped_target);
4763 	DP_PRINT_STATS("TSO:");
4764 	DP_PRINT_STATS("	Number of Segments = %d",
4765 			pdev->stats.tx_i.tso.num_seg);
4766 	DP_PRINT_STATS("	Packets = %d",
4767 			pdev->stats.tx_i.tso.tso_pkt.num);
4768 	DP_PRINT_STATS("	Bytes = %llu",
4769 			pdev->stats.tx_i.tso.tso_pkt.bytes);
4770 	DP_PRINT_STATS("	Dropped By Host = %d",
4771 			pdev->stats.tx_i.tso.dropped_host);
4772 	DP_PRINT_STATS("Mcast Enhancement:");
4773 	DP_PRINT_STATS("	Packets = %d",
4774 			pdev->stats.tx_i.mcast_en.mcast_pkt.num);
4775 	DP_PRINT_STATS("	Bytes = %llu",
4776 			pdev->stats.tx_i.mcast_en.mcast_pkt.bytes);
4777 	DP_PRINT_STATS("	Dropped: Map Errors = %d",
4778 			pdev->stats.tx_i.mcast_en.dropped_map_error);
4779 	DP_PRINT_STATS("	Dropped: Self Mac = %d",
4780 			pdev->stats.tx_i.mcast_en.dropped_self_mac);
4781 	DP_PRINT_STATS("	Dropped: Send Fail = %d",
4782 			pdev->stats.tx_i.mcast_en.dropped_send_fail);
4783 	DP_PRINT_STATS("	Unicast sent = %d",
4784 			pdev->stats.tx_i.mcast_en.ucast);
4785 	DP_PRINT_STATS("Raw:");
4786 	DP_PRINT_STATS("	Packets = %d",
4787 			pdev->stats.tx_i.raw.raw_pkt.num);
4788 	DP_PRINT_STATS("	Bytes = %llu",
4789 			pdev->stats.tx_i.raw.raw_pkt.bytes);
4790 	DP_PRINT_STATS("	DMA map error = %d",
4791 			pdev->stats.tx_i.raw.dma_map_error);
4792 	DP_PRINT_STATS("Reinjected:");
4793 	DP_PRINT_STATS("	Packets = %d",
4794 			pdev->stats.tx_i.reinject_pkts.num);
4795 	DP_PRINT_STATS("	Bytes = %llu\n",
4796 			pdev->stats.tx_i.reinject_pkts.bytes);
4797 	DP_PRINT_STATS("Inspected:");
4798 	DP_PRINT_STATS("	Packets = %d",
4799 			pdev->stats.tx_i.inspect_pkts.num);
4800 	DP_PRINT_STATS("	Bytes = %llu",
4801 			pdev->stats.tx_i.inspect_pkts.bytes);
4802 	DP_PRINT_STATS("Nawds Multicast:");
4803 	DP_PRINT_STATS("	Packets = %d",
4804 			pdev->stats.tx_i.nawds_mcast.num);
4805 	DP_PRINT_STATS("	Bytes = %llu",
4806 			pdev->stats.tx_i.nawds_mcast.bytes);
4807 	DP_PRINT_STATS("CCE Classified:");
4808 	DP_PRINT_STATS("	CCE Classified Packets: %u",
4809 			pdev->stats.tx_i.cce_classified);
4810 	DP_PRINT_STATS("	RAW CCE Classified Packets: %u",
4811 			pdev->stats.tx_i.cce_classified_raw);
4812 	DP_PRINT_STATS("Mesh stats:");
4813 	DP_PRINT_STATS("	frames to firmware: %u",
4814 			pdev->stats.tx_i.mesh.exception_fw);
4815 	DP_PRINT_STATS("	completions from fw: %u",
4816 			pdev->stats.tx_i.mesh.completion_fw);
4817 	DP_PRINT_STATS("PPDU stats counter");
4818 	for (index = 0; index < CDP_PPDU_STATS_MAX_TAG; index++) {
4819 		DP_PRINT_STATS("	Tag[%d] = %llu", index,
4820 				pdev->stats.ppdu_stats_counter[index]);
4821 	}
4822 }
4823 
4824 /**
4825  * dp_print_pdev_rx_stats(): Print Pdev level RX stats
4826  * @pdev: DP_PDEV Handle
4827  *
4828  * Return: void
4829  */
4830 static inline void
4831 dp_print_pdev_rx_stats(struct dp_pdev *pdev)
4832 {
4833 	DP_PRINT_STATS("PDEV Rx Stats:\n");
4834 	DP_PRINT_STATS("Received From HW (Per Rx Ring):");
4835 	DP_PRINT_STATS("	Packets = %d %d %d %d",
4836 			pdev->stats.rx.rcvd_reo[0].num,
4837 			pdev->stats.rx.rcvd_reo[1].num,
4838 			pdev->stats.rx.rcvd_reo[2].num,
4839 			pdev->stats.rx.rcvd_reo[3].num);
4840 	DP_PRINT_STATS("	Bytes = %llu %llu %llu %llu",
4841 			pdev->stats.rx.rcvd_reo[0].bytes,
4842 			pdev->stats.rx.rcvd_reo[1].bytes,
4843 			pdev->stats.rx.rcvd_reo[2].bytes,
4844 			pdev->stats.rx.rcvd_reo[3].bytes);
4845 	DP_PRINT_STATS("Replenished:");
4846 	DP_PRINT_STATS("	Packets = %d",
4847 			pdev->stats.replenish.pkts.num);
4848 	DP_PRINT_STATS("	Bytes = %llu",
4849 			pdev->stats.replenish.pkts.bytes);
4850 	DP_PRINT_STATS("	Buffers Added To Freelist = %d",
4851 			pdev->stats.buf_freelist);
4852 	DP_PRINT_STATS("	Low threshold intr = %d",
4853 			pdev->stats.replenish.low_thresh_intrs);
4854 	DP_PRINT_STATS("Dropped:");
4855 	DP_PRINT_STATS("	msdu_not_done = %d",
4856 			pdev->stats.dropped.msdu_not_done);
4857 	DP_PRINT_STATS("	mon_rx_drop = %d",
4858 			pdev->stats.dropped.mon_rx_drop);
4859 	DP_PRINT_STATS("Sent To Stack:");
4860 	DP_PRINT_STATS("	Packets = %d",
4861 			pdev->stats.rx.to_stack.num);
4862 	DP_PRINT_STATS("	Bytes = %llu",
4863 			pdev->stats.rx.to_stack.bytes);
4864 	DP_PRINT_STATS("Multicast/Broadcast:");
4865 	DP_PRINT_STATS("	Packets = %d",
4866 			pdev->stats.rx.multicast.num);
4867 	DP_PRINT_STATS("	Bytes = %llu",
4868 			pdev->stats.rx.multicast.bytes);
4869 	DP_PRINT_STATS("Errors:");
4870 	DP_PRINT_STATS("	Rxdma Ring Un-initialized = %d",
4871 			pdev->stats.replenish.rxdma_err);
4872 	DP_PRINT_STATS("	Desc Alloc Failed: = %d",
4873 			pdev->stats.err.desc_alloc_fail);
4874 
4875 	/* Get bar_recv_cnt */
4876 	dp_aggregate_pdev_ctrl_frames_stats(pdev);
4877 	DP_PRINT_STATS("BAR Received Count: = %d",
4878 			pdev->stats.rx.bar_recv_cnt);
4879 
4880 }
4881 
4882 /**
4883  * dp_print_soc_tx_stats(): Print SOC level stats
4884  * @soc: DP_SOC Handle
4885  *
4886  * Return: void
4887  */
4888 static inline void
4889 dp_print_soc_tx_stats(struct dp_soc *soc)
4890 {
4891 	DP_PRINT_STATS("SOC Tx Stats:\n");
4892 	DP_PRINT_STATS("Tx Descriptors In Use = %d",
4893 			soc->stats.tx.desc_in_use);
4894 	DP_PRINT_STATS("Invalid peer:");
4895 	DP_PRINT_STATS("	Packets = %d",
4896 			soc->stats.tx.tx_invalid_peer.num);
4897 	DP_PRINT_STATS("	Bytes = %llu",
4898 			soc->stats.tx.tx_invalid_peer.bytes);
4899 	DP_PRINT_STATS("Packets dropped due to TCL ring full = %d %d %d",
4900 			soc->stats.tx.tcl_ring_full[0],
4901 			soc->stats.tx.tcl_ring_full[1],
4902 			soc->stats.tx.tcl_ring_full[2]);
4903 
4904 }
4905 
4906 
4907 /**
4908  * dp_print_soc_rx_stats(): Print SOC level Rx stats
4909  * @soc: DP_SOC Handle
4910  *
4911  * Return:void
4912  */
4913 static inline void
4914 dp_print_soc_rx_stats(struct dp_soc *soc)
4915 {
4916 	uint32_t i;
4917 	char reo_error[DP_REO_ERR_LENGTH];
4918 	char rxdma_error[DP_RXDMA_ERR_LENGTH];
4919 	uint8_t index = 0;
4920 
4921 	DP_PRINT_STATS("SOC Rx Stats:\n");
4922 	DP_PRINT_STATS("Errors:\n");
4923 	DP_PRINT_STATS("Rx Decrypt Errors = %d",
4924 			(soc->stats.rx.err.rxdma_error[HAL_RXDMA_ERR_DECRYPT] +
4925 			soc->stats.rx.err.rxdma_error[HAL_RXDMA_ERR_TKIP_MIC]));
4926 	DP_PRINT_STATS("Invalid RBM = %d",
4927 			soc->stats.rx.err.invalid_rbm);
4928 	DP_PRINT_STATS("Invalid Vdev = %d",
4929 			soc->stats.rx.err.invalid_vdev);
4930 	DP_PRINT_STATS("Invalid Pdev = %d",
4931 			soc->stats.rx.err.invalid_pdev);
4932 	DP_PRINT_STATS("Invalid Peer = %d",
4933 			soc->stats.rx.err.rx_invalid_peer.num);
4934 	DP_PRINT_STATS("HAL Ring Access Fail = %d",
4935 			soc->stats.rx.err.hal_ring_access_fail);
4936 
4937 	for (i = 0; i < HAL_RXDMA_ERR_MAX; i++) {
4938 		index += qdf_snprint(&rxdma_error[index],
4939 				DP_RXDMA_ERR_LENGTH - index,
4940 				" %d", soc->stats.rx.err.rxdma_error[i]);
4941 	}
4942 	DP_PRINT_STATS("RXDMA Error (0-31):%s",
4943 			rxdma_error);
4944 
4945 	index = 0;
4946 	for (i = 0; i < HAL_REO_ERR_MAX; i++) {
4947 		index += qdf_snprint(&reo_error[index],
4948 				DP_REO_ERR_LENGTH - index,
4949 				" %d", soc->stats.rx.err.reo_error[i]);
4950 	}
4951 	DP_PRINT_STATS("REO Error(0-14):%s",
4952 			reo_error);
4953 }
4954 
4955 
4956 /**
4957  * dp_print_ring_stat_from_hal(): Print hal level ring stats
4958  * @soc: DP_SOC handle
4959  * @srng: DP_SRNG handle
4960  * @ring_name: SRNG name
4961  *
4962  * Return: void
4963  */
4964 static inline void
4965 dp_print_ring_stat_from_hal(struct dp_soc *soc,  struct dp_srng *srng,
4966 	char *ring_name)
4967 {
4968 	uint32_t tailp;
4969 	uint32_t headp;
4970 
4971 	if (srng->hal_srng != NULL) {
4972 		hal_api_get_tphp(soc->hal_soc, srng->hal_srng, &tailp, &headp);
4973 		DP_PRINT_STATS("%s : Head pointer = %d  Tail Pointer = %d\n",
4974 				ring_name, headp, tailp);
4975 	}
4976 }
4977 
4978 /**
4979  * dp_print_ring_stats(): Print tail and head pointer
4980  * @pdev: DP_PDEV handle
4981  *
4982  * Return:void
4983  */
4984 static inline void
4985 dp_print_ring_stats(struct dp_pdev *pdev)
4986 {
4987 	uint32_t i;
4988 	char ring_name[STR_MAXLEN + 1];
4989 
4990 	dp_print_ring_stat_from_hal(pdev->soc,
4991 			&pdev->soc->reo_exception_ring,
4992 			"Reo Exception Ring");
4993 	dp_print_ring_stat_from_hal(pdev->soc,
4994 			&pdev->soc->reo_reinject_ring,
4995 			"Reo Inject Ring");
4996 	dp_print_ring_stat_from_hal(pdev->soc,
4997 			&pdev->soc->reo_cmd_ring,
4998 			"Reo Command Ring");
4999 	dp_print_ring_stat_from_hal(pdev->soc,
5000 			&pdev->soc->reo_status_ring,
5001 			"Reo Status Ring");
5002 	dp_print_ring_stat_from_hal(pdev->soc,
5003 			&pdev->soc->rx_rel_ring,
5004 			"Rx Release ring");
5005 	dp_print_ring_stat_from_hal(pdev->soc,
5006 			&pdev->soc->tcl_cmd_ring,
5007 			"Tcl command Ring");
5008 	dp_print_ring_stat_from_hal(pdev->soc,
5009 			&pdev->soc->tcl_status_ring,
5010 			"Tcl Status Ring");
5011 	dp_print_ring_stat_from_hal(pdev->soc,
5012 			&pdev->soc->wbm_desc_rel_ring,
5013 			"Wbm Desc Rel Ring");
5014 	for (i = 0; i < MAX_REO_DEST_RINGS; i++) {
5015 		snprintf(ring_name, STR_MAXLEN, "Reo Dest Ring %d", i);
5016 		dp_print_ring_stat_from_hal(pdev->soc,
5017 				&pdev->soc->reo_dest_ring[i],
5018 				ring_name);
5019 	}
5020 	for (i = 0; i < pdev->soc->num_tcl_data_rings; i++) {
5021 		snprintf(ring_name, STR_MAXLEN, "Tcl Data Ring %d", i);
5022 		dp_print_ring_stat_from_hal(pdev->soc,
5023 				&pdev->soc->tcl_data_ring[i],
5024 				ring_name);
5025 	}
5026 	for (i = 0; i < MAX_TCL_DATA_RINGS; i++) {
5027 		snprintf(ring_name, STR_MAXLEN, "Tx Comp Ring %d", i);
5028 		dp_print_ring_stat_from_hal(pdev->soc,
5029 				&pdev->soc->tx_comp_ring[i],
5030 				ring_name);
5031 	}
5032 	dp_print_ring_stat_from_hal(pdev->soc,
5033 			&pdev->rx_refill_buf_ring,
5034 			"Rx Refill Buf Ring");
5035 
5036 	dp_print_ring_stat_from_hal(pdev->soc,
5037 			&pdev->rx_refill_buf_ring2,
5038 			"Second Rx Refill Buf Ring");
5039 
5040 	dp_print_ring_stat_from_hal(pdev->soc,
5041 			&pdev->rxdma_mon_buf_ring,
5042 			"Rxdma Mon Buf Ring");
5043 	dp_print_ring_stat_from_hal(pdev->soc,
5044 			&pdev->rxdma_mon_dst_ring,
5045 			"Rxdma Mon Dst Ring");
5046 	dp_print_ring_stat_from_hal(pdev->soc,
5047 			&pdev->rxdma_mon_status_ring,
5048 			"Rxdma Mon Status Ring");
5049 	dp_print_ring_stat_from_hal(pdev->soc,
5050 			&pdev->rxdma_mon_desc_ring,
5051 			"Rxdma mon desc Ring");
5052 
5053 	for (i = 0; i < MAX_RX_MAC_RINGS; i++) {
5054 		snprintf(ring_name, STR_MAXLEN, "Rxdma err dst ring %d", i);
5055 		dp_print_ring_stat_from_hal(pdev->soc,
5056 			&pdev->rxdma_err_dst_ring[i],
5057 			ring_name);
5058 	}
5059 
5060 	for (i = 0; i < MAX_RX_MAC_RINGS; i++) {
5061 		snprintf(ring_name, STR_MAXLEN, "Rx mac buf ring %d", i);
5062 		dp_print_ring_stat_from_hal(pdev->soc,
5063 				&pdev->rx_mac_buf_ring[i],
5064 				ring_name);
5065 	}
5066 }
5067 
5068 /**
5069  * dp_txrx_host_stats_clr(): Reinitialize the txrx stats
5070  * @vdev: DP_VDEV handle
5071  *
5072  * Return:void
5073  */
5074 static inline void
5075 dp_txrx_host_stats_clr(struct dp_vdev *vdev)
5076 {
5077 	struct dp_peer *peer = NULL;
5078 	struct dp_soc *soc = (struct dp_soc *)vdev->pdev->soc;
5079 
5080 	DP_STATS_CLR(vdev->pdev);
5081 	DP_STATS_CLR(vdev->pdev->soc);
5082 	DP_STATS_CLR(vdev);
5083 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
5084 		if (!peer)
5085 			return;
5086 		DP_STATS_CLR(peer);
5087 
5088 		if (soc->cdp_soc.ol_ops->update_dp_stats) {
5089 			soc->cdp_soc.ol_ops->update_dp_stats(
5090 					vdev->pdev->osif_pdev,
5091 					&peer->stats,
5092 					peer->peer_ids[0],
5093 					UPDATE_PEER_STATS);
5094 		}
5095 
5096 	}
5097 
5098 	if (soc->cdp_soc.ol_ops->update_dp_stats)
5099 		soc->cdp_soc.ol_ops->update_dp_stats(vdev->pdev->osif_pdev,
5100 				&vdev->stats, (uint16_t)vdev->vdev_id,
5101 				UPDATE_VDEV_STATS);
5102 }
5103 
5104 /**
5105  * dp_print_rx_rates(): Print Rx rate stats
5106  * @vdev: DP_VDEV handle
5107  *
5108  * Return:void
5109  */
5110 static inline void
5111 dp_print_rx_rates(struct dp_vdev *vdev)
5112 {
5113 	struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;
5114 	uint8_t i, mcs, pkt_type;
5115 	uint8_t index = 0;
5116 	char nss[DP_NSS_LENGTH];
5117 
5118 	DP_PRINT_STATS("Rx Rate Info:\n");
5119 
5120 	for (pkt_type = 0; pkt_type < DOT11_MAX; pkt_type++) {
5121 		index = 0;
5122 		for (mcs = 0; mcs < MAX_MCS; mcs++) {
5123 			if (!dp_rate_string[pkt_type][mcs].valid)
5124 				continue;
5125 
5126 			DP_PRINT_STATS("	%s = %d",
5127 					dp_rate_string[pkt_type][mcs].mcs_type,
5128 					pdev->stats.rx.pkt_type[pkt_type].
5129 					mcs_count[mcs]);
5130 		}
5131 
5132 		DP_PRINT_STATS("\n");
5133 	}
5134 
5135 	index = 0;
5136 	for (i = 0; i < SS_COUNT; i++) {
5137 		index += qdf_snprint(&nss[index], DP_NSS_LENGTH - index,
5138 				" %d", pdev->stats.rx.nss[i]);
5139 	}
5140 	DP_PRINT_STATS("NSS(1-8) = %s",
5141 			nss);
5142 
5143 	DP_PRINT_STATS("SGI ="
5144 			" 0.8us %d,"
5145 			" 0.4us %d,"
5146 			" 1.6us %d,"
5147 			" 3.2us %d,",
5148 			pdev->stats.rx.sgi_count[0],
5149 			pdev->stats.rx.sgi_count[1],
5150 			pdev->stats.rx.sgi_count[2],
5151 			pdev->stats.rx.sgi_count[3]);
5152 	DP_PRINT_STATS("BW Counts = 20MHZ %d, 40MHZ %d, 80MHZ %d, 160MHZ %d",
5153 			pdev->stats.rx.bw[0], pdev->stats.rx.bw[1],
5154 			pdev->stats.rx.bw[2], pdev->stats.rx.bw[3]);
5155 	DP_PRINT_STATS("Reception Type ="
5156 			" SU: %d,"
5157 			" MU_MIMO:%d,"
5158 			" MU_OFDMA:%d,"
5159 			" MU_OFDMA_MIMO:%d\n",
5160 			pdev->stats.rx.reception_type[0],
5161 			pdev->stats.rx.reception_type[1],
5162 			pdev->stats.rx.reception_type[2],
5163 			pdev->stats.rx.reception_type[3]);
5164 	DP_PRINT_STATS("Aggregation:\n");
5165 	DP_PRINT_STATS("Number of Msdu's Part of Ampdus = %d",
5166 			pdev->stats.rx.ampdu_cnt);
5167 	DP_PRINT_STATS("Number of Msdu's With No Mpdu Level Aggregation : %d",
5168 			pdev->stats.rx.non_ampdu_cnt);
5169 	DP_PRINT_STATS("Number of Msdu's Part of Amsdu: %d",
5170 			pdev->stats.rx.amsdu_cnt);
5171 	DP_PRINT_STATS("Number of Msdu's With No Msdu Level Aggregation: %d",
5172 			pdev->stats.rx.non_amsdu_cnt);
5173 }
5174 
5175 /**
5176  * dp_print_tx_rates(): Print tx rates
5177  * @vdev: DP_VDEV handle
5178  *
5179  * Return:void
5180  */
5181 static inline void
5182 dp_print_tx_rates(struct dp_vdev *vdev)
5183 {
5184 	struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;
5185 	uint8_t mcs, pkt_type;
5186 	uint32_t index;
5187 
5188 	DP_PRINT_STATS("Tx Rate Info:\n");
5189 
5190 	for (pkt_type = 0; pkt_type < DOT11_MAX; pkt_type++) {
5191 		index = 0;
5192 		for (mcs = 0; mcs < MAX_MCS; mcs++) {
5193 			if (!dp_rate_string[pkt_type][mcs].valid)
5194 				continue;
5195 
5196 			DP_PRINT_STATS("	%s = %d",
5197 					dp_rate_string[pkt_type][mcs].mcs_type,
5198 					pdev->stats.tx.pkt_type[pkt_type].
5199 					mcs_count[mcs]);
5200 		}
5201 
5202 		DP_PRINT_STATS("\n");
5203 	}
5204 
5205 	DP_PRINT_STATS("SGI ="
5206 			" 0.8us %d"
5207 			" 0.4us %d"
5208 			" 1.6us %d"
5209 			" 3.2us %d",
5210 			pdev->stats.tx.sgi_count[0],
5211 			pdev->stats.tx.sgi_count[1],
5212 			pdev->stats.tx.sgi_count[2],
5213 			pdev->stats.tx.sgi_count[3]);
5214 
5215 	DP_PRINT_STATS("BW Counts = 20MHZ %d, 40MHZ %d, 80MHZ %d, 160MHZ %d",
5216 			pdev->stats.tx.bw[0], pdev->stats.tx.bw[1],
5217 			pdev->stats.tx.bw[2], pdev->stats.tx.bw[3]);
5218 
5219 	DP_PRINT_STATS("OFDMA = %d", pdev->stats.tx.ofdma);
5220 	DP_PRINT_STATS("STBC = %d", pdev->stats.tx.stbc);
5221 	DP_PRINT_STATS("LDPC = %d", pdev->stats.tx.ldpc);
5222 	DP_PRINT_STATS("Retries = %d", pdev->stats.tx.retries);
5223 	DP_PRINT_STATS("Last ack rssi = %d\n", pdev->stats.tx.last_ack_rssi);
5224 
5225 	DP_PRINT_STATS("Aggregation:\n");
5226 	DP_PRINT_STATS("Number of Msdu's Part of Amsdu = %d",
5227 			pdev->stats.tx.amsdu_cnt);
5228 	DP_PRINT_STATS("Number of Msdu's With No Msdu Level Aggregation = %d",
5229 			pdev->stats.tx.non_amsdu_cnt);
5230 }
5231 
5232 /**
5233  * dp_print_peer_stats():print peer stats
5234  * @peer: DP_PEER handle
5235  *
5236  * return void
5237  */
5238 static inline void dp_print_peer_stats(struct dp_peer *peer)
5239 {
5240 	uint8_t i, mcs, pkt_type;
5241 	uint32_t index;
5242 	char nss[DP_NSS_LENGTH];
5243 	DP_PRINT_STATS("Node Tx Stats:\n");
5244 	DP_PRINT_STATS("Total Packet Completions = %d",
5245 			peer->stats.tx.comp_pkt.num);
5246 	DP_PRINT_STATS("Total Bytes Completions = %llu",
5247 			peer->stats.tx.comp_pkt.bytes);
5248 	DP_PRINT_STATS("Success Packets = %d",
5249 			peer->stats.tx.tx_success.num);
5250 	DP_PRINT_STATS("Success Bytes = %llu",
5251 			peer->stats.tx.tx_success.bytes);
5252 	DP_PRINT_STATS("Unicast Success Packets = %d",
5253 			peer->stats.tx.ucast.num);
5254 	DP_PRINT_STATS("Unicast Success Bytes = %llu",
5255 			peer->stats.tx.ucast.bytes);
5256 	DP_PRINT_STATS("Multicast Success Packets = %d",
5257 			peer->stats.tx.mcast.num);
5258 	DP_PRINT_STATS("Multicast Success Bytes = %llu",
5259 			peer->stats.tx.mcast.bytes);
5260 	DP_PRINT_STATS("Broadcast Success Packets = %d",
5261 			peer->stats.tx.bcast.num);
5262 	DP_PRINT_STATS("Broadcast Success Bytes = %llu",
5263 			peer->stats.tx.bcast.bytes);
5264 	DP_PRINT_STATS("Packets Failed = %d",
5265 			peer->stats.tx.tx_failed);
5266 	DP_PRINT_STATS("Packets In OFDMA = %d",
5267 			peer->stats.tx.ofdma);
5268 	DP_PRINT_STATS("Packets In STBC = %d",
5269 			peer->stats.tx.stbc);
5270 	DP_PRINT_STATS("Packets In LDPC = %d",
5271 			peer->stats.tx.ldpc);
5272 	DP_PRINT_STATS("Packet Retries = %d",
5273 			peer->stats.tx.retries);
5274 	DP_PRINT_STATS("MSDU's Part of AMSDU = %d",
5275 			peer->stats.tx.amsdu_cnt);
5276 	DP_PRINT_STATS("Last Packet RSSI = %d",
5277 			peer->stats.tx.last_ack_rssi);
5278 	DP_PRINT_STATS("Dropped At FW: Removed = %d",
5279 			peer->stats.tx.dropped.fw_rem);
5280 	DP_PRINT_STATS("Dropped At FW: Removed transmitted = %d",
5281 			peer->stats.tx.dropped.fw_rem_tx);
5282 	DP_PRINT_STATS("Dropped At FW: Removed Untransmitted = %d",
5283 			peer->stats.tx.dropped.fw_rem_notx);
5284 	DP_PRINT_STATS("Dropped : Age Out = %d",
5285 			peer->stats.tx.dropped.age_out);
5286 	DP_PRINT_STATS("NAWDS : ");
5287 	DP_PRINT_STATS("	Nawds multicast Drop Tx Packet = %d",
5288 			peer->stats.tx.nawds_mcast_drop);
5289 	DP_PRINT_STATS("	Nawds multicast  Tx Packet Count = %d",
5290 			peer->stats.tx.nawds_mcast.num);
5291 	DP_PRINT_STATS("	Nawds multicast  Tx Packet Bytes = %llu",
5292 			peer->stats.tx.nawds_mcast.bytes);
5293 
5294 	DP_PRINT_STATS("Rate Info:");
5295 
5296 	for (pkt_type = 0; pkt_type < DOT11_MAX; pkt_type++) {
5297 		index = 0;
5298 		for (mcs = 0; mcs < MAX_MCS; mcs++) {
5299 			if (!dp_rate_string[pkt_type][mcs].valid)
5300 				continue;
5301 
5302 			DP_PRINT_STATS("	%s = %d",
5303 					dp_rate_string[pkt_type][mcs].mcs_type,
5304 					peer->stats.tx.pkt_type[pkt_type].
5305 					mcs_count[mcs]);
5306 		}
5307 
5308 		DP_PRINT_STATS("\n");
5309 	}
5310 
5311 	DP_PRINT_STATS("SGI = "
5312 			" 0.8us %d"
5313 			" 0.4us %d"
5314 			" 1.6us %d"
5315 			" 3.2us %d",
5316 			peer->stats.tx.sgi_count[0],
5317 			peer->stats.tx.sgi_count[1],
5318 			peer->stats.tx.sgi_count[2],
5319 			peer->stats.tx.sgi_count[3]);
5320 	DP_PRINT_STATS("Excess Retries per AC ");
5321 	DP_PRINT_STATS("	 Best effort = %d",
5322 			peer->stats.tx.excess_retries_per_ac[0]);
5323 	DP_PRINT_STATS("	 Background= %d",
5324 			peer->stats.tx.excess_retries_per_ac[1]);
5325 	DP_PRINT_STATS("	 Video = %d",
5326 			peer->stats.tx.excess_retries_per_ac[2]);
5327 	DP_PRINT_STATS("	 Voice = %d",
5328 			peer->stats.tx.excess_retries_per_ac[3]);
5329 	DP_PRINT_STATS("BW Counts = 20MHZ %d 40MHZ %d 80MHZ %d 160MHZ %d\n",
5330 			peer->stats.tx.bw[0], peer->stats.tx.bw[1],
5331 			peer->stats.tx.bw[2], peer->stats.tx.bw[3]);
5332 
5333 	index = 0;
5334 	for (i = 0; i < SS_COUNT; i++) {
5335 		index += qdf_snprint(&nss[index], DP_NSS_LENGTH - index,
5336 				" %d", peer->stats.tx.nss[i]);
5337 	}
5338 	DP_PRINT_STATS("NSS(1-8) = %s",
5339 			nss);
5340 
5341 	DP_PRINT_STATS("Aggregation:");
5342 	DP_PRINT_STATS("	Number of Msdu's Part of Amsdu = %d",
5343 			peer->stats.tx.amsdu_cnt);
5344 	DP_PRINT_STATS("	Number of Msdu's With No Msdu Level Aggregation = %d\n",
5345 			peer->stats.tx.non_amsdu_cnt);
5346 
5347 	DP_PRINT_STATS("Node Rx Stats:");
5348 	DP_PRINT_STATS("Packets Sent To Stack = %d",
5349 			peer->stats.rx.to_stack.num);
5350 	DP_PRINT_STATS("Bytes Sent To Stack = %llu",
5351 			peer->stats.rx.to_stack.bytes);
5352 	for (i = 0; i <  CDP_MAX_RX_RINGS; i++) {
5353 		DP_PRINT_STATS("Ring Id = %d", i);
5354 		DP_PRINT_STATS("	Packets Received = %d",
5355 				peer->stats.rx.rcvd_reo[i].num);
5356 		DP_PRINT_STATS("	Bytes Received = %llu",
5357 				peer->stats.rx.rcvd_reo[i].bytes);
5358 	}
5359 	DP_PRINT_STATS("Multicast Packets Received = %d",
5360 			peer->stats.rx.multicast.num);
5361 	DP_PRINT_STATS("Multicast Bytes Received = %llu",
5362 			peer->stats.rx.multicast.bytes);
5363 	DP_PRINT_STATS("Broadcast Packets Received = %d",
5364 			peer->stats.rx.bcast.num);
5365 	DP_PRINT_STATS("Broadcast Bytes Received = %llu",
5366 			peer->stats.rx.bcast.bytes);
5367 	DP_PRINT_STATS("Intra BSS Packets Received = %d",
5368 			peer->stats.rx.intra_bss.pkts.num);
5369 	DP_PRINT_STATS("Intra BSS Bytes Received = %llu",
5370 			peer->stats.rx.intra_bss.pkts.bytes);
5371 	DP_PRINT_STATS("Raw Packets Received = %d",
5372 			peer->stats.rx.raw.num);
5373 	DP_PRINT_STATS("Raw Bytes Received = %llu",
5374 			peer->stats.rx.raw.bytes);
5375 	DP_PRINT_STATS("Errors: MIC Errors = %d",
5376 			peer->stats.rx.err.mic_err);
5377 	DP_PRINT_STATS("Erros: Decryption Errors = %d",
5378 			peer->stats.rx.err.decrypt_err);
5379 	DP_PRINT_STATS("Msdu's Received As Part of Ampdu = %d",
5380 			peer->stats.rx.non_ampdu_cnt);
5381 	DP_PRINT_STATS("Msdu's Recived As Ampdu = %d",
5382 			peer->stats.rx.ampdu_cnt);
5383 	DP_PRINT_STATS("Msdu's Received Not Part of Amsdu's = %d",
5384 			peer->stats.rx.non_amsdu_cnt);
5385 	DP_PRINT_STATS("MSDUs Received As Part of Amsdu = %d",
5386 			peer->stats.rx.amsdu_cnt);
5387 	DP_PRINT_STATS("NAWDS : ");
5388 	DP_PRINT_STATS("	Nawds multicast Drop Rx Packet = %d",
5389 			peer->stats.rx.nawds_mcast_drop.num);
5390 	DP_PRINT_STATS("	Nawds multicast Drop Rx Packet Bytes = %llu",
5391 			peer->stats.rx.nawds_mcast_drop.bytes);
5392 	DP_PRINT_STATS("SGI ="
5393 			" 0.8us %d"
5394 			" 0.4us %d"
5395 			" 1.6us %d"
5396 			" 3.2us %d",
5397 			peer->stats.rx.sgi_count[0],
5398 			peer->stats.rx.sgi_count[1],
5399 			peer->stats.rx.sgi_count[2],
5400 			peer->stats.rx.sgi_count[3]);
5401 	DP_PRINT_STATS("BW Counts = 20MHZ %d 40MHZ %d 80MHZ %d 160MHZ %d",
5402 			peer->stats.rx.bw[0], peer->stats.rx.bw[1],
5403 			peer->stats.rx.bw[2], peer->stats.rx.bw[3]);
5404 	DP_PRINT_STATS("Reception Type ="
5405 			" SU %d,"
5406 			" MU_MIMO %d,"
5407 			" MU_OFDMA %d,"
5408 			" MU_OFDMA_MIMO %d",
5409 			peer->stats.rx.reception_type[0],
5410 			peer->stats.rx.reception_type[1],
5411 			peer->stats.rx.reception_type[2],
5412 			peer->stats.rx.reception_type[3]);
5413 
5414 
5415 	for (pkt_type = 0; pkt_type < DOT11_MAX; pkt_type++) {
5416 		index = 0;
5417 		for (mcs = 0; mcs < MAX_MCS; mcs++) {
5418 			if (!dp_rate_string[pkt_type][mcs].valid)
5419 				continue;
5420 
5421 			DP_PRINT_STATS("	%s = %d",
5422 					dp_rate_string[pkt_type][mcs].mcs_type,
5423 					peer->stats.rx.pkt_type[pkt_type].
5424 					mcs_count[mcs]);
5425 		}
5426 
5427 		DP_PRINT_STATS("\n");
5428 	}
5429 
5430 	index = 0;
5431 	for (i = 0; i < SS_COUNT; i++) {
5432 		index += qdf_snprint(&nss[index], DP_NSS_LENGTH - index,
5433 				" %d", peer->stats.rx.nss[i]);
5434 	}
5435 	DP_PRINT_STATS("NSS(1-8) = %s",
5436 			nss);
5437 
5438 	DP_PRINT_STATS("Aggregation:");
5439 	DP_PRINT_STATS("	Msdu's Part of Ampdu = %d",
5440 			peer->stats.rx.ampdu_cnt);
5441 	DP_PRINT_STATS("	Msdu's With No Mpdu Level Aggregation = %d",
5442 			peer->stats.rx.non_ampdu_cnt);
5443 	DP_PRINT_STATS("	Msdu's Part of Amsdu = %d",
5444 			peer->stats.rx.amsdu_cnt);
5445 	DP_PRINT_STATS("	Msdu's With No Msdu Level Aggregation = %d",
5446 			peer->stats.rx.non_amsdu_cnt);
5447 }
5448 
5449 /**
5450  * dp_print_host_stats()- Function to print the stats aggregated at host
5451  * @vdev_handle: DP_VDEV handle
5452  * @type: host stats type
5453  *
5454  * Available Stat types
5455  * TXRX_CLEAR_STATS: Clear the stats
5456  * TXRX_RX_RATE_STATS: Print Rx Rate Info
5457  * TXRX_TX_RATE_STATS: Print Tx Rate Info
5458  * TXRX_TX_HOST_STATS: Print Tx Stats
5459  * TXRX_RX_HOST_STATS: Print Rx Stats
5460  * TXRX_AST_STATS: Print AST Stats
5461  * TXRX_SRNG_PTR_STATS: Print SRNG ring pointer stats
5462  *
5463  * Return: 0 on success; prints an error message on failure
5464  */
5465 static int
5466 dp_print_host_stats(struct cdp_vdev *vdev_handle, enum cdp_host_txrx_stats type)
5467 {
5468 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
5469 	struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;
5470 
5471 	dp_aggregate_pdev_stats(pdev);
5472 
5473 	switch (type) {
5474 	case TXRX_CLEAR_STATS:
5475 		dp_txrx_host_stats_clr(vdev);
5476 		break;
5477 	case TXRX_RX_RATE_STATS:
5478 		dp_print_rx_rates(vdev);
5479 		break;
5480 	case TXRX_TX_RATE_STATS:
5481 		dp_print_tx_rates(vdev);
5482 		break;
5483 	case TXRX_TX_HOST_STATS:
5484 		dp_print_pdev_tx_stats(pdev);
5485 		dp_print_soc_tx_stats(pdev->soc);
5486 		break;
5487 	case TXRX_RX_HOST_STATS:
5488 		dp_print_pdev_rx_stats(pdev);
5489 		dp_print_soc_rx_stats(pdev->soc);
5490 		break;
5491 	case TXRX_AST_STATS:
5492 		dp_print_ast_stats(pdev->soc);
5493 		break;
5494 	case TXRX_SRNG_PTR_STATS:
5495 		 dp_print_ring_stats(pdev);
5496 		 break;
5497 	default:
5498 		DP_TRACE(FATAL, "Wrong Input For TxRx Host Stats");
5499 		break;
5500 	}
5501 	return 0;
5502 }
5503 
5504 /*
5505  * dp_get_host_peer_stats()- function to print peer stats
5506  * @pdev_handle: DP_PDEV handle
5507  * @mac_addr: mac address of the peer
5508  *
5509  * Return: void
5510  */
5511 static void
5512 dp_get_host_peer_stats(struct cdp_pdev *pdev_handle, char *mac_addr)
5513 {
5514 	struct dp_peer *peer;
5515 	uint8_t local_id;
5516 	peer = (struct dp_peer *)dp_find_peer_by_addr(pdev_handle, mac_addr,
5517 			&local_id);
5518 
5519 	if (!peer) {
5520 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
5521 			"%s: Invalid peer\n", __func__);
5522 		return;
5523 	}
5524 
5525 	dp_print_peer_stats(peer);
5526 	dp_peer_rxtid_stats(peer, dp_rx_tid_stats_cb, NULL);
5527 	return;
5528 }
5529 
5530 /*
5531  * dp_ppdu_ring_reset()- Reset PPDU Stats ring
5532  * @pdev: DP_PDEV handle
5533  *
5534  * Return: void
5535  */
5536 static void
5537 dp_ppdu_ring_reset(struct dp_pdev *pdev)
5538 {
5539 	struct htt_rx_ring_tlv_filter htt_tlv_filter;
5540 
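	/*
	 * An all-zero TLV filter stops the RXDMA monitor status ring from
	 * delivering PPDU stats TLVs to the host.
	 */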
5541 	qdf_mem_set(&(htt_tlv_filter), sizeof(htt_tlv_filter), 0x0);
5542 
5543 	htt_h2t_rx_ring_cfg(pdev->soc->htt_handle, pdev->pdev_id,
5544 		pdev->rxdma_mon_status_ring.hal_srng, RXDMA_MONITOR_STATUS,
5545 		RX_BUFFER_SIZE, &htt_tlv_filter);
5546 
5547 }
5548 
5549 /*
5550  * dp_ppdu_ring_cfg()- Configure PPDU Stats ring
5551  * @pdev: DP_PDEV handle
5552  *
5553  * Return: void
5554  */
5555 static void
5556 dp_ppdu_ring_cfg(struct dp_pdev *pdev)
5557 {
5558 	struct htt_rx_ring_tlv_filter htt_tlv_filter = {0};
5559 
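	/*
	 * Subscribe only to packet headers, attention and the PPDU
	 * start/end/user-stats TLVs on the monitor status ring; per-MPDU and
	 * per-MSDU TLVs stay disabled so the ring carries PPDU-level stats
	 * rather than payloads.
	 */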
5560 	htt_tlv_filter.mpdu_start = 0;
5561 	htt_tlv_filter.msdu_start = 0;
5562 	htt_tlv_filter.packet = 0;
5563 	htt_tlv_filter.msdu_end = 0;
5564 	htt_tlv_filter.mpdu_end = 0;
5565 	htt_tlv_filter.packet_header = 1;
5566 	htt_tlv_filter.attention = 1;
5567 	htt_tlv_filter.ppdu_start = 1;
5568 	htt_tlv_filter.ppdu_end = 1;
5569 	htt_tlv_filter.ppdu_end_user_stats = 1;
5570 	htt_tlv_filter.ppdu_end_user_stats_ext = 1;
5571 	htt_tlv_filter.ppdu_end_status_done = 1;
5572 	htt_tlv_filter.enable_fp = 1;
5573 	htt_tlv_filter.enable_md = 0;
5574 	if (pdev->mcopy_mode)
5575 		htt_tlv_filter.enable_mo = 1;
5576 	htt_tlv_filter.fp_mgmt_filter = FILTER_MGMT_ALL;
5577 	htt_tlv_filter.fp_ctrl_filter = FILTER_CTRL_ALL;
5578 	htt_tlv_filter.fp_data_filter = FILTER_DATA_ALL;
5579 	htt_tlv_filter.mo_mgmt_filter = FILTER_MGMT_ALL;
5580 	htt_tlv_filter.mo_ctrl_filter = FILTER_CTRL_ALL;
5581 	htt_tlv_filter.mo_data_filter = FILTER_DATA_ALL;
5582 
5583 	htt_h2t_rx_ring_cfg(pdev->soc->htt_handle, pdev->pdev_id,
5584 		pdev->rxdma_mon_status_ring.hal_srng, RXDMA_MONITOR_STATUS,
5585 		RX_BUFFER_SIZE, &htt_tlv_filter);
5586 }
5587 
5588 /*
5589  * dp_config_debug_sniffer()- API to enable/disable debug sniffer
5590  * @pdev_handle: DP_PDEV handle
5591  * @val: user provided value
5592  *
5593  * Return: void
5594  */
5595 static void
5596 dp_config_debug_sniffer(struct cdp_pdev *pdev_handle, int val)
5597 {
5598 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
5599 
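	/*
	 * val 0: disable both TX sniffer and M-copy mode
	 * val 1: enable TX sniffer
	 * val 2: enable M-copy mode
	 */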
5600 	switch (val) {
5601 	case 0:
5602 		pdev->tx_sniffer_enable = 0;
5603 		pdev->mcopy_mode = 0;
5604 
5605 		if (!pdev->pktlog_ppdu_stats && !pdev->enhanced_stats_en) {
5606 			dp_h2t_cfg_stats_msg_send(pdev, 0, pdev->pdev_id);
5607 			dp_ppdu_ring_reset(pdev);
5608 		} else if (pdev->enhanced_stats_en) {
5609 			dp_h2t_cfg_stats_msg_send(pdev,
5610 				DP_PPDU_STATS_CFG_ENH_STATS, pdev->pdev_id);
5611 		}
5612 		break;
5613 
5614 	case 1:
5615 		pdev->tx_sniffer_enable = 1;
5616 		pdev->mcopy_mode = 0;
5617 
5618 		if (!pdev->pktlog_ppdu_stats)
5619 			dp_h2t_cfg_stats_msg_send(pdev,
5620 				DP_PPDU_STATS_CFG_SNIFFER, pdev->pdev_id);
5621 		break;
5622 	case 2:
5623 		pdev->mcopy_mode = 1;
5624 		pdev->tx_sniffer_enable = 0;
5625 		if (!pdev->enhanced_stats_en)
5626 			dp_ppdu_ring_cfg(pdev);
5627 
5628 		if (!pdev->pktlog_ppdu_stats)
5629 			dp_h2t_cfg_stats_msg_send(pdev,
5630 				DP_PPDU_STATS_CFG_SNIFFER, pdev->pdev_id);
5631 		break;
5632 	default:
5633 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
5634 			"Invalid value\n");
5635 		break;
5636 	}
5637 }
5638 
5639 /*
5640  * dp_enable_enhanced_stats()- API to enable enhanced statistics
5641  * @pdev_handle: DP_PDEV handle
5642  *
5643  * Return: void
5644  */
5645 static void
5646 dp_enable_enhanced_stats(struct cdp_pdev *pdev_handle)
5647 {
5648 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
5649 	pdev->enhanced_stats_en = 1;
5650 
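	/*
	 * Configure the monitor status ring for PPDU stats unless M-copy
	 * mode has already done so, and request enhanced stats from FW only
	 * when pktlog/sniffer/M-copy have not already enabled PPDU stats.
	 */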
5651 	if (!pdev->mcopy_mode)
5652 		dp_ppdu_ring_cfg(pdev);
5653 
5654 	if (!pdev->pktlog_ppdu_stats && !pdev->tx_sniffer_enable && !pdev->mcopy_mode)
5655 		dp_h2t_cfg_stats_msg_send(pdev, DP_PPDU_STATS_CFG_ENH_STATS, pdev->pdev_id);
5656 }
5657 
5658 /*
5659  * dp_disable_enhanced_stats()- API to disable enhanced statistics
5660  * @pdev_handle: DP_PDEV handle
5661  *
5662  * Return: void
5663  */
5664 static void
5665 dp_disable_enhanced_stats(struct cdp_pdev *pdev_handle)
5666 {
5667 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
5668 
5669 	pdev->enhanced_stats_en = 0;
5670 
5671 	if (!pdev->pktlog_ppdu_stats && !pdev->tx_sniffer_enable && !pdev->mcopy_mode)
5672 		dp_h2t_cfg_stats_msg_send(pdev, 0, pdev->pdev_id);
5673 
5674 	if (!pdev->mcopy_mode)
5675 		dp_ppdu_ring_reset(pdev);
5676 }
5677 
5678 /*
5679  * dp_get_fw_peer_stats()- function to request FW peer stats
5680  * @pdev_handle: DP_PDEV handle
5681  * @mac_addr: mac address of the peer
5682  * @cap: Type of htt stats requested
5683  *
5684  * Currently supports only MAC-ID-based requests; @cap selects one of:
5685  *	1: HTT_PEER_STATS_REQ_MODE_NO_QUERY
5686  *	2: HTT_PEER_STATS_REQ_MODE_QUERY_TQM
5687  *	3: HTT_PEER_STATS_REQ_MODE_FLUSH_TQM
5688  *
5689  * Return: void
5690  */
5691 static void
5692 dp_get_fw_peer_stats(struct cdp_pdev *pdev_handle, uint8_t *mac_addr,
5693 		uint32_t cap)
5694 {
5695 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
5696 	int i;
5697 	uint32_t config_param0 = 0;
5698 	uint32_t config_param1 = 0;
5699 	uint32_t config_param2 = 0;
5700 	uint32_t config_param3 = 0;
5701 
5702 	HTT_DBG_EXT_STATS_PEER_INFO_IS_MAC_ADDR_SET(config_param0, 1);
5703 	config_param0 |= (1 << (cap + 1));
5704 
5705 	for (i = 0; i < HTT_PEER_STATS_MAX_TLV; i++) {
5706 		config_param1 |= (1 << i);
5707 	}
5708 
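	/*
	 * Pack the peer MAC address for the FW request: bytes 0-3 go into
	 * config_param2 and bytes 4-5 into config_param3, LSB first.
	 */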
5709 	config_param2 |= (mac_addr[0] & 0x000000ff);
5710 	config_param2 |= ((mac_addr[1] << 8) & 0x0000ff00);
5711 	config_param2 |= ((mac_addr[2] << 16) & 0x00ff0000);
5712 	config_param2 |= ((mac_addr[3] << 24) & 0xff000000);
5713 
5714 	config_param3 |= (mac_addr[4] & 0x000000ff);
5715 	config_param3 |= ((mac_addr[5] << 8) & 0x0000ff00);
5716 
5717 	dp_h2t_ext_stats_msg_send(pdev, HTT_DBG_EXT_STATS_PEER_INFO,
5718 			config_param0, config_param1, config_param2,
5719 			config_param3, 0, 0, 0);
5720 
5721 }
5722 
5723 /* This struct definition will be removed from here
5724  * once it gets added to FW headers */
5725 struct httstats_cmd_req {
5726     uint32_t    config_param0;
5727     uint32_t    config_param1;
5728     uint32_t    config_param2;
5729     uint32_t    config_param3;
5730     int cookie;
5731     u_int8_t    stats_id;
5732 };
5733 
5734 /*
5735  * dp_get_htt_stats: function to process the htt stats request
5736  * @pdev_handle: DP pdev handle
5737  * @data: pointer to request data
5738  * @data_len: length for request data
5739  *
5740  * return: void
5741  */
5742 static void
5743 dp_get_htt_stats(struct cdp_pdev *pdev_handle, void *data, uint32_t data_len)
5744 {
5745 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
5746 	struct httstats_cmd_req *req = (struct httstats_cmd_req *)data;
5747 
5748 	QDF_ASSERT(data_len == sizeof(struct httstats_cmd_req));
5749 	dp_h2t_ext_stats_msg_send(pdev, req->stats_id,
5750 				req->config_param0, req->config_param1,
5751 				req->config_param2, req->config_param3,
5752 				req->cookie, 0, 0);
5753 }
5754 /*
5755  * dp_set_pdev_param: function to set parameters in pdev
5756  * @pdev_handle: DP pdev handle
5757  * @param: parameter type to be set
5758  * @val: value of parameter to be set
5759  *
5760  * return: void
5761  */
5762 static void dp_set_pdev_param(struct cdp_pdev *pdev_handle,
5763 		enum cdp_pdev_param_type param, uint8_t val)
5764 {
5765 	switch (param) {
5766 	case CDP_CONFIG_DEBUG_SNIFFER:
5767 		dp_config_debug_sniffer(pdev_handle, val);
5768 		break;
5769 	default:
5770 		break;
5771 	}
5772 }
5773 
5774 /*
5775  * dp_set_vdev_param: function to set parameters in vdev
5776  * @param: parameter type to be set
5777  * @val: value of parameter to be set
5778  *
5779  * return: void
5780  */
5781 static void dp_set_vdev_param(struct cdp_vdev *vdev_handle,
5782 		enum cdp_vdev_param_type param, uint32_t val)
5783 {
5784 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
5785 	switch (param) {
5786 	case CDP_ENABLE_WDS:
5787 		vdev->wds_enabled = val;
5788 		break;
5789 	case CDP_ENABLE_NAWDS:
5790 		vdev->nawds_enabled = val;
5791 		break;
5792 	case CDP_ENABLE_MCAST_EN:
5793 		vdev->mcast_enhancement_en = val;
5794 		break;
5795 	case CDP_ENABLE_PROXYSTA:
5796 		vdev->proxysta_vdev = val;
5797 		break;
5798 	case CDP_UPDATE_TDLS_FLAGS:
5799 		vdev->tdls_link_connected = val;
5800 		break;
5801 	case CDP_CFG_WDS_AGING_TIMER:
5802 		if (val == 0)
5803 			qdf_timer_stop(&vdev->pdev->soc->wds_aging_timer);
5804 		else if (val != vdev->wds_aging_timer_val)
5805 			qdf_timer_mod(&vdev->pdev->soc->wds_aging_timer, val);
5806 
5807 		vdev->wds_aging_timer_val = val;
5808 		break;
5809 	case CDP_ENABLE_AP_BRIDGE:
5810 		if (wlan_op_mode_sta != vdev->opmode)
5811 			vdev->ap_bridge_enabled = val;
5812 		else
5813 			vdev->ap_bridge_enabled = false;
5814 		break;
5815 	case CDP_ENABLE_CIPHER:
5816 		vdev->sec_type = val;
5817 		break;
5818 	case CDP_ENABLE_QWRAP_ISOLATION:
5819 		vdev->isolation_vdev = val;
5820 		break;
5821 	default:
5822 		break;
5823 	}
5824 
5825 	dp_tx_vdev_update_search_flags(vdev);
5826 }
5827 
5828 /**
5829  * dp_peer_set_nawds: set nawds bit in peer
5830  * @peer_handle: pointer to peer
5831  * @value: enable/disable nawds
5832  *
5833  * return: void
5834  */
5835 static void dp_peer_set_nawds(struct cdp_peer *peer_handle, uint8_t value)
5836 {
5837 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
5838 	peer->nawds_enabled = value;
5839 }
5840 
5841 /*
5842  * dp_set_vdev_dscp_tid_map_wifi3(): Update Map ID selected for particular vdev
5843  * @vdev_handle: DP_VDEV handle
5844  * @map_id: ID of map that needs to be updated
5845  *
5846  * Return: void
5847  */
5848 static void dp_set_vdev_dscp_tid_map_wifi3(struct cdp_vdev *vdev_handle,
5849 		uint8_t map_id)
5850 {
5851 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
5852 	vdev->dscp_tid_map_id = map_id;
5853 	return;
5854 }
5855 
5856 /*
5857  * dp_txrx_stats_publish(): publish pdev stats into a buffer
5858  * @pdev_handle: DP_PDEV handle
5859  * @buf: to hold pdev_stats
5860  *
5861  * Return: int
5862  */
5863 static int
5864 dp_txrx_stats_publish(struct cdp_pdev *pdev_handle, void *buf)
5865 {
5866 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
5867 	struct cdp_pdev_stats *buffer = (struct cdp_pdev_stats *) buf;
5868 	struct cdp_txrx_stats_req req = {0,};
5869 
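	/*
	 * Aggregate host-side stats, then fire FW PDEV TX and RX ext-stats
	 * requests and sleep DP_MAX_SLEEP_TIME ms after each so the HTT
	 * responses (presumably) have time to update pdev->stats before the
	 * snapshot is copied out to the caller.
	 */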
5870 	dp_aggregate_pdev_stats(pdev);
5871 	req.stats = HTT_DBG_EXT_STATS_PDEV_TX;
5872 	req.cookie_val = 1;
5873 	dp_h2t_ext_stats_msg_send(pdev, req.stats, req.param0,
5874 				req.param1, req.param2, req.param3, 0,
5875 				req.cookie_val, 0);
5876 
5877 	msleep(DP_MAX_SLEEP_TIME);
5878 
5879 	req.stats = HTT_DBG_EXT_STATS_PDEV_RX;
5880 	req.cookie_val = 1;
5881 	dp_h2t_ext_stats_msg_send(pdev, req.stats, req.param0,
5882 				req.param1, req.param2, req.param3, 0,
5883 				req.cookie_val, 0);
5884 
5885 	msleep(DP_MAX_SLEEP_TIME);
5886 	qdf_mem_copy(buffer, &pdev->stats, sizeof(pdev->stats));
5887 
5888 	return TXRX_STATS_LEVEL;
5889 }
5890 
5891 /**
5892  * dp_set_pdev_dscp_tid_map_wifi3(): update dscp tid map in pdev
5893  * @pdev: DP_PDEV handle
5894  * @map_id: ID of map that needs to be updated
5895  * @tos: index value in map
5896  * @tid: tid value passed by the user
5897  *
5898  * Return: void
5899  */
5900 static void dp_set_pdev_dscp_tid_map_wifi3(struct cdp_pdev *pdev_handle,
5901 		uint8_t map_id, uint8_t tos, uint8_t tid)
5902 {
5903 	uint8_t dscp;
5904 	struct dp_pdev *pdev = (struct dp_pdev *) pdev_handle;
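	/*
	 * The DSCP value is the upper six bits of the IP TOS / traffic-class
	 * byte; it indexes the selected DSCP-TID map.
	 */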
5905 	dscp = (tos >> DP_IP_DSCP_SHIFT) & DP_IP_DSCP_MASK;
5906 	pdev->dscp_tid_map[map_id][dscp] = tid;
5907 	if (map_id < HAL_MAX_HW_DSCP_TID_MAPS)
5908 		hal_tx_update_dscp_tid(pdev->soc->hal_soc, tid,
5909 			map_id, dscp);
5910 	return;
5911 }
5912 
5913 /**
5914  * dp_fw_stats_process(): Process TxRX FW stats request
5915  * @vdev_handle: DP VDEV handle
5916  * @req: stats request
5917  *
5918  * return: int
5919  */
5920 static int dp_fw_stats_process(struct cdp_vdev *vdev_handle,
5921 		struct cdp_txrx_stats_req *req)
5922 {
5923 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
5924 	struct dp_pdev *pdev = NULL;
5925 	uint32_t stats = req->stats;
5926 	uint8_t channel = req->channel;
5927 
5928 	if (!vdev) {
5929 		DP_TRACE(NONE, "VDEV not found");
5930 		return 1;
5931 	}
5932 	pdev = vdev->pdev;
5933 
5934 	/*
5935 	 * For the HTT_DBG_EXT_STATS_RESET command, FW needs config_param0
5936 	 * to config_param3 to be set according to the rule below:
5937 	 *
5938 	 * PARAM:
5939 	 *   - config_param0 : start_offset (stats type)
5940 	 *   - config_param1 : stats bmask from start offset
5941 	 *   - config_param2 : stats bmask from start offset + 32
5942 	 *   - config_param3 : stats bmask from start offset + 64
5943 	 */
5944 	if (req->stats == CDP_TXRX_STATS_0) {
5945 		req->param0 = HTT_DBG_EXT_STATS_PDEV_TX;
5946 		req->param1 = 0xFFFFFFFF;
5947 		req->param2 = 0xFFFFFFFF;
5948 		req->param3 = 0xFFFFFFFF;
5949 	}
5950 
5951 	return dp_h2t_ext_stats_msg_send(pdev, stats, req->param0,
5952 				req->param1, req->param2, req->param3,
5953 				0, 0, channel);
5954 }
5955 
5956 /**
5957  * dp_txrx_stats_request - function to map to firmware and host stats
5958  * @vdev: virtual handle
5959  * @req: stats request
5960  *
5961  * Return: integer
5962  */
5963 static int dp_txrx_stats_request(struct cdp_vdev *vdev,
5964 		struct cdp_txrx_stats_req *req)
5965 {
5966 	int host_stats;
5967 	int fw_stats;
5968 	enum cdp_stats stats;
5969 
5970 	if (!vdev || !req) {
5971 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
5972 				"Invalid vdev/req instance");
5973 		return 0;
5974 	}
5975 
5976 	stats = req->stats;
5977 	if (stats >= CDP_TXRX_MAX_STATS)
5978 		return 0;
5979 
5980 	/*
5981 	 * DP_CURR_FW_STATS_AVAIL: number of FW stats currently available;
5982 	 * it has to be updated whenever new FW HTT stats are added
5983 	 */
5984 	if (stats > CDP_TXRX_STATS_HTT_MAX)
5985 		stats = stats + DP_CURR_FW_STATS_AVAIL - DP_HTT_DBG_EXT_STATS_MAX;
5986 	fw_stats = dp_stats_mapping_table[stats][STATS_FW];
5987 	host_stats = dp_stats_mapping_table[stats][STATS_HOST];
5988 
5989 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
5990 		 "stats: %u fw_stats_type: %d host_stats_type: %d",
5991 		  stats, fw_stats, host_stats);
5992 
5993 	if (fw_stats != TXRX_FW_STATS_INVALID) {
5994 		/* update request with FW stats type */
5995 		req->stats = fw_stats;
5996 		return dp_fw_stats_process(vdev, req);
5997 	}
5998 
5999 	if ((host_stats != TXRX_HOST_STATS_INVALID) &&
6000 			(host_stats <= TXRX_HOST_STATS_MAX))
6001 		return dp_print_host_stats(vdev, host_stats);
6002 	else
6003 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
6004 				"Wrong Input for TxRx Stats");
6005 
6006 	return 0;
6007 }
6008 
6009 /*
6010  * dp_print_napi_stats(): NAPI stats
6011  * @soc - soc handle
6012  */
6013 static void dp_print_napi_stats(struct dp_soc *soc)
6014 {
6015 	hif_print_napi_stats(soc->hif_handle);
6016 }
6017 
6018 /*
6019  * dp_print_per_ring_stats(): Packet count per ring
6020  * @soc - soc handle
6021  */
6022 static void dp_print_per_ring_stats(struct dp_soc *soc)
6023 {
6024 	uint8_t ring;
6025 	uint16_t core;
6026 	uint64_t total_packets;
6027 
6028 	DP_TRACE(FATAL, "Reo packets per ring:");
6029 	for (ring = 0; ring < MAX_REO_DEST_RINGS; ring++) {
6030 		total_packets = 0;
6031 		DP_TRACE(FATAL, "Packets on ring %u:", ring);
6032 		for (core = 0; core < NR_CPUS; core++) {
6033 			DP_TRACE(FATAL, "Packets arriving on core %u: %llu",
6034 				core, soc->stats.rx.ring_packets[core][ring]);
6035 			total_packets += soc->stats.rx.ring_packets[core][ring];
6036 		}
6037 		DP_TRACE(FATAL, "Total packets on ring %u: %llu",
6038 			ring, total_packets);
6039 	}
6040 }
6041 
6042 /*
6043  * dp_txrx_path_stats() - Function to display dump stats
6044  * @soc - soc handle
6045  *
6046  * return: none
6047  */
6048 static void dp_txrx_path_stats(struct dp_soc *soc)
6049 {
6050 	uint8_t error_code;
6051 	uint8_t loop_pdev;
6052 	struct dp_pdev *pdev;
6053 	uint8_t i;
6054 
6055 	for (loop_pdev = 0; loop_pdev < soc->pdev_count; loop_pdev++) {
6056 
6057 		pdev = soc->pdev_list[loop_pdev];
6058 		dp_aggregate_pdev_stats(pdev);
6059 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
6060 			"Tx path Statistics:");
6061 
6062 		DP_TRACE(FATAL, "from stack: %u msdus (%llu bytes)",
6063 			pdev->stats.tx_i.rcvd.num,
6064 			pdev->stats.tx_i.rcvd.bytes);
6065 		DP_TRACE(FATAL, "processed from host: %u msdus (%llu bytes)",
6066 			pdev->stats.tx_i.processed.num,
6067 			pdev->stats.tx_i.processed.bytes);
6068 		DP_TRACE(FATAL, "successfully transmitted: %u msdus (%llu bytes)",
6069 			pdev->stats.tx.tx_success.num,
6070 			pdev->stats.tx.tx_success.bytes);
6071 
6072 		DP_TRACE(FATAL, "Dropped in host:");
6073 		DP_TRACE(FATAL, "Total packets dropped: %u,",
6074 			pdev->stats.tx_i.dropped.dropped_pkt.num);
6075 		DP_TRACE(FATAL, "Descriptor not available: %u",
6076 			pdev->stats.tx_i.dropped.desc_na);
6077 		DP_TRACE(FATAL, "Ring full: %u",
6078 			pdev->stats.tx_i.dropped.ring_full);
6079 		DP_TRACE(FATAL, "Enqueue fail: %u",
6080 			pdev->stats.tx_i.dropped.enqueue_fail);
6081 		DP_TRACE(FATAL, "DMA Error: %u",
6082 			pdev->stats.tx_i.dropped.dma_error);
6083 
6084 		DP_TRACE(FATAL, "Dropped in hardware:");
6085 		DP_TRACE(FATAL, "total packets dropped: %u",
6086 			pdev->stats.tx.tx_failed);
6087 		DP_TRACE(FATAL, "mpdu age out: %u",
6088 			pdev->stats.tx.dropped.age_out);
6089 		DP_TRACE(FATAL, "firmware removed: %u",
6090 			pdev->stats.tx.dropped.fw_rem);
6091 		DP_TRACE(FATAL, "firmware removed tx: %u",
6092 			pdev->stats.tx.dropped.fw_rem_tx);
6093 		DP_TRACE(FATAL, "firmware removed notx %u",
6094 			pdev->stats.tx.dropped.fw_rem_notx);
6095 		DP_TRACE(FATAL, "peer_invalid: %u",
6096 			pdev->soc->stats.tx.tx_invalid_peer.num);
6097 
6098 
6099 		DP_TRACE(FATAL, "Tx packets sent per interrupt:");
6100 		DP_TRACE(FATAL, "Single Packet: %u",
6101 			pdev->stats.tx_comp_histogram.pkts_1);
6102 		DP_TRACE(FATAL, "2-20 Packets:  %u",
6103 			pdev->stats.tx_comp_histogram.pkts_2_20);
6104 		DP_TRACE(FATAL, "21-40 Packets: %u",
6105 			pdev->stats.tx_comp_histogram.pkts_21_40);
6106 		DP_TRACE(FATAL, "41-60 Packets: %u",
6107 			pdev->stats.tx_comp_histogram.pkts_41_60);
6108 		DP_TRACE(FATAL, "61-80 Packets: %u",
6109 			pdev->stats.tx_comp_histogram.pkts_61_80);
6110 		DP_TRACE(FATAL, "81-100 Packets: %u",
6111 			pdev->stats.tx_comp_histogram.pkts_81_100);
6112 		DP_TRACE(FATAL, "101-200 Packets: %u",
6113 			pdev->stats.tx_comp_histogram.pkts_101_200);
6114 		DP_TRACE(FATAL, "   201+ Packets: %u",
6115 			pdev->stats.tx_comp_histogram.pkts_201_plus);
6116 
6117 		DP_TRACE(FATAL, "Rx path statistics");
6118 
6119 		DP_TRACE(FATAL, "delivered %u msdus ( %llu bytes),",
6120 			pdev->stats.rx.to_stack.num,
6121 			pdev->stats.rx.to_stack.bytes);
6122 		for (i = 0; i <  CDP_MAX_RX_RINGS; i++)
6123 			DP_TRACE(FATAL, "received on reo[%d] %u msdus ( %llu bytes),",
6124 					i, pdev->stats.rx.rcvd_reo[i].num,
6125 					pdev->stats.rx.rcvd_reo[i].bytes);
6126 		DP_TRACE(FATAL, "intra-bss packets %u msdus ( %llu bytes),",
6127 			pdev->stats.rx.intra_bss.pkts.num,
6128 			pdev->stats.rx.intra_bss.pkts.bytes);
6129 		DP_TRACE(FATAL, "intra-bss fails %u msdus ( %llu bytes),",
6130 			pdev->stats.rx.intra_bss.fail.num,
6131 			pdev->stats.rx.intra_bss.fail.bytes);
6132 		DP_TRACE(FATAL, "raw packets %u msdus ( %llu bytes),",
6133 			pdev->stats.rx.raw.num,
6134 			pdev->stats.rx.raw.bytes);
6135 		DP_TRACE(FATAL, "dropped: error %u msdus",
6136 			pdev->stats.rx.err.mic_err);
6137 		DP_TRACE(FATAL, "peer invalid %u",
6138 			pdev->soc->stats.rx.err.rx_invalid_peer.num);
6139 
6140 		DP_TRACE(FATAL, "Reo Statistics");
6141 		DP_TRACE(FATAL, "rbm error: %u msdus",
6142 			pdev->soc->stats.rx.err.invalid_rbm);
6143 		DP_TRACE(FATAL, "hal ring access fail: %u msdus",
6144 			pdev->soc->stats.rx.err.hal_ring_access_fail);
6145 
6146 		DP_TRACE(FATAL, "Reo errors");
6147 
6148 		for (error_code = 0; error_code < HAL_REO_ERR_MAX;
6149 				error_code++) {
6150 			DP_TRACE(FATAL, "Reo error number (%u): %u msdus",
6151 				error_code,
6152 				pdev->soc->stats.rx.err.reo_error[error_code]);
6153 		}
6154 
6155 		for (error_code = 0; error_code < HAL_RXDMA_ERR_MAX;
6156 				error_code++) {
6157 			DP_TRACE(FATAL, "Rxdma error number (%u): %u msdus",
6158 				error_code,
6159 				pdev->soc->stats.rx.err
6160 				.rxdma_error[error_code]);
6161 		}
6162 
6163 		DP_TRACE(FATAL, "Rx packets reaped per interrupt:");
6164 		DP_TRACE(FATAL, "Single Packet: %u",
6165 			 pdev->stats.rx_ind_histogram.pkts_1);
6166 		DP_TRACE(FATAL, "2-20 Packets:  %u",
6167 			 pdev->stats.rx_ind_histogram.pkts_2_20);
6168 		DP_TRACE(FATAL, "21-40 Packets: %u",
6169 			 pdev->stats.rx_ind_histogram.pkts_21_40);
6170 		DP_TRACE(FATAL, "41-60 Packets: %u",
6171 			 pdev->stats.rx_ind_histogram.pkts_41_60);
6172 		DP_TRACE(FATAL, "61-80 Packets: %u",
6173 			 pdev->stats.rx_ind_histogram.pkts_61_80);
6174 		DP_TRACE(FATAL, "81-100 Packets: %u",
6175 			 pdev->stats.rx_ind_histogram.pkts_81_100);
6176 		DP_TRACE(FATAL, "101-200 Packets: %u",
6177 			 pdev->stats.rx_ind_histogram.pkts_101_200);
6178 		DP_TRACE(FATAL, "   201+ Packets: %u",
6179 			 pdev->stats.rx_ind_histogram.pkts_201_plus);
6180 
6181 		DP_TRACE_STATS(ERROR, "%s: tso_enable: %u lro_enable: %u rx_hash: %u napi_enable: %u",
6182 			__func__,
6183 			pdev->soc->wlan_cfg_ctx->tso_enabled,
6184 			pdev->soc->wlan_cfg_ctx->lro_enabled,
6185 			pdev->soc->wlan_cfg_ctx->rx_hash,
6186 			pdev->soc->wlan_cfg_ctx->napi_enabled);
6187 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
6188 		DP_TRACE_STATS(ERROR, "%s: Tx flow stop queue: %u tx flow start queue offset: %u",
6189 			__func__,
6190 			pdev->soc->wlan_cfg_ctx->tx_flow_stop_queue_threshold,
6191 			pdev->soc->wlan_cfg_ctx->tx_flow_start_queue_offset);
6192 #endif
6193 	}
6194 }
6195 
6196 /*
6197  * dp_txrx_dump_stats() -  Dump statistics
6198  * @value - Statistics option
6199  */
6200 static QDF_STATUS dp_txrx_dump_stats(void *psoc, uint16_t value,
6201 				     enum qdf_stats_verbosity_level level)
6202 {
6203 	struct dp_soc *soc =
6204 		(struct dp_soc *)psoc;
6205 	QDF_STATUS status = QDF_STATUS_SUCCESS;
6206 
6207 	if (!soc) {
6208 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
6209 			"%s: soc is NULL", __func__);
6210 		return QDF_STATUS_E_INVAL;
6211 	}
6212 
6213 	switch (value) {
6214 	case CDP_TXRX_PATH_STATS:
6215 		dp_txrx_path_stats(soc);
6216 		break;
6217 
6218 	case CDP_RX_RING_STATS:
6219 		dp_print_per_ring_stats(soc);
6220 		break;
6221 
6222 	case CDP_TXRX_TSO_STATS:
6223 		/* TODO: NOT IMPLEMENTED */
6224 		break;
6225 
6226 	case CDP_DUMP_TX_FLOW_POOL_INFO:
6227 		cdp_dump_flow_pool_info((struct cdp_soc_t *)soc);
6228 		break;
6229 
6230 	case CDP_DP_NAPI_STATS:
6231 		dp_print_napi_stats(soc);
6232 		break;
6233 
6234 	case CDP_TXRX_DESC_STATS:
6235 		/* TODO: NOT IMPLEMENTED */
6236 		break;
6237 
6238 	default:
6239 		status = QDF_STATUS_E_INVAL;
6240 		break;
6241 	}
6242 
6243 	return status;
6244 
6245 }
6246 
6247 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
6248 /**
6249  * dp_update_flow_control_parameters() - API to store datapath
6250  *                            config parameters
6251  * @soc: soc handle
6252  * @params: ini parameter handle
6253  *
6254  * Return: void
6255  */
6256 static inline
6257 void dp_update_flow_control_parameters(struct dp_soc *soc,
6258 				struct cdp_config_params *params)
6259 {
6260 	soc->wlan_cfg_ctx->tx_flow_stop_queue_threshold =
6261 					params->tx_flow_stop_queue_threshold;
6262 	soc->wlan_cfg_ctx->tx_flow_start_queue_offset =
6263 					params->tx_flow_start_queue_offset;
6264 }
6265 #else
6266 static inline
6267 void dp_update_flow_control_parameters(struct dp_soc *soc,
6268 				struct cdp_config_params *params)
6269 {
6270 }
6271 #endif
6272 
6273 /**
6274  * dp_update_config_parameters() - API to store datapath
6275  *                            config parameters
6276  * @psoc: soc handle
6277  * @params: ini parameter handle
6278  *
6279  * Return: status
6280  */
6281 static
6282 QDF_STATUS dp_update_config_parameters(struct cdp_soc *psoc,
6283 				struct cdp_config_params *params)
6284 {
6285 	struct dp_soc *soc = (struct dp_soc *)psoc;
6286 
6287 	if (!(soc)) {
6288 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
6289 				"%s: Invalid handle", __func__);
6290 		return QDF_STATUS_E_INVAL;
6291 	}
6292 
6293 	soc->wlan_cfg_ctx->tso_enabled = params->tso_enable;
6294 	soc->wlan_cfg_ctx->lro_enabled = params->lro_enable;
6295 	soc->wlan_cfg_ctx->rx_hash = params->flow_steering_enable;
6296 	soc->wlan_cfg_ctx->tcp_udp_checksumoffload =
6297 				params->tcp_udp_checksumoffload;
6298 	soc->wlan_cfg_ctx->napi_enabled = params->napi_enable;
6299 
6300 	dp_update_flow_control_parameters(soc, params);
6301 
6302 	return QDF_STATUS_SUCCESS;
6303 }
6304 
6305 /**
6306  * dp_txrx_set_wds_rx_policy() - API to set the WDS rx policy
6307  *                            on a vdev
6308  * @vdev_handle - datapath vdev handle
6309  * @val: WDS rx policy flags
6310  *
6311  * Return: void
6312  */
6313 #ifdef WDS_VENDOR_EXTENSION
6314 void
6315 dp_txrx_set_wds_rx_policy(
6316 		struct cdp_vdev *vdev_handle,
6317 		u_int32_t val)
6318 {
6319 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
6320 	struct dp_peer *peer;
6321 	if (vdev->opmode == wlan_op_mode_ap) {
6322 		/* for ap, set it on bss_peer */
6323 		TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
6324 			if (peer->bss_peer) {
6325 				peer->wds_ecm.wds_rx_filter = 1;
6326 				peer->wds_ecm.wds_rx_ucast_4addr = (val & WDS_POLICY_RX_UCAST_4ADDR) ? 1:0;
6327 				peer->wds_ecm.wds_rx_mcast_4addr = (val & WDS_POLICY_RX_MCAST_4ADDR) ? 1:0;
6328 				break;
6329 			}
6330 		}
6331 	} else if (vdev->opmode == wlan_op_mode_sta) {
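		/*
		 * For STA mode the policy is applied on the first peer in the
		 * list, which is assumed to be the self/AP peer.
		 */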
6332 		peer = TAILQ_FIRST(&vdev->peer_list);
6333 		peer->wds_ecm.wds_rx_filter = 1;
6334 		peer->wds_ecm.wds_rx_ucast_4addr = (val & WDS_POLICY_RX_UCAST_4ADDR) ? 1:0;
6335 		peer->wds_ecm.wds_rx_mcast_4addr = (val & WDS_POLICY_RX_MCAST_4ADDR) ? 1:0;
6336 	}
6337 }
6338 
6339 /**
6340  * dp_txrx_peer_wds_tx_policy_update() - API to set tx wds policy
6341  *
6342  * @peer_handle - datapath peer handle
6343  * @wds_tx_ucast: policy for unicast transmission
6344  * @wds_tx_mcast: policy for multicast transmission
6345  *
6346  * Return: void
6347  */
6348 void
6349 dp_txrx_peer_wds_tx_policy_update(struct cdp_peer *peer_handle,
6350 		int wds_tx_ucast, int wds_tx_mcast)
6351 {
6352 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
6353 	if (wds_tx_ucast || wds_tx_mcast) {
6354 		peer->wds_enabled = 1;
6355 		peer->wds_ecm.wds_tx_ucast_4addr = wds_tx_ucast;
6356 		peer->wds_ecm.wds_tx_mcast_4addr = wds_tx_mcast;
6357 	} else {
6358 		peer->wds_enabled = 0;
6359 		peer->wds_ecm.wds_tx_ucast_4addr = 0;
6360 		peer->wds_ecm.wds_tx_mcast_4addr = 0;
6361 	}
6362 
6363 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
6364 			FL("Policy Update set to: "
6365 				"peer->wds_enabled %d "
6366 				"peer->wds_ecm.wds_tx_ucast_4addr %d "
6367 				"peer->wds_ecm.wds_tx_mcast_4addr %d\n"),
6368 				peer->wds_enabled, peer->wds_ecm.wds_tx_ucast_4addr,
6369 				peer->wds_ecm.wds_tx_mcast_4addr);
6370 	return;
6371 }
6372 #endif
6373 
6374 static struct cdp_wds_ops dp_ops_wds = {
6375 	.vdev_set_wds = dp_vdev_set_wds,
6376 #ifdef WDS_VENDOR_EXTENSION
6377 	.txrx_set_wds_rx_policy = dp_txrx_set_wds_rx_policy,
6378 	.txrx_wds_peer_tx_policy_update = dp_txrx_peer_wds_tx_policy_update,
6379 #endif
6380 };
6381 
6382 /*
6383  * dp_peer_delete_ast_entries(): Delete all AST entries for a peer
6384  * @soc - datapath soc handle
6385  * @peer - datapath peer handle
6386  *
6387  * Delete the AST entries belonging to a peer
6388  */
6389 #ifdef FEATURE_WDS
6390 static inline void dp_peer_delete_ast_entries(struct dp_soc *soc,
6391 		struct dp_peer *peer)
6392 {
6393 	struct dp_ast_entry *ast_entry, *temp_ast_entry;
6394 
6395 	qdf_spin_lock_bh(&soc->ast_lock);
6396 	DP_PEER_ITERATE_ASE_LIST(peer, ast_entry, temp_ast_entry)
6397 		dp_peer_del_ast(soc, ast_entry);
6398 
6399 	qdf_spin_unlock_bh(&soc->ast_lock);
6400 }
6401 #else
6402 static inline void dp_peer_delete_ast_entries(struct dp_soc *soc,
6403 		struct dp_peer *peer)
6404 {
6405 }
6406 #endif
6407 
6408 /*
6409  * dp_txrx_data_tx_cb_set(): set the callback for non standard tx
6410  * @vdev_handle - datapath vdev handle
6411  * @callback - callback function
6412  * @ctxt: callback context
6413  *
6414  */
6415 static void
6416 dp_txrx_data_tx_cb_set(struct cdp_vdev *vdev_handle,
6417 		       ol_txrx_data_tx_cb callback, void *ctxt)
6418 {
6419 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
6420 
6421 	vdev->tx_non_std_data_callback.func = callback;
6422 	vdev->tx_non_std_data_callback.ctxt = ctxt;
6423 }
6424 
6425 /**
6426  * dp_pdev_get_dp_txrx_handle() - get dp handle from pdev
6427  * @pdev_hdl: datapath pdev handle
6428  *
6429  * Return: opaque pointer to dp txrx handle
6430  */
6431 static void *dp_pdev_get_dp_txrx_handle(struct cdp_pdev *pdev_hdl)
6432 {
6433 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_hdl;
6434 
6435 	return pdev->dp_txrx_handle;
6436 }
6437 
6438 /**
6439  * dp_pdev_set_dp_txrx_handle() - set dp handle in pdev
6440  * @pdev_hdl: datapath pdev handle
6441  * @dp_txrx_hdl: opaque pointer for dp_txrx_handle
6442  *
6443  * Return: void
6444  */
6445 static void
6446 dp_pdev_set_dp_txrx_handle(struct cdp_pdev *pdev_hdl, void *dp_txrx_hdl)
6447 {
6448 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_hdl;
6449 
6450 	pdev->dp_txrx_handle = dp_txrx_hdl;
6451 }
6452 
6453 /**
6454  * dp_soc_get_dp_txrx_handle() - get context for external-dp from dp soc
6455  * @soc_handle: datapath soc handle
6456  *
6457  * Return: opaque pointer to external dp (non-core DP)
6458  */
6459 static void *dp_soc_get_dp_txrx_handle(struct cdp_soc *soc_handle)
6460 {
6461 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
6462 
6463 	return soc->external_txrx_handle;
6464 }
6465 
6466 /**
6467  * dp_soc_set_dp_txrx_handle() - set external dp handle in soc
6468  * @soc_handle: datapath soc handle
6469  * @txrx_handle: opaque pointer to external dp (non-core DP)
6470  *
6471  * Return: void
6472  */
6473 static void
6474 dp_soc_set_dp_txrx_handle(struct cdp_soc *soc_handle, void *txrx_handle)
6475 {
6476 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
6477 
6478 	soc->external_txrx_handle = txrx_handle;
6479 }
6480 
6481 #ifdef CONFIG_WIN
6482 static void dp_peer_teardown_wifi3(struct cdp_vdev *vdev_hdl, void *peer_hdl)
6483 {
6484 	struct dp_vdev *vdev = (struct dp_vdev *) vdev_hdl;
6485 	struct dp_peer *peer = (struct dp_peer *) peer_hdl;
6486 	struct dp_soc *soc = (struct dp_soc *) vdev->pdev->soc;
6487 
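	/*
	 * Mark the peer as being torn down and flush its AST entries; the
	 * flag (presumably) keeps new AST entries from being added while the
	 * delete is in flight.
	 */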
6488 	peer->delete_in_progress = true;
6489 	dp_peer_delete_ast_entries(soc, peer);
6490 }
6491 #endif
6492 
6493 #ifdef ATH_SUPPORT_NAC_RSSI
6494 static QDF_STATUS dp_config_for_nac_rssi(struct cdp_vdev *vdev_handle,
6495 		enum cdp_nac_param_cmd cmd, char *bssid, char *client_macaddr,
6496 		uint8_t chan_num)
6497 {
6498 
6499 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
6500 	struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;
6501 	struct dp_soc *soc = (struct dp_soc *) vdev->pdev->soc;
6502 
6503 	pdev->nac_rssi_filtering = 1;
6504 	/* Store address of NAC (neighbour peer) which will be checked
6505 	 * against TA of received packets.
6506 	 */
6507 
6508 	if (cmd == CDP_NAC_PARAM_ADD) {
6509 		qdf_mem_copy(vdev->cdp_nac_rssi.client_mac,
6510 				client_macaddr, DP_MAC_ADDR_LEN);
6511 		vdev->cdp_nac_rssi_enabled = 1;
6512 	} else if (cmd == CDP_NAC_PARAM_DEL) {
6513 		if (!qdf_mem_cmp(vdev->cdp_nac_rssi.client_mac,
6514 			client_macaddr, DP_MAC_ADDR_LEN)) {
6515 				/* delete this peer from the list */
6516 			qdf_mem_zero(vdev->cdp_nac_rssi.client_mac,
6517 				DP_MAC_ADDR_LEN);
6518 		}
6519 		vdev->cdp_nac_rssi_enabled = 0;
6520 	}
6521 
6522 	if (soc->cdp_soc.ol_ops->config_bssid_in_fw_for_nac_rssi)
6523 		soc->cdp_soc.ol_ops->config_bssid_in_fw_for_nac_rssi
6524 			(vdev->pdev->osif_pdev, vdev->vdev_id, cmd, bssid);
6525 
6526 	return QDF_STATUS_SUCCESS;
6527 }
6528 #endif
6529 
6530 static struct cdp_cmn_ops dp_ops_cmn = {
6531 	.txrx_soc_attach_target = dp_soc_attach_target_wifi3,
6532 	.txrx_vdev_attach = dp_vdev_attach_wifi3,
6533 	.txrx_vdev_detach = dp_vdev_detach_wifi3,
6534 	.txrx_pdev_attach = dp_pdev_attach_wifi3,
6535 	.txrx_pdev_detach = dp_pdev_detach_wifi3,
6536 	.txrx_peer_create = dp_peer_create_wifi3,
6537 	.txrx_peer_setup = dp_peer_setup_wifi3,
6538 #ifdef CONFIG_WIN
6539 	.txrx_peer_teardown = dp_peer_teardown_wifi3,
6540 #else
6541 	.txrx_peer_teardown = NULL,
6542 #endif
6543 	.txrx_peer_add_ast = dp_peer_add_ast_wifi3,
6544 	.txrx_peer_del_ast = dp_peer_del_ast_wifi3,
6545 	.txrx_peer_update_ast = dp_peer_update_ast_wifi3,
6546 	.txrx_peer_ast_hash_find = dp_peer_ast_hash_find_wifi3,
6547 	.txrx_peer_ast_get_pdev_id = dp_peer_ast_get_pdev_id_wifi3,
6548 	.txrx_peer_ast_get_next_hop = dp_peer_ast_get_next_hop_wifi3,
6549 	.txrx_peer_ast_set_type = dp_peer_ast_set_type_wifi3,
6550 	.txrx_peer_delete = dp_peer_delete_wifi3,
6551 	.txrx_vdev_register = dp_vdev_register_wifi3,
6552 	.txrx_soc_detach = dp_soc_detach_wifi3,
6553 	.txrx_get_vdev_mac_addr = dp_get_vdev_mac_addr_wifi3,
6554 	.txrx_get_vdev_from_vdev_id = dp_get_vdev_from_vdev_id_wifi3,
6555 	.txrx_get_ctrl_pdev_from_vdev = dp_get_ctrl_pdev_from_vdev_wifi3,
6556 	.addba_requestprocess = dp_addba_requestprocess_wifi3,
6557 	.addba_responsesetup = dp_addba_responsesetup_wifi3,
6558 	.delba_process = dp_delba_process_wifi3,
6559 	.set_addba_response = dp_set_addba_response,
6560 	.get_peer_mac_addr_frm_id = dp_get_peer_mac_addr_frm_id,
6561 	.flush_cache_rx_queue = NULL,
6562 	/* TODO: get API's for dscp-tid need to be added*/
6563 	.set_vdev_dscp_tid_map = dp_set_vdev_dscp_tid_map_wifi3,
6564 	.set_pdev_dscp_tid_map = dp_set_pdev_dscp_tid_map_wifi3,
6565 	.txrx_stats_request = dp_txrx_stats_request,
6566 	.txrx_set_monitor_mode = dp_vdev_set_monitor_mode,
6567 	.txrx_get_pdev_id_frm_pdev = dp_get_pdev_id_frm_pdev,
6568 	.txrx_set_nac = dp_set_nac,
6569 	.txrx_get_tx_pending = dp_get_tx_pending,
6570 	.txrx_set_pdev_tx_capture = dp_config_debug_sniffer,
6571 	.txrx_get_peer_mac_from_peer_id = dp_get_peer_mac_from_peer_id,
6572 	.display_stats = dp_txrx_dump_stats,
6573 	.txrx_soc_set_nss_cfg = dp_soc_set_nss_cfg_wifi3,
6574 	.txrx_soc_get_nss_cfg = dp_soc_get_nss_cfg_wifi3,
6575 #ifdef DP_INTR_POLL_BASED
6576 	.txrx_intr_attach = dp_soc_interrupt_attach_wrapper,
6577 #else
6578 	.txrx_intr_attach = dp_soc_interrupt_attach,
6579 #endif
6580 	.txrx_intr_detach = dp_soc_interrupt_detach,
6581 	.set_pn_check = dp_set_pn_check_wifi3,
6582 	.update_config_parameters = dp_update_config_parameters,
6583 	/* TODO: Add other functions */
6584 	.txrx_data_tx_cb_set = dp_txrx_data_tx_cb_set,
6585 	.get_dp_txrx_handle = dp_pdev_get_dp_txrx_handle,
6586 	.set_dp_txrx_handle = dp_pdev_set_dp_txrx_handle,
6587 	.get_soc_dp_txrx_handle = dp_soc_get_dp_txrx_handle,
6588 	.set_soc_dp_txrx_handle = dp_soc_set_dp_txrx_handle,
6589 	.tx_send = dp_tx_send,
6590 };
6591 
6592 static struct cdp_ctrl_ops dp_ops_ctrl = {
6593 	.txrx_peer_authorize = dp_peer_authorize,
6594 #ifdef QCA_SUPPORT_SON
6595 	.txrx_set_inact_params = dp_set_inact_params,
6596 	.txrx_start_inact_timer = dp_start_inact_timer,
6597 	.txrx_set_overload = dp_set_overload,
6598 	.txrx_peer_is_inact = dp_peer_is_inact,
6599 	.txrx_mark_peer_inact = dp_mark_peer_inact,
6600 #endif
6601 	.txrx_set_vdev_rx_decap_type = dp_set_vdev_rx_decap_type,
6602 	.txrx_set_tx_encap_type = dp_set_vdev_tx_encap_type,
6603 #ifdef MESH_MODE_SUPPORT
6604 	.txrx_set_mesh_mode  = dp_peer_set_mesh_mode,
6605 	.txrx_set_mesh_rx_filter = dp_peer_set_mesh_rx_filter,
6606 #endif
6607 	.txrx_set_vdev_param = dp_set_vdev_param,
6608 	.txrx_peer_set_nawds = dp_peer_set_nawds,
6609 	.txrx_set_pdev_reo_dest = dp_set_pdev_reo_dest,
6610 	.txrx_get_pdev_reo_dest = dp_get_pdev_reo_dest,
6611 	.txrx_set_filter_neighbour_peers = dp_set_filter_neighbour_peers,
6612 	.txrx_update_filter_neighbour_peers =
6613 		dp_update_filter_neighbour_peers,
6614 	.txrx_get_sec_type = dp_get_sec_type,
6615 	/* TODO: Add other functions */
6616 	.txrx_wdi_event_sub = dp_wdi_event_sub,
6617 	.txrx_wdi_event_unsub = dp_wdi_event_unsub,
6618 #ifdef WDI_EVENT_ENABLE
6619 	.txrx_get_pldev = dp_get_pldev,
6620 #endif
6621 	.txrx_set_pdev_param = dp_set_pdev_param,
6622 #ifdef ATH_SUPPORT_NAC_RSSI
6623 	.txrx_vdev_config_for_nac_rssi = dp_config_for_nac_rssi,
6624 #endif
6625 };
6626 
6627 static struct cdp_me_ops dp_ops_me = {
6628 #ifdef ATH_SUPPORT_IQUE
6629 	.tx_me_alloc_descriptor = dp_tx_me_alloc_descriptor,
6630 	.tx_me_free_descriptor = dp_tx_me_free_descriptor,
6631 	.tx_me_convert_ucast = dp_tx_me_send_convert_ucast,
6632 #endif
6633 };
6634 
6635 static struct cdp_mon_ops dp_ops_mon = {
6636 	.txrx_monitor_set_filter_ucast_data = NULL,
6637 	.txrx_monitor_set_filter_mcast_data = NULL,
6638 	.txrx_monitor_set_filter_non_data = NULL,
6639 	.txrx_monitor_get_filter_ucast_data = dp_vdev_get_filter_ucast_data,
6640 	.txrx_monitor_get_filter_mcast_data = dp_vdev_get_filter_mcast_data,
6641 	.txrx_monitor_get_filter_non_data = dp_vdev_get_filter_non_data,
6642 	.txrx_reset_monitor_mode = dp_reset_monitor_mode,
6643 	/* Added support for HK advance filter */
6644 	.txrx_set_advance_monitor_filter = dp_pdev_set_advance_monitor_filter,
6645 };
6646 
6647 static struct cdp_host_stats_ops dp_ops_host_stats = {
6648 	.txrx_per_peer_stats = dp_get_host_peer_stats,
6649 	.get_fw_peer_stats = dp_get_fw_peer_stats,
6650 	.get_htt_stats = dp_get_htt_stats,
6651 	.txrx_enable_enhanced_stats = dp_enable_enhanced_stats,
6652 	.txrx_disable_enhanced_stats = dp_disable_enhanced_stats,
6653 	.txrx_stats_publish = dp_txrx_stats_publish,
6654 	/* TODO */
6655 };
6656 
6657 static struct cdp_raw_ops dp_ops_raw = {
6658 	/* TODO */
6659 };
6660 
6661 #ifdef CONFIG_WIN
6662 static struct cdp_pflow_ops dp_ops_pflow = {
6663 	/* TODO */
6664 };
6665 #endif /* CONFIG_WIN */
6666 
6667 #ifdef FEATURE_RUNTIME_PM
6668 /**
6669  * dp_runtime_suspend() - ensure DP is ready to runtime suspend
6670  * @opaque_pdev: DP pdev context
6671  *
6672  * DP is ready to runtime suspend if there are no pending TX packets.
6673  *
6674  * Return: QDF_STATUS
6675  */
6676 static QDF_STATUS dp_runtime_suspend(struct cdp_pdev *opaque_pdev)
6677 {
6678 	struct dp_pdev *pdev = (struct dp_pdev *)opaque_pdev;
6679 	struct dp_soc *soc = pdev->soc;
6680 
6681 	/* Call DP TX flow control API to check if there are any
6682 	   pending packets */
6683 
6684 	if (soc->intr_mode == DP_INTR_POLL)
6685 		qdf_timer_stop(&soc->int_timer);
6686 
6687 	return QDF_STATUS_SUCCESS;
6688 }
6689 
6690 /**
6691  * dp_runtime_resume() - ensure DP is ready to runtime resume
6692  * @opaque_pdev: DP pdev context
6693  *
6694  * Resume DP for runtime PM.
6695  *
6696  * Return: QDF_STATUS
6697  */
6698 static QDF_STATUS dp_runtime_resume(struct cdp_pdev *opaque_pdev)
6699 {
6700 	struct dp_pdev *pdev = (struct dp_pdev *)opaque_pdev;
6701 	struct dp_soc *soc = pdev->soc;
6702 	void *hal_srng;
6703 	int i;
6704 
6705 	if (soc->intr_mode == DP_INTR_POLL)
6706 		qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
6707 
6708 	for (i = 0; i < MAX_TCL_DATA_RINGS; i++) {
6709 		hal_srng = soc->tcl_data_ring[i].hal_srng;
6710 		if (hal_srng) {
6711 			/* We actually only need to acquire the lock */
6712 			hal_srng_access_start(soc->hal_soc, hal_srng);
6713 			/* Update SRC ring head pointer for HW to send
6714 			   all pending packets */
6715 			hal_srng_access_end(soc->hal_soc, hal_srng);
6716 		}
6717 	}
6718 
6719 	return QDF_STATUS_SUCCESS;
6720 }
6721 #endif /* FEATURE_RUNTIME_PM */
6722 
6723 static QDF_STATUS dp_bus_suspend(struct cdp_pdev *opaque_pdev)
6724 {
6725 	struct dp_pdev *pdev = (struct dp_pdev *)opaque_pdev;
6726 	struct dp_soc *soc = pdev->soc;
6727 
6728 	if (soc->intr_mode == DP_INTR_POLL)
6729 		qdf_timer_stop(&soc->int_timer);
6730 
6731 	return QDF_STATUS_SUCCESS;
6732 }
6733 
6734 static QDF_STATUS dp_bus_resume(struct cdp_pdev *opaque_pdev)
6735 {
6736 	struct dp_pdev *pdev = (struct dp_pdev *)opaque_pdev;
6737 	struct dp_soc *soc = pdev->soc;
6738 
6739 	if (soc->intr_mode == DP_INTR_POLL)
6740 		qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
6741 
6742 	return QDF_STATUS_SUCCESS;
6743 }
6744 
6745 #ifndef CONFIG_WIN
6746 static struct cdp_misc_ops dp_ops_misc = {
6747 	.tx_non_std = dp_tx_non_std,
6748 	.get_opmode = dp_get_opmode,
6749 #ifdef FEATURE_RUNTIME_PM
6750 	.runtime_suspend = dp_runtime_suspend,
6751 	.runtime_resume = dp_runtime_resume,
6752 #endif /* FEATURE_RUNTIME_PM */
6753 	.pkt_log_init = dp_pkt_log_init,
6754 	.pkt_log_con_service = dp_pkt_log_con_service,
6755 };
6756 
6757 static struct cdp_flowctl_ops dp_ops_flowctl = {
6758 	/* WIFI 3.0 DP implement as required. */
6759 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
6760 	.register_pause_cb = dp_txrx_register_pause_cb,
6761 	.dump_flow_pool_info = dp_tx_dump_flow_pool_info,
6762 #endif /* QCA_LL_TX_FLOW_CONTROL_V2 */
6763 };
6764 
6765 static struct cdp_lflowctl_ops dp_ops_l_flowctl = {
6766 	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
6767 };
6768 
6769 #ifdef IPA_OFFLOAD
6770 static struct cdp_ipa_ops dp_ops_ipa = {
6771 	.ipa_get_resource = dp_ipa_get_resource,
6772 	.ipa_set_doorbell_paddr = dp_ipa_set_doorbell_paddr,
6773 	.ipa_op_response = dp_ipa_op_response,
6774 	.ipa_register_op_cb = dp_ipa_register_op_cb,
6775 	.ipa_get_stat = dp_ipa_get_stat,
6776 	.ipa_tx_data_frame = dp_tx_send_ipa_data_frame,
6777 	.ipa_enable_autonomy = dp_ipa_enable_autonomy,
6778 	.ipa_disable_autonomy = dp_ipa_disable_autonomy,
6779 	.ipa_setup = dp_ipa_setup,
6780 	.ipa_cleanup = dp_ipa_cleanup,
6781 	.ipa_setup_iface = dp_ipa_setup_iface,
6782 	.ipa_cleanup_iface = dp_ipa_cleanup_iface,
6783 	.ipa_enable_pipes = dp_ipa_enable_pipes,
6784 	.ipa_disable_pipes = dp_ipa_disable_pipes,
6785 	.ipa_set_perf_level = dp_ipa_set_perf_level
6786 };
6787 #endif
6788 
6789 static struct cdp_bus_ops dp_ops_bus = {
6790 	.bus_suspend = dp_bus_suspend,
6791 	.bus_resume = dp_bus_resume
6792 };
6793 
6794 static struct cdp_ocb_ops dp_ops_ocb = {
6795 	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
6796 };
6797 
6798 
6799 static struct cdp_throttle_ops dp_ops_throttle = {
6800 	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
6801 };
6802 
6803 static struct cdp_mob_stats_ops dp_ops_mob_stats = {
6804 	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
6805 };
6806 
6807 static struct cdp_cfg_ops dp_ops_cfg = {
6808 	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
6809 };
6810 
6811 /*
6812  * dp_wrapper_peer_get_ref_by_addr - wrapper function to get a peer by address
6813  * @dev: physical device instance
6814  * @peer_mac_addr: peer mac address
6815  * @local_id: local id for the peer
6816  * @debug_id: to track enum peer access
6817  *
6818  * Return: peer instance pointer
6819  */
6820 static inline void *
6821 dp_wrapper_peer_get_ref_by_addr(struct cdp_pdev *dev, u8 *peer_mac_addr,
6822 				u8 *local_id,
6823 				enum peer_debug_id_type debug_id)
6824 {
6825 	/*
6826 	 * Currently this function does not implement the "get ref"
6827 	 * functionality and is mapped to dp_find_peer_by_addr which does not
6828 	 * increment the peer ref count. So the peer state is uncertain after
6829 	 * calling this API. The functionality needs to be implemented.
6830 	 * Accordingly the corresponding release_ref function is NULL.
6831 	 */
6832 	return dp_find_peer_by_addr(dev, peer_mac_addr, local_id);
6833 }
6834 
6835 static struct cdp_peer_ops dp_ops_peer = {
6836 	.register_peer = dp_register_peer,
6837 	.clear_peer = dp_clear_peer,
6838 	.find_peer_by_addr = dp_find_peer_by_addr,
6839 	.find_peer_by_addr_and_vdev = dp_find_peer_by_addr_and_vdev,
6840 	.peer_get_ref_by_addr = dp_wrapper_peer_get_ref_by_addr,
6841 	.peer_release_ref = NULL,
6842 	.local_peer_id = dp_local_peer_id,
6843 	.peer_find_by_local_id = dp_peer_find_by_local_id,
6844 	.peer_state_update = dp_peer_state_update,
6845 	.get_vdevid = dp_get_vdevid,
6846 	.get_vdev_by_sta_id = dp_get_vdev_by_sta_id,
6847 	.peer_get_peer_mac_addr = dp_peer_get_peer_mac_addr,
6848 	.get_vdev_for_peer = dp_get_vdev_for_peer,
6849 	.get_peer_state = dp_get_peer_state,
6850 	.last_assoc_received = dp_get_last_assoc_received,
6851 	.last_disassoc_received = dp_get_last_disassoc_received,
6852 	.last_deauth_received = dp_get_last_deauth_received,
6853 };
6854 #endif
6855 
6856 static struct cdp_ops dp_txrx_ops = {
6857 	.cmn_drv_ops = &dp_ops_cmn,
6858 	.ctrl_ops = &dp_ops_ctrl,
6859 	.me_ops = &dp_ops_me,
6860 	.mon_ops = &dp_ops_mon,
6861 	.host_stats_ops = &dp_ops_host_stats,
6862 	.wds_ops = &dp_ops_wds,
6863 	.raw_ops = &dp_ops_raw,
6864 #ifdef CONFIG_WIN
6865 	.pflow_ops = &dp_ops_pflow,
6866 #endif /* CONFIG_WIN */
6867 #ifndef CONFIG_WIN
6868 	.misc_ops = &dp_ops_misc,
6869 	.cfg_ops = &dp_ops_cfg,
6870 	.flowctl_ops = &dp_ops_flowctl,
6871 	.l_flowctl_ops = &dp_ops_l_flowctl,
6872 #ifdef IPA_OFFLOAD
6873 	.ipa_ops = &dp_ops_ipa,
6874 #endif
6875 	.bus_ops = &dp_ops_bus,
6876 	.ocb_ops = &dp_ops_ocb,
6877 	.peer_ops = &dp_ops_peer,
6878 	.throttle_ops = &dp_ops_throttle,
6879 	.mob_stats_ops = &dp_ops_mob_stats,
6880 #endif
6881 };
6882 
6883 /*
6884  * dp_soc_set_txrx_ring_map()
6885  * @soc: DP soc handle
6886  *
6887  * Return: Void
6888  */
6889 static void dp_soc_set_txrx_ring_map(struct dp_soc *soc)
6890 {
6891 	uint32_t i;
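	/* Map each interrupt context to its TCL ring via the default CPU ring map */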
6892 	for (i = 0; i < WLAN_CFG_INT_NUM_CONTEXTS; i++) {
6893 		soc->tx_ring_map[i] = dp_cpu_ring_map[DP_DEFAULT_MAP][i];
6894 	}
6895 }
6896 
6897 /*
6898  * dp_soc_attach_wifi3() - Attach txrx SOC
6899  * @ctrl_psoc:	Opaque SOC handle from control plane
6900  * @htc_handle:	Opaque HTC handle
6901  * @hif_handle:	Opaque HIF handle
6902  * @qdf_osdev:	QDF device
6903  *
6904  * Return: DP SOC handle on success, NULL on failure
6905  */
6906 /*
6907  * Local prototype added to temporarily address warning caused by
6908  * -Wmissing-prototypes. A more correct solution, namely to expose
6909  * a prototype in an appropriate header file, will come later.
6910  */
6911 void *dp_soc_attach_wifi3(void *ctrl_psoc, void *hif_handle,
6912 	HTC_HANDLE htc_handle, qdf_device_t qdf_osdev,
6913 	struct ol_if_ops *ol_ops);
6914 void *dp_soc_attach_wifi3(void *ctrl_psoc, void *hif_handle,
6915 	HTC_HANDLE htc_handle, qdf_device_t qdf_osdev,
6916 	struct ol_if_ops *ol_ops)
6917 {
6918 	struct dp_soc *soc = qdf_mem_malloc(sizeof(*soc));
6919 
6920 	if (!soc) {
6921 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
6922 			FL("DP SOC memory allocation failed"));
6923 		goto fail0;
6924 	}
6925 
6926 	soc->cdp_soc.ops = &dp_txrx_ops;
6927 	soc->cdp_soc.ol_ops = ol_ops;
6928 	soc->ctrl_psoc = ctrl_psoc;
6929 	soc->osdev = qdf_osdev;
6930 	soc->hif_handle = hif_handle;
6931 
6932 	soc->hal_soc = hif_get_hal_handle(hif_handle);
6933 	soc->htt_handle = htt_soc_attach(soc, ctrl_psoc, htc_handle,
6934 		soc->hal_soc, qdf_osdev);
6935 	if (!soc->htt_handle) {
6936 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
6937 			FL("HTT attach failed"));
6938 		goto fail1;
6939 	}
6940 
6941 	soc->wlan_cfg_ctx = wlan_cfg_soc_attach();
6942 	if (!soc->wlan_cfg_ctx) {
6943 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
6944 				FL("wlan_cfg_soc_attach failed"));
6945 		goto fail2;
6946 	}
6947 
6948 	wlan_cfg_set_rx_hash(soc->wlan_cfg_ctx, rx_hash);
6949 	soc->cce_disable = false;
6950 
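	/*
	 * Let the control plane override the default max peer id and CCE
	 * setting when it provides a config query callback.
	 */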
6951 	if (soc->cdp_soc.ol_ops->get_dp_cfg_param) {
6952 		int ret = soc->cdp_soc.ol_ops->get_dp_cfg_param(soc->ctrl_psoc,
6953 				CDP_CFG_MAX_PEER_ID);
6954 
6955 		if (ret != -EINVAL) {
6956 			wlan_cfg_set_max_peer_id(soc->wlan_cfg_ctx, ret);
6957 		}
6958 
6959 		ret = soc->cdp_soc.ol_ops->get_dp_cfg_param(soc->ctrl_psoc,
6960 				CDP_CFG_CCE_DISABLE);
6961 		if (ret == 1)
6962 			soc->cce_disable = true;
6963 	}
6964 
6965 	qdf_spinlock_create(&soc->peer_ref_mutex);
6966 
6967 	qdf_spinlock_create(&soc->reo_desc_freelist_lock);
6968 	qdf_list_create(&soc->reo_desc_freelist, REO_DESC_FREELIST_SIZE);
6969 
6970 	/* fill the tx/rx cpu ring map*/
6971 	dp_soc_set_txrx_ring_map(soc);
6972 
6973 	qdf_spinlock_create(&soc->htt_stats.lock);
6974 	/* initialize work queue for stats processing */
6975 	qdf_create_work(0, &soc->htt_stats.work, htt_t2h_stats_handler, soc);
6976 
	/* Initialize inactivity timer for WiFi SON */
6978 	dp_init_inact_timer(soc);
6979 
6980 	return (void *)soc;
6981 
6982 fail2:
6983 	htt_soc_detach(soc->htt_handle);
6984 fail1:
6985 	qdf_mem_free(soc);
6986 fail0:
6987 	return NULL;
6988 }
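
/*
 * Illustrative only: a minimal sketch of how a caller might attach the DP
 * SOC and handle the failure path. The variable names used here (psoc,
 * hif_ctx, htc, osdev, ol_ops) are placeholders, not symbols defined in
 * this file.
 *
 *	void *dp_soc;
 *
 *	dp_soc = dp_soc_attach_wifi3(psoc, hif_ctx, htc, osdev, ol_ops);
 *	if (!dp_soc)
 *		return QDF_STATUS_E_FAILURE;
 *
 * On success the returned handle carries cdp_soc.ops pointing at
 * dp_txrx_ops, so it can be driven through the converged cdp interfaces.
 */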
6989 
/*
 * dp_get_pdev_for_mac_id() - Return pdev for mac_id
 *
 * @soc: handle to DP soc
 * @mac_id: MAC id
 *
 * Return: pdev corresponding to the MAC id
 */
6998 void *dp_get_pdev_for_mac_id(struct dp_soc *soc, uint32_t mac_id)
6999 {
7000 	if (wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx))
7001 		return soc->pdev_list[mac_id];
7002 
	/* Typically for MCL, as there is only one PDEV */
7004 	return soc->pdev_list[0];
7005 }
7006 
/*
 * dp_get_ring_id_for_mac_id() - Return ring index for mac_id
 *
 * @soc: handle to DP soc
 * @mac_id: MAC id
 *
 * Return: ring id
 */
7015 int dp_get_ring_id_for_mac_id(struct dp_soc *soc, uint32_t mac_id)
7016 {
7017 	/*
7018 	 * Single pdev using both MACs will operate on both MAC rings,
7019 	 * which is the case for MCL.
7020 	 */
7021 	if (!wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx))
7022 		return mac_id;
7023 
7024 	/* For WIN each PDEV will operate one ring, so index is zero. */
7025 	return 0;
7026 }
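
/*
 * Illustrative only: a sketch of how the two helpers above are typically
 * combined when walking MAC rings; "max_mac_rings" is assumed to come from
 * wlan_cfg_get_num_mac_rings().
 *
 *	for (mac_id = 0; mac_id < max_mac_rings; mac_id++) {
 *		struct dp_pdev *pdev = dp_get_pdev_for_mac_id(soc, mac_id);
 *		int ring = dp_get_ring_id_for_mac_id(soc, mac_id);
 *
 *		// WIN: one pdev per mac_id, ring index is always 0
 *		// MCL: single pdev (index 0), ring index equals mac_id
 *	}
 */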
7027 
/*
 * dp_is_hw_dbs_enable() - Check if DBS is supported and adjust MAC ring count
 * @soc:		DP SoC context
 * @max_mac_rings:	Pointer to the number of MAC rings; forced to 1 when
 *			the hardware is not DBS 2x2 capable
 *
 * Return: None
 */
7035 static
7036 void dp_is_hw_dbs_enable(struct dp_soc *soc,
7037 				int *max_mac_rings)
7038 {
7039 	bool dbs_enable = false;
7040 	if (soc->cdp_soc.ol_ops->is_hw_dbs_2x2_capable)
7041 		dbs_enable = soc->cdp_soc.ol_ops->
7042 		is_hw_dbs_2x2_capable(soc->ctrl_psoc);
7043 
	*max_mac_rings = dbs_enable ? (*max_mac_rings) : 1;
7045 }
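
/*
 * Illustrative only: dp_is_hw_dbs_enable() clamps the ring count in place
 * rather than reporting the capability. A sketch of the calling pattern
 * used below in dp_set_pktlog_wifi3():
 *
 *	int max_mac_rings = wlan_cfg_get_num_mac_rings(pdev->wlan_cfg_ctx);
 *
 *	dp_is_hw_dbs_enable(soc, &max_mac_rings);
 *	// max_mac_rings is left untouched when the target is DBS 2x2
 *	// capable, and forced to 1 otherwise.
 */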
7046 
/*
 * dp_set_pktlog_wifi3() - configure packet log for a WDI event
 * @pdev: Datapath PDEV handle
 * @event: WDI event whose notifications are being subscribed to
 * @enable: true to subscribe (enable pktlog), false to unsubscribe
 *
 * Return: 0 on success
 */
7055 #ifdef WDI_EVENT_ENABLE
7056 int dp_set_pktlog_wifi3(struct dp_pdev *pdev, uint32_t event,
7057 	bool enable)
7058 {
7059 	struct dp_soc *soc = pdev->soc;
7060 	struct htt_rx_ring_tlv_filter htt_tlv_filter = {0};
7061 	int max_mac_rings = wlan_cfg_get_num_mac_rings
7062 					(pdev->wlan_cfg_ctx);
7063 	uint8_t mac_id = 0;
7064 
7065 	dp_is_hw_dbs_enable(soc, &max_mac_rings);
7066 
7067 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
7068 			FL("Max_mac_rings %d \n"),
7069 			max_mac_rings);
7070 
7071 	if (enable) {
7072 		switch (event) {
7073 		case WDI_EVENT_RX_DESC:
7074 			if (pdev->monitor_vdev) {
7075 				/* Nothing needs to be done if monitor mode is
7076 				 * enabled
7077 				 */
7078 				return 0;
7079 			}
7080 			if (pdev->rx_pktlog_mode != DP_RX_PKTLOG_FULL) {
7081 				pdev->rx_pktlog_mode = DP_RX_PKTLOG_FULL;
7082 				htt_tlv_filter.mpdu_start = 1;
7083 				htt_tlv_filter.msdu_start = 1;
7084 				htt_tlv_filter.msdu_end = 1;
7085 				htt_tlv_filter.mpdu_end = 1;
7086 				htt_tlv_filter.packet_header = 1;
7087 				htt_tlv_filter.attention = 1;
7088 				htt_tlv_filter.ppdu_start = 1;
7089 				htt_tlv_filter.ppdu_end = 1;
7090 				htt_tlv_filter.ppdu_end_user_stats = 1;
7091 				htt_tlv_filter.ppdu_end_user_stats_ext = 1;
7092 				htt_tlv_filter.ppdu_end_status_done = 1;
7093 				htt_tlv_filter.enable_fp = 1;
7094 				htt_tlv_filter.fp_mgmt_filter = FILTER_MGMT_ALL;
7095 				htt_tlv_filter.fp_ctrl_filter = FILTER_CTRL_ALL;
7096 				htt_tlv_filter.fp_data_filter = FILTER_DATA_ALL;
7097 				htt_tlv_filter.mo_mgmt_filter = FILTER_MGMT_ALL;
7098 				htt_tlv_filter.mo_ctrl_filter = FILTER_CTRL_ALL;
7099 				htt_tlv_filter.mo_data_filter = FILTER_DATA_ALL;
7100 
7101 				for (mac_id = 0; mac_id < max_mac_rings;
7102 								mac_id++) {
7103 					htt_h2t_rx_ring_cfg(soc->htt_handle,
7104 							pdev->pdev_id + mac_id,
7105 							pdev->rxdma_mon_status_ring
7106 							.hal_srng,
7107 							RXDMA_MONITOR_STATUS,
7108 							RX_BUFFER_SIZE,
7109 							&htt_tlv_filter);
7110 
7111 				}
7112 
7113 				if (soc->reap_timer_init)
7114 					qdf_timer_mod(&soc->mon_reap_timer,
7115 					DP_INTR_POLL_TIMER_MS);
7116 			}
7117 			break;
7118 
7119 		case WDI_EVENT_LITE_RX:
7120 			if (pdev->monitor_vdev) {
7121 				/* Nothing needs to be done if monitor mode is
7122 				 * enabled
7123 				 */
7124 				return 0;
7125 			}
7126 
7127 			if (pdev->rx_pktlog_mode != DP_RX_PKTLOG_LITE) {
7128 				pdev->rx_pktlog_mode = DP_RX_PKTLOG_LITE;
7129 
7130 				htt_tlv_filter.ppdu_start = 1;
7131 				htt_tlv_filter.ppdu_end = 1;
7132 				htt_tlv_filter.ppdu_end_user_stats = 1;
7133 				htt_tlv_filter.ppdu_end_user_stats_ext = 1;
7134 				htt_tlv_filter.ppdu_end_status_done = 1;
7135 				htt_tlv_filter.mpdu_start = 1;
7136 				htt_tlv_filter.enable_fp = 1;
7137 				htt_tlv_filter.fp_mgmt_filter = FILTER_MGMT_ALL;
7138 				htt_tlv_filter.fp_ctrl_filter = FILTER_CTRL_ALL;
7139 				htt_tlv_filter.fp_data_filter = FILTER_DATA_ALL;
7140 				htt_tlv_filter.mo_mgmt_filter = FILTER_MGMT_ALL;
7141 				htt_tlv_filter.mo_ctrl_filter = FILTER_CTRL_ALL;
7142 				htt_tlv_filter.mo_data_filter = FILTER_DATA_ALL;
7143 
7144 				for (mac_id = 0; mac_id < max_mac_rings;
7145 								mac_id++) {
7146 					htt_h2t_rx_ring_cfg(soc->htt_handle,
7147 					pdev->pdev_id + mac_id,
7148 					pdev->rxdma_mon_status_ring
7149 					.hal_srng,
7150 					RXDMA_MONITOR_STATUS,
7151 					RX_BUFFER_SIZE_PKTLOG_LITE,
7152 					&htt_tlv_filter);
7153 				}
7154 
7155 				if (soc->reap_timer_init)
7156 					qdf_timer_mod(&soc->mon_reap_timer,
7157 					DP_INTR_POLL_TIMER_MS);
7158 			}
7159 			break;
7160 
7161 		case WDI_EVENT_LITE_T2H:
7162 			if (pdev->monitor_vdev) {
7163 				/* Nothing needs to be done if monitor mode is
7164 				 * enabled
7165 				 */
7166 				return 0;
7167 			}
7168 
7169 			for (mac_id = 0; mac_id < max_mac_rings; mac_id++) {
7170 				pdev->pktlog_ppdu_stats = true;
7171 				dp_h2t_cfg_stats_msg_send(pdev,
7172 						DP_PPDU_TXLITE_STATS_BITMASK_CFG,
7173 						pdev->pdev_id + mac_id);
7174 			}
7175 			break;
7176 
7177 		default:
7178 			/* Nothing needs to be done for other pktlog types */
7179 			break;
7180 		}
7181 	} else {
7182 		switch (event) {
7183 		case WDI_EVENT_RX_DESC:
7184 		case WDI_EVENT_LITE_RX:
7185 			if (pdev->monitor_vdev) {
7186 				/* Nothing needs to be done if monitor mode is
7187 				 * enabled
7188 				 */
7189 				return 0;
7190 			}
7191 			if (pdev->rx_pktlog_mode != DP_RX_PKTLOG_DISABLED) {
7192 				pdev->rx_pktlog_mode = DP_RX_PKTLOG_DISABLED;
7193 
7194 				for (mac_id = 0; mac_id < max_mac_rings;
7195 								mac_id++) {
7196 					htt_h2t_rx_ring_cfg(soc->htt_handle,
7197 							pdev->pdev_id + mac_id,
7198 							pdev->rxdma_mon_status_ring
7199 							.hal_srng,
7200 							RXDMA_MONITOR_STATUS,
7201 							RX_BUFFER_SIZE,
7202 							&htt_tlv_filter);
7203 				}
7204 
7205 				if (soc->reap_timer_init)
7206 					qdf_timer_stop(&soc->mon_reap_timer);
7207 			}
7208 			break;
7209 		case WDI_EVENT_LITE_T2H:
7210 			if (pdev->monitor_vdev) {
7211 				/* Nothing needs to be done if monitor mode is
7212 				 * enabled
7213 				 */
7214 				return 0;
7215 			}
			/* Pass value 0 to disable HTT_H2T_MSG_TYPE_PPDU_STATS_CFG
			 * in FW. Switch to the proper macro once it is defined
			 * in the htt header file.
			 */
7220 			for (mac_id = 0; mac_id < max_mac_rings; mac_id++) {
7221 				pdev->pktlog_ppdu_stats = false;
7222 				if (!pdev->enhanced_stats_en && !pdev->tx_sniffer_enable && !pdev->mcopy_mode) {
7223 					dp_h2t_cfg_stats_msg_send(pdev, 0,
7224 							pdev->pdev_id + mac_id);
7225 				} else if (pdev->tx_sniffer_enable || pdev->mcopy_mode) {
7226 					dp_h2t_cfg_stats_msg_send(pdev, DP_PPDU_STATS_CFG_SNIFFER,
7227 							pdev->pdev_id + mac_id);
7228 				} else if (pdev->enhanced_stats_en) {
7229 					dp_h2t_cfg_stats_msg_send(pdev, DP_PPDU_STATS_CFG_ENH_STATS,
7230 							pdev->pdev_id + mac_id);
7231 				}
7232 			}
7233 
7234 			break;
7235 		default:
7236 			/* Nothing needs to be done for other pktlog types */
7237 			break;
7238 		}
7239 	}
7240 	return 0;
7241 }
7242 #endif
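
/*
 * Illustrative only: a minimal sketch of toggling lite RX packet logging on
 * a pdev through dp_set_pktlog_wifi3() (error handling omitted; "pdev" is
 * assumed to be a valid struct dp_pdev pointer):
 *
 *	dp_set_pktlog_wifi3(pdev, WDI_EVENT_LITE_RX, true);
 *	...
 *	dp_set_pktlog_wifi3(pdev, WDI_EVENT_LITE_RX, false);
 *
 * Enabling programs the RXDMA monitor status ring filters and arms the
 * monitor reap timer; disabling restores DP_RX_PKTLOG_DISABLED and stops
 * the timer.
 */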
7243 
7244 #ifdef CONFIG_MCL
/*
 * dp_service_mon_rings() - Timer callback to reap monitor rings,
 * required because PPDU-end interrupts are not available
 * @arg: SoC Handle
 *
 * Return: none
 */
7253 static void dp_service_mon_rings(void *arg)
7254 {
7255 	struct dp_soc *soc = (struct dp_soc *) arg;
7256 	int ring = 0, work_done;
7257 
7258 	work_done = dp_mon_process(soc, ring, QCA_NAPI_BUDGET);
7259 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
7260 		FL("Reaped %d descs from Monitor rings"), work_done);
7261 
7262 	qdf_timer_mod(&soc->mon_reap_timer, DP_INTR_POLL_TIMER_MS);
7263 }
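
/*
 * Illustrative only: dp_service_mon_rings() is intended to be installed as
 * a QDF timer callback and re-arms itself on every pass. A sketch of how
 * such a timer is typically set up (the actual init site lives elsewhere):
 *
 *	qdf_timer_init(soc->osdev, &soc->mon_reap_timer,
 *		       dp_service_mon_rings, (void *)soc,
 *		       QDF_TIMER_TYPE_WAKE_APPS);
 *	soc->reap_timer_init = 1;
 *	qdf_timer_mod(&soc->mon_reap_timer, DP_INTR_POLL_TIMER_MS);
 */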
7264 
7265 #ifndef REMOVE_PKT_LOG
7266 /**
7267  * dp_pkt_log_init() - API to initialize packet log
7268  * @ppdev: physical device handle
7269  * @scn: HIF context
7270  *
7271  * Return: none
7272  */
7273 void dp_pkt_log_init(struct cdp_pdev *ppdev, void *scn)
7274 {
7275 	struct dp_pdev *handle = (struct dp_pdev *)ppdev;
7276 
7277 	if (handle->pkt_log_init) {
7278 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			 "%s: Packet log already initialized", __func__);
7280 		return;
7281 	}
7282 
7283 	pktlog_sethandle(&handle->pl_dev, scn);
7284 	pktlog_set_callback_regtype(PKTLOG_LITE_CALLBACK_REGISTRATION);
7285 
7286 	if (pktlogmod_init(scn)) {
7287 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
7288 			 "%s: pktlogmod_init failed", __func__);
7289 		handle->pkt_log_init = false;
7290 	} else {
7291 		handle->pkt_log_init = true;
7292 	}
7293 }
7294 
7295 /**
7296  * dp_pkt_log_con_service() - connect packet log service
7297  * @ppdev: physical device handle
7298  * @scn: device context
7299  *
7300  * Return: none
7301  */
7302 static void dp_pkt_log_con_service(struct cdp_pdev *ppdev, void *scn)
7303 {
7304 	struct dp_pdev *pdev = (struct dp_pdev *)ppdev;
7305 
7306 	dp_pkt_log_init((struct cdp_pdev *)pdev, scn);
7307 	pktlog_htc_attach();
7308 }
7309 
7310 /**
7311  * dp_pktlogmod_exit() - API to cleanup pktlog info
7312  * @handle: Pdev handle
7313  *
7314  * Return: none
7315  */
7316 static void dp_pktlogmod_exit(struct dp_pdev *handle)
7317 {
7318 	void *scn = (void *)handle->soc->hif_handle;
7319 
7320 	if (!scn) {
7321 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
7322 			 "%s: Invalid hif(scn) handle", __func__);
7323 		return;
7324 	}
7325 
7326 	pktlogmod_exit(scn);
7327 	handle->pkt_log_init = false;
7328 }
7329 #endif
7330 #else
7331 static void dp_pktlogmod_exit(struct dp_pdev *handle) { }
7332 #endif
7333 
7334